diff --git a/.changelog/config.toml b/.changelog/config.toml index de0fee50c2f..7d52c95188f 100644 --- a/.changelog/config.toml +++ b/.changelog/config.toml @@ -1 +1,17 @@ -project_url = 'https://github.com/cometbft/cometbft' +project_url = 'https://github.com/cometbft/cometbft' + +sort_releases_by = [ + "date", + "version" +] +release_date_formats = [ + # "*December 1, 2023*" + "*%B %d, %Y*", + # "*Dec 1, 2023*" + "*%b %d, %Y*", + # "2023-12-01" (ISO format) + "%F", +] + +[change_set_sections] +sort_entries_by = "entry-text" diff --git a/.changelog/unreleased/.gitkeep b/.changelog/unreleased/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/.changelog/unreleased/breaking-changes/1010-mempool-interface.md b/.changelog/unreleased/breaking-changes/1010-mempool-interface.md deleted file mode 100644 index a81edaef562..00000000000 --- a/.changelog/unreleased/breaking-changes/1010-mempool-interface.md +++ /dev/null @@ -1,4 +0,0 @@ -`[mempool]` Change the signature of `CheckTx` in the `Mempool` interface to -`CheckTx(tx types.Tx) (*abcicli.ReqRes, error)`. Also, add new method -`SetTxRemovedCallback`. -([\#1010](https://github.com/cometbft/cometbft/issues/1010)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1278-slog.md b/.changelog/unreleased/breaking-changes/1278-slog.md new file mode 100644 index 00000000000..c3c69f126af --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1278-slog.md @@ -0,0 +1,2 @@ +- `[libs/log]` Migrate to slog + ([\#1278](https://github.com/cometbft/cometbft/issues/1278)) diff --git a/.changelog/unreleased/breaking-changes/1324-load-state-from-gen-file-api.md b/.changelog/unreleased/breaking-changes/1324-load-state-from-gen-file-api.md index 568142971c8..3c2a52438d3 100644 --- a/.changelog/unreleased/breaking-changes/1324-load-state-from-gen-file-api.md +++ b/.changelog/unreleased/breaking-changes/1324-load-state-from-gen-file-api.md @@ -1,3 +1,3 @@ -- `[node]` Go-API breaking: Change the signature of `LoadStateFromDBOrGenesisDocProvider` +- `[node]` Go-API breaking: Change the signature of `LoadStateFromDBOrGenesisDocProvider` to accept an optional operator provided hash of the genesis file ([\#1324](https://github.com/cometbft/cometbft/pull/1324)).
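The `LoadStateFromDBOrGenesisDocProvider` change above adds an optional, operator-provided hash check for the genesis file. A minimal sketch of the invariant follows; the helper name, the SHA-256 choice, and the nil-skips-check behavior are illustrative assumptions, not the confirmed node API.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"os"
)

// checkGenesisHash illustrates the idea behind #1324: when the operator
// supplies a hash, the genesis file on disk must match it; a nil hash
// skips verification. Stand-in code, not the actual node implementation.
func checkGenesisHash(genesisPath string, operatorHash []byte) error {
	if operatorHash == nil {
		return nil // no operator hash supplied; nothing to verify
	}
	data, err := os.ReadFile(genesisPath)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if !bytes.Equal(sum[:], operatorHash) {
		return fmt.Errorf("genesis hash mismatch: got %x, want %x", sum, operatorHash)
	}
	return nil
}

func main() {
	// Passing nil preserves the old behavior of loading without a check.
	if err := checkGenesisHash("genesis.json", nil); err != nil {
		fmt.Println(err)
	}
}
```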
diff --git a/.changelog/unreleased/breaking-changes/1485-blocksync-internalize.md b/.changelog/unreleased/breaking-changes/1485-blocksync-internalize.md new file mode 100644 index 00000000000..a169fac2331 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-blocksync-internalize.md @@ -0,0 +1,2 @@ +- `[blocksync]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-consensus-internalize.md b/.changelog/unreleased/breaking-changes/1485-consensus-internalize.md new file mode 100644 index 00000000000..2739ee8807a --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-consensus-internalize.md @@ -0,0 +1,2 @@ +- `[consensus]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-evidence-internalize.md b/.changelog/unreleased/breaking-changes/1485-evidence-internalize.md new file mode 100644 index 00000000000..067496e1cb2 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-evidence-internalize.md @@ -0,0 +1,2 @@ +- `[evidence]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-inspect-internalize.md b/.changelog/unreleased/breaking-changes/1485-inspect-internalize.md new file mode 100644 index 00000000000..8766b337581 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-inspect-internalize.md @@ -0,0 +1,2 @@ +- `[inspect]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-autofile-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-autofile-internalize.md new file mode 100644 index 00000000000..3fabffd6caf --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-autofile-internalize.md @@ -0,0 +1,2 @@ +- `[libs/autofile]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-bits-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-bits-internalize.md new file mode 100644 index 00000000000..4eb86f8a2ad --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-bits-internalize.md @@ -0,0 +1,2 @@ +- `[libs/bits]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-clist-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-clist-internalize.md new file mode 100644 index 00000000000..1fa31ff7399 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-clist-internalize.md @@ -0,0 +1,2 @@ +- `[libs/clist]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-cmap-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-cmap-internalize.md new file mode 100644 index 00000000000..6b4533b3a7b --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-cmap-internalize.md @@ -0,0 +1,2 @@ +- `[libs/cmap]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git 
a/.changelog/unreleased/breaking-changes/1485-libs-events-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-events-internalize.md new file mode 100644 index 00000000000..378d64198bb --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-events-internalize.md @@ -0,0 +1,2 @@ +- `[libs/events]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-fail-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-fail-internalize.md new file mode 100644 index 00000000000..7684f8007b4 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-fail-internalize.md @@ -0,0 +1,2 @@ +- `[libs/fail]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-flowrate-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-flowrate-internalize.md new file mode 100644 index 00000000000..a6f6d2701cc --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-flowrate-internalize.md @@ -0,0 +1,2 @@ +- `[libs/flowrate]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-net-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-net-internalize.md new file mode 100644 index 00000000000..5dd0922bb10 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-net-internalize.md @@ -0,0 +1,2 @@ +- `[libs/net]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-os-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-os-internalize.md new file mode 100644 index 00000000000..2f3f7adc9b7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-os-internalize.md @@ -0,0 +1,2 @@ +- `[libs/os]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-progressbar-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-progressbar-internalize.md new file mode 100644 index 00000000000..5b28d72807b --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-progressbar-internalize.md @@ -0,0 +1,2 @@ +- `[libs/progressbar]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-rand-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-rand-internalize.md new file mode 100644 index 00000000000..0eb7fa5e956 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-rand-internalize.md @@ -0,0 +1,2 @@ +- `[libs/rand]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-strings-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-strings-internalize.md new file mode 100644 index 00000000000..93e8409df29 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-strings-internalize.md @@ -0,0 +1,2 @@ +- `[libs/strings]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git 
a/.changelog/unreleased/breaking-changes/1485-libs-tempfile-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-tempfile-internalize.md new file mode 100644 index 00000000000..7d36a9d2e1f --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-tempfile-internalize.md @@ -0,0 +1,2 @@ +- `[libs/tempfile]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libs-timer-internalize.md b/.changelog/unreleased/breaking-changes/1485-libs-timer-internalize.md new file mode 100644 index 00000000000..1cdaae8c7f4 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libs-timer-internalize.md @@ -0,0 +1,2 @@ +- `[libs/timer]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1485-libsasync-internalize.md b/.changelog/unreleased/breaking-changes/1485-libsasync-internalize.md new file mode 100644 index 00000000000..460bea88794 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1485-libsasync-internalize.md @@ -0,0 +1,2 @@ +- `[libs/async]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1533-proto-renames-fix-buf-lints.md b/.changelog/unreleased/breaking-changes/1533-proto-renames-fix-buf-lints.md new file mode 100644 index 00000000000..af5d622ef78 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1533-proto-renames-fix-buf-lints.md @@ -0,0 +1,10 @@ +- `[abci]` Renamed the alias types for gRPC requests, responses, and service + instances to follow the naming changes in the proto-derived + `api/cometbft/abci/v1` package + ([\#1533](https://github.com/cometbft/cometbft/pull/1533)): + * The prefixed naming pattern `RequestFoo`, `ResponseFoo` changed to + suffixed `FooRequest`, `FooResponse`. + * Each method gets its own unique request and response type to allow for + independent evolution with backward compatibility. + * `ABCIClient` renamed to `ABCIServiceClient`. + * `ABCIServer` renamed to `ABCIServiceServer`.
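To make the #1533 rename concrete, here is a small stand-in sketch of the suffixed naming pattern; the types and method below are local illustrations, not the generated `api/cometbft/abci/v1` code.

```go
package abcidemo

import "fmt"

// CheckTxRequest/CheckTxResponse are local stand-ins for the suffixed
// naming pattern (old: RequestCheckTx/ResponseCheckTx) adopted in the
// proto-derived package.
type CheckTxRequest struct{ Tx []byte }
type CheckTxResponse struct{ Code uint32 }

// checkTx mimics one method of the renamed ABCIServiceClient: each RPC
// gets its own dedicated request/response pair so the pairs can evolve
// independently while staying backward compatible.
func checkTx(req *CheckTxRequest) *CheckTxResponse {
	if len(req.Tx) == 0 {
		return &CheckTxResponse{Code: 1} // reject empty txs in this toy example
	}
	fmt.Printf("checking %d-byte tx\n", len(req.Tx))
	return &CheckTxResponse{Code: 0}
}
```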
diff --git a/.changelog/unreleased/breaking-changes/1556-return-meta-with-load-block.md b/.changelog/unreleased/breaking-changes/1556-return-meta-with-load-block.md new file mode 100644 index 00000000000..a21aaeec1d6 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1556-return-meta-with-load-block.md @@ -0,0 +1,2 @@ +- `[store]` Make the `LoadBlock` method also return block metadata + ([\#1556](https://github.com/cometbft/cometbft/issues/1556)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1561-standalone-api-go-mod.md b/.changelog/unreleased/breaking-changes/1561-standalone-api-go-mod.md new file mode 100644 index 00000000000..0dd0f20be65 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1561-standalone-api-go-mod.md @@ -0,0 +1,2 @@ +- `[proto/api]` Made `/api` a standalone Go module with its own `go.mod` + ([\#1561](https://github.com/cometbft/cometbft/issues/1561)) diff --git a/.changelog/unreleased/breaking-changes/1621-rename-version-variables.md b/.changelog/unreleased/breaking-changes/1621-rename-version-variables.md new file mode 100644 index 00000000000..9e78b119690 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1621-rename-version-variables.md @@ -0,0 +1,2 @@ +- `[comet]` Version variables in `version/version.go` have been renamed to reflect the CometBFT rebranding. + ([\#1621](https://github.com/cometbft/cometbft/pull/1621)) diff --git a/.changelog/unreleased/breaking-changes/1972-store-prune-abci-responses.md b/.changelog/unreleased/breaking-changes/1972-store-prune-abci-responses.md new file mode 100644 index 00000000000..525cac96d4a --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1972-store-prune-abci-responses.md @@ -0,0 +1 @@ +- `[state/store]` go-API breaking change in `PruneABCIResponses`: added parameter to force compaction. ([\#1972](https://github.com/cometbft/cometbft/pull/1972)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/1972-store-pruning-api-breaking.md b/.changelog/unreleased/breaking-changes/1972-store-pruning-api-breaking.md new file mode 100644 index 00000000000..25fbb8fd6be --- /dev/null +++ b/.changelog/unreleased/breaking-changes/1972-store-pruning-api-breaking.md @@ -0,0 +1 @@ +- `[state/store]` go-API breaking change in `PruneStates`: added parameter to pass the number of pruned states and return the entries pruned in the current pruning iteration. ([\#1972](https://github.com/cometbft/cometbft/pull/1972)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/2230-rfc-106-remove-get-latest-block.md b/.changelog/unreleased/breaking-changes/2230-rfc-106-remove-get-latest-block.md new file mode 100644 index 00000000000..6ef28211995 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2230-rfc-106-remove-get-latest-block.md @@ -0,0 +1,12 @@ +- `[proto]` Remove stateful block data retrieval methods from the + data companion gRPC API as per + [RFC 106](https://github.com/cometbft/cometbft/blob/main/docs/references/rfc/rfc-106-separate-stateful-methods.md) + ([\#2230](https://github.com/cometbft/cometbft/issues/2230)): + * `GetLatest` from `cometbft.services.block.v1.BlockService`; + * `GetLatestBlockResults` from `cometbft.services.block_results.v1.BlockResultsService`.
+- `[rpc/grpc]` Remove support for stateful block data retrieval methods from the + data companion APIs as per [RFC 106](https://github.com/cometbft/cometbft/blob/main/docs/references/rfc/rfc-106-separate-stateful-methods.md) + * `GetLatestBlock` method removed from the `BlockServiceClient` interface. + * `GetLatestBlockResults` method removed from the `BlockResultServiceClient` interface. + * `GetLatest` endpoint is no longer served by `BlockServiceServer` instances. + * `GetLatestBlockResults` endpoint is no longer served by `BlockResultServiceServer` instances. diff --git a/.changelog/unreleased/breaking-changes/2246-p2p-peerset.md b/.changelog/unreleased/breaking-changes/2246-p2p-peerset.md new file mode 100644 index 00000000000..657f9124552 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2246-p2p-peerset.md @@ -0,0 +1,3 @@ +- `[p2p]` Rename `IPeerSet#List` to `Copy`, add `Random`, `ForEach` methods. + Rename `PeerSet#List` to `Copy`, add `Random`, `ForEach` methods. + ([\#2246](https://github.com/cometbft/cometbft/pull/2246)) diff --git a/.changelog/unreleased/breaking-changes/2322-removes-abci-params.md b/.changelog/unreleased/breaking-changes/2322-removes-abci-params.md new file mode 100644 index 00000000000..c985704dce9 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2322-removes-abci-params.md @@ -0,0 +1,3 @@ +- `[abci]` Deprecates `ABCIParams` field of `ConsensusParams` and + introduces a replacement in `FeatureParams` to enable Vote Extensions. + ([\#2322](https://github.com/cometbft/cometbft/pull/2322)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/2397-move-median-time.md b/.changelog/unreleased/breaking-changes/2397-move-median-time.md new file mode 100644 index 00000000000..aab1f75bf64 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2397-move-median-time.md @@ -0,0 +1,3 @@ +- `[internal/state]` Moved function `MedianTime` to package `types`, + and made it a method of `Commit` so it can be used by external packages.
+ ([\#2397](https://github.com/cometbft/cometbft/pull/2397)) diff --git a/.changelog/unreleased/breaking-changes/2692-privval-sign-bytes.md b/.changelog/unreleased/breaking-changes/2692-privval-sign-bytes.md new file mode 100644 index 00000000000..9b18f37e02f --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2692-privval-sign-bytes.md @@ -0,0 +1,2 @@ +- `[privval]` allow privval to sign arbitrary bytes + ([\#2692](https://github.com/cometbft/cometbft/pull/2692)) diff --git a/.changelog/unreleased/breaking-changes/2725-bump-go-version-1-23.md b/.changelog/unreleased/breaking-changes/2725-bump-go-version-1-23.md new file mode 100644 index 00000000000..d3d808f2aee --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2725-bump-go-version-1-23.md @@ -0,0 +1,2 @@ +- `[go/runtime]` Bump minimum Go version to v1.23 + ([\#4039](https://github.com/cometbft/cometbft/issues/4039)) diff --git a/.changelog/unreleased/breaking-changes/2786-remove-cleveldb-boltdb.md b/.changelog/unreleased/breaking-changes/2786-remove-cleveldb-boltdb.md new file mode 100644 index 00000000000..244a0e0e8b0 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2786-remove-cleveldb-boltdb.md @@ -0,0 +1 @@ +- `[config]` Remove `cleveldb` and `boltdb` ([\#2786](https://github.com/cometbft/cometbft/pull/2786)) diff --git a/.changelog/unreleased/breaking-changes/2803-mempool-NewCListMempool.md b/.changelog/unreleased/breaking-changes/2803-mempool-NewCListMempool.md new file mode 100644 index 00000000000..88748d666ba --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2803-mempool-NewCListMempool.md @@ -0,0 +1,2 @@ +- `[mempool]` Add new parameter `lanesInfo *LanesInfo` to `NewCListMempool` +([\#2803](https://github.com/cometbft/cometbft/issues/2803)) diff --git a/.changelog/unreleased/breaking-changes/2803-mempool-replace-iterator.md b/.changelog/unreleased/breaking-changes/2803-mempool-replace-iterator.md new file mode 100644 index 00000000000..f4191edce6d --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2803-mempool-replace-iterator.md @@ -0,0 +1,2 @@ +- `[mempool]` Removed `CListIterator`; use `BlockingIterator` instead + ([\#2803](https://github.com/cometbft/cometbft/issues/2803)). diff --git a/.changelog/unreleased/breaking-changes/2803-proto-mempool-lane.md b/.changelog/unreleased/breaking-changes/2803-proto-mempool-lane.md new file mode 100644 index 00000000000..b7162254548 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2803-proto-mempool-lane.md @@ -0,0 +1,3 @@ +- `[types/proto]` Extend `CheckTxResponse` with new `lane_id` field and `InfoResponse` with + `lane_priorities` and `default_lane` fields + ([#2803](https://github.com/cometbft/cometbft/issues/2803)) diff --git a/.changelog/unreleased/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md b/.changelog/unreleased/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md new file mode 100644 index 00000000000..68e9a160518 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md @@ -0,0 +1,2 @@ +- `[mempool]` Remove methods `TxsFront` and `TxsWaitChan` from `CListMempool`. They should be + replaced by the new iterators ([\#2803](https://github.com/cometbft/cometbft/issues/2803)). 
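Several mempool entries above (\#2803, and the deprecations later in this diff) replace the `TxsFront`/`TxsWaitChan` polling style with iterators. The sketch below shows the channel-driven consumption pattern using a stand-in interface; the real `BlockingIterator` method set is not confirmed here.

```go
package mempooldemo

// entry is a stand-in for a mempool entry.
type entry struct{ tx []byte }

// blockingIter is a stand-in for the blocking-iterator style that
// replaces head-of-list polling; the actual BlockingIterator API may
// differ.
type blockingIter interface {
	// WaitNextCh returns a channel that delivers the next entry once
	// one becomes available.
	WaitNextCh() <-chan entry
}

// consume processes entries as they arrive until stop is closed,
// instead of calling TxsFront in a loop and blocking on TxsWaitChan.
func consume(it blockingIter, stop <-chan struct{}) {
	for {
		select {
		case e := <-it.WaitNextCh():
			_ = e.tx // gossip or reap the transaction here
		case <-stop:
			return
		}
	}
}
```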
diff --git a/.changelog/unreleased/breaking-changes/2843-abci-types-go-api.md b/.changelog/unreleased/breaking-changes/2843-abci-types-go-api.md new file mode 100644 index 00000000000..1046aa25027 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2843-abci-types-go-api.md @@ -0,0 +1,2 @@ + - `[abci/types]` Rename `UpdateValidator` to `NewValidatorUpdate`, remove + `Ed25519ValidatorUpdate` ([\#2843](https://github.com/cometbft/cometbft/pull/2843)) diff --git a/.changelog/unreleased/breaking-changes/2843-abci-types-validator-update-pubkey.md b/.changelog/unreleased/breaking-changes/2843-abci-types-validator-update-pubkey.md new file mode 100644 index 00000000000..57bec94ce96 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2843-abci-types-validator-update-pubkey.md @@ -0,0 +1,3 @@ +- `[abci/types]` Replace `ValidatorUpdate.PubKey` with `PubKeyType` and + `PubKeyBytes` to allow applications to avoid implementing the `PubKey` interface. + ([\#2843](https://github.com/cometbft/cometbft/pull/2843)) diff --git a/.changelog/unreleased/breaking-changes/2843-proto-abci-v1-validator-update.md b/.changelog/unreleased/breaking-changes/2843-proto-abci-v1-validator-update.md new file mode 100644 index 00000000000..b276628e74e --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2843-proto-abci-v1-validator-update.md @@ -0,0 +1,2 @@ +- `[proto]` Remove `abci.ValidatorUpdate.pub_key`, add `pub_key_type` and + `pub_key_bytes` ([\#2843](https://github.com/cometbft/cometbft/pull/2843)) diff --git a/.changelog/unreleased/breaking-changes/2878-proto-privval-get-pubkey-response-public-key.md b/.changelog/unreleased/breaking-changes/2878-proto-privval-get-pubkey-response-public-key.md new file mode 100644 index 00000000000..204b2c77f5c --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2878-proto-privval-get-pubkey-response-public-key.md @@ -0,0 +1,2 @@ +- `[proto/privval]` Replace `pub_key` with `pub_key_type` and `pub_key_bytes` in + `PubKeyResponse` ([\#2878](https://github.com/cometbft/cometbft/issues/2878)) diff --git a/.changelog/unreleased/breaking-changes/2878-proto-types-validator-public-key.md b/.changelog/unreleased/breaking-changes/2878-proto-types-validator-public-key.md new file mode 100644 index 00000000000..5ee6f15b41a --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2878-proto-types-validator-public-key.md @@ -0,0 +1,2 @@ +- `[proto/types]` Deprecate `pub_key` in favor of `pub_key_type` and `pub_key_bytes` in + `Validator` ([\#2878](https://github.com/cometbft/cometbft/issues/2878)) diff --git a/.changelog/unreleased/breaking-changes/2892-remove-skip-timeout-commit.md b/.changelog/unreleased/breaking-changes/2892-remove-skip-timeout-commit.md new file mode 100644 index 00000000000..f8ef730e60f --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2892-remove-skip-timeout-commit.md @@ -0,0 +1,2 @@ +- `[config]` Remove `skip_timeout_commit` in favor of `timeout_commit=0` + ([\#2892](https://github.com/cometbft/cometbft/pull/2892)) diff --git a/.changelog/unreleased/breaking-changes/2895-merge-timeout-prevote-precommit.md b/.changelog/unreleased/breaking-changes/2895-merge-timeout-prevote-precommit.md new file mode 100644 index 00000000000..9500fd3386d --- /dev/null +++ b/.changelog/unreleased/breaking-changes/2895-merge-timeout-prevote-precommit.md @@ -0,0 +1,4 @@ +- `[config]` Merge `timeout_prevote` and `timeout_precommit`, + `timeout_prevote_delta` and `timeout_precommit_delta` into `timeout_round` + and `timeout_round_delta` respectively +
([\#2895](https://github.com/cometbft/cometbft/pull/2895)) diff --git a/.changelog/unreleased/breaking-changes/3084-deprecate-set-response-callback.md b/.changelog/unreleased/breaking-changes/3084-deprecate-set-response-callback.md new file mode 100644 index 00000000000..828ff72f0c7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3084-deprecate-set-response-callback.md @@ -0,0 +1,2 @@ +- `[abci/client]` Deprecate `SetResponseCallback(cb Callback)` in the `Client` interface as it is no +longer used. ([\#3084](https://github.com/cometbft/cometbft/issues/3084)) diff --git a/.changelog/unreleased/breaking-changes/3084-mempool-interface.md b/.changelog/unreleased/breaking-changes/3084-mempool-interface.md new file mode 100644 index 00000000000..06e192f65c7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3084-mempool-interface.md @@ -0,0 +1,3 @@ +- `[mempool]` Change the signature of `CheckTx` in the `Mempool` interface to +`CheckTx(tx types.Tx, sender p2p.ID) (*abcicli.ReqRes, error)`. +([\#1010](https://github.com/cometbft/cometbft/issues/1010), [\#3084](https://github.com/cometbft/cometbft/issues/3084)) diff --git a/.changelog/unreleased/breaking-changes/3184-remove-PeerSendBytesTotal-metric.md b/.changelog/unreleased/breaking-changes/3184-remove-PeerSendBytesTotal-metric.md new file mode 100644 index 00000000000..39ec52b3fe7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3184-remove-PeerSendBytesTotal-metric.md @@ -0,0 +1,3 @@ +- `[p2p]` Remove `p2p_peer_send_bytes_total` and `p2p_peer_receive_bytes_total` + metrics as they are costly to track, and not that informative in debugging + ([\#3184](https://github.com/cometbft/cometbft/issues/3184)) diff --git a/.changelog/unreleased/breaking-changes/3248-crypto-sha256.md b/.changelog/unreleased/breaking-changes/3248-crypto-sha256.md new file mode 100644 index 00000000000..5904407add7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3248-crypto-sha256.md @@ -0,0 +1,2 @@ +- `[crypto]` Remove unnecessary `Sha256` wrapper + ([\#3248](https://github.com/cometbft/cometbft/pull/3248)) diff --git a/.changelog/unreleased/breaking-changes/3314-mempool-preupdate.md b/.changelog/unreleased/breaking-changes/3314-mempool-preupdate.md new file mode 100644 index 00000000000..4c2528939f7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3314-mempool-preupdate.md @@ -0,0 +1,4 @@ +- `[mempool]` Add to the `Mempool` interface a new method `PreUpdate()`. This method should be + called before acquiring the mempool lock, to signal that a new update is coming. Also add to + `ErrMempoolIsFull` a new field `RecheckFull`. 
+ ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) diff --git a/.changelog/unreleased/breaking-changes/3347-remove-crypto-xchacha20-xsalsa20.md b/.changelog/unreleased/breaking-changes/3347-remove-crypto-xchacha20-xsalsa20.md new file mode 100644 index 00000000000..69358f57358 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3347-remove-crypto-xchacha20-xsalsa20.md @@ -0,0 +1,2 @@ +- `[crypto]` Remove unnecessary `xchacha20` and `xsalsa20` implementations + ([\#3347](https://github.com/cometbft/cometbft/pull/3347)) diff --git a/.changelog/unreleased/breaking-changes/3595-node-go-api-defaultnewnode.md b/.changelog/unreleased/breaking-changes/3595-node-go-api-defaultnewnode.md new file mode 100644 index 00000000000..1162b100fe6 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3595-node-go-api-defaultnewnode.md @@ -0,0 +1,3 @@ +- `[node]` Go API breaking change to `DefaultNewNode`. The function now passes +`CliParams` to the node. + ([\#3595](https://github.com/cometbft/cometbft/pull/3595)) diff --git a/.changelog/unreleased/breaking-changes/3595-node-go-api-provider.md b/.changelog/unreleased/breaking-changes/3595-node-go-api-provider.md new file mode 100644 index 00000000000..560b2d74a14 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3595-node-go-api-provider.md @@ -0,0 +1,3 @@ +- `[node]` Go API breaking change to `Provider`. The function now takes +`CliParams` as a parameter. + ([\#3595](https://github.com/cometbft/cometbft/pull/3595)) diff --git a/.changelog/unreleased/breaking-changes/3606-crypto-equals.md b/.changelog/unreleased/breaking-changes/3606-crypto-equals.md new file mode 100644 index 00000000000..63416518f79 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3606-crypto-equals.md @@ -0,0 +1,2 @@ +- `[crypto]` Remove `PubKey#Equals` and `PrivKey#Equals` + ([\#3606](https://github.com/cometbft/cometbft/pull/3606)) diff --git a/.changelog/unreleased/breaking-changes/3634-node-handshake-goapi.md b/.changelog/unreleased/breaking-changes/3634-node-handshake-goapi.md new file mode 100644 index 00000000000..4da9b1938e4 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3634-node-handshake-goapi.md @@ -0,0 +1,2 @@ +- `[consensus/replay]` `Handshake` now takes an additional parameter of type `*abci.InfoResponse` as input + ([#3634](https://github.com/cometbft/cometbft/pull/3634)) diff --git a/.changelog/unreleased/breaking-changes/3646-crypto-remove-sr25519.md b/.changelog/unreleased/breaking-changes/3646-crypto-remove-sr25519.md new file mode 100644 index 00000000000..6c0937aebf8 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3646-crypto-remove-sr25519.md @@ -0,0 +1,2 @@ +- `[crypto]` Remove Sr25519 curve + ([\#3646](https://github.com/cometbft/cometbft/pull/3646)) diff --git a/.changelog/unreleased/breaking-changes/3659-mempool-add-contains-method.md b/.changelog/unreleased/breaking-changes/3659-mempool-add-contains-method.md new file mode 100644 index 00000000000..f4a85141e73 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3659-mempool-add-contains-method.md @@ -0,0 +1,2 @@ +- `[mempool]` Add new `Contains` method to `Mempool` interface.
+ ([\#3659](https://github.com/cometbft/cometbft/pull/3659)) diff --git a/.changelog/unreleased/breaking-changes/3878-statesync-max-discovery-time.md b/.changelog/unreleased/breaking-changes/3878-statesync-max-discovery-time.md new file mode 100644 index 00000000000..30532d07aa9 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3878-statesync-max-discovery-time.md @@ -0,0 +1,6 @@ +- `[statesync]` If the node can't discover snapshots for 2 min + (`statesync.max_discovery_time`), it switches to blocksync. Remove + `statesync.discovery_time` from the configuration. If + `statesync.max_discovery_time` is zero, the node will retry + indefinitely. + [\#3878](https://github.com/cometbft/cometbft/issues/3878) diff --git a/.changelog/unreleased/breaking-changes/3980-kvstore-new-application.md b/.changelog/unreleased/breaking-changes/3980-kvstore-new-application.md new file mode 100644 index 00000000000..8f8c2219e8a --- /dev/null +++ b/.changelog/unreleased/breaking-changes/3980-kvstore-new-application.md @@ -0,0 +1,2 @@ +- `[kvstore]` Function `NewApplication` now has an extra `lanes map[string]uint32` parameter + ([\#3980](https://github.com/cometbft/cometbft/pull/3980)) diff --git a/.changelog/unreleased/breaking-changes/4040-abci-client-reqres-methods.md b/.changelog/unreleased/breaking-changes/4040-abci-client-reqres-methods.md new file mode 100644 index 00000000000..91abc64e860 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/4040-abci-client-reqres-methods.md @@ -0,0 +1,3 @@ +- `[abci/client]` `ReqRes`'s `SetCallback` method now takes a function that +returns an error, a new `Error` method is added, and the unused `GetCallback` +method is removed ([\#4040](https://github.com/cometbft/cometbft/pull/4040)). diff --git a/.changelog/unreleased/breaking-changes/4250-node-no-genesis.md b/.changelog/unreleased/breaking-changes/4250-node-no-genesis.md new file mode 100644 index 00000000000..925b331a131 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/4250-node-no-genesis.md @@ -0,0 +1 @@ +- `[node]` Don't store a pointer to a `types.GenesisDoc` after the node is running ([\#4250](https://github.com/cometbft/cometbft/pull/4250)) diff --git a/.changelog/unreleased/breaking-changes/4299-reduce-genesis-chunks-size.md b/.changelog/unreleased/breaking-changes/4299-reduce-genesis-chunks-size.md new file mode 100644 index 00000000000..832fb755c3c --- /dev/null +++ b/.changelog/unreleased/breaking-changes/4299-reduce-genesis-chunks-size.md @@ -0,0 +1,2 @@ +- reduced the size of the chunks into which big genesis files are split to 2MB + (down from 16MB) ([\#4299](https://github.com/cometbft/cometbft/pull/4299)) \ No newline at end of file diff --git a/.changelog/unreleased/breaking-changes/4301-p2p-extract-tcp-transport.md b/.changelog/unreleased/breaking-changes/4301-p2p-extract-tcp-transport.md new file mode 100644 index 00000000000..b051dd853cb --- /dev/null +++ b/.changelog/unreleased/breaking-changes/4301-p2p-extract-tcp-transport.md @@ -0,0 +1,6 @@ +- `[p2p]` Extracted TCP transport into its own package - `transport/tcp` + * Updated `Transport` interface; + * Moved `NetAddress`, `NodeInfo` and `NodeKey` into separate packages - + `netaddress`, `nodeinfo`, `nodekey` respectively; + * Internalized `fuzz` package.
+ [\#4301](https://github.com/cometbft/cometbft/issues/4301) diff --git a/.changelog/unreleased/breaking-changes/4340-lazy-hash.md b/.changelog/unreleased/breaking-changes/4340-lazy-hash.md new file mode 100644 index 00000000000..1d1dc8aa985 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/4340-lazy-hash.md @@ -0,0 +1,6 @@ +- `[log]` LazyBlockHash -> LazyHash + * LazyBlockHash replaced with the more generic LazyHash, which lazily evaluates + a tx or block hash when the Stringer interface is invoked. Good for use + with debug statements, so the item is only hashed when it is printed + * tx `Hash` return type changed to HexBytes to fit this interface + [\#4340](https://github.com/cometbft/cometbft/pull/4340) diff --git a/.changelog/unreleased/breaking-changes/495-proto-version-packages.md b/.changelog/unreleased/breaking-changes/495-proto-version-packages.md new file mode 100644 index 00000000000..fca1bb2b73f --- /dev/null +++ b/.changelog/unreleased/breaking-changes/495-proto-version-packages.md @@ -0,0 +1,10 @@ +- `[proto]` Renamed the packages from `tendermint.*` to `cometbft.*` + and introduced versioned packages to distinguish between proto definitions + released in `0.34.x`, `0.37.x`, `0.38.x`, and `1.x` versions. + Prior to the 1.0 release, the versioned packages are suffixed with + `.v1beta1`, `.v1beta2`, and so on; all definitions describing the protocols + as per the 1.0.0 release are in packages suffixed with `.v1`. + Relocated generated Go code into a new `api` folder and changed the import + paths accordingly. + ([\#495](https://github.com/cometbft/cometbft/pull/495), + [\#1504](https://github.com/cometbft/cometbft/issues/1504)) diff --git a/.changelog/unreleased/breaking-changes/736+-proto-renaming.md b/.changelog/unreleased/breaking-changes/736+-proto-renaming.md new file mode 100644 index 00000000000..782acf6ebe7 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/736+-proto-renaming.md @@ -0,0 +1,28 @@ +- `[proto]` The names in the `cometbft.abci.v1` versioned proto package + are changed to satisfy the + [buf guidelines](https://buf.build/docs/best-practices/style-guide/) + ([#736](https://github.com/cometbft/cometbft/issues/736), + [#1504](https://github.com/cometbft/cometbft/issues/1504), + [#1530](https://github.com/cometbft/cometbft/issues/1530)): + * Names of request and response types used in gRPC changed by making + `Request`/`Response` the suffix instead of the prefix, e.g. + `RequestCheckTx` ⭢ `CheckTxRequest`. + * The `Request` and `Response` multiplex messages are redefined accordingly. + * `CheckTxType` values renamed with the `CHECK_TX_TYPE_` prefix. + * `MisbehaviorType` values renamed with the `MISBEHAVIOR_TYPE_` prefix. + * `Result` enum formerly nested in `ResponseOfferSnapshot` replaced with the package-level + `OfferSnapshotResult`, its values named with the + `OFFER_SNAPSHOT_RESULT_` prefix. + * `Result` enum formerly nested in `ResponseApplySnapshotChunk` replaced with the package-level + `ApplySnapshotChunkResult`, its values named with the + `APPLY_SNAPSHOT_CHUNK_RESULT_` prefix. + * `Status` enum formerly nested in `ResponseProcessProposal` replaced with the package-level + `ProcessProposalStatus`, its values named with the + `PROCESS_PROPOSAL_STATUS_` prefix. + * `Status` enum formerly nested in `ResponseVerifyVoteExtension` replaced with the package-level + `VerifyVoteExtensionStatus`, its values named with the + `VERIFY_VOTE_EXTENSION_STATUS_` prefix. + * New definition of `Misbehavior` using the changed `MisbehaviorType`.
+ * The gRPC service is renamed `ABCIService` and defined using the types listed above. +- `[proto]` In the `cometbft.state.v1` package, the definition for `ABCIResponsesInfo` + is changed, renaming `response_finalize_block` field to `finalize_block`. diff --git a/.changelog/unreleased/breaking-changes/736-proto-enum-rename.md b/.changelog/unreleased/breaking-changes/736-proto-enum-rename.md new file mode 100644 index 00000000000..892902110bd --- /dev/null +++ b/.changelog/unreleased/breaking-changes/736-proto-enum-rename.md @@ -0,0 +1,7 @@ +- `[abci]` Changed the proto-derived enum type and constant aliases to the + buf-recommended naming conventions adopted in the `abci/v1` proto package. + For example, `ResponseProcessProposal_ACCEPT` is renamed to `PROCESS_PROPOSAL_STATUS_ACCEPT` + ([\#736](https://github.com/cometbft/cometbft/issues/736)). +- `[abci]` The `Type` enum field is now required to be set to a value other + than the default `CHECK_TX_TYPE_UNKNOWN` for a valid `CheckTxRequest` + ([\#736](https://github.com/cometbft/cometbft/issues/736)). diff --git a/.changelog/unreleased/breaking-changes/857-handshake-api-ctx.md b/.changelog/unreleased/breaking-changes/857-handshake-api-ctx.md index 6fe01558506..99740fcf670 100644 --- a/.changelog/unreleased/breaking-changes/857-handshake-api-ctx.md +++ b/.changelog/unreleased/breaking-changes/857-handshake-api-ctx.md @@ -1,2 +1,2 @@ -- `[consensus]` `Handshaker.Handshake` now requires `context.Context` ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) -- `[node]` `NewNode` now requires `context.Context` as the first parameter ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) +- `[consensus]` `Handshaker.Handshake` now requires `context.Context` ([\#857](https://github.com/cometbft/cometbft/pull/857)) +- `[node]` `NewNode` now requires `context.Context` as the first parameter ([\#857](https://github.com/cometbft/cometbft/pull/857)) diff --git a/.changelog/unreleased/bug-fixes/0000-asa-2024-001-fix-validate.md b/.changelog/unreleased/bug-fixes/0000-asa-2024-001-fix-validate.md new file mode 100644 index 00000000000..78ac6080322 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/0000-asa-2024-001-fix-validate.md @@ -0,0 +1,2 @@ +- `[consensus]` Fix for Security Advisory `ASA-2024-001`: Validation of `VoteExtensionsEnableHeight` can cause chain halt + ([ASA-2024-001](https://github.com/cometbft/cometbft/security/advisories/GHSA-qr8r-m495-7hc4)) diff --git a/.changelog/unreleased/bug-fixes/0000-asa-2024-008.md b/.changelog/unreleased/bug-fixes/0000-asa-2024-008.md new file mode 100644 index 00000000000..11e6ae2480e --- /dev/null +++ b/.changelog/unreleased/bug-fixes/0000-asa-2024-008.md @@ -0,0 +1,4 @@ +- `[blocksync]` Added peer banning + ([\#ABC-0013](https://github.com/cometbft/cometbft/security/advisories/GHSA-hg58-rf2h-6rr7)) +- `[blockstore]` Send correct error message when vote extensions do not align with received packet + ([\#ABC-0014](https://github.com/cometbft/cometbft/security/advisories/GHSA-hg58-rf2h-6rr7)) diff --git a/.changelog/unreleased/bug-fixes/0016-abc-light-proposer-priorities.md b/.changelog/unreleased/bug-fixes/0016-abc-light-proposer-priorities.md new file mode 100644 index 00000000000..9b2cb2c0c7b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/0016-abc-light-proposer-priorities.md @@ -0,0 +1,2 @@ +- `[light]` Cross-check proposer priorities in retrieved validator sets + ([\#ABC-0016](https://github.com/cometbft/cometbft/security/advisories/GHSA-g5xx-c4hv-9ccc)) 
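The ASA-2024-001 fix above concerns validation of `VoteExtensionsEnableHeight`. The sketch below illustrates the class of invariant the advisory describes, namely that an update must target a future height and cannot be withdrawn once active; it mirrors the advisory's description only and is not the patched CometBFT code.

```go
package consensusdemo

import "fmt"

// validateEnableHeight sketches the kind of check behind ASA-2024-001.
// current is the latest committed height, old the currently configured
// VoteExtensionsEnableHeight, and updated the proposed new value
// (0 means disabled). Illustration only.
func validateEnableHeight(current, old, updated int64) error {
	if updated == old {
		return nil // no change requested
	}
	if old != 0 && old <= current {
		return fmt.Errorf("vote extensions already active since height %d; cannot be changed", old)
	}
	if updated != 0 && updated <= current {
		return fmt.Errorf("enable height %d must be greater than current height %d", updated, current)
	}
	return nil
}
```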
diff --git a/.changelog/unreleased/bug-fixes/0021-abc-panic-precommit-validator-index.md b/.changelog/unreleased/bug-fixes/0021-abc-panic-precommit-validator-index.md new file mode 100644 index 00000000000..bc0147562c2 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/0021-abc-panic-precommit-validator-index.md @@ -0,0 +1,3 @@ +- `[consensus]` Do not panic if the validator index of a `Vote` message is out + of bounds, when vote extensions are enabled + ([\#ABC-0021](https://github.com/cometbft/cometbft/security/advisories/GHSA-p7mv-53f2-4cwj)) diff --git a/.changelog/unreleased/bug-fixes/1175-consensus-prevote-improve-logic.md b/.changelog/unreleased/bug-fixes/1175-consensus-prevote-improve-logic.md index b22d5510fce..67afa08deed 100644 --- a/.changelog/unreleased/bug-fixes/1175-consensus-prevote-improve-logic.md +++ b/.changelog/unreleased/bug-fixes/1175-consensus-prevote-improve-logic.md @@ -1,3 +1,3 @@ -- `[consensus]` \#1203 consensus now prevotes `nil` when the proposed value - does not match the value the local validator has locked on - ([\#1203](https://github.com/cometbft/cometbft/pull/1203)) \ No newline at end of file +- `[consensus]` Consensus now prevotes `nil` when the proposed value does not + match the value the local validator has locked on + ([\#1203](https://github.com/cometbft/cometbft/pull/1203)) diff --git a/.changelog/unreleased/bug-fixes/1175-consensus-remove-unlock-behavior.md b/.changelog/unreleased/bug-fixes/1175-consensus-remove-unlock-behavior.md index 9b470531545..644fa34ee35 100644 --- a/.changelog/unreleased/bug-fixes/1175-consensus-remove-unlock-behavior.md +++ b/.changelog/unreleased/bug-fixes/1175-consensus-remove-unlock-behavior.md @@ -1,2 +1,2 @@ -- `[consensus]` \#1175 remove logic to unlock block on +2/3 prevote for nil +- `[consensus]` Remove logic to unlock block on +2/3 prevote for nil ([\#1175](https://github.com/cometbft/cometbft/pull/1175): @BrendanChou) diff --git a/.changelog/unreleased/bug-fixes/1616-pruning-dont-prune-state-when-no-blocks-pruned.md b/.changelog/unreleased/bug-fixes/1616-pruning-dont-prune-state-when-no-blocks-pruned.md new file mode 100644 index 00000000000..81de0a48e62 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1616-pruning-dont-prune-state-when-no-blocks-pruned.md @@ -0,0 +1,2 @@ +- `[state/pruning]` When no blocks are pruned, do not attempt to prune statestore + ([\#1616](https://github.com/cometbft/cometbft/pull/1616)) diff --git a/.changelog/unreleased/bug-fixes/1687-consensus-fix-block-validation.md b/.changelog/unreleased/bug-fixes/1687-consensus-fix-block-validation.md new file mode 100644 index 00000000000..778f0b538b4 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1687-consensus-fix-block-validation.md @@ -0,0 +1,3 @@ +- `[mempool]` The calculation method of tx size returned by calling proxyapp should be consistent with that of mempool + ([\#1687](https://github.com/cometbft/cometbft/pull/1687)) + diff --git a/.changelog/unreleased/bug-fixes/1749-light-client-attack-verify-all-sigs.md b/.changelog/unreleased/bug-fixes/1749-light-client-attack-verify-all-sigs.md new file mode 100644 index 00000000000..1115c4d195a --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1749-light-client-attack-verify-all-sigs.md @@ -0,0 +1,4 @@ +- `[evidence]` When `VerifyCommitLight` & `VerifyCommitLightTrusting` are called as part + of evidence verification, all signatures present in the evidence must be verified + ([\#1749](https://github.com/cometbft/cometbft/pull/1749)) + diff --git 
a/.changelog/unreleased/bug-fixes/1825-false-on-nil-key.md b/.changelog/unreleased/bug-fixes/1825-false-on-nil-key.md new file mode 100644 index 00000000000..dcd466a39e7 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1825-false-on-nil-key.md @@ -0,0 +1,3 @@ +- `[crypto]` `SupportsBatchVerifier` returns false + if public key is nil instead of dereferencing nil. + ([\#1825](https://github.com/cometbft/cometbft/pull/1825)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/1827-fix-recheck-async.md b/.changelog/unreleased/bug-fixes/1827-fix-recheck-async.md new file mode 100644 index 00000000000..de8fa6157c8 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1827-fix-recheck-async.md @@ -0,0 +1,2 @@ +- `[mempool]` Fix data race when rechecking with async ABCI client + ([\#1827](https://github.com/cometbft/cometbft/issues/1827)) diff --git a/.changelog/unreleased/bug-fixes/1879-blocksync-wait-for-pool-routine.md b/.changelog/unreleased/bug-fixes/1879-blocksync-wait-for-pool-routine.md new file mode 100644 index 00000000000..4c6e488ac1a --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1879-blocksync-wait-for-pool-routine.md @@ -0,0 +1,2 @@ +- `[blocksync]` Wait for `poolRoutine` to stop in `(*Reactor).OnStop` + ([\#1879](https://github.com/cometbft/cometbft/pull/1879)) diff --git a/.changelog/unreleased/bug-fixes/2010-p2p-pex-shutdown.md b/.changelog/unreleased/bug-fixes/2010-p2p-pex-shutdown.md new file mode 100644 index 00000000000..6d76844cc4e --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2010-p2p-pex-shutdown.md @@ -0,0 +1 @@ +- `[p2p/pex]` Gracefully shutdown Reactor ([\#2010](https://github.com/cometbft/cometbft/pull/2010)) diff --git a/.changelog/unreleased/bug-fixes/2047-privval-retry-accepting-conn.md b/.changelog/unreleased/bug-fixes/2047-privval-retry-accepting-conn.md new file mode 100644 index 00000000000..45260721c8b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2047-privval-retry-accepting-conn.md @@ -0,0 +1 @@ +- `[privval]` Retry accepting a connection ([\#2047](https://github.com/cometbft/cometbft/pull/2047)) diff --git a/.changelog/unreleased/bug-fixes/2136-fix-state-rollback.md b/.changelog/unreleased/bug-fixes/2136-fix-state-rollback.md new file mode 100644 index 00000000000..55cbfe51ed4 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2136-fix-state-rollback.md @@ -0,0 +1,2 @@ +- `[state]` Fix rollback to a specific height + ([\#2136](https://github.com/cometbft/cometbft/pull/2136)) diff --git a/.changelog/unreleased/bug-fixes/2225-fix-checktx-request-returns-error.md b/.changelog/unreleased/bug-fixes/2225-fix-checktx-request-returns-error.md new file mode 100644 index 00000000000..c96a9c37705 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2225-fix-checktx-request-returns-error.md @@ -0,0 +1,2 @@ +- `[mempool]` Panic when a CheckTx request to the app returns an error + ([\#2225](https://github.com/cometbft/cometbft/pull/2225)) diff --git a/.changelog/unreleased/bug-fixes/2443-mempool-fix-flush-mutex.md b/.changelog/unreleased/bug-fixes/2443-mempool-fix-flush-mutex.md new file mode 100644 index 00000000000..4e0626d16b4 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2443-mempool-fix-flush-mutex.md @@ -0,0 +1,2 @@ +- `[mempool]` Fix mutex in `CListMempool.Flush` method, by changing it from read-lock to write-lock + ([\#2443](https://github.com/cometbft/cometbft/issues/2443)). 
\ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2774-bitarray-unmarshal-json.md b/.changelog/unreleased/bug-fixes/2774-bitarray-unmarshal-json.md new file mode 100644 index 00000000000..9b06e22f868 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2774-bitarray-unmarshal-json.md @@ -0,0 +1,2 @@ +- `[bits]` prevent `BitArray.UnmarshalJSON` from crashing on 0 bits + ([\#2774](https://github.com/cometbft/cometbft/pull/2774)) diff --git a/.changelog/unreleased/bug-fixes/3002-invalid-txs-results.md b/.changelog/unreleased/bug-fixes/3002-invalid-txs-results.md new file mode 100644 index 00000000000..67742d67ec0 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3002-invalid-txs-results.md @@ -0,0 +1,3 @@ +- `[rpc]` Fix an issue where a legacy ABCI response, created on `v0.37` or before, is not returned properly in `v0.38` and up + on the `/block_results` RPC endpoint. + ([\#3002](https://github.com/cometbft/cometbft/issues/3002)) diff --git a/.changelog/unreleased/bug-fixes/3092-consensus-timeout-ticker-data-race.md b/.changelog/unreleased/bug-fixes/3092-consensus-timeout-ticker-data-race.md new file mode 100644 index 00000000000..b27dddc4f02 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3092-consensus-timeout-ticker-data-race.md @@ -0,0 +1,2 @@ +- `[consensus]` Fix a race condition in the consensus timeout ticker. The race is caused by two timeouts being scheduled at the same time. + ([\#3092](https://github.com/cometbft/cometbft/pull/3092)) diff --git a/.changelog/unreleased/bug-fixes/3145-fix-panic-when-log-with-nil-val.md b/.changelog/unreleased/bug-fixes/3145-fix-panic-when-log-with-nil-val.md new file mode 100644 index 00000000000..0b3a8260b98 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3145-fix-panic-when-log-with-nil-val.md @@ -0,0 +1,3 @@ +- `[log]` Fix panic when logging a nil value that is a pointer implementing the + fmt.Stringer interface + ([\#3145](https://github.com/cometbft/cometbft/pull/3145)) diff --git a/.changelog/unreleased/bug-fixes/3195-batch-verification.md b/.changelog/unreleased/bug-fixes/3195-batch-verification.md new file mode 100644 index 00000000000..2e4104af394 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3195-batch-verification.md @@ -0,0 +1,2 @@ +- `[types]` Do not batch verify a commit if the validator set keys have different + types.
([\#3195](https://github.com/cometbft/cometbft/issues/3195)) diff --git a/.changelog/unreleased/bug-fixes/3352-nil-pointer-tx-search.md b/.changelog/unreleased/bug-fixes/3352-nil-pointer-tx-search.md new file mode 100644 index 00000000000..1d568e52b79 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3352-nil-pointer-tx-search.md @@ -0,0 +1,2 @@ +- `[rpc]` Fix nil pointer error in `/tx` and `/tx_search` when block is + absent ([\#3352](https://github.com/cometbft/cometbft/issues/3352)) diff --git a/.changelog/unreleased/bug-fixes/3406-blocksync-dont-stall-if-blocking-chain.md b/.changelog/unreleased/bug-fixes/3406-blocksync-dont-stall-if-blocking-chain.md new file mode 100644 index 00000000000..909e6a56039 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3406-blocksync-dont-stall-if-blocking-chain.md @@ -0,0 +1,3 @@ +- `[blocksync]` Do not stay in blocksync if the node's validator voting power + is high enough to block the chain while it is not online + ([\#3406](https://github.com/cometbft/cometbft/pull/3406)) diff --git a/.changelog/unreleased/bug-fixes/3497-fix-p2p-external-address.md b/.changelog/unreleased/bug-fixes/3497-fix-p2p-external-address.md new file mode 100644 index 00000000000..4b9b6464f47 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3497-fix-p2p-external-address.md @@ -0,0 +1,2 @@ +- `[cmd]` Align `p2p.external_address` argument to set the node P2P external address. + ([\#3460](https://github.com/cometbft/cometbft/issues/3460)) diff --git a/.changelog/unreleased/bug-fixes/3525-3514-genesis-validation-pub-key.md b/.changelog/unreleased/bug-fixes/3525-3514-genesis-validation-pub-key.md new file mode 100644 index 00000000000..88ceeaba6b9 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3525-3514-genesis-validation-pub-key.md @@ -0,0 +1,3 @@ +- `[types]` genesis doc validation verifies that all validators + use PubKey types supported by the chain being started + ([\#3525](https://github.com/cometbft/cometbft/pull/3525)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3528-evidence-missing-json-tags.md b/.changelog/unreleased/bug-fixes/3528-evidence-missing-json-tags.md new file mode 100644 index 00000000000..df74e03af20 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3528-evidence-missing-json-tags.md @@ -0,0 +1,2 @@ +- `[types]` Added missing JSON tags to `DuplicateVoteEvidence` and `LightClientAttackEvidence` + types ([\#3528](https://github.com/cometbft/cometbft/issues/3528)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3541-copy-iter-key-value.md b/.changelog/unreleased/bug-fixes/3541-copy-iter-key-value.md new file mode 100644 index 00000000000..e44439acdc8 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3541-copy-iter-key-value.md @@ -0,0 +1,3 @@ +- code that modifies or stores references to the return value + of the Iterator Key() and Value() APIs now creates a copy of it + ([\#3541](https://github.com/cometbft/cometbft/pull/3541)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3544-indexer-break-statement.md b/.changelog/unreleased/bug-fixes/3544-indexer-break-statement.md new file mode 100644 index 00000000000..ef83bdd514e --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3544-indexer-break-statement.md @@ -0,0 +1,3 @@ +- `[indexer]` Fixed ineffective select break statements; they now + point to their enclosing for loop label to exit + ([\#3544](https://github.com/cometbft/cometbft/issues/3544)) \ No newline at end of file diff --git
a/.changelog/unreleased/bug-fixes/3642-re-enable-vote-extension-checks.md b/.changelog/unreleased/bug-fixes/3642-re-enable-vote-extension-checks.md new file mode 100644 index 00000000000..4c2f2fd7581 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3642-re-enable-vote-extension-checks.md @@ -0,0 +1,3 @@ +- `[privval]` Re-enable some sanity checks related to vote extensions + when signing a vote + ([\#3642](https://github.com/cometbft/cometbft/issues/3642)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3693-add-func-opt-blocksresultsservice.md b/.changelog/unreleased/bug-fixes/3693-add-func-opt-blocksresultsservice.md new file mode 100644 index 00000000000..7c3c79b3947 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3693-add-func-opt-blocksresultsservice.md @@ -0,0 +1,2 @@ +- added missing optional function for BlockResultsService in gRPC client + ([\#3693](https://github.com/cometbft/cometbft/pull/3693)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3694-mempool-update-variables-atomically.md b/.changelog/unreleased/bug-fixes/3694-mempool-update-variables-atomically.md new file mode 100644 index 00000000000..d6c9c6b25be --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3694-mempool-update-variables-atomically.md @@ -0,0 +1,3 @@ +- `[mempool]` Fix race condition when accessing entries by updating variables in + `CListMempool` atomically. + ([\#3694](https://github.com/cometbft/cometbft/issues/3694)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3828-privval-drop-duplicate-listen.md b/.changelog/unreleased/bug-fixes/3828-privval-drop-duplicate-listen.md new file mode 100644 index 00000000000..3c3ad1f4b66 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3828-privval-drop-duplicate-listen.md @@ -0,0 +1 @@ +- `[privval]` Ignore duplicate privval listen when already connected ([\#3828](https://github.com/cometbft/cometbft/issues/3828)) diff --git a/.changelog/unreleased/bug-fixes/3992-light-panic.md b/.changelog/unreleased/bug-fixes/3992-light-panic.md new file mode 100644 index 00000000000..6745b8c992c --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3992-light-panic.md @@ -0,0 +1,4 @@ +- `[light]` Return and log an error when starting from an empty trusted store. + This can happen when using the `light` CometBFT command-line command with + a fresh trusted store and no trusted height and hash are provided. + ([\#3992](https://github.com/cometbft/cometbft/issues/3992)) diff --git a/.changelog/unreleased/bug-fixes/4019-mempool-metric-rejected-txs.md b/.changelog/unreleased/bug-fixes/4019-mempool-metric-rejected-txs.md new file mode 100644 index 00000000000..aba8c6e663c --- /dev/null +++ b/.changelog/unreleased/bug-fixes/4019-mempool-metric-rejected-txs.md @@ -0,0 +1,2 @@ +- `[metrics]` Use the previously unused `rejected_txs` metric in the mempool + ([\#4019](https://github.com/cometbft/cometbft/pull/4019)) diff --git a/.changelog/unreleased/bug-fixes/4521-fixes-breaking-mock.md b/.changelog/unreleased/bug-fixes/4521-fixes-breaking-mock.md new file mode 100644 index 00000000000..c3d48c3d4fc --- /dev/null +++ b/.changelog/unreleased/bug-fixes/4521-fixes-breaking-mock.md @@ -0,0 +1,3 @@ +- `[mocks]` Mockery `v2.49.0` broke the mocks. We had to add a `.mockery.yaml` to +properly handle this change.
+ ([\#4521](https://github.com/cometbft/cometbft/pull/4521)) diff --git a/.changelog/unreleased/bug-fixes/486-p2p-max-outbound.md b/.changelog/unreleased/bug-fixes/486-p2p-max-outbound.md new file mode 100644 index 00000000000..f6507ed9671 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/486-p2p-max-outbound.md @@ -0,0 +1,3 @@ +- `[p2p]` Node respects configured `max_num_outbound_peers` limit when dialing + peers provided by a seed node + ([\#486](https://github.com/cometbft/cometbft/issues/486)) diff --git a/.changelog/unreleased/bug-fixes/642-clist-mempool-data-races.md b/.changelog/unreleased/bug-fixes/642-clist-mempool-data-races.md new file mode 100644 index 00000000000..037bbc9550f --- /dev/null +++ b/.changelog/unreleased/bug-fixes/642-clist-mempool-data-races.md @@ -0,0 +1,2 @@ +- `[mempool]` Fix data races in `CListMempool` by making atomic the types of `height`, `txsBytes`, and + `notifiedTxsAvailable`. ([\#642](https://github.com/cometbft/cometbft/pull/642)) diff --git a/.changelog/unreleased/dependencies/1725-cometbft-db.md b/.changelog/unreleased/dependencies/1725-cometbft-db.md new file mode 100644 index 00000000000..ed74be7ce1d --- /dev/null +++ b/.changelog/unreleased/dependencies/1725-cometbft-db.md @@ -0,0 +1,2 @@ +- Bump cometbft-db to v0.9.0, providing support for RocksDB v8 + ([\#1725](https://github.com/cometbft/cometbft/pull/1725)) \ No newline at end of file diff --git a/.changelog/unreleased/dependencies/3191-api-v1-rc1.md b/.changelog/unreleased/dependencies/3191-api-v1-rc1.md new file mode 100644 index 00000000000..4ca82ea6d1b --- /dev/null +++ b/.changelog/unreleased/dependencies/3191-api-v1-rc1.md @@ -0,0 +1,2 @@ +- Bump api to v1.0.0-rc.1 for v1.0.0 Release Candidate 1 + ([\#3191](https://github.com/cometbft/cometbft/pull/3191)) diff --git a/.changelog/unreleased/dependencies/3527-update-go-version.md b/.changelog/unreleased/dependencies/3527-update-go-version.md new file mode 100644 index 00000000000..5a2ab4c8811 --- /dev/null +++ b/.changelog/unreleased/dependencies/3527-update-go-version.md @@ -0,0 +1,2 @@ +- updated Go version to 1.22.5 + ([\#3527](https://github.com/cometbft/cometbft/pull/3527)) \ No newline at end of file diff --git a/.changelog/unreleased/dependencies/3596-update-cometbft-version.md b/.changelog/unreleased/dependencies/3596-update-cometbft-version.md new file mode 100644 index 00000000000..c5e8733d3dc --- /dev/null +++ b/.changelog/unreleased/dependencies/3596-update-cometbft-version.md @@ -0,0 +1,2 @@ +- updated cometbft-db dependency to v0.13.0 + ([\#3596](https://github.com/cometbft/cometbft/pull/3596)) \ No newline at end of file diff --git a/.changelog/unreleased/dependencies/3661-bump-cometbft-db-version.md b/.changelog/unreleased/dependencies/3661-bump-cometbft-db-version.md new file mode 100644 index 00000000000..973f0e784fe --- /dev/null +++ b/.changelog/unreleased/dependencies/3661-bump-cometbft-db-version.md @@ -0,0 +1,2 @@ +- reinstate BoltDB and ClevelDB as backend DBs, bumped cometbft-db version to + v0.14.0 ([\#3661](https://github.com/cometbft/cometbft/pull/3661)) \ No newline at end of file diff --git a/.changelog/unreleased/dependencies/3728-update-btcec-v2.md b/.changelog/unreleased/dependencies/3728-update-btcec-v2.md new file mode 100644 index 00000000000..361592b2c9c --- /dev/null +++ b/.changelog/unreleased/dependencies/3728-update-btcec-v2.md @@ -0,0 +1,4 @@ +- `[crypto/secp256k1]` Adjust to breaking interface changes in + `btcec/v2` latest release, while avoiding breaking changes to + local CometBFT functions 
+ ([\#3728](https://github.com/cometbft/cometbft/pull/3728)) diff --git a/.changelog/unreleased/deprecations/3303-mempool-deprecate-txsfront-txswaitchan.md b/.changelog/unreleased/deprecations/3303-mempool-deprecate-txsfront-txswaitchan.md new file mode 100644 index 00000000000..022f90fd68f --- /dev/null +++ b/.changelog/unreleased/deprecations/3303-mempool-deprecate-txsfront-txswaitchan.md @@ -0,0 +1,2 @@ +- `[mempool]` Mark methods `TxsFront` and `TxsWaitChan` in `CListMempool` as deprecated. They should + be replaced by the new `CListIterator` ([\#3303](https://github.com/cometbft/cometbft/pull/3303)). diff --git a/.changelog/unreleased/deprecations/3506-mempool-metrics-size.md b/.changelog/unreleased/deprecations/3506-mempool-metrics-size.md new file mode 100644 index 00000000000..33d0a6332b5 --- /dev/null +++ b/.changelog/unreleased/deprecations/3506-mempool-metrics-size.md @@ -0,0 +1,4 @@ +- `[mempool/metrics]` Mark metrics `mempool_size` and `mempool_size_bytes` as + deprecated, as they can now be obtained, respectively, as the sum of + `mempool_lane_size` and `mempool_lane_bytes` + ([\#3506](https://github.com/cometbft/cometbft/issues/3506)). diff --git a/.changelog/unreleased/deprecations/3873-mempool-unused-txs.md b/.changelog/unreleased/deprecations/3873-mempool-unused-txs.md new file mode 100644 index 00000000000..178ecd25bd7 --- /dev/null +++ b/.changelog/unreleased/deprecations/3873-mempool-unused-txs.md @@ -0,0 +1,2 @@ +- `[mempool]` Mark unused `Txs` methods `Len`, `Swap`, and `Less` as deprecated + ([\#3873](https://github.com/cometbft/cometbft/pull/3873)). diff --git a/.changelog/unreleased/features/1097-pruning-service.md b/.changelog/unreleased/features/1097-pruning-service.md index 3bcbfac3c33..caaf9dc05ea 100644 --- a/.changelog/unreleased/features/1097-pruning-service.md +++ b/.changelog/unreleased/features/1097-pruning-service.md @@ -1,23 +1,23 @@ -- `[proto]` Add definitions and generated code for [ADR-101] `PruningService` - in the `tendermint.services.pruning.v1` proto package - ([\#1097](https://github.com/cometbft/cometbft/issues/1097)). +- `[proto]` Add definitions and generated code for + [ADR-101](./docs/architecture/adr-101-data-companion-pull-api.md) + `PruningService` in the `cometbft.services.pruning.v1` proto package + ([\#1097](https://github.com/cometbft/cometbft/issues/1097)) - `[rpc/grpc]` Add privileged gRPC server and client facilities, in `server/privileged` and `client/privileged` packages respectively, to enable a separate API server within the node which serves trusted clients without authentication and should never be exposed to public internet - ([\#1097](https://github.com/cometbft/cometbft/issues/1097)). -- `[rpc/grpc]` Add a pruning service adding on the privileged gRPC - server API to give an [ADR-101] data companion control over block data - retained by the node. The `WithPruningService` option method in - `server/privileged` is provided to configure the pruning service - ([\#1097](https://github.com/cometbft/cometbft/issues/1097)). + ([\#1097](https://github.com/cometbft/cometbft/issues/1097)) +- `[rpc/grpc]` Add a pruning service on top of the privileged gRPC server API to + give an [ADR-101](./docs/architecture/adr-101-data-companion-pull-api.md) data + companion control over block data retained by the node.
The + `WithPruningService` option method in `server/privileged` is provided to + configure the pruning service + ([\#1097](https://github.com/cometbft/cometbft/issues/1097)) - `[rpc/grpc]` Add `PruningServiceClient` interface for the gRPC client in `client/privileged` along with a configuration option to enable it - ([\#1097](https://github.com/cometbft/cometbft/issues/1097)). + ([\#1097](https://github.com/cometbft/cometbft/issues/1097)) - `[config]` Add `[grpc.privileged]` section to configure the privileged gRPC server for the node, and `[grpc.privileged.pruning_service]` section to control the pruning service - ([\#1097](https://github.com/cometbft/cometbft/issues/1097)). - -[ADR-101]: https://github.com/cometbft/cometbft/pull/82 + ([\#1097](https://github.com/cometbft/cometbft/issues/1097)) diff --git a/.changelog/unreleased/features/1234-pruning-metrics.md b/.changelog/unreleased/features/1234-pruning-metrics.md index d7d9b8952e5..0afcd759550 100644 --- a/.changelog/unreleased/features/1234-pruning-metrics.md +++ b/.changelog/unreleased/features/1234-pruning-metrics.md @@ -1,2 +1,2 @@ -- `[metrics]` Add metrics to monitor pruning and current available data in stores: `PruningServiceBlockRetainHeight`, `PruningServiceBlockResultsRetainHeight`, `ApplicationBlockRetainHeight`, `BlockStoreBaseHeight`, `ABCIResultsBaseHeight`. +- `[metrics]` Add metrics to monitor pruning and current available data in stores: `PruningServiceBlockRetainHeight`, `PruningServiceBlockResultsRetainHeight`, `ApplicationBlockRetainHeight`, `BlockStoreBaseHeight`, `ABCIResultsBaseHeight`. ([\#1234](https://github.com/cometbft/cometbft/pull/1234)) \ No newline at end of file diff --git a/.changelog/unreleased/features/1247-sync-flag.md b/.changelog/unreleased/features/1247-sync-flag.md new file mode 100644 index 00000000000..15149c3868f --- /dev/null +++ b/.changelog/unreleased/features/1247-sync-flag.md @@ -0,0 +1,3 @@ +- `[proto]` add `syncing_to_height` to `FinalizeBlockRequest` to let the ABCI app + know if the node is syncing or not. + ([\#1247](https://github.com/cometbft/cometbft/issues/1247)) diff --git a/.changelog/unreleased/features/1327-grpc-pruning-indexer.md b/.changelog/unreleased/features/1327-grpc-pruning-indexer.md index c07d7bafb32..e2e6aaee1bd 100644 --- a/.changelog/unreleased/features/1327-grpc-pruning-indexer.md +++ b/.changelog/unreleased/features/1327-grpc-pruning-indexer.md @@ -1,2 +1,2 @@ -`[rpc/grpc]` Add gRPC endpoint for pruning the block and transaction indexes -([\#1327](https://github.com/cometbft/cometbft/pull/1327)) \ No newline at end of file +- `[rpc/grpc]` Add gRPC endpoint for pruning the block and transaction indexes +([\#1327](https://github.com/cometbft/cometbft/pull/1327)) diff --git a/.changelog/unreleased/features/1643-nop-mempool.md b/.changelog/unreleased/features/1643-nop-mempool.md new file mode 100644 index 00000000000..9b829b58522 --- /dev/null +++ b/.changelog/unreleased/features/1643-nop-mempool.md @@ -0,0 +1,14 @@ +- `[mempool]` Add `nop` mempool ([\#1643](https://github.com/cometbft/cometbft/pull/1643)). If you want to use it, change mempool's `type` to `nop`: +```toml + [mempool] + + # The type of mempool for this node to use. + # + # Possible types: + # - "flood" : concurrent linked list mempool with flooding gossip protocol + # (default) + # - "nop" : nop-mempool (short for no operation; the ABCI app is responsible + # for storing, disseminating and proposing txs). "create_empty_blocks=false" + # is not supported. 
+ type = "nop" ``` diff --git a/.changelog/unreleased/features/1972-compaction-config.md b/.changelog/unreleased/features/1972-compaction-config.md new file mode 100644 index 00000000000..62718f13194 --- /dev/null +++ b/.changelog/unreleased/features/1972-compaction-config.md @@ -0,0 +1 @@ +- `[config]` Add configuration parameters to tweak forced compaction. ([\#1972](https://github.com/cometbft/cometbft/pull/1972)) \ No newline at end of file diff --git a/.changelog/unreleased/features/1972-force-compaction-on-pruning.md b/.changelog/unreleased/features/1972-force-compaction-on-pruning.md new file mode 100644 index 00000000000..abe1cbcfc58 --- /dev/null +++ b/.changelog/unreleased/features/1972-force-compaction-on-pruning.md @@ -0,0 +1 @@ +- `[store]` When pruning, force compaction of the database. ([\#1972](https://github.com/cometbft/cometbft/pull/1972)) \ No newline at end of file diff --git a/.changelog/unreleased/features/1974-statestore-metrics.md b/.changelog/unreleased/features/1974-statestore-metrics.md new file mode 100644 index 00000000000..02fb8b9e100 --- /dev/null +++ b/.changelog/unreleased/features/1974-statestore-metrics.md @@ -0,0 +1 @@ +- `[metrics]` Added metrics to monitor state store access. ([\#1974](https://github.com/cometbft/cometbft/pull/1974)) diff --git a/.changelog/unreleased/features/1974-store-metrics.md b/.changelog/unreleased/features/1974-store-metrics.md new file mode 100644 index 00000000000..fdd14fc8ce1 --- /dev/null +++ b/.changelog/unreleased/features/1974-store-metrics.md @@ -0,0 +1 @@ +- `[metrics]` Added metrics to monitor block store access. ([\#1974](https://github.com/cometbft/cometbft/pull/1974)) diff --git a/.changelog/unreleased/features/2056-remove-unused-param.md b/.changelog/unreleased/features/2056-remove-unused-param.md new file mode 100644 index 00000000000..72e0cffb34e --- /dev/null +++ b/.changelog/unreleased/features/2056-remove-unused-param.md @@ -0,0 +1,2 @@ +- `[config]` Removed unused `[mempool.max_batch_bytes]` mempool parameter. + ([\#2056](https://github.com/cometbft/cometbft/pull/2056/)) diff --git a/.changelog/unreleased/features/2107-localnet-monitoring.md b/.changelog/unreleased/features/2107-localnet-monitoring.md new file mode 100644 index 00000000000..64bd2ee912c --- /dev/null +++ b/.changelog/unreleased/features/2107-localnet-monitoring.md @@ -0,0 +1 @@ +- `[test]` Added monitoring tools and dashboards for local testing with `localnet`. ([\#2107](https://github.com/cometbft/cometbft/issues/2107)) diff --git a/.changelog/unreleased/features/2132-cometbft-db.md b/.changelog/unreleased/features/2132-cometbft-db.md new file mode 100644 index 00000000000..1ee52beaac5 --- /dev/null +++ b/.changelog/unreleased/features/2132-cometbft-db.md @@ -0,0 +1,2 @@ +- `[config]` Add support for [`pebbledb`](https://github.com/cockroachdb/pebble).
To use, build with the + `pebbledb` tag (`go build -tags pebbledb`) ([\#2132](https://github.com/cometbft/cometbft/pull/2132/)) diff --git a/.changelog/unreleased/features/2327-evidence-support-for-ordercode.md b/.changelog/unreleased/features/2327-evidence-support-for-ordercode.md new file mode 100644 index 00000000000..2448dfe9368 --- /dev/null +++ b/.changelog/unreleased/features/2327-evidence-support-for-ordercode.md @@ -0,0 +1 @@ +- `[evidence/store]` Added support for a different DB key representation within the evidence store ([\#2327](https://github.com/cometbft/cometbft/pull/2327/)) diff --git a/.changelog/unreleased/features/2327-lightdb-support-for-ordercode.md b/.changelog/unreleased/features/2327-lightdb-support-for-ordercode.md new file mode 100644 index 00000000000..f1c9f6247cb --- /dev/null +++ b/.changelog/unreleased/features/2327-lightdb-support-for-ordercode.md @@ -0,0 +1 @@ +- `[light/store]` Added support for a different DB key representation within the light block store ([\#2327](https://github.com/cometbft/cometbft/pull/2327/)) diff --git a/.changelog/unreleased/features/2327-store-db-layout-config.md b/.changelog/unreleased/features/2327-store-db-layout-config.md new file mode 100644 index 00000000000..d0b99baf846 --- /dev/null +++ b/.changelog/unreleased/features/2327-store-db-layout-config.md @@ -0,0 +1,3 @@ +- `[config]` Added `[storage.experimental_db_key_layout]` storage parameter, set to "v2" + for order-preserving representation +([\#2327](https://github.com/cometbft/cometbft/pull/2327/)) diff --git a/.changelog/unreleased/features/2327-store-support-for-ordercode.md b/.changelog/unreleased/features/2327-store-support-for-ordercode.md new file mode 100644 index 00000000000..30b20b24383 --- /dev/null +++ b/.changelog/unreleased/features/2327-store-support-for-ordercode.md @@ -0,0 +1 @@ +- `[store]` Added support for a different DB key representation to state and block store ([\#2327](https://github.com/cometbft/cometbft/pull/2327/)) diff --git a/.changelog/unreleased/features/2362-e2e-block-max-bytes.md b/.changelog/unreleased/features/2362-e2e-block-max-bytes.md new file mode 100644 index 00000000000..a3b007c3f96 --- /dev/null +++ b/.changelog/unreleased/features/2362-e2e-block-max-bytes.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `block_max_bytes` option to the manifest file. + ([\#2362](https://github.com/cometbft/cometbft/pull/2362)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2433-e2e-testnet-dir.md b/.changelog/unreleased/features/2433-e2e-testnet-dir.md new file mode 100644 index 00000000000..33050d20ca2 --- /dev/null +++ b/.changelog/unreleased/features/2433-e2e-testnet-dir.md @@ -0,0 +1,2 @@ +- `[e2e]` Add new `--testnet-dir` parameter to set a custom directory for the generated testnet files. + ([\#2433](https://github.com/cometbft/cometbft/pull/2433)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2596-storage-report.md b/.changelog/unreleased/features/2596-storage-report.md new file mode 100644 index 00000000000..27fc6d95a8f --- /dev/null +++ b/.changelog/unreleased/features/2596-storage-report.md @@ -0,0 +1 @@ +- `[docs]` Add report on storage improvements and findings.
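As an illustration of how the storage-related options above might be combined in `config.toml`: this is a sketch, not an authoritative fragment. `db_backend` is the long-standing backend selector, and `experimental_db_key_layout` is quoted verbatim from the entry above; the placement and surrounding defaults are assumptions.

```toml
# Illustrative sketch only.

# Select the PebbleDB backend; requires a binary built with
# `go build -tags pebbledb`.
db_backend = "pebbledb"

[storage]
# Opt in to the order-preserving ("v2") DB key representation.
experimental_db_key_layout = "v2"
```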
([\#2569](https://github.com/cometbft/cometbft/pull/2569)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2655-move-timeout-commit-into-finalize-block.md b/.changelog/unreleased/features/2655-move-timeout-commit-into-finalize-block.md new file mode 100644 index 00000000000..95989071422 --- /dev/null +++ b/.changelog/unreleased/features/2655-move-timeout-commit-into-finalize-block.md @@ -0,0 +1,2 @@ +- `[config]` Move `timeout_commit` into the ABCI `FinalizeBlockResponse` + ([\#2655](https://github.com/cometbft/cometbft/issues/2655)) diff --git a/.changelog/unreleased/features/2756-mempool-max-txs-bytes.md b/.changelog/unreleased/features/2756-mempool-max-txs-bytes.md new file mode 100644 index 00000000000..97ab4d1271a --- /dev/null +++ b/.changelog/unreleased/features/2756-mempool-max-txs-bytes.md @@ -0,0 +1,2 @@ +- `[config]` Update the default value of `mempool.max_txs_bytes` to 64 MiB. + ([\#2756](https://github.com/cometbft/cometbft/issues/2756)) diff --git a/.changelog/unreleased/features/2765-bls12-381-curve.md b/.changelog/unreleased/features/2765-bls12-381-curve.md new file mode 100644 index 00000000000..29e0108f4d5 --- /dev/null +++ b/.changelog/unreleased/features/2765-bls12-381-curve.md @@ -0,0 +1,3 @@ +- `[crypto]` Add support for BLS12-381 keys. Since the implementation needs + `cgo` and brings in new dependencies, we use the `bls12381` build flag to + enable it ([\#2765](https://github.com/cometbft/cometbft/pull/2765)) diff --git a/.changelog/unreleased/features/2803-mempool-lanes.md b/.changelog/unreleased/features/2803-mempool-lanes.md new file mode 100644 index 00000000000..8d079f691a6 --- /dev/null +++ b/.changelog/unreleased/features/2803-mempool-lanes.md @@ -0,0 +1,2 @@ +- `[mempool]` Add Lanes to the mempool to provide Quality of Service guarantees +([#2803](https://github.com/cometbft/cometbft/issues/2803)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2803-proto-mempool-lane.md b/.changelog/unreleased/features/2803-proto-mempool-lane.md new file mode 100644 index 00000000000..b7162254548 --- /dev/null +++ b/.changelog/unreleased/features/2803-proto-mempool-lane.md @@ -0,0 +1,3 @@ +- `[types/proto]` Extend `CheckTxResponse` with new `lane_id` field and `InfoResponse` with + `lane_priorities` and `default_lane` fields + ([#2803](https://github.com/cometbft/cometbft/issues/2803)) diff --git a/.changelog/unreleased/features/3008-mempool-async-update.md b/.changelog/unreleased/features/3008-mempool-async-update.md new file mode 100644 index 00000000000..1667aefb637 --- /dev/null +++ b/.changelog/unreleased/features/3008-mempool-async-update.md @@ -0,0 +1,3 @@ +- `[consensus]` Make mempool updates asynchronous from consensus Commits, + reducing latency for reaching consensus timeouts. + ([#3008](https://github.com/cometbft/cometbft/pull/3008)) diff --git a/.changelog/unreleased/features/3079-unconfirmed-tx.md b/.changelog/unreleased/features/3079-unconfirmed-tx.md new file mode 100644 index 00000000000..ddc4640117b --- /dev/null +++ b/.changelog/unreleased/features/3079-unconfirmed-tx.md @@ -0,0 +1,2 @@ +- `[rpc]` Add `unconfirmed_tx` to support querying a mempool transaction by its hash.
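As a rough illustration of the `mempool.max_txs_bytes` default change above (64 MiB = 67,108,864 bytes), a `config.toml` fragment might read as follows; a sketch only, the comment wording is not authoritative.

```toml
[mempool]
# New default: at most 64 MiB (67108864 bytes) of transaction
# data held in the mempool at once.
max_txs_bytes = 67108864
```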
+ ([\#3079](https://github.com/cometbft/cometbft/pull/3079)) diff --git a/.changelog/unreleased/features/3108-pending-tx-event.md b/.changelog/unreleased/features/3108-pending-tx-event.md new file mode 100644 index 00000000000..8f8fb398356 --- /dev/null +++ b/.changelog/unreleased/features/3108-pending-tx-event.md @@ -0,0 +1,2 @@ +- `[events]` Publish an event when a new transaction is added to the mempool. This is an experimental feature enabled by the `experimental_publish_event_pending_tx` config flag. + ([\#3108](https://github.com/cometbft/cometbft/issues/3108)) diff --git a/.changelog/unreleased/features/3472-p2p-has-channel-api.md b/.changelog/unreleased/features/3472-p2p-has-channel-api.md new file mode 100644 index 00000000000..b554a29ce1d --- /dev/null +++ b/.changelog/unreleased/features/3472-p2p-has-channel-api.md @@ -0,0 +1,3 @@ +- `[p2p]` `HasChannel(chID)` method added to the `Peer` interface, used by + reactors to check whether a peer implements/supports a given channel. + ([#3472](https://github.com/cometbft/cometbft/issues/3472)) diff --git a/.changelog/unreleased/features/3506-mempool-lanes-add-metrics.md b/.changelog/unreleased/features/3506-mempool-lanes-add-metrics.md new file mode 100644 index 00000000000..9da33294d6c --- /dev/null +++ b/.changelog/unreleased/features/3506-mempool-lanes-add-metrics.md @@ -0,0 +1,2 @@ +- `[metrics]` Add new mempool metrics `lane_size`, `lane_bytes`, and `tx_life_span` + ([#3506](https://github.com/cometbft/cometbft/issues/3506)). diff --git a/.changelog/unreleased/features/3517-key-type-all-over.md b/.changelog/unreleased/features/3517-key-type-all-over.md new file mode 100644 index 00000000000..21be64d62af --- /dev/null +++ b/.changelog/unreleased/features/3517-key-type-all-over.md @@ -0,0 +1,3 @@ +- `[privval]` Add `key-type` flag to all commands that _may_ generate a `privval` file, + and make `GenFilePV` flexible to accept different key generators. + ([\#3517](https://github.com/cometbft/cometbft/pull/3517)) diff --git a/.changelog/unreleased/features/3593-psql-configurable-tables.md b/.changelog/unreleased/features/3593-psql-configurable-tables.md new file mode 100644 index 00000000000..4b7f7278cbb --- /dev/null +++ b/.changelog/unreleased/features/3593-psql-configurable-tables.md @@ -0,0 +1,2 @@ +- `[indexer]` Introduce configurable table names for the PSQL indexer. + ([\#3593](https://github.com/cometbft/cometbft/issues/3593)) diff --git a/.changelog/unreleased/features/3622-e2e-mempool-lanes.md b/.changelog/unreleased/features/3622-e2e-mempool-lanes.md new file mode 100644 index 00000000000..7d3a01c5b78 --- /dev/null +++ b/.changelog/unreleased/features/3622-e2e-mempool-lanes.md @@ -0,0 +1,2 @@ +- `[e2e]` Added support for mempool lanes in e2e. + ([#3622](https://github.com/cometbft/cometbft/pull/3622)) diff --git a/.changelog/unreleased/features/3622-kvstore-mempool-lanes.md b/.changelog/unreleased/features/3622-kvstore-mempool-lanes.md new file mode 100644 index 00000000000..6d71cc2c395 --- /dev/null +++ b/.changelog/unreleased/features/3622-kvstore-mempool-lanes.md @@ -0,0 +1,2 @@ +- `[kvstore]` Extended `CheckTx` in kvstoreApp to support mempool lanes.
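The PSQL indexer entry above does not spell out the new configuration key names. A hypothetical `config.toml` sketch follows; `indexer` and `psql-conn` are pre-existing `[tx_index]` keys, while the `table_*` keys and all values are illustrative assumptions only.

```toml
[tx_index]
indexer = "psql"
psql-conn = "postgresql://user:password@localhost:5432/comet?sslmode=disable"
# Hypothetical table-name keys, for illustration only; the entry
# above does not state the actual key names.
table_blocks = "blocks"
table_tx_results = "tx_results"
```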
+ ([#3622](https://github.com/cometbft/cometbft/pull/3622)) diff --git a/.changelog/unreleased/features/3634-query-app-for-lanes-info.md b/.changelog/unreleased/features/3634-query-app-for-lanes-info.md new file mode 100644 index 00000000000..ee89cb520e5 --- /dev/null +++ b/.changelog/unreleased/features/3634-query-app-for-lanes-info.md @@ -0,0 +1,2 @@ +- `[node]` Move the ABCI `Info` call from the `Handshake` function to the `NewNodeWithCliParams` function. + ([#3634](https://github.com/cometbft/cometbft/pull/3634)) diff --git a/.changelog/unreleased/features/3825-e2e-lane-weights.md b/.changelog/unreleased/features/3825-e2e-lane-weights.md new file mode 100644 index 00000000000..5bcd2da4ff6 --- /dev/null +++ b/.changelog/unreleased/features/3825-e2e-lane-weights.md @@ -0,0 +1,3 @@ +- `[e2e]` Add `load_lane_weights` option to manifest for generating transactions with + lanes picked randomly and proportional to their weight. + ([\#3825](https://github.com/cometbft/cometbft/pull/3825)). \ No newline at end of file diff --git a/.changelog/unreleased/features/3963-e2e-load-internal-ip.md b/.changelog/unreleased/features/3963-e2e-load-internal-ip.md new file mode 100644 index 00000000000..23e824fce83 --- /dev/null +++ b/.changelog/unreleased/features/3963-e2e-load-internal-ip.md @@ -0,0 +1,3 @@ +- `[e2e]` Add new `--internal-ip` flag to `load` command for sending the load to + the nodes' internal IP addresses. This is useful when running from inside a + private network ([\#3963](https://github.com/cometbft/cometbft/pull/3963)). \ No newline at end of file diff --git a/.changelog/unreleased/features/4005-e2e-lanes-manifest.md b/.changelog/unreleased/features/4005-e2e-lanes-manifest.md new file mode 100644 index 00000000000..f085df68b79 --- /dev/null +++ b/.changelog/unreleased/features/4005-e2e-lanes-manifest.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `lanes` and `no_lanes` to manifest to customize the list of lanes the app should use +([#4005](https://github.com/cometbft/cometbft/issues/4005)) \ No newline at end of file diff --git a/.changelog/unreleased/features/4294-remove-secp256k1-wrapper.md b/.changelog/unreleased/features/4294-remove-secp256k1-wrapper.md new file mode 100644 index 00000000000..ca1333d6f8b --- /dev/null +++ b/.changelog/unreleased/features/4294-remove-secp256k1-wrapper.md @@ -0,0 +1 @@ +- `[crypto]` use decred secp256k1 directly ([#4294](https://github.com/cometbft/cometbft/pull/4294)) \ No newline at end of file diff --git a/.changelog/unreleased/features/4338-e2e-monitoring.md b/.changelog/unreleased/features/4338-e2e-monitoring.md new file mode 100644 index 00000000000..5fec7df8b41 --- /dev/null +++ b/.changelog/unreleased/features/4338-e2e-monitoring.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `monitor` command to manage Prometheus and Grafana servers +([#4338](https://github.com/cometbft/cometbft/pull/4338)). \ No newline at end of file diff --git a/.changelog/unreleased/features/7354-synchrony-field.md b/.changelog/unreleased/features/7354-synchrony-field.md new file mode 100644 index 00000000000..94505049768 --- /dev/null +++ b/.changelog/unreleased/features/7354-synchrony-field.md @@ -0,0 +1,3 @@ +- `[consensus]` add a new `synchrony` field to the `ConsensusParameter` struct + for controlling the parameters of the proposer-based timestamp algorithm. 
(@williambanfield) + ([tendermint/tendermint\#7354](https://github.com/tendermint/tendermint/pull/7354)) diff --git a/.changelog/unreleased/features/7376-wait-for-previous-block-time.md b/.changelog/unreleased/features/7376-wait-for-previous-block-time.md new file mode 100644 index 00000000000..b7a101fbcef --- /dev/null +++ b/.changelog/unreleased/features/7376-wait-for-previous-block-time.md @@ -0,0 +1,4 @@ +- `[consensus]` Update the proposal logic per the Proposer-based timestamps specification + so that the proposer will wait for the previous block time to occur + before proposing the next block. (@williambanfield) + ([tendermint/tendermint\#7376](https://github.com/tendermint/tendermint/pull/7376)) diff --git a/.changelog/unreleased/features/7382-new-ts-validation.md b/.changelog/unreleased/features/7382-new-ts-validation.md new file mode 100644 index 00000000000..ea7c6e01780 --- /dev/null +++ b/.changelog/unreleased/features/7382-new-ts-validation.md @@ -0,0 +1,3 @@ +- `[consensus]` Update block validation to no longer require the block timestamp + to be the median of the timestamps of the previous commit. (@anca) + ([tendermint/tendermint\#7382](https://github.com/tendermint/tendermint/pull/7382)) diff --git a/.changelog/unreleased/features/7391-use-pbts-for-proposal-ts.md b/.changelog/unreleased/features/7391-use-pbts-for-proposal-ts.md new file mode 100644 index 00000000000..3ed67673809 --- /dev/null +++ b/.changelog/unreleased/features/7391-use-pbts-for-proposal-ts.md @@ -0,0 +1,4 @@ +- `[consensus]` Use the proposed block timestamp as the proposal timestamp. + Update the block validation logic to ensure that the proposed block's timestamp + matches the timestamp in the proposal message. (@williambanfield) + ([tendermint/tendermint\#7391](https://github.com/tendermint/tendermint/pull/7391)) diff --git a/.changelog/unreleased/features/7415-prevote-nil-non-timely.md b/.changelog/unreleased/features/7415-prevote-nil-non-timely.md new file mode 100644 index 00000000000..3677b12514b --- /dev/null +++ b/.changelog/unreleased/features/7415-prevote-nil-non-timely.md @@ -0,0 +1,4 @@ +- `[consensus]` Update proposal validation logic to Prevote nil + if a proposal does not meet the conditions for Timeliness + per the proposer-based timestamp specification. (@anca) + ([tendermint/tendermint\#7415](https://github.com/tendermint/tendermint/pull/7415)) diff --git a/.changelog/unreleased/features/7711-fix-first-height.md b/.changelog/unreleased/features/7711-fix-first-height.md new file mode 100644 index 00000000000..2652371261b --- /dev/null +++ b/.changelog/unreleased/features/7711-fix-first-height.md @@ -0,0 +1,3 @@ +- `[consensus]` Use the proposer timestamp for the first height instead of the genesis time. + Chains will still start consensus at the genesis time. (@anca) + ([tendermint/tendermint\#7711](https://github.com/tendermint/tendermint/pull/7711)) diff --git a/.changelog/unreleased/features/816-config-grpc-version-service.md b/.changelog/unreleased/features/816-config-grpc-version-service.md new file mode 100644 index 00000000000..89bfd74ab26 --- /dev/null +++ b/.changelog/unreleased/features/816-config-grpc-version-service.md @@ -0,0 +1,2 @@ +- `[config]` Added `[grpc.version_service]` section for configuring the gRPC version service.
+ ([\#816](https://github.com/cometbft/cometbft/issues/816)) diff --git a/.changelog/unreleased/features/816-config-grpc.md b/.changelog/unreleased/features/816-config-grpc.md new file mode 100644 index 00000000000..3bb38abfa37 --- /dev/null +++ b/.changelog/unreleased/features/816-config-grpc.md @@ -0,0 +1,2 @@ +- `[config]` Added `[grpc]` section to configure the gRPC server. + ([\#816](https://github.com/cometbft/cometbft/issues/816)) diff --git a/.changelog/unreleased/improvements/0016-abc-types-validator-set.md b/.changelog/unreleased/improvements/0016-abc-types-validator-set.md new file mode 100644 index 00000000000..2851a4f6524 --- /dev/null +++ b/.changelog/unreleased/improvements/0016-abc-types-validator-set.md @@ -0,0 +1,2 @@ +- `[types]` Check that proposer is one of the validators in `ValidateBasic` + ([\#ABC-0016](https://github.com/cometbft/cometbft/security/advisories/GHSA-g5xx-c4hv-9ccc)) diff --git a/.changelog/unreleased/improvements/1017-remove-genesis-persistence-in-state-db.md b/.changelog/unreleased/improvements/1017-remove-genesis-persistence-in-state-db.md index 13917825084..ba9966bc9fe 100644 --- a/.changelog/unreleased/improvements/1017-remove-genesis-persistence-in-state-db.md +++ b/.changelog/unreleased/improvements/1017-remove-genesis-persistence-in-state-db.md @@ -1,3 +1,3 @@ - `[node]` Remove genesis persistence in state db, replaced by a hash - ([cometbft/cometbft\#1017](https://github.com/cometbft/cometbft/pull/1017), - [cometbft/cometbft\#1295](https://github.com/cometbft/cometbft/pull/1295)) + ([\#1017](https://github.com/cometbft/cometbft/pull/1017), + [\#1295](https://github.com/cometbft/cometbft/pull/1295)) diff --git a/.changelog/unreleased/improvements/1297-remove-genesis-from-db-upgrade.md b/.changelog/unreleased/improvements/1297-remove-genesis-from-db-upgrade.md index e7772829b3b..a099fd07332 100644 --- a/.changelog/unreleased/improvements/1297-remove-genesis-from-db-upgrade.md +++ b/.changelog/unreleased/improvements/1297-remove-genesis-from-db-upgrade.md @@ -1,2 +1,2 @@ - `[node]` On upgrade, after [\#1296](https://github.com/cometbft/cometbft/pull/1296), delete the genesis file existing in the DB. - ([cometbft/cometbft\#1297](https://github.com/cometbft/cometbft/pull/1297) + ([\#1297](https://github.com/cometbft/cometbft/pull/1297)) diff --git a/.changelog/unreleased/improvements/1324-cli-genesis-hash-checked-on-load.md b/.changelog/unreleased/improvements/1324-cli-genesis-hash-checked-on-load.md index 729c5252fec..c8f818dd8dc 100644 --- a/.changelog/unreleased/improvements/1324-cli-genesis-hash-checked-on-load.md +++ b/.changelog/unreleased/improvements/1324-cli-genesis-hash-checked-on-load.md @@ -1,3 +1,3 @@ -- `[cli/node]` The genesis hash provided with the `--genesis-hash` is now +- `[cli/node]` The genesis hash provided with the `--genesis-hash` is now forwarded to the node, instead of reading the file. - ([\#1324](https://github.com/cometbft/cometbft/pull/1324)). 
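Pulling together the gRPC configuration sections named in the entries above (including the `[grpc.privileged]` sections from the pruning-service entries earlier), a `config.toml` sketch might look as follows. Only the section names come from the entries; the `laddr` and `enabled` keys and all values are assumptions for illustration.

```toml
[grpc]
# Assumed listen-address key for the public gRPC server.
laddr = "tcp://127.0.0.1:26670"

[grpc.version_service]
enabled = true

[grpc.privileged]
# Serves trusted clients without authentication; never expose
# this listener to the public internet.
laddr = "tcp://127.0.0.1:26671"

[grpc.privileged.pruning_service]
enabled = true
```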
\ No newline at end of file + ([\#1324](https://github.com/cometbft/cometbft/pull/1324)) diff --git a/.changelog/unreleased/improvements/1324-config-genesis-hash.md b/.changelog/unreleased/improvements/1324-config-genesis-hash.md new file mode 100644 index 00000000000..cae7c713e8f --- /dev/null +++ b/.changelog/unreleased/improvements/1324-config-genesis-hash.md @@ -0,0 +1,3 @@ +- `[config]` Added `genesis_hash` storage parameter, which, when set, is checked + on node startup + ([\#1324](https://github.com/cometbft/cometbft/pull/1324/)) diff --git a/.changelog/unreleased/improvements/1518-reduce-default-maxbytes_increase_maxgas b/.changelog/unreleased/improvements/1518-reduce-default-maxbytes_increase_maxgas.md similarity index 100% rename from .changelog/unreleased/improvements/1518-reduce-default-maxbytes_increase_maxgas rename to .changelog/unreleased/improvements/1518-reduce-default-maxbytes_increase_maxgas.md diff --git a/.changelog/unreleased/improvements/1558-experimental-gossip-limiting.md b/.changelog/unreleased/improvements/1558-experimental-gossip-limiting.md new file mode 100644 index 00000000000..58fc6c6f863 --- /dev/null +++ b/.changelog/unreleased/improvements/1558-experimental-gossip-limiting.md @@ -0,0 +1,9 @@ +- `[mempool]` Add experimental feature to limit the number of persistent peers and non-persistent + peers to which the node gossips transactions. + ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) +- `[config]` Add mempool parameters `experimental_max_gossip_connections_to_persistent_peers` and + `experimental_max_gossip_connections_to_non_persistent_peers` for limiting the number of peers to + which the node gossips transactions. + ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) diff --git a/.changelog/unreleased/improvements/1559-e2e-latency-emulation.md b/.changelog/unreleased/improvements/1559-e2e-latency-emulation.md new file mode 100644 index 00000000000..02e8d0a0355 --- /dev/null +++ b/.changelog/unreleased/improvements/1559-e2e-latency-emulation.md @@ -0,0 +1,2 @@ +- `[e2e]` Allow latency emulation between nodes. + ([\#1559](https://github.com/cometbft/cometbft/pull/1559)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1560-e2e-latency-emulation.md b/.changelog/unreleased/improvements/1560-e2e-latency-emulation.md new file mode 100644 index 00000000000..77dc608db3a --- /dev/null +++ b/.changelog/unreleased/improvements/1560-e2e-latency-emulation.md @@ -0,0 +1,2 @@ +- `[e2e]` Allow latency emulation between nodes. + ([\#1560](https://github.com/cometbft/cometbft/pull/1560)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1594-export-makehttpdialer.md b/.changelog/unreleased/improvements/1594-export-makehttpdialer.md new file mode 100644 index 00000000000..dfd1cd81021 --- /dev/null +++ b/.changelog/unreleased/improvements/1594-export-makehttpdialer.md @@ -0,0 +1,2 @@ +- `[rpc]` Export `MakeHTTPDialer` to allow HTTP client constructors more flexibility.
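The two experimental gossip-limiting parameter names above are quoted verbatim from the entry; a sketch of how they might appear in `config.toml` follows. The values are illustrative, and 0 is assumed to keep the existing unlimited behavior.

```toml
[mempool]
# Gossip transactions to at most this many persistent and
# non-persistent peers, respectively (illustrative values).
experimental_max_gossip_connections_to_persistent_peers = 4
experimental_max_gossip_connections_to_non_persistent_peers = 2
```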
+ ([\#1594](https://github.com/cometbft/cometbft/pull/1594)) diff --git a/.changelog/unreleased/improvements/1715-validate-validator-address.md b/.changelog/unreleased/improvements/1715-validate-validator-address.md new file mode 100644 index 00000000000..ec7f2c7da6a --- /dev/null +++ b/.changelog/unreleased/improvements/1715-validate-validator-address.md @@ -0,0 +1 @@ +- `[types]` Validate `Validator#Address` in `ValidateBasic` ([\#1715](https://github.com/cometbft/cometbft/pull/1715)) diff --git a/.changelog/unreleased/improvements/1730-increase-abci-socket-message-size-limit.md b/.changelog/unreleased/improvements/1730-increase-abci-socket-message-size-limit.md new file mode 100644 index 00000000000..5246eb57f08 --- /dev/null +++ b/.changelog/unreleased/improvements/1730-increase-abci-socket-message-size-limit.md @@ -0,0 +1 @@ +- `[abci]` Increase ABCI socket message size limit to 2GB ([\#1730](https://github.com/cometbft/cometbft/pull/1730): @troykessler) diff --git a/.changelog/unreleased/improvements/1735-batch-save-state.md b/.changelog/unreleased/improvements/1735-batch-save-state.md new file mode 100644 index 00000000000..721380f6041 --- /dev/null +++ b/.changelog/unreleased/improvements/1735-batch-save-state.md @@ -0,0 +1 @@ +- `[state]` Save the state using a single DB batch ([\#1735](https://github.com/cometbft/cometbft/pull/1735)) diff --git a/.changelog/unreleased/improvements/1755-batch-save-block.md b/.changelog/unreleased/improvements/1755-batch-save-block.md new file mode 100644 index 00000000000..22f15cdb423 --- /dev/null +++ b/.changelog/unreleased/improvements/1755-batch-save-block.md @@ -0,0 +1,2 @@ +- `[store]` Save the block using a single DB batch if the block is less than 640kB; otherwise, each block part is saved individually + ([\#1755](https://github.com/cometbft/cometbft/pull/1755)) diff --git a/.changelog/unreleased/improvements/1757-gen-validator.md b/.changelog/unreleased/improvements/1757-gen-validator.md new file mode 100644 index 00000000000..144f9834d61 --- /dev/null +++ b/.changelog/unreleased/improvements/1757-gen-validator.md @@ -0,0 +1,3 @@ +- `[cmd]` Add support for all key types in `gen-validator` command. Use + `--key-type=` (or `-k`) to specify the key type (e.g., `-k secp256k1`). + ([\#1757](https://github.com/cometbft/cometbft/issues/1757)) diff --git a/.changelog/unreleased/improvements/1827-config-mempool-recheck-timeout.md b/.changelog/unreleased/improvements/1827-config-mempool-recheck-timeout.md new file mode 100644 index 00000000000..4a981e51bc3 --- /dev/null +++ b/.changelog/unreleased/improvements/1827-config-mempool-recheck-timeout.md @@ -0,0 +1,3 @@ +- `[config]` Added `recheck_timeout` mempool parameter to set how much time to wait for recheck + responses from the app (only applies to non-local ABCI clients). + ([\#1827](https://github.com/cometbft/cometbft/issues/1827/)) diff --git a/.changelog/unreleased/improvements/1900-httpproxy-from-env.md b/.changelog/unreleased/improvements/1900-httpproxy-from-env.md new file mode 100644 index 00000000000..fd654ef7ba0 --- /dev/null +++ b/.changelog/unreleased/improvements/1900-httpproxy-from-env.md @@ -0,0 +1,2 @@ +- `[rpc]` Support setting the proxy from the environment in `DefaultHttpClient`.
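For the `recheck_timeout` parameter above, a minimal sketch; the key name is quoted verbatim from the entry, while the duration-string syntax and the value shown are illustrative assumptions.

```toml
[mempool]
# How long to wait for recheck responses from the app after a block
# is committed; only applies to non-local ABCI clients.
recheck_timeout = "1s"
```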
+ ([\#1900](https://github.com/cometbft/cometbft/pull/1900)) diff --git a/.changelog/unreleased/improvements/1901-export-p2p-package-errors.md b/.changelog/unreleased/improvements/1901-export-p2p-package-errors.md new file mode 100644 index 00000000000..97dae672c8d --- /dev/null +++ b/.changelog/unreleased/improvements/1901-export-p2p-package-errors.md @@ -0,0 +1 @@ +- `[p2p]` Export p2p package errors ([\#1901](https://github.com/cometbft/cometbft/pull/1901)) (contributes to [\#1140](https://github.com/cometbft/cometbft/issues/1140)) diff --git a/.changelog/unreleased/improvements/1902-rpc-default-port.md b/.changelog/unreleased/improvements/1902-rpc-default-port.md new file mode 100644 index 00000000000..b321bed5394 --- /dev/null +++ b/.changelog/unreleased/improvements/1902-rpc-default-port.md @@ -0,0 +1 @@ +- `[rpc]` Use default port for HTTP(S) URLs when there is no explicit port ([\#1903](https://github.com/cometbft/cometbft/pull/1903)) diff --git a/.changelog/unreleased/improvements/1904-export-light-package-errors.md b/.changelog/unreleased/improvements/1904-export-light-package-errors.md new file mode 100644 index 00000000000..654a7a32d54 --- /dev/null +++ b/.changelog/unreleased/improvements/1904-export-light-package-errors.md @@ -0,0 +1 @@ +- `[light]` Export light package errors ([\#1904](https://github.com/cometbft/cometbft/pull/1904)) (contributes to [\#1140](https://github.com/cometbft/cometbft/issues/1140)) diff --git a/.changelog/unreleased/improvements/1921-crypto-merkle-innerHash.md b/.changelog/unreleased/improvements/1921-crypto-merkle-innerHash.md new file mode 100644 index 00000000000..d3c9dac2cba --- /dev/null +++ b/.changelog/unreleased/improvements/1921-crypto-merkle-innerHash.md @@ -0,0 +1 @@ +- `[crypto/merkle]` faster calculation of hashes ([#1921](https://github.com/cometbft/cometbft/pull/1921)) diff --git a/.changelog/unreleased/improvements/1958-disable-linting.md b/.changelog/unreleased/improvements/1958-disable-linting.md new file mode 100644 index 00000000000..cca9ee63578 --- /dev/null +++ b/.changelog/unreleased/improvements/1958-disable-linting.md @@ -0,0 +1,2 @@ +- `[linting]` Removed undesired linting from `Makefile` and added dependency check for `codespell`. 
+ ([\#1958](https://github.com/cometbft/cometbft/pull/1958/)) diff --git a/.changelog/unreleased/improvements/2016-blocksync-avoid-double-calling-block-from-proto.md b/.changelog/unreleased/improvements/2016-blocksync-avoid-double-calling-block-from-proto.md new file mode 100644 index 00000000000..7251221be18 --- /dev/null +++ b/.changelog/unreleased/improvements/2016-blocksync-avoid-double-calling-block-from-proto.md @@ -0,0 +1,2 @@ +- `[blocksync]` Avoid double-calling `types.BlockFromProto` for performance + reasons ([\#2016](https://github.com/cometbft/cometbft/pull/2016)) diff --git a/.changelog/unreleased/improvements/2017-state-avoid-double-saving-finalize-block-response.md b/.changelog/unreleased/improvements/2017-state-avoid-double-saving-finalize-block-response.md new file mode 100644 index 00000000000..e5df5a0dd1e --- /dev/null +++ b/.changelog/unreleased/improvements/2017-state-avoid-double-saving-finalize-block-response.md @@ -0,0 +1,2 @@ +- `[state]` avoid double-saving `FinalizeBlockResponse` for performance reasons + ([\#2017](https://github.com/cometbft/cometbft/pull/2017)) diff --git a/.changelog/unreleased/improvements/2065-e2e-vote-ext-activation.md b/.changelog/unreleased/improvements/2065-e2e-vote-ext-activation.md new file mode 100644 index 00000000000..9ced3a5da72 --- /dev/null +++ b/.changelog/unreleased/improvements/2065-e2e-vote-ext-activation.md @@ -0,0 +1,5 @@ +- `[e2e]` Add manifest option `VoteExtensionsUpdateHeight` to test + vote extension activation via `InitChain` and `FinalizeBlock`. + Also, extend the manifest generator to produce different values + of this new option + ([\#2065](https://github.com/cometbft/cometbft/pull/2065)) diff --git a/.changelog/unreleased/improvements/2093-metric-chain-size-bytes.md b/.changelog/unreleased/improvements/2093-metric-chain-size-bytes.md new file mode 100644 index 00000000000..afba958e3b7 --- /dev/null +++ b/.changelog/unreleased/improvements/2093-metric-chain-size-bytes.md @@ -0,0 +1,2 @@ +- `[consensus]` Add `chain_size_bytes` metric for measuring the size of the blockchain in bytes + ([\#2093](https://github.com/cometbft/cometbft/pull/2093)) diff --git a/.changelog/unreleased/improvements/2094-e2e-load-max-txs.md b/.changelog/unreleased/improvements/2094-e2e-load-max-txs.md new file mode 100644 index 00000000000..31ca79cfe3b --- /dev/null +++ b/.changelog/unreleased/improvements/2094-e2e-load-max-txs.md @@ -0,0 +1,2 @@ +- `[e2e]` Add manifest option `load_max_txs` to limit the number of transactions generated by the + `load` command. ([\#2094](https://github.com/cometbft/cometbft/pull/2094)) diff --git a/.changelog/unreleased/improvements/2142-psql-optimization.md b/.changelog/unreleased/improvements/2142-psql-optimization.md new file mode 100644 index 00000000000..8de465bd390 --- /dev/null +++ b/.changelog/unreleased/improvements/2142-psql-optimization.md @@ -0,0 +1,2 @@ +- `[indexer]` Optimized the PSQL indexer + ([\#2142](https://github.com/cometbft/cometbft/pull/2142)) thanks to external contributor @k0marov! diff --git a/.changelog/unreleased/improvements/2192-e2e-make-fast.md b/.changelog/unreleased/improvements/2192-e2e-make-fast.md new file mode 100644 index 00000000000..308918c2d5f --- /dev/null +++ b/.changelog/unreleased/improvements/2192-e2e-make-fast.md @@ -0,0 +1,2 @@ +- `[e2e]` Add new targets `fast` and `clean` to Makefile.
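Several entries above add options to the e2e testnet manifest, itself a TOML file. A sketch under stated assumptions: `load_max_txs` is quoted verbatim from its entry, while `vote_extensions_update_height` is an assumed snake_case spelling of the `VoteExtensionsUpdateHeight` option.

```toml
# e2e manifest fragment (illustrative only).
load_max_txs = 1000
vote_extensions_update_height = 10
```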
+ ([\#2192](https://github.com/cometbft/cometbft/pull/2192)) diff --git a/.changelog/unreleased/improvements/2200-export-rpc-packager-errors.md b/.changelog/unreleased/improvements/2200-export-rpc-packager-errors.md new file mode 100644 index 00000000000..b549dc3e783 --- /dev/null +++ b/.changelog/unreleased/improvements/2200-export-rpc-packager-errors.md @@ -0,0 +1 @@ +- `[rpc]` Export RPC package errors ([\#2200](https://github.com/cometbft/cometbft/pull/2200)) (contributes to [\#1140](https://github.com/cometbft/cometbft/issues/1140)) diff --git a/.changelog/unreleased/improvements/2246-make-peerset-remove-more-efficient.md b/.changelog/unreleased/improvements/2246-make-peerset-remove-more-efficient.md new file mode 100644 index 00000000000..793d7eb99fc --- /dev/null +++ b/.changelog/unreleased/improvements/2246-make-peerset-remove-more-efficient.md @@ -0,0 +1 @@ +- `[p2p]` make `PeerSet.Remove` more efficient (Author: @odeke-em) [\#2246](https://github.com/cometbft/cometbft/pull/2246) diff --git a/.changelog/unreleased/improvements/2328-e2e-log-sent-txs.md b/.changelog/unreleased/improvements/2328-e2e-log-sent-txs.md new file mode 100644 index 00000000000..e1b69899f40 --- /dev/null +++ b/.changelog/unreleased/improvements/2328-e2e-log-sent-txs.md @@ -0,0 +1,2 @@ +- `[e2e]` Log the number of transactions that were sent successfully or failed. + ([\#2328](https://github.com/cometbft/cometbft/pull/2328)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2365-deduplicate-light-signature-checks.md b/.changelog/unreleased/improvements/2365-deduplicate-light-signature-checks.md new file mode 100644 index 00000000000..b660537d28f --- /dev/null +++ b/.changelog/unreleased/improvements/2365-deduplicate-light-signature-checks.md @@ -0,0 +1 @@ +- `[light]` Remove duplicated signature checks in `light.VerifyNonAdjacent` ([\#2365](https://github.com/cometbft/cometbft/issues/2365)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2434-jsonrpc-websocket-basic-auth.md b/.changelog/unreleased/improvements/2434-jsonrpc-websocket-basic-auth.md new file mode 100644 index 00000000000..e4db7c06c7d --- /dev/null +++ b/.changelog/unreleased/improvements/2434-jsonrpc-websocket-basic-auth.md @@ -0,0 +1 @@ +- `[jsonrpc]` enable HTTP basic auth in websocket client ([#2434](https://github.com/cometbft/cometbft/pull/2434)) diff --git a/.changelog/unreleased/improvements/2453-e2e-add-clock-skew.md b/.changelog/unreleased/improvements/2453-e2e-add-clock-skew.md new file mode 100644 index 00000000000..5f4e294430c --- /dev/null +++ b/.changelog/unreleased/improvements/2453-e2e-add-clock-skew.md @@ -0,0 +1,3 @@ +- `[e2e]` Introduce the possibility in the manifest for some nodes + to run with a preconfigured clock skew. + ([\#2453](https://github.com/cometbft/cometbft/pull/2453)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2467-decrease-n-of-requested-blocks.md b/.changelog/unreleased/improvements/2467-decrease-n-of-requested-blocks.md new file mode 100644 index 00000000000..4749b7953b9 --- /dev/null +++ b/.changelog/unreleased/improvements/2467-decrease-n-of-requested-blocks.md @@ -0,0 +1,3 @@ +- `[blocksync]` make the max number of downloaded blocks dynamic. + Previously it was a const 600.
Now it's `peersCount * maxPendingRequestsPerPeer (20)` + ([\#2467](https://github.com/cometbft/cometbft/pull/2467)) diff --git a/.changelog/unreleased/improvements/2475-blocksync-2nd-request.md b/.changelog/unreleased/improvements/2475-blocksync-2nd-request.md new file mode 100644 index 00000000000..d4061b0dd39 --- /dev/null +++ b/.changelog/unreleased/improvements/2475-blocksync-2nd-request.md @@ -0,0 +1,3 @@ +- `[blocksync]` Request a block from peer B if we are approaching pool's height + (less than 50 blocks) and the current peer A is slow in sending us the + block ([\#2475](https://github.com/cometbft/cometbft/pull/2475)) diff --git a/.changelog/unreleased/improvements/2475-blocksync-no-block-response.md b/.changelog/unreleased/improvements/2475-blocksync-no-block-response.md new file mode 100644 index 00000000000..a603cb2ffdc --- /dev/null +++ b/.changelog/unreleased/improvements/2475-blocksync-no-block-response.md @@ -0,0 +1,3 @@ +- `[blocksync]` Request the block N from peer B immediately after getting + `NoBlockResponse` from peer A + ([\#2475](https://github.com/cometbft/cometbft/pull/2475)) diff --git a/.changelog/unreleased/improvements/2475-blocksync-sort-peers.md b/.changelog/unreleased/improvements/2475-blocksync-sort-peers.md new file mode 100644 index 00000000000..dd1efe035f3 --- /dev/null +++ b/.changelog/unreleased/improvements/2475-blocksync-sort-peers.md @@ -0,0 +1,2 @@ +- `[blocksync]` Sort peers by download rate (the fastest peer is picked first) + ([\#2475](https://github.com/cometbft/cometbft/pull/2475)) diff --git a/.changelog/unreleased/improvements/2496-privval-ext-signature.md b/.changelog/unreleased/improvements/2496-privval-ext-signature.md new file mode 100644 index 00000000000..0d050a737f9 --- /dev/null +++ b/.changelog/unreleased/improvements/2496-privval-ext-signature.md @@ -0,0 +1,4 @@ +- `[privval]` DO NOT require extension signature from privval if vote + extensions are disabled. Remote signers can skip signing the extension if + `skip_extension_signing` flag in `SignVoteRequest` is true. + ([\#2496](https://github.com/cometbft/cometbft/pull/2496)) diff --git a/.changelog/unreleased/improvements/2522-privval-skip-extension-signing.md b/.changelog/unreleased/improvements/2522-privval-skip-extension-signing.md new file mode 100644 index 00000000000..86965e98ab0 --- /dev/null +++ b/.changelog/unreleased/improvements/2522-privval-skip-extension-signing.md @@ -0,0 +1,5 @@ +- `[proto]` Add `skip_extension_signing` field to the `SignVoteRequest` message + in `cometbft.privval.v1` ([\#2522](https://github.com/cometbft/cometbft/pull/2522)). + The `cometbft.privval.v1beta2` package is added to capture the protocol as it was + released in CometBFT 0.38.x + ([\#2529](https://github.com/cometbft/cometbft/pull/2529)). diff --git a/.changelog/unreleased/improvements/2769-merge-config-docs.md b/.changelog/unreleased/improvements/2769-merge-config-docs.md new file mode 100644 index 00000000000..befd7f4d88a --- /dev/null +++ b/.changelog/unreleased/improvements/2769-merge-config-docs.md @@ -0,0 +1,2 @@ +- `[docs]` Merge configuration doc in explanation section with the config.toml document in references. 
+ ([\#2769](https://github.com/cometbft/cometbft/pull/2769)) diff --git a/.changelog/unreleased/improvements/2788-move-ws-info-log-to-debug.md b/.changelog/unreleased/improvements/2788-move-ws-info-log-to-debug.md new file mode 100644 index 00000000000..37d544338c1 --- /dev/null +++ b/.changelog/unreleased/improvements/2788-move-ws-info-log-to-debug.md @@ -0,0 +1,2 @@ +- `[rpc]` Move the websockets info log for successful replies to debug. + ([\#2788](https://github.com/cometbft/cometbft/pull/2788)) diff --git a/.changelog/unreleased/improvements/2839-tx_index-lower-heap-allocation.md b/.changelog/unreleased/improvements/2839-tx_index-lower-heap-allocation.md new file mode 100644 index 00000000000..2c763654201 --- /dev/null +++ b/.changelog/unreleased/improvements/2839-tx_index-lower-heap-allocation.md @@ -0,0 +1,2 @@ +- `[state/indexer]` Lower the heap allocation of transaction searches + ([\#2839](https://github.com/cometbft/cometbft/pull/2839)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2841-speedup-bits-pick-random.md b/.changelog/unreleased/improvements/2841-speedup-bits-pick-random.md new file mode 100644 index 00000000000..44d65c3f617 --- /dev/null +++ b/.changelog/unreleased/improvements/2841-speedup-bits-pick-random.md @@ -0,0 +1,2 @@ +- `[internal/bits]` 10x speedup and remove heap overhead of `bitArray.PickRandom` (used extensively in consensus gossip) + ([\#2841](https://github.com/cometbft/cometbft/pull/2841)). diff --git a/.changelog/unreleased/improvements/2846-speedup-json-encoding.md b/.changelog/unreleased/improvements/2846-speedup-json-encoding.md new file mode 100644 index 00000000000..262253e3614 --- /dev/null +++ b/.changelog/unreleased/improvements/2846-speedup-json-encoding.md @@ -0,0 +1,2 @@ +- `[libs/json]` Lower the memory overhead of JSON encoding by using JSON encoders internally. + ([\#2846](https://github.com/cometbft/cometbft/pull/2846)) diff --git a/.changelog/unreleased/improvements/2847-compile-time-debug-strip.md b/.changelog/unreleased/improvements/2847-compile-time-debug-strip.md new file mode 100644 index 00000000000..293b13fc2db --- /dev/null +++ b/.changelog/unreleased/improvements/2847-compile-time-debug-strip.md @@ -0,0 +1,2 @@ +- `[log]` allow stripping out all debug-level code from the binary at compile time using build flags + ([\#2847](https://github.com/cometbft/cometbft/issues/2847)) diff --git a/.changelog/unreleased/improvements/2848-reduce-blockID-marshalling.md b/.changelog/unreleased/improvements/2848-reduce-blockID-marshalling.md new file mode 100644 index 00000000000..81574737426 --- /dev/null +++ b/.changelog/unreleased/improvements/2848-reduce-blockID-marshalling.md @@ -0,0 +1,2 @@ +- `[types]` Small reduction in memory allocation via swapping Key with Equals in VoteSet + ([\#1112](https://github.com/cometbft/cometbft/issues/1112)) diff --git a/.changelog/unreleased/improvements/2853-abci-2-tutorial.md b/.changelog/unreleased/improvements/2853-abci-2-tutorial.md new file mode 100644 index 00000000000..cc0f878b435 --- /dev/null +++ b/.changelog/unreleased/improvements/2853-abci-2-tutorial.md @@ -0,0 +1,2 @@ +- `[docs]` Add a new ABCI 2.0 tutorial.
+ ([\#2853](https://github.com/cometbft/cometbft/issues/2853)) thanks @alijnmerchant21 for contributions to the tutorial diff --git a/.changelog/unreleased/improvements/2855-fix-txsearch-performance.md b/.changelog/unreleased/improvements/2855-fix-txsearch-performance.md new file mode 100644 index 00000000000..4369ed1a4c4 --- /dev/null +++ b/.changelog/unreleased/improvements/2855-fix-txsearch-performance.md @@ -0,0 +1,2 @@ +- `[state/indexer]` Fix txSearch performance issue + ([\#2855](https://github.com/cometbft/cometbft/pull/2855)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2867-rpc-batch-size-config.md b/.changelog/unreleased/improvements/2867-rpc-batch-size-config.md new file mode 100644 index 00000000000..d353bed0474 --- /dev/null +++ b/.changelog/unreleased/improvements/2867-rpc-batch-size-config.md @@ -0,0 +1,2 @@ +- `[rpc]` Add a configurable maximum batch size for RPC requests. + ([\#2867](https://github.com/cometbft/cometbft/pull/2867)). diff --git a/.changelog/unreleased/improvements/2911-remove-event-bus-debug-logs.md b/.changelog/unreleased/improvements/2911-remove-event-bus-debug-logs.md new file mode 100644 index 00000000000..a008e2482a0 --- /dev/null +++ b/.changelog/unreleased/improvements/2911-remove-event-bus-debug-logs.md @@ -0,0 +1,2 @@ +- `[event-bus]` Remove the debug logs in PublishEventTx, which were causing noticeable production slowdowns. + ([\#2911](https://github.com/cometbft/cometbft/pull/2911)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2924-consensus-cache-block-hash.md b/.changelog/unreleased/improvements/2924-consensus-cache-block-hash.md new file mode 100644 index 00000000000..4f9e5638c45 --- /dev/null +++ b/.changelog/unreleased/improvements/2924-consensus-cache-block-hash.md @@ -0,0 +1,2 @@ +- `[state/execution]` Cache the block hash computation inside of the Block type, so we only compute it once. + ([\#2924](https://github.com/cometbft/cometbft/pull/2924)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2928-remove-redundant-verifyblock-call-in-finalize-commit.md b/.changelog/unreleased/improvements/2928-remove-redundant-verifyblock-call-in-finalize-commit.md new file mode 100644 index 00000000000..ac975738453 --- /dev/null +++ b/.changelog/unreleased/improvements/2928-remove-redundant-verifyblock-call-in-finalize-commit.md @@ -0,0 +1,2 @@ +- `[consensus/state]` Remove a redundant `VerifyBlock` call in `FinalizeCommit` + ([\#2928](https://github.com/cometbft/cometbft/pull/2928)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2949-reduce-protoio-writer-creation-time.md b/.changelog/unreleased/improvements/2949-reduce-protoio-writer-creation-time.md new file mode 100644 index 00000000000..75838e24882 --- /dev/null +++ b/.changelog/unreleased/improvements/2949-reduce-protoio-writer-creation-time.md @@ -0,0 +1,2 @@ +- `[p2p/channel]` Speed up `ProtoIO` writer creation time, thereby speeding up channel writing by 5%. + ([\#2949](https://github.com/cometbft/cometbft/pull/2949)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2952-lower-next-packet-msg-time.md b/.changelog/unreleased/improvements/2952-lower-next-packet-msg-time.md new file mode 100644 index 00000000000..6a05588c0f6 --- /dev/null +++ b/.changelog/unreleased/improvements/2952-lower-next-packet-msg-time.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Minor speedup (3%) to connection.WritePacketMsgTo, by removing MinInt calls.
+ ([\#2952](https://github.com/cometbft/cometbft/pull/2952)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2959-speedup-initialized-bitarray-construction.md b/.changelog/unreleased/improvements/2959-speedup-initialized-bitarray-construction.md new file mode 100644 index 00000000000..7c1b2181d08 --- /dev/null +++ b/.changelog/unreleased/improvements/2959-speedup-initialized-bitarray-construction.md @@ -0,0 +1,2 @@ +- `[internal/bits]` 10x speedup in creating initialized bitArrays, which speeds up extendedCommit.BitArray(). This is used in consensus vote gossip. + ([\#2959](https://github.com/cometbft/cometbft/pull/2841)). diff --git a/.changelog/unreleased/improvements/2964-skip-revalidation-of-blockstore-LoadBlockFromMeta-.md b/.changelog/unreleased/improvements/2964-skip-revalidation-of-blockstore-LoadBlockFromMeta-.md new file mode 100644 index 00000000000..26fdb6c7ed2 --- /dev/null +++ b/.changelog/unreleased/improvements/2964-skip-revalidation-of-blockstore-LoadBlockFromMeta-.md @@ -0,0 +1,2 @@ +- `[blockstore]` Remove a redundant `Header.ValidateBasic` call in `LoadBlockMeta`, reducing this time by 75%. + ([\#2964](https://github.com/cometbft/cometbft/pull/2964)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2986-lower-memory-allocation-in-packet-writing.md b/.changelog/unreleased/improvements/2986-lower-memory-allocation-in-packet-writing.md new file mode 100644 index 00000000000..9d262798808 --- /dev/null +++ b/.changelog/unreleased/improvements/2986-lower-memory-allocation-in-packet-writing.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Speed up connection.WritePacketMsgTo, by reusing internal buffers rather than re-allocating. + ([\#2986](https://github.com/cometbft/cometbft/pull/2986)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2988-flush-throttle-timeout.md b/.changelog/unreleased/improvements/2988-flush-throttle-timeout.md new file mode 100644 index 00000000000..4be9bd52c34 --- /dev/null +++ b/.changelog/unreleased/improvements/2988-flush-throttle-timeout.md @@ -0,0 +1,2 @@ +- `[p2p]` Lower `flush_throttle_timeout` to 10ms + ([\#2988](https://github.com/cometbft/cometbft/issues/2988)) diff --git a/.changelog/unreleased/improvements/3003-use-lru-caches-in-blockstore.md b/.changelog/unreleased/improvements/3003-use-lru-caches-in-blockstore.md new file mode 100644 index 00000000000..7a967076933 --- /dev/null +++ b/.changelog/unreleased/improvements/3003-use-lru-caches-in-blockstore.md @@ -0,0 +1,2 @@ +- `[blockstore]` Use LRU caches in blockstore, significantly improving consensus gossip routine performance + ([\#3003](https://github.com/cometbft/cometbft/issues/3003)) diff --git a/.changelog/unreleased/improvements/3006-use-thread-independent-randomness-in-gossip.md b/.changelog/unreleased/improvements/3006-use-thread-independent-randomness-in-gossip.md new file mode 100644 index 00000000000..71bb9782c59 --- /dev/null +++ b/.changelog/unreleased/improvements/3006-use-thread-independent-randomness-in-gossip.md @@ -0,0 +1,2 @@ +- `[consensus]` Use an independent rng for gossip threads, reducing mutex contention.
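The lowered `flush_throttle_timeout` default above corresponds to a `config.toml` line roughly like the following sketch; the key name is quoted verbatim from the entry, the comment wording is not authoritative.

```toml
[p2p]
# Time to wait before flushing queued messages out on the connection.
flush_throttle_timeout = "10ms"
```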
diff --git a/.changelog/unreleased/improvements/3016-remove-expensive-calls-from-flowrate.md b/.changelog/unreleased/improvements/3016-remove-expensive-calls-from-flowrate.md new file mode 100644 index 00000000000..7facdda30ed --- /dev/null +++ b/.changelog/unreleased/improvements/3016-remove-expensive-calls-from-flowrate.md @@ -0,0 +1,3 @@ +- `[flowrate]` Remove expensive `time.Now()` calls from flowrate calls. + Clock updates now happen in a separate goroutine. + ([\#3016](https://github.com/cometbft/cometbft/issues/3016)) diff --git a/.changelog/unreleased/improvements/3017-speedup-consensus-metrics.md b/.changelog/unreleased/improvements/3017-speedup-consensus-metrics.md new file mode 100644 index 00000000000..d028f40eae8 --- /dev/null +++ b/.changelog/unreleased/improvements/3017-speedup-consensus-metrics.md @@ -0,0 +1,2 @@ +- `[consensus]` Improve performance of consensus metrics by reducing string operations + ([\#3017](https://github.com/cometbft/cometbft/issues/3017)) diff --git a/.changelog/unreleased/improvements/3019-reduce-allocations-in-packet-reads.md b/.changelog/unreleased/improvements/3019-reduce-allocations-in-packet-reads.md new file mode 100644 index 00000000000..96e0b1c0fe4 --- /dev/null +++ b/.changelog/unreleased/improvements/3019-reduce-allocations-in-packet-reads.md @@ -0,0 +1,3 @@ +- `[protoio]` Remove one allocation and one object creation from `ReadMsg`, + leading to a 4% p2p message reading performance gain. + ([\#3018](https://github.com/cometbft/cometbft/issues/3018)) diff --git a/.changelog/unreleased/improvements/3036-confix.md b/.changelog/unreleased/improvements/3036-confix.md new file mode 100644 index 00000000000..8e4a2514ce9 --- /dev/null +++ b/.changelog/unreleased/improvements/3036-confix.md @@ -0,0 +1,3 @@ +- `[cmd/cometbft]` Add a `cometbft config` command to view, modify, and + upgrade configs across different versions + ([\#3036](https://github.com/cometbft/cometbft/pull/3036)) diff --git a/.changelog/unreleased/improvements/3056-export-node-package-errors.md b/.changelog/unreleased/improvements/3056-export-node-package-errors.md new file mode 100644 index 00000000000..f8cd4f8234c --- /dev/null +++ b/.changelog/unreleased/improvements/3056-export-node-package-errors.md @@ -0,0 +1,2 @@ +- `[node]` Export node package errors + ([\#3056](https://github.com/cometbft/cometbft/pull/3056)) diff --git a/.changelog/unreleased/improvements/3057-use-embed-pkg-for-template-string.md b/.changelog/unreleased/improvements/3057-use-embed-pkg-for-template-string.md new file mode 100644 index 00000000000..9d6380a148a --- /dev/null +++ b/.changelog/unreleased/improvements/3057-use-embed-pkg-for-template-string.md @@ -0,0 +1,2 @@ +- `[config]` Use the `embed` package for the default template + ([\#3057](https://github.com/cometbft/cometbft/pull/3057)) diff --git a/.changelog/unreleased/improvements/3117-significantly-speedup-make-partset.md b/.changelog/unreleased/improvements/3117-significantly-speedup-make-partset.md new file mode 100644 index 00000000000..9a0c481c46b --- /dev/null +++ b/.changelog/unreleased/improvements/3117-significantly-speedup-make-partset.md @@ -0,0 +1,2 @@ +- `[types]` Significantly speed up `types.MakePartSet` and `types.AddPart`, which are used in creating a block proposal + ([\#3117](https://github.com/cometbft/cometbft/issues/3117))
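The `embed` entry (\#3057) above uses Go's standard `//go:embed` directive, which compiles a file's contents into the binary instead of keeping a large string literal in the source. A sketch of the mechanism; the file name `config-template.toml` is an assumption for illustration and must exist in the package directory for this to compile:

```go
package config

import _ "embed" // blank import activates the go:embed directive

// defaultTemplate holds the default config template, embedded at build time.
//
//go:embed config-template.toml
var defaultTemplate string
```

This keeps the template editable as a plain file while still shipping it inside the binary.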
diff --git a/.changelog/unreleased/improvements/3119-speedup-valset-GetByAddress-usages.md b/.changelog/unreleased/improvements/3119-speedup-valset-GetByAddress-usages.md new file mode 100644 index 00000000000..0b6e490d6f2 --- /dev/null +++ b/.changelog/unreleased/improvements/3119-speedup-valset-GetByAddress-usages.md @@ -0,0 +1,2 @@ +- `[types]` Add a new method `GetByAddressMut` to `ValSet`, which does not copy the returned validator. + ([\#3119](https://github.com/cometbft/cometbft/issues/3119)) diff --git a/.changelog/unreleased/improvements/3156-make-addvote-only-take-one-ps-mtx.md b/.changelog/unreleased/improvements/3156-make-addvote-only-take-one-ps-mtx.md new file mode 100644 index 00000000000..215295c2cfc --- /dev/null +++ b/.changelog/unreleased/improvements/3156-make-addvote-only-take-one-ps-mtx.md @@ -0,0 +1,2 @@ +- `[consensus]` Make Vote messages take only one peer-state mutex + ([\#3156](https://github.com/cometbft/cometbft/issues/3156)) diff --git a/.changelog/unreleased/improvements/3159-fix-taking-wlocks-instead-of-rlocks.md b/.changelog/unreleased/improvements/3159-fix-taking-wlocks-instead-of-rlocks.md new file mode 100644 index 00000000000..45819a52502 --- /dev/null +++ b/.changelog/unreleased/improvements/3159-fix-taking-wlocks-instead-of-rlocks.md @@ -0,0 +1,2 @@ +- `[consensus]` Fix some reactor messages taking write locks instead of read locks. + ([\#3159](https://github.com/cometbft/cometbft/issues/3159)) diff --git a/.changelog/unreleased/improvements/3162-reuse-internal-buffer-for-block-building.md b/.changelog/unreleased/improvements/3162-reuse-internal-buffer-for-block-building.md new file mode 100644 index 00000000000..72755e11c8b --- /dev/null +++ b/.changelog/unreleased/improvements/3162-reuse-internal-buffer-for-block-building.md @@ -0,0 +1,2 @@ +- `[consensus]` Reuse an internal buffer for block building to reduce memory allocation overhead. + ([\#3162](https://github.com/cometbft/cometbft/issues/3162)) diff --git a/.changelog/unreleased/improvements/3180-lower-broadcasts-consensus-overhead.md b/.changelog/unreleased/improvements/3180-lower-broadcasts-consensus-overhead.md new file mode 100644 index 00000000000..72515f8f44d --- /dev/null +++ b/.changelog/unreleased/improvements/3180-lower-broadcasts-consensus-overhead.md @@ -0,0 +1,2 @@ +- `[consensus]` Lower the consensus blocking overhead of broadcasts from `num_peers * process_creation_time` to `process_creation_time`. + ([\#3180](https://github.com/cometbft/cometbft/issues/3180)) diff --git a/.changelog/unreleased/improvements/3182-remove-switch-broadcast-return-channel.md b/.changelog/unreleased/improvements/3182-remove-switch-broadcast-return-channel.md new file mode 100644 index 00000000000..a3c043f5bf8 --- /dev/null +++ b/.changelog/unreleased/improvements/3182-remove-switch-broadcast-return-channel.md @@ -0,0 +1,2 @@ +- `[p2p]` Remove the unused return channel from `Switch#Broadcast` + ([\#3182](https://github.com/cometbft/cometbft/pull/3182)) diff --git a/.changelog/unreleased/improvements/3211-make-cs-reactor-no-longer-takes-cs-locks.md b/.changelog/unreleased/improvements/3211-make-cs-reactor-no-longer-takes-cs-locks.md new file mode 100644 index 00000000000..72fae64598e --- /dev/null +++ b/.changelog/unreleased/improvements/3211-make-cs-reactor-no-longer-takes-cs-locks.md @@ -0,0 +1,4 @@ +- `[consensus]` Make the consensus reactor no longer take the consensus lock when receiving packets. +Consensus will now update the reactor's view after every relevant change through the existing +synchronous event bus subscription. + ([\#3211](https://github.com/cometbft/cometbft/pull/3211))
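The lock-granularity entries above (\#3156, \#3159) come down to using `sync.RWMutex` correctly: read-only handlers take the shared read lock so they can run concurrently, while only mutations take the exclusive write lock. A minimal sketch; `peerState` and its fields are hypothetical simplifications, not the real reactor types:

```go
package main

import (
	"fmt"
	"sync"
)

// peerState guards its fields with an RWMutex.
type peerState struct {
	mtx    sync.RWMutex
	height int64
}

// Height only reads state, so the shared read lock suffices;
// many readers may hold it at once.
func (ps *peerState) Height() int64 {
	ps.mtx.RLock()
	defer ps.mtx.RUnlock()
	return ps.height
}

// SetHeight mutates state and therefore needs the exclusive write lock.
func (ps *peerState) SetHeight(h int64) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.height = h
}

func main() {
	ps := &peerState{}
	ps.SetHeight(10)
	fmt.Println(ps.Height())
}
```

Taking a write lock in a read-only path, as the fixed code did, needlessly serializes all readers.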
diff --git a/.changelog/unreleased/improvements/3303-mempool-iterator.md b/.changelog/unreleased/improvements/3303-mempool-iterator.md new file mode 100644 index 00000000000..aa1dcb9ad66 --- /dev/null +++ b/.changelog/unreleased/improvements/3303-mempool-iterator.md @@ -0,0 +1,3 @@ +- `[mempool]` New `Entry` and `Iterator` interfaces. New `CListIterator` data structure to iterate over + the mempool's CList instead of the methods `TxsFront` and `TxsWaitChan` + ([\#3303](https://github.com/cometbft/cometbft/pull/3303)). diff --git a/.changelog/unreleased/improvements/3314-mempool-update-consider-full-when-rechecking.md b/.changelog/unreleased/improvements/3314-mempool-update-consider-full-when-rechecking.md new file mode 100644 index 00000000000..1e308ec6040 --- /dev/null +++ b/.changelog/unreleased/improvements/3314-mempool-update-consider-full-when-rechecking.md @@ -0,0 +1,3 @@ +- `[mempool]` Before updating the mempool, consider it as full if rechecking is still in progress. + This will stop accepting transactions in the mempool if the node can't keep up with re-CheckTx. + ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) diff --git a/.changelog/unreleased/improvements/3342-improve-blockstore-caches.md b/.changelog/unreleased/improvements/3342-improve-blockstore-caches.md new file mode 100644 index 00000000000..3577640b04d --- /dev/null +++ b/.changelog/unreleased/improvements/3342-improve-blockstore-caches.md @@ -0,0 +1,3 @@ +- `[blockstore]` Use LRU caches for LoadBlockPart. Make the LoadBlockPart and LoadBlockCommit APIs + return mutative copies that the caller is expected not to modify. This saves on memory copying. + ([\#3342](https://github.com/cometbft/cometbft/issues/3342)) diff --git a/.changelog/unreleased/improvements/3346-buffer-secret-connection-writes.md b/.changelog/unreleased/improvements/3346-buffer-secret-connection-writes.md new file mode 100644 index 00000000000..a99728a82e1 --- /dev/null +++ b/.changelog/unreleased/improvements/3346-buffer-secret-connection-writes.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Speed up large secret connection writes by buffering the write to the underlying connection. + ([\#3346](https://github.com/cometbft/cometbft/pull/3346)) diff --git a/.changelog/unreleased/improvements/3353-e2e-runner-split-logs.md b/.changelog/unreleased/improvements/3353-e2e-runner-split-logs.md new file mode 100644 index 00000000000..2f7c747c889 --- /dev/null +++ b/.changelog/unreleased/improvements/3353-e2e-runner-split-logs.md @@ -0,0 +1,2 @@ +- `[e2e]` Add an option to the 'runner logs' command to output logs separately. + ([\#3353](https://github.com/cometbft/cometbft/pull/3353))
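The secret-connection write entry (\#3346) above batches many small logical writes into one write on the underlying connection. The standard-library `bufio.Writer` illustrates the same idea; the 32 KiB buffer size here is illustrative, not the value used by the p2p package:

```go
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	server, client := net.Pipe()

	// Drain the server side so writes on the client side can complete.
	go func() {
		buf := make([]byte, 64*1024)
		for {
			if _, err := server.Read(buf); err != nil {
				return
			}
		}
	}()

	w := bufio.NewWriterSize(client, 32*1024) // buffered writer over the conn
	for i := 0; i < 100; i++ {
		fmt.Fprintf(w, "chunk-%d;", i) // accumulates in memory, no I/O yet
	}
	w.Flush() // a single write call to the underlying connection
	client.Close()
}
```

One hundred small writes become one call into the connection, which matters even more when each write would otherwise be individually encrypted and framed.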
diff --git a/.changelog/unreleased/improvements/3382-single-send-monitor-per-packet.md b/.changelog/unreleased/improvements/3382-single-send-monitor-per-packet.md new file mode 100644 index 00000000000..efa5e3cc27f --- /dev/null +++ b/.changelog/unreleased/improvements/3382-single-send-monitor-per-packet.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Update the send monitor, used for send-rate limiting, once per batch of packets sent + ([\#3382](https://github.com/cometbft/cometbft/pull/3382)) diff --git a/.changelog/unreleased/improvements/3401-allow-dash-in-event-tags.md b/.changelog/unreleased/improvements/3401-allow-dash-in-event-tags.md new file mode 100644 index 00000000000..6de79f5e09a --- /dev/null +++ b/.changelog/unreleased/improvements/3401-allow-dash-in-event-tags.md @@ -0,0 +1,2 @@ +- `[libs/pubsub]` Allow dash (`-`) in event tags + ([\#3401](https://github.com/cometbft/cometbft/issues/3401)) diff --git a/.changelog/unreleased/improvements/3403-remove-pool-buffer-usage-in-secretconn.md b/.changelog/unreleased/improvements/3403-remove-pool-buffer-usage-in-secretconn.md new file mode 100644 index 00000000000..4069a79ef3b --- /dev/null +++ b/.changelog/unreleased/improvements/3403-remove-pool-buffer-usage-in-secretconn.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Remove the usage of a synchronous pool of buffers in secret connection, instead storing the buffer in the connection struct. This reduces synchronization-primitive usage, speeding up the code. + ([\#3403](https://github.com/cometbft/cometbft/issues/3403)) diff --git a/.changelog/unreleased/improvements/3407-make-hasvote-broadcast-use-trysend.md b/.changelog/unreleased/improvements/3407-make-hasvote-broadcast-use-trysend.md new file mode 100644 index 00000000000..57eb5417b19 --- /dev/null +++ b/.changelog/unreleased/improvements/3407-make-hasvote-broadcast-use-trysend.md @@ -0,0 +1,5 @@ +- `[consensus]` Make broadcasting `HasVote` and `HasProposalBlockPart` control + messages use `TrySend` instead of `Send`. This saves a notable amount of + overhead; those messages only prevent redundancy, are not critical, + and may be dropped without risk to the protocol. + ([\#3151](https://github.com/cometbft/cometbft/issues/3151)) diff --git a/.changelog/unreleased/improvements/3412-remove-heap-allocations-for-channel-wrapping-packets.md b/.changelog/unreleased/improvements/3412-remove-heap-allocations-for-channel-wrapping-packets.md new file mode 100644 index 00000000000..e28ef775583 --- /dev/null +++ b/.changelog/unreleased/improvements/3412-remove-heap-allocations-for-channel-wrapping-packets.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Remove several heap allocations per packet send, stemming from how we double-wrap packets prior to proto marshalling them in the connection layer. This change reduces the memory overhead and speeds up the code. + ([\#3423](https://github.com/cometbft/cometbft/issues/3423)) diff --git a/.changelog/unreleased/improvements/3416-docs-upgrade-guide-v1.md b/.changelog/unreleased/improvements/3416-docs-upgrade-guide-v1.md new file mode 100644 index 00000000000..527cac17146 --- /dev/null +++ b/.changelog/unreleased/improvements/3416-docs-upgrade-guide-v1.md @@ -0,0 +1,2 @@ +- `[docs]` Added an upgrade guide from CometBFT `v0.38.x` to `v1.0`. + ([\#4184](https://github.com/cometbft/cometbft/pull/4184))
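The `TrySend` entry above (\#3151) distinguishes a blocking send from a best-effort send. In Go terms the difference is a `select` with a `default` branch: if the peer's queue is full the message is simply dropped, which is acceptable for redundancy-prevention messages such as `HasVote`. A minimal sketch of the distinction:

```go
package main

import "fmt"

// trySend attempts a non-blocking send; it reports whether the message
// was enqueued. A full queue drops the message instead of blocking.
func trySend(queue chan<- []byte, msg []byte) bool {
	select {
	case queue <- msg:
		return true
	default:
		return false // queue full: drop rather than stall the caller
	}
}

func main() {
	queue := make(chan []byte, 1)
	fmt.Println(trySend(queue, []byte("has-vote"))) // true: enqueued
	fmt.Println(trySend(queue, []byte("has-vote"))) // false: queue full, dropped
}
```

A blocking `Send` would park the broadcasting goroutine on a slow peer; `TrySend` keeps it moving at the cost of occasionally dropping a non-critical message.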
diff --git a/.changelog/unreleased/improvements/3419-buffer-secret-connection-reads.md b/.changelog/unreleased/improvements/3419-buffer-secret-connection-reads.md new file mode 100644 index 00000000000..866d7a2312b --- /dev/null +++ b/.changelog/unreleased/improvements/3419-buffer-secret-connection-reads.md @@ -0,0 +1,2 @@ +- `[p2p/conn]` Speed up large secret connection packet reads by buffering the read to the underlying connection. + ([\#3419](https://github.com/cometbft/cometbft/pull/3419)) diff --git a/.changelog/unreleased/improvements/3458-txindex-search-optimization.md b/.changelog/unreleased/improvements/3458-txindex-search-optimization.md new file mode 100644 index 00000000000..37afa294816 --- /dev/null +++ b/.changelog/unreleased/improvements/3458-txindex-search-optimization.md @@ -0,0 +1,2 @@ +- `[state/txindex]` Optimize transaction index search + ([\#3458](https://github.com/cometbft/cometbft/pull/3458)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/3513-e2e-key-types.md b/.changelog/unreleased/improvements/3513-e2e-key-types.md new file mode 100644 index 00000000000..1fcb14cea9f --- /dev/null +++ b/.changelog/unreleased/improvements/3513-e2e-key-types.md @@ -0,0 +1,2 @@ +- `[e2e]` Add support for testing different key types, including BLS + ([\#3513](https://github.com/cometbft/cometbft/pull/3513)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/3519-p2p-reconnect-interval-1day.md b/.changelog/unreleased/improvements/3519-p2p-reconnect-interval-1day.md new file mode 100644 index 00000000000..b6efc0182fa --- /dev/null +++ b/.changelog/unreleased/improvements/3519-p2p-reconnect-interval-1day.md @@ -0,0 +1,2 @@ +- `[p2p]` Fix the exponential backoff logic so that reconnect retries extend to nearly 24 hours + ([\#3519](https://github.com/cometbft/cometbft/issues/3519)) diff --git a/.changelog/unreleased/improvements/3595-config-remove-genesishash.md b/.changelog/unreleased/improvements/3595-config-remove-genesishash.md new file mode 100644 index 00000000000..a7b67cfbfbe --- /dev/null +++ b/.changelog/unreleased/improvements/3595-config-remove-genesishash.md @@ -0,0 +1,2 @@ +- `[config]` Remove unused `GenesisHash` flag + ([\#3595](https://github.com/cometbft/cometbft/pull/3595)) diff --git a/.changelog/unreleased/improvements/3658-mempool-dont-send-removed-tx.md b/.changelog/unreleased/improvements/3658-mempool-dont-send-removed-tx.md new file mode 100644 index 00000000000..46cc80ee852 --- /dev/null +++ b/.changelog/unreleased/improvements/3658-mempool-dont-send-removed-tx.md @@ -0,0 +1,2 @@ +- `[mempool]` Check whether a tx is still in the mempool just before sending it to a peer. + ([\#3658](https://github.com/cometbft/cometbft/issues/3658)) diff --git a/.changelog/unreleased/improvements/3819-e2e-log-level.md b/.changelog/unreleased/improvements/3819-e2e-log-level.md new file mode 100644 index 00000000000..b0726fb319e --- /dev/null +++ b/.changelog/unreleased/improvements/3819-e2e-log-level.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `log_level` option to manifest file + ([#3819](https://github.com/cometbft/cometbft/pull/3819)).
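The reconnect entry above (\#3519) concerns exponential backoff whose cumulative retry window approaches 24 hours. A sketch of the arithmetic; the base delay and cap are illustrative constants, not the values used by the p2p package:

```go
package main

import (
	"fmt"
	"time"
)

// backoff returns the delay before reconnect attempt n: it doubles each
// time and is capped so retries keep happening rather than overflowing.
func backoff(attempt int) time.Duration {
	base := 5 * time.Second
	maxDelay := 6 * time.Hour
	d := base << uint(attempt) // 5s, 10s, 20s, ...
	if d > maxDelay || d < 0 { // d < 0 guards against shift overflow
		return maxDelay
	}
	return d
}

func main() {
	var total time.Duration
	for i := 0; total < 24*time.Hour; i++ {
		d := backoff(i)
		total += d
		fmt.Printf("attempt %2d: wait %v (cumulative %v)\n", i, d, total)
	}
}
```

The bug class being fixed is a backoff that stops or overflows too early, so a node gives up on a peer long before the intended ~24-hour window has elapsed.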
diff --git a/.changelog/unreleased/improvements/3836-e2e-log-format.md b/.changelog/unreleased/improvements/3836-e2e-log-format.md new file mode 100644 index 00000000000..6e1e50a8c4d --- /dev/null +++ b/.changelog/unreleased/improvements/3836-e2e-log-format.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `log_format` option to manifest file + ([#3836](https://github.com/cometbft/cometbft/issues/3836)). diff --git a/.changelog/unreleased/improvements/3880-e2e-generator-debug.md b/.changelog/unreleased/improvements/3880-e2e-generator-debug.md new file mode 100644 index 00000000000..44d4b5640c2 --- /dev/null +++ b/.changelog/unreleased/improvements/3880-e2e-generator-debug.md @@ -0,0 +1,2 @@ +- `[e2e]` Add a log level option to the e2e generator + ([\#3880](https://github.com/cometbft/cometbft/issues/3880)) diff --git a/.changelog/unreleased/improvements/3891-mempool-and-state-metrics.md b/.changelog/unreleased/improvements/3891-mempool-and-state-metrics.md new file mode 100644 index 00000000000..6816de64538 --- /dev/null +++ b/.changelog/unreleased/improvements/3891-mempool-and-state-metrics.md @@ -0,0 +1,3 @@ +- `[metrics]` Added metrics `RecheckDuration` (mempool) and `FireBlockEventsDelaySeconds` (state); + removed `BlockProcessingTime` as it is redundant and misleading. + ([\#3891](https://github.com/cometbft/cometbft/pull/3891)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/4019-mempool-metric-evicted-txs.md b/.changelog/unreleased/improvements/4019-mempool-metric-evicted-txs.md new file mode 100644 index 00000000000..420cbf2c5a8 --- /dev/null +++ b/.changelog/unreleased/improvements/4019-mempool-metric-evicted-txs.md @@ -0,0 +1,2 @@ +- `[metrics]` Add `evicted_txs` metric to mempool + ([\#4019](https://github.com/cometbft/cometbft/pull/4019)) diff --git a/.changelog/unreleased/improvements/4123-mempool-is-full-log.md b/.changelog/unreleased/improvements/4123-mempool-is-full-log.md new file mode 100644 index 00000000000..68f187e658d --- /dev/null +++ b/.changelog/unreleased/improvements/4123-mempool-is-full-log.md @@ -0,0 +1,2 @@ +- `[log]` Change "mempool is full" log to debug level + ([\#4123](https://github.com/cometbft/cometbft/pull/4123)) diff --git a/.changelog/unreleased/improvements/4235-4234-environment-stores-one-genesis.md b/.changelog/unreleased/improvements/4235-4234-environment-stores-one-genesis.md new file mode 100644 index 00000000000..0535fedf817 --- /dev/null +++ b/.changelog/unreleased/improvements/4235-4234-environment-stores-one-genesis.md @@ -0,0 +1 @@ +- `[rpc]` Store either a pointer to a `GenesisDoc` or the genesis file's chunks, but not both, in `Environment` (RPC API) ([\#4235](https://github.com/cometbft/cometbft/pull/4235)) diff --git a/.changelog/unreleased/improvements/4281-use-embed-pkg-for-template-string.md b/.changelog/unreleased/improvements/4281-use-embed-pkg-for-template-string.md new file mode 100644 index 00000000000..e77515ae1d2 --- /dev/null +++ b/.changelog/unreleased/improvements/4281-use-embed-pkg-for-template-string.md @@ -0,0 +1,2 @@ +- `[config]` Write the mempool type dynamically when generating the config + ([\#4281](https://github.com/cometbft/cometbft/pull/4281)) diff --git a/.changelog/unreleased/improvements/4349-optimize-genesis-file-chunking.md b/.changelog/unreleased/improvements/4349-optimize-genesis-file-chunking.md new file mode 100644 index 00000000000..d0637a28a5f --- /dev/null +++ b/.changelog/unreleased/improvements/4349-optimize-genesis-file-chunking.md @@ -0,0 +1,2 @@ +- Node no longer stores the genesis document in memory after startup + ([\#4349](https://github.com/cometbft/cometbft/pull/4349)) \ No newline at end of file
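The `evicted_txs` entry above (\#4019) adds a monotonically increasing counter. A sketch using the plain Prometheus Go client; CometBFT actually wires its metrics through go-kit, so the plumbing and names below are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// evictedTxs counts transactions evicted from the mempool.
var evictedTxs = prometheus.NewCounter(prometheus.CounterOpts{
	Namespace: "cometbft",
	Subsystem: "mempool",
	Name:      "evicted_txs",
	Help:      "Number of transactions evicted from the mempool.",
})

func main() {
	prometheus.MustRegister(evictedTxs)
	evictedTxs.Inc() // call at each eviction site
	fmt.Println("evicted_txs incremented")
}
```

Operators can then alert on the counter's rate to detect a mempool that is evicting transactions under sustained load.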
diff --git a/.changelog/unreleased/improvements/4351-e2e-kill-perturbation-timeout.md b/.changelog/unreleased/improvements/4351-e2e-kill-perturbation-timeout.md new file mode 100644 index 00000000000..4f3deec6eda --- /dev/null +++ b/.changelog/unreleased/improvements/4351-e2e-kill-perturbation-timeout.md @@ -0,0 +1,2 @@ +- `[e2e]` Increase the timeout value during a `kill` node perturbation + ([\#4351](https://github.com/cometbft/cometbft/pull/4351)) diff --git a/.changelog/unreleased/improvements/4452-config-log-colors.md b/.changelog/unreleased/improvements/4452-config-log-colors.md new file mode 100644 index 00000000000..59c2b4fb28c --- /dev/null +++ b/.changelog/unreleased/improvements/4452-config-log-colors.md @@ -0,0 +1,3 @@ +- `[config]` Added `log_colors` boolean parameter. + Defaults to `true`, which is the current behaviour. + ([\#4452](https://github.com/cometbft/cometbft/pull/4452)) diff --git a/.changelog/unreleased/improvements/4484-v1keylayout-sprintf.md b/.changelog/unreleased/improvements/4484-v1keylayout-sprintf.md new file mode 100644 index 00000000000..035286c838f --- /dev/null +++ b/.changelog/unreleased/improvements/4484-v1keylayout-sprintf.md @@ -0,0 +1 @@ +- `[state]` `v1LegacyLayout` keys no longer use `fmt.Sprintf` ([\#4484](https://github.com/cometbft/cometbft/pull/4484)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/4489-blockstore-v1LegacyLayout-sprintf.md b/.changelog/unreleased/improvements/4489-blockstore-v1LegacyLayout-sprintf.md new file mode 100644 index 00000000000..06804cbc10e --- /dev/null +++ b/.changelog/unreleased/improvements/4489-blockstore-v1LegacyLayout-sprintf.md @@ -0,0 +1,2 @@ +- `[store]` `v1LegacyLayout` keys no longer use `fmt.Sprintf` + ([\#4489](https://github.com/cometbft/cometbft/pull/4489)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/4490-v1LegacyLayout.md b/.changelog/unreleased/improvements/4490-v1LegacyLayout.md new file mode 100644 index 00000000000..d54cc5acb16 --- /dev/null +++ b/.changelog/unreleased/improvements/4490-v1LegacyLayout.md @@ -0,0 +1,2 @@ +- `[state]`, `[store]`, `[light/store/db]` optimized `v1LegacyLayout` key generation to be faster and without heap + allocations ([\#4490](https://github.com/cometbft/cometbft/pull/4490)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/658-adr-102-rpc-companion.md b/.changelog/unreleased/improvements/658-adr-102-rpc-companion.md new file mode 100644 index 00000000000..d75828fa662 --- /dev/null +++ b/.changelog/unreleased/improvements/658-adr-102-rpc-companion.md @@ -0,0 +1,2 @@ +- `[docs/references]` Added ADR-102: RPC Companion.
+ ([\#658](https://github.com/cometbft/cometbft/pull/658)) diff --git a/.changelog/unreleased/improvements/955-lightclient-spec.md b/.changelog/unreleased/improvements/955-lightclient-spec.md index 24680c39dc4..5fa4ebd6a3c 100644 --- a/.changelog/unreleased/improvements/955-lightclient-spec.md +++ b/.changelog/unreleased/improvements/955-lightclient-spec.md @@ -1 +1 @@ -- Update Apalache type annotations in the light client spec ([#955](https://github.com/cometbft/cometbft/pull/955)) +- `[spec]` Update Apalache type annotations in the light client spec ([#955](https://github.com/cometbft/cometbft/pull/955)) diff --git a/.changelog/unreleased/minimum-go-version/1244-bump-go-version.md b/.changelog/unreleased/minimum-go-version/1244-bump-go-version.md deleted file mode 100644 index e04bb31d4e3..00000000000 --- a/.changelog/unreleased/minimum-go-version/1244-bump-go-version.md +++ /dev/null @@ -1,2 +0,0 @@ -- Bump minimum Go version to v1.21 - ([\#1244](https://github.com/cometbft/cometbft/pull/1244)) \ No newline at end of file diff --git a/.changelog/unreleased/summary.md b/.changelog/unreleased/summary.md new file mode 100644 index 00000000000..189a151ca17 --- /dev/null +++ b/.changelog/unreleased/summary.md @@ -0,0 +1,53 @@ +*November 20, 2024* + +This is a major release of CometBFT that includes several substantial changes +that aim to reduce bandwidth consumption, enable modularity, improve +integrators' experience and increase the velocity of the CometBFT development +team, including: + +1. Proposer-Based Timestamps (PBTS) support. PBTS is a Byzantine fault-tolerant + algorithm used by CometBFT for computing block times. + When activated on a chain, it replaces the pre-existing BFT-time algorithm. + See [spec](./spec/consensus/proposer-based-timestamp) doc for PBTS. +2. Validators now proactively communicate the block parts they already have so + others do not resend them, reducing amplification in the network and reducing + bandwidth consumption. +3. An experimental feature in the mempool that allows limiting the number of + peers to which transactions are forwarded, allowing operators to optimize + gossip-related bandwidth consumption further. +4. An opt-in `nop` mempool, which allows application developers to turn off all + mempool-related functionality in Comet such that they can build their own + transaction dissemination mechanism, for example a standalone mempool-like + process that can be scaled independently of the consensus engine/application. + This requires application developers to implement their own gossip/networking + mechanisms. See [ADR 111](./docs/architecture/adr-111-nop-mempool.md) for + details. +5. The first officially supported release of the [data companion + API](./docs/architecture/adr-101-data-companion-pull-api.md). +6. Versioning of both the Protobuf definitions _and_ RPC. By versioning our + APIs, we aim to provide a level of commitment to API stability while + simultaneously affording ourselves the ability to roll out substantial + changes in non-breaking releases of CometBFT. See [ADR + 103](./docs/architecture/adr-103-proto-versioning.md) and [ADR + 107](./docs/architecture/adr-107-betaize-proto-versions.md). +7. Moving many Go packages that are currently publicly accessible into the + `internal` directory such that the team can roll out substantial changes in + future without needing to worry about causing breakages in users' codebases. 
+ The massive surface area of previous versions has in the past significantly + hampered the team's ability to roll out impactful new changes to users, as + previously such changes required a new breaking release (which currently + takes 6 to 12 months to reach production use for many users). See [ADR + 109](./docs/architecture/adr-109-reduce-go-api-surface.md) for more details. + +None of these changes are state machine-breaking for CometBFT-based networks, +but could be breaking for some users who depend on the Protobuf definitions' type +URLs. + +See the [upgrading guidelines](./UPGRADING.md) and the specific changes below for more details. In this release, +we are also introducing a migration guide; please refer to the +[Upgrading from CometBFT v0.38.x to v1.0](./docs/guides/upgrades/v0.38-to-v1.0.md) document. + +**NB: This version is still a release candidate, which means that +API-breaking changes, although very unlikely, might still be introduced +before the final release.** See [RELEASES.md](./RELEASES.md) for more information on +the stability guarantees we provide for pre-releases. diff --git a/.changelog/v0.34.28/breaking-changes/558-tm10011.md b/.changelog/v0.34.28/breaking-changes/558-tm10011.md deleted file mode 100644 index d1b9fca4aba..00000000000 --- a/.changelog/v0.34.28/breaking-changes/558-tm10011.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root). `Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic - ([\#558](https://github.com/cometbft/cometbft/issues/558)) diff --git a/.changelog/v0.34.28/bug-fixes/496-error-on-applyblock-should-panic.md b/.changelog/v0.34.28/bug-fixes/496-error-on-applyblock-should-panic.md deleted file mode 100644 index 55e9c874f8c..00000000000 --- a/.changelog/v0.34.28/bug-fixes/496-error-on-applyblock-should-panic.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`. - ([\#496](https://github.com/cometbft/cometbft/pull/496)) \ No newline at end of file diff --git a/.changelog/v0.34.28/bug-fixes/524-rename-peerstate-tojson.md b/.changelog/v0.34.28/bug-fixes/524-rename-peerstate-tojson.md deleted file mode 100644 index b9a43b3ce4e..00000000000 --- a/.changelog/v0.34.28/bug-fixes/524-rename-peerstate-tojson.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race - ([\#524](https://github.com/cometbft/cometbft/pull/524)) diff --git a/.changelog/v0.34.28/bug-fixes/575-fix-light-client-panic.md b/.changelog/v0.34.28/bug-fixes/575-fix-light-client-panic.md deleted file mode 100644 index 0ec55b923fb..00000000000 --- a/.changelog/v0.34.28/bug-fixes/575-fix-light-client-panic.md +++ /dev/null @@ -1,6 +0,0 @@ -- `[light]` Fixed an edge case where a light client would panic when attempting - to query a node that (1) has started from a non-zero height and (2) does
The light client will now, correctly, not panic - _and_ keep the node in its list of providers in the same way it would if - it queried a node starting from height zero that does not yet have data - ([\#575](https://github.com/cometbft/cometbft/issues/575)) \ No newline at end of file diff --git a/.changelog/v0.34.28/improvements/475-upgrade-go-schnorrkel.md b/.changelog/v0.34.28/improvements/475-upgrade-go-schnorrkel.md deleted file mode 100644 index bdaf96c14cf..00000000000 --- a/.changelog/v0.34.28/improvements/475-upgrade-go-schnorrkel.md +++ /dev/null @@ -1 +0,0 @@ -- `[crypto/sr25519]` Upgrade to go-schnorrkel@v1.0.0 ([\#475](https://github.com/cometbft/cometbft/issues/475)) diff --git a/.changelog/v0.34.28/improvements/638-json-rpc-error-message.md b/.changelog/v0.34.28/improvements/638-json-rpc-error-message.md deleted file mode 100644 index 6922091fd25..00000000000 --- a/.changelog/v0.34.28/improvements/638-json-rpc-error-message.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[jsonrpc/client]` Improve the error message for client errors stemming from - bad HTTP responses. - ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638)) diff --git a/.changelog/v0.34.28/summary.md b/.changelog/v0.34.28/summary.md deleted file mode 100644 index ba3efa9d79e..00000000000 --- a/.changelog/v0.34.28/summary.md +++ /dev/null @@ -1,6 +0,0 @@ -*April 26, 2023* - -This release fixes several bugs, and has had to introduce one small Go -API-breaking change in the `crypto/merkle` package in order to address what -could be a security issue for some users who directly and explicitly make use of -that code. diff --git a/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md deleted file mode 100644 index 4a0000db6d3..00000000000 --- a/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[state/kvindex]` Querying event attributes that are bigger than int64 is now - enabled. ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md deleted file mode 100644 index fc5f25a90ff..00000000000 --- a/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md +++ /dev/null @@ -1,4 +0,0 @@ -- `[pubsub]` Pubsub queries are now able to parse big integers (larger than - int64). Very big floats are also properly parsed into very big integers - instead of being truncated to int64. 
- ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md b/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md deleted file mode 100644 index 3fddfee8e71..00000000000 --- a/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[rpc]` Remove response data from response failure logs in order - to prevent large quantities of log data from being produced - ([\#654](https://github.com/cometbft/cometbft/issues/654)) \ No newline at end of file diff --git a/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md b/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md deleted file mode 100644 index 430b7b5ac4b..00000000000 --- a/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC - client credentials from being inadvertently dumped to logs - ([\#788](https://github.com/cometbft/cometbft/pull/788)) diff --git a/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md b/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md deleted file mode 100644 index 782eccd9d58..00000000000 --- a/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in - `debug kill` command ([\#794](https://github.com/cometbft/cometbft/pull/794)) diff --git a/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md b/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md deleted file mode 100644 index fdd9172c209..00000000000 --- a/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[consensus]` **Low severity** - Avoid recursive call after rename to - `(*PeerState).MarshalJSON` - ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md b/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md deleted file mode 100644 index bad30efc7ab..00000000000 --- a/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from - appearing twice in the mempool - ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) diff --git a/.changelog/v0.34.29/summary.md b/.changelog/v0.34.29/summary.md deleted file mode 100644 index 7ecb2739409..00000000000 --- a/.changelog/v0.34.29/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -*June 14, 2023* - -Provides several minor bug fixes, as well as fixes for several low-severity -security issues. diff --git a/.changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode b/.changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode.md similarity index 100% rename from .changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode rename to .changelog/v0.37.0/breaking-changes/409-deprecate-blocksyncmode.md diff --git a/.changelog/v0.37.1/breaking-changes/558-tm10011.md b/.changelog/v0.37.1/breaking-changes/558-tm10011.md deleted file mode 100644 index d1b9fca4aba..00000000000 --- a/.changelog/v0.37.1/breaking-changes/558-tm10011.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root). 
`Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic - ([\#558](https://github.com/cometbft/cometbft/issues/558)) diff --git a/.changelog/v0.37.1/bug-fixes/496-error-on-applyblock-should-panic.md b/.changelog/v0.37.1/bug-fixes/496-error-on-applyblock-should-panic.md deleted file mode 100644 index 55e9c874f8c..00000000000 --- a/.changelog/v0.37.1/bug-fixes/496-error-on-applyblock-should-panic.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`. - ([\#496](https://github.com/cometbft/cometbft/pull/496)) \ No newline at end of file diff --git a/.changelog/v0.37.1/bug-fixes/524-rename-peerstate-tojson.md b/.changelog/v0.37.1/bug-fixes/524-rename-peerstate-tojson.md deleted file mode 100644 index b9a43b3ce4e..00000000000 --- a/.changelog/v0.37.1/bug-fixes/524-rename-peerstate-tojson.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race - ([\#524](https://github.com/cometbft/cometbft/pull/524)) diff --git a/.changelog/v0.37.1/bug-fixes/575-fix-light-client-panic.md b/.changelog/v0.37.1/bug-fixes/575-fix-light-client-panic.md deleted file mode 100644 index 0ec55b923fb..00000000000 --- a/.changelog/v0.37.1/bug-fixes/575-fix-light-client-panic.md +++ /dev/null @@ -1,6 +0,0 @@ -- `[light]` Fixed an edge case where a light client would panic when attempting - to query a node that (1) has started from a non-zero height and (2) does - not yet have any data. The light client will now, correctly, not panic - _and_ keep the node in its list of providers in the same way it would if - it queried a node starting from height zero that does not yet have data - ([\#575](https://github.com/cometbft/cometbft/issues/575)) \ No newline at end of file diff --git a/.changelog/v0.37.1/improvements/638-json-rpc-error-message.md b/.changelog/v0.37.1/improvements/638-json-rpc-error-message.md deleted file mode 100644 index 6922091fd25..00000000000 --- a/.changelog/v0.37.1/improvements/638-json-rpc-error-message.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[jsonrpc/client]` Improve the error message for client errors stemming from - bad HTTP responses. - ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638)) diff --git a/.changelog/v0.37.1/summary.md b/.changelog/v0.37.1/summary.md deleted file mode 100644 index ba3efa9d79e..00000000000 --- a/.changelog/v0.37.1/summary.md +++ /dev/null @@ -1,6 +0,0 @@ -*April 26, 2023* - -This release fixes several bugs, and has had to introduce one small Go -API-breaking change in the `crypto/merkle` package in order to address what -could be a security issue for some users who directly and explicitly make use of -that code. diff --git a/.changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md deleted file mode 100644 index ba19adbc8ba..00000000000 --- a/.changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md +++ /dev/null @@ -1,4 +0,0 @@ -- `[state/kvindex]` Querying event attributes that are bigger than int64 is now - enabled. We are not supporting reading floats from the db into the indexer - nor parsing them into BigFloats to not introduce breaking changes in minor - releases. 
([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md deleted file mode 100644 index fc5f25a90ff..00000000000 --- a/.changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md +++ /dev/null @@ -1,4 +0,0 @@ -- `[pubsub]` Pubsub queries are now able to parse big integers (larger than - int64). Very big floats are also properly parsed into very big integers - instead of being truncated to int64. - ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.37.2/improvements/654-rpc-rm-response-data-logs.md b/.changelog/v0.37.2/improvements/654-rpc-rm-response-data-logs.md deleted file mode 100644 index 3fddfee8e71..00000000000 --- a/.changelog/v0.37.2/improvements/654-rpc-rm-response-data-logs.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[rpc]` Remove response data from response failure logs in order - to prevent large quantities of log data from being produced - ([\#654](https://github.com/cometbft/cometbft/issues/654)) \ No newline at end of file diff --git a/.changelog/v0.37.2/security-fixes/787-rpc-client-pw.md b/.changelog/v0.37.2/security-fixes/787-rpc-client-pw.md deleted file mode 100644 index 209b799d9ad..00000000000 --- a/.changelog/v0.37.2/security-fixes/787-rpc-client-pw.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC - client credentials from being inadvertently dumped to logs - ([\#787](https://github.com/cometbft/cometbft/pull/787)) \ No newline at end of file diff --git a/.changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md b/.changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md deleted file mode 100644 index 7482a5ae039..00000000000 --- a/.changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in - `debug kill` command ([\#793](https://github.com/cometbft/cometbft/pull/793)) \ No newline at end of file diff --git a/.changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md b/.changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md deleted file mode 100644 index fdd9172c209..00000000000 --- a/.changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[consensus]` **Low severity** - Avoid recursive call after rename to - `(*PeerState).MarshalJSON` - ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/.changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md b/.changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md deleted file mode 100644 index bad30efc7ab..00000000000 --- a/.changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from - appearing twice in the mempool - ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) diff --git a/.changelog/v0.37.2/summary.md b/.changelog/v0.37.2/summary.md deleted file mode 100644 index 7ecb2739409..00000000000 --- a/.changelog/v0.37.2/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -*June 14, 2023* - -Provides several minor bug fixes, as well as fixes for several low-severity -security issues. 
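The pubsub and kvindexer entries above (\#771) both hinge on parsing numbers as arbitrary-precision integers so that values beyond `int64` are not truncated. The standard library's `math/big` handles this directly; the function below is a sketch of the idea, not the actual query-parser code:

```go
package main

import (
	"fmt"
	"math/big"
)

// parseNumber parses a base-10 integer of arbitrary size.
func parseNumber(s string) (*big.Int, error) {
	n, ok := new(big.Int).SetString(s, 10)
	if !ok {
		return nil, fmt.Errorf("not a base-10 integer: %q", s)
	}
	return n, nil
}

func main() {
	n, err := parseNumber("92233720368547758080000") // well beyond max int64
	if err != nil {
		panic(err)
	}
	fmt.Println(n.String())
}
```

Comparisons on `*big.Int` values (via `Cmp`) then behave correctly for event attributes of any magnitude.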
diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 00000000000..7472204e3cd --- /dev/null +++ b/.codespellrc @@ -0,0 +1,4 @@ +[codespell] +skip = *.sum,go.mod,LOG,*/consensus-paper/*,*/grammar-auto/*,*.qnt,*.tla,.github/workflows/fuzz-nightly.yml,.github/workflows/ignore-words.txt +quiet-level = 2 +ignore-words = .github/workflows/ignore-words.txt diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 852aa44b461..ec7ed28b18f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,25 +1,3 @@ - --- @@ -28,4 +6,3 @@ https://github.com/orgs/cometbft/projects/1 - [ ] Tests written/updated - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments - diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 02f10f286ae..04e83f70eac 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,6 +10,16 @@ updates: - dependencies - automerge + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + target-branch: "v1.x" + open-pull-requests-limit: 10 + labels: + - dependencies + - automerge + - package-ecosystem: github-actions directory: "/" schedule: @@ -57,11 +67,19 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: weekly + target-branch: "v1.x" + open-pull-requests-limit: 10 + labels: + - dependencies + - automerge + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: monthly target-branch: "v0.38.x" - # Only allow automated security-related dependency updates on release - # branches. - open-pull-requests-limit: 0 + open-pull-requests-limit: 10 labels: - dependencies - automerge @@ -69,11 +87,9 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: monthly target-branch: "v0.37.x" - # Only allow automated security-related dependency updates on release - # branches. - open-pull-requests-limit: 0 + open-pull-requests-limit: 10 labels: - dependencies - automerge @@ -81,11 +97,9 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: daily + interval: monthly target-branch: "v0.34.x" - # Only allow automated security-related dependency updates on release - # branches. 
- open-pull-requests-limit: 0 + open-pull-requests-limit: 10 labels: - dependencies - automerge diff --git a/.github/mergify.yml b/.github/mergify.yml index 0e4412841e7..1f91649785b 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -1,4 +1,112 @@ pull_request_rules: + - name: merge PR (v0.34.x) + description: merge PR if it passes tests and there are no conflicts (v0.34.x) + conditions: + - '-label=manual-backport' + - author=mergify[bot] + - head~=mergify/bp/ + - base=v0.34.x + - check-success=check-mocks + - check-success=golangci-lint + - check-success=Super linter + - check-success=split-test-files + - check-success=cleanup-runs + - check-success=e2e-test + - check-success=check-proto + - check-success=Build + - check-success=tests (00) + - check-success=tests (01) + - check-success=tests (02) + - check-success=tests (03) + - check-success=test_abci_cli + - check-success=test_apps + actions: + merge: + method: squash + - name: merge PR (v0.37.x and v0.38.x) + description: merge PR if it passes tests and there are no conflicts (v0.37.x and v0.38.x) + conditions: + - '-label=manual-backport' + - author=mergify[bot] + - head~=mergify/bp/ + - '-base=v1.x' + - '-base=v0.34.x' + - check-success=Build (arm, linux) + - check-success=check + - check-success=golangci-lint + - check-success=e2e-test + - check-success=tests (00) + - check-success=tests (01) + - check-success=tests (02) + - check-success=tests (03) + - check-success=tests (04) + - check-success=tests (05) + - check-success=test_abci_cli + - check-success=test_apps + actions: + merge: + method: squash + - name: merge PR (v1.x) + description: merge PR if it passes tests and there are no conflicts (v1.x) + conditions: + - '-label=manual-backport' + - author=mergify[bot] + - head~=mergify/bp/ + - base=v1.x + - check-success=Build (arm, linux) + - check-success=check + - check-success=check-mocks-metrics + - check-success=lint + - check-success=e2e-test + - check-success=tests (00) + - check-success=tests (01) + - check-success=tests (02) + - check-success=tests (03) + - check-success=tests (04) + - check-success=tests (05) + - check-success=test_abci_cli + - check-success=test_apps + - check-success=check-proto + actions: + merge: + method: squash + + - name: automatic approval for Dependabot pull requests + conditions: + - author=dependabot[bot] + actions: + review: + type: APPROVE + message: Automatically approving dependabot + + - name: automatically merge PR with automerge label and no manual-backport label + conditions: + - '-label=manual-backport' + - label=automerge + actions: + merge: + method: squash + + - name: Make sure PR are up to date before merging + description: >- + This automatically updates PRs when they are out-of-date with the base + branch to avoid semantic conflicts (next step is using a merge queue). 
+ conditions: + - '-draft' + - "#approved-reviews-by >= 1" + actions: + update: + + - name: backport patches to v1.x branch + conditions: + - base=main + - label=backport-to-v1.x + actions: + backport: + branches: + - v1.x + assignees: + - "{{ author }}" - name: backport patches to v0.38.x branch conditions: - base=main @@ -7,6 +115,8 @@ pull_request_rules: backport: branches: - v0.38.x + assignees: + - "{{ author }}" - name: backport patches to v0.37.x branch conditions: - base=main @@ -15,6 +125,8 @@ pull_request_rules: backport: branches: - v0.37.x + assignees: + - "{{ author }}" - name: backport patches to v0.34.x branch conditions: - base=main @@ -23,3 +135,5 @@ pull_request_rules: backport: branches: - v0.34.x + assignees: + - "{{ author }}" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d278abf627c..52179087a02 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,10 @@ on: push: branches: - main - - release/** + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: build: @@ -21,64 +24,25 @@ jobs: goos: ["linux"] timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 - with: - go-version: "1.21" - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 + + - id: filter + uses: dorny/paths-filter@v3 with: - PATTERNS: | - **/*.go - "!test/" - go.mod - go.sum - Makefile + filters: | + code: + - '**/*.go' + - 'Makefile' + - 'go.*' - - name: install - run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build - if: "env.GIT_DIFF != ''" + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + if: steps.filter.outputs.code == 'true' - test_abci_cli: - runs-on: ubuntu-latest - needs: build - timeout-minutes: 5 - steps: - - uses: actions/setup-go@v4 - with: - go-version: "1.21" - - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 + - uses: actions/setup-go@v5 + if: steps.filter.outputs.code == 'true' with: - PATTERNS: | - **/*.go - go.mod - go.sum - - name: install - run: make install_abci - if: "env.GIT_DIFF != ''" - - run: abci/tests/test_cli/test.sh - shell: bash - if: "env.GIT_DIFF != ''" + go-version: ${{ env.GO_VERSION }} - test_apps: - runs-on: ubuntu-latest - needs: build - timeout-minutes: 5 - steps: - - uses: actions/setup-go@v4 - with: - go-version: "1.21" - - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 - with: - PATTERNS: | - **/*.go - go.mod - go.sum - name: install - run: make install install_abci - if: "env.GIT_DIFF != ''" - - name: test_apps - run: test/app/test.sh - shell: bash - if: "env.GIT_DIFF != ''" + if: steps.filter.outputs.code == 'true' + run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 81efc26c294..00830378b4b 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -3,37 +3,43 @@ # Note that we run these checks regardless whether the input files have # changed, because generated code can change in response to toolchain updates # even if no files in the repository are modified. 
+ name: Check generated code on: pull_request: - branches: - - main merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + permissions: contents: read jobs: - check-mocks: + check-mocks-metrics: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 - with: - go-version: "1.21" - - uses: actions/checkout@v4 - - name: "Check generated mocks" + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: "Check generated mocks and metrics" run: | set -euo pipefail - make mockery + make mockery metrics - if ! git diff --stat --exit-code ; then + git add . + if ! git diff HEAD --stat --exit-code ; then echo ">> ERROR:" echo ">>" - echo ">> Generated mocks require update (either Mockery or source files may have changed)." - echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR." + echo ">> Generated mocks and/or metrics require update (either Mockery or source files may have changed)." + echo ">> Ensure your tools are up-to-date, re-run 'make mockery metrics' and update this PR." echo ">>" git diff exit 1 @@ -42,13 +48,15 @@ jobs: check-proto: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/checkout@v4 with: - go-version: "1.21" + fetch-depth: 1 # we need a .git directory to run git diff - - uses: actions/checkout@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - fetch-depth: 1 # we need a .git directory to run git diff + go-version: ${{ env.GO_VERSION }} - name: "Check protobuf generated code" run: | @@ -56,7 +64,8 @@ jobs: make proto-gen - if ! git diff --stat --exit-code ; then + git add . + if ! git diff HEAD --stat --exit-code ; then echo ">> ERROR:" echo ">>" echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)." diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 40a318a5c8e..212dda39c50 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -12,13 +12,21 @@ name: "CodeQL" on: - workflow_dispatch: + workflow_dispatch: # allow running workflow manually push: - branches: ["main"] + branches: + - main + paths: + - "**.go" pull_request: - # The branches below must be a subset of the branches above - branches: ["main"] + branches: + - main + paths: + - "**.go" +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: analyze: @@ -44,7 +52,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -58,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -71,6 +79,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index 0d8f1adf8c7..5abbcd72c30 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v3.0.0 + uses: docker/setup-buildx-action@v3.7.1 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v3.0.0 + uses: docker/login-action@v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v5.0.0 + uses: docker/build-push-action@v6.9.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/conventional-pr-title.yml b/.github/workflows/conventional-pr-title.yml new file mode 100644 index 00000000000..d58e82f2c1a --- /dev/null +++ b/.github/workflows/conventional-pr-title.yml @@ -0,0 +1,65 @@ +name: "Conventional PR Title" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +permissions: + pull-requests: write + +jobs: + main: + name: Validate PR title + runs-on: ubuntu-latest + steps: + - uses: amannn/action-semantic-pull-request@v5 + id: lint_pr_title + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + types: | + feat + fix + build + chore + ci + docs + refactor + perf + test + revert + spec + merge + + - uses: marocchino/sticky-pull-request-comment@v2 + # When the previous steps fails, the workflow would stop. By adding this + # condition you can continue the execution with the populated error message. + if: always() && (steps.lint_pr_title.outputs.error_message != null) + with: + header: pr-title-lint-error + message: | + Hey there and thank you for opening this pull request! 👋🏼 + + We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted. + + Details: + + ``` + ${{ steps.lint_pr_title.outputs.error_message }} + ``` + + General format: `type(scope): msg` + Breaking change: `type(scope)!: msg` + Multi-scope change: `type: msg` + Types: `feat`, `fix`, `build`, `chore`, `ci`, `docs`, `refactor`, `perf`, `test`, `revert`, `spec`, `merge`. 
+ Example: `fix(cmd/cometbft/commands/debug): execute p.Signal only when p is not nil` + + # Delete a previous comment when the issue has been resolved + - if: ${{ steps.lint_pr_title.outputs.error_message == null }} + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: pr-title-lint-error + delete: true diff --git a/.github/workflows/docs-toc.yml b/.github/workflows/docs-toc.yml index f36b75d81eb..e6bddd1c9f3 100644 --- a/.github/workflows/docs-toc.yml +++ b/.github/workflows/docs-toc.yml @@ -2,19 +2,23 @@ name: Check documentation ToC on: pull_request: - push: - branches: - - main + paths: + - docs/architecture/** + - docs/rfc/** + push: + branches: + - main + paths: + - docs/architecture/** + - docs/rfc/** + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 - with: - PATTERNS: | - docs/architecture/** - docs/rfc/** - run: make check-docs-toc - if: env.GIT_DIFF diff --git a/.github/workflows/e2e-long-main.yml b/.github/workflows/e2e-long-main.yml index bc8b5b9f096..0f0d559cae9 100644 --- a/.github/workflows/e2e-long-main.yml +++ b/.github/workflows/e2e-long-main.yml @@ -13,12 +13,14 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: actions/checkout@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Build working-directory: test/e2e # Run make jobs in parallel, since we can't run steps in parallel. @@ -34,23 +36,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ github.ref_name }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Weekly long-run E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Weekly long-run E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." 
diff --git a/.github/workflows/e2e-manual-debug.yml b/.github/workflows/e2e-manual-debug.yml new file mode 100644 index 00000000000..18547575fcc --- /dev/null +++ b/.github/workflows/e2e-manual-debug.yml @@ -0,0 +1,45 @@ +# Manually runs randomly generated E2E testnets with debug logs +# (the p2p module is kept at info level, since its debug output is too verbose) +name: e2e-manual-debug +on: + workflow_dispatch: + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + group: ['00', '01', '02', '03', '04', '05'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. + run: make -j2 docker generator runner tests + + - name: Generate testnets + if: matrix.group != 5 + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 5 -d networks/nightly/ -p -l "*:debug,p2p:info" + + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml diff --git a/.github/workflows/e2e-manual-multiversion.yml b/.github/workflows/e2e-manual-multiversion.yml index 771c5675e49..18bea674ffe 100644 --- a/.github/workflows/e2e-manual-multiversion.yml +++ b/.github/workflows/e2e-manual-multiversion.yml @@ -11,15 +11,18 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03', '04'] + group: ['00', '01', '02', '03', '04', '05'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/checkout@v4 + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v4 - name: Build working-directory: test/e2e @@ -27,12 +30,19 @@ jobs: run: make -j2 docker generator runner tests - name: Generate testnets + if: matrix.group != 5 working-directory: test/e2e # When changing -g, also change the matrix groups above # Generate multi-version tests with double the quantity of E2E nodes # based on the current branch as compared to the latest version.
run: ./build/generator -g 5 -m "latest:1,local:2" -d networks/nightly/ -p - - name: Run ${{ matrix.p2p }} p2p testnets + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index fd809ebd541..90e898e040b 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -11,26 +11,35 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03', '04'] + group: ['00', '01', '02', '03', '04', '05'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: actions/checkout@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Build working-directory: test/e2e # Run make jobs in parallel, since we can't run steps in parallel. run: make -j2 docker generator runner tests - name: Generate testnets + if: matrix.group != 5 working-directory: test/e2e # When changing -g, also change the matrix groups above run: ./build/generator -g 5 -d networks/nightly/ -p - - name: Run ${{ matrix.p2p }} p2p testnets + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml diff --git a/.github/workflows/e2e-nightly-1x.yml b/.github/workflows/e2e-nightly-1x.yml new file mode 100644 index 00000000000..962d7d43560 --- /dev/null +++ b/.github/workflows/e2e-nightly-1x.yml @@ -0,0 +1,80 @@ +# Runs randomly generated E2E testnets nightly on the v1.x branch. + +# !! This file should be kept in sync with the e2e-nightly-main.yml file, +# modulo changes to the version labels. + +name: e2e-nightly-1x +on: + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + group: ['00', '01', '02', '03', '04', '05'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + with: + ref: 'v1.x' + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Capture git repo info + id: git-info + run: | + echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. 
+ run: make -j2 docker generator runner tests + + - name: Generate testnets + if: matrix.group != 5 + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 5 -d networks/nightly/ -p + + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml + + outputs: + git-branch: ${{ steps.git-info.outputs.branch }} + + e2e-nightly-fail: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: slackapi/slack-github-action@v2.0.0 + env: + BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} + RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 5c117231a21..ad14a93aa4e 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -19,9 +19,9 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.18' + go-version: '1.21' - uses: actions/checkout@v4 with: @@ -55,23 +55,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." 
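The Slack notification steps in these nightly workflows all migrate from `slackapi/slack-github-action` v1.24.0 to v2.0.0: the webhook moves from the `SLACK_WEBHOOK_URL`/`SLACK_WEBHOOK_TYPE` environment variables to the `webhook` and `webhook-type` inputs, and the payload can be written as YAML instead of JSON. A minimal sketch of the v2 shape these workflows converge on (the message text is illustrative):

```yaml
# Sketch of the slackapi/slack-github-action v2 invocation: the webhook is an
# input rather than an env var, and the payload is YAML rather than JSON.
- name: Notify Slack on failure
  uses: slackapi/slack-github-action@v2.0.0
  with:
    webhook: ${{ secrets.SLACK_WEBHOOK_URL }}
    webhook-type: incoming-webhook
    payload: |
      blocks:
        - type: "section"
          text:
            type: "mrkdwn"
            text: ":skull: Nightly E2E tests failed."
```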
diff --git a/.github/workflows/e2e-nightly-37x.yml b/.github/workflows/e2e-nightly-37x.yml index 8549ede868e..0f7e090ec2d 100644 --- a/.github/workflows/e2e-nightly-37x.yml +++ b/.github/workflows/e2e-nightly-37x.yml @@ -19,14 +19,15 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: actions/checkout@v4 with: ref: 'v0.37.x' + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} - name: Capture git repo info id: git-info run: | @@ -55,23 +56,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." 
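The `matrix.group != 5` / `matrix.group == 5` guards added across the e2e workflows rely on GitHub Actions expression semantics: when a string is compared with a number, the string is coerced to a number, so `'05' == 5` evaluates to true. Groups `00` through `04` therefore run freshly generated nightly manifests, while group `05` is reserved for the pre-defined regression manifests. A condensed sketch of the convention:

```yaml
# Sketch of the group-05 convention: '05' coerces to the number 5 in the
# if-expressions, so only that matrix entry runs the regression manifests.
- name: Generate testnets
  if: matrix.group != 5
  working-directory: test/e2e
  run: ./build/generator -g 5 -d networks/nightly/ -p

- name: Run p2p testnets (regression)
  if: matrix.group == 5
  working-directory: test/e2e
  run: ./run-multiple.sh networks_regressions/*.toml
```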
diff --git a/.github/workflows/e2e-nightly-38x.yml b/.github/workflows/e2e-nightly-38x.yml index b0bc761726e..dcaec55a90f 100644 --- a/.github/workflows/e2e-nightly-38x.yml +++ b/.github/workflows/e2e-nightly-38x.yml @@ -15,18 +15,19 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03', "04"] + group: ['00', '01', '02', '03', '04', '05'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: actions/checkout@v4 with: ref: 'v0.38.x' + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} - name: Capture git repo info id: git-info run: | @@ -38,14 +39,21 @@ jobs: run: make -j2 docker generator runner tests - name: Generate testnets + if: matrix.group != 5 working-directory: test/e2e # When changing -g, also change the matrix groups above run: ./build/generator -g 5 -d networks/nightly/ -p - - name: Run ${{ matrix.p2p }} p2p testnets + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml + outputs: git-branch: ${{ steps.git-info.outputs.branch }} @@ -55,23 +63,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." diff --git a/.github/workflows/e2e-nightly-main.yml b/.github/workflows/e2e-nightly-main.yml index 23144e76aeb..32318739712 100644 --- a/.github/workflows/e2e-nightly-main.yml +++ b/.github/workflows/e2e-nightly-main.yml @@ -16,53 +16,56 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03', "04"] + group: ['00', '01', '02', '03', '04', '05'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: actions/checkout@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Build working-directory: test/e2e # Run make jobs in parallel, since we can't run steps in parallel. 
run: make -j2 docker generator runner tests - name: Generate testnets + if: matrix.group != 5 working-directory: test/e2e # When changing -g, also change the matrix groups above run: ./build/generator -g 5 -d networks/nightly/ -p - - name: Run ${{ matrix.p2p }} p2p testnets + - name: Run p2p testnets (${{ matrix.group }}) + if: matrix.group != 5 working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + - name: Run p2p testnets (regression) + if: matrix.group == 5 + working-directory: test/e2e + run: ./run-multiple.sh networks_regressions/*.toml + e2e-nightly-fail: needs: e2e-nightly-test if: ${{ failure() }} runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ github.ref_name }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 11ee034a8fb..63c61b6a0c1 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -8,31 +8,48 @@ on: push: branches: - main - - release/** + - v0.3[478].x + - v[1-9][0-9]?.x + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: e2e-test: + # Run two parallel jobs: for `ci.toml`, and for the regression manifests + strategy: + fail-fast: false + matrix: + group: ['networks/ci', 'networks_regressions/*'] runs-on: ubuntu-latest timeout-minutes: 15 steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 + + - id: filter + uses: dorny/paths-filter@v3 + with: + filters: | + code: + - '**/*.go' + - 'Makefile' + - 'go.*' + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + if: steps.filter.outputs.code == 'true' + + - uses: actions/setup-go@v5 + if: steps.filter.outputs.code == 'true' with: - PATTERNS: | - **/**.go - go.mod - go.sum + go-version: ${{ env.GO_VERSION }} - name: Build + if: steps.filter.outputs.code == 'true' working-directory: test/e2e - # Run two make jobs in parallel, since we can't run steps in parallel. 
- run: make -j2 docker runner tests - if: "env.GIT_DIFF != ''" + run: make -j2 docker runner - - name: Run CI testnet + - name: Run "${{ matrix.group }}" testnet + if: steps.filter.outputs.code == 'true' working-directory: test/e2e - run: ./run-multiple.sh networks/ci.toml - if: "env.GIT_DIFF != ''" + run: ./run-multiple.sh ${{ matrix.group }}.toml diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index fef0a9e7416..407cc11fa70 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -9,15 +9,20 @@ on: paths: - "test/fuzz/**/*.go" +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: fuzz-nightly-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: actions/checkout@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} - name: Install go-fuzz working-directory: test/fuzz @@ -49,14 +54,14 @@ jobs: continue-on-error: true - name: Archive crashers - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: crashers path: test/fuzz/**/crashers retention-days: 3 - name: Archive suppressions - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: suppressions path: test/fuzz/**/suppressions @@ -76,23 +81,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ github.ref_name }} CRASHERS: ${{ needs.fuzz-nightly-test.outputs.crashers-count }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":skull: Nightly fuzz tests for `${{ env.BRANCH }}` failed with ${{ env.CRASHERS }} crasher(s). See the <${{ env.RUN_URL }}|run details>." - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":skull: Nightly fuzz tests for `${{ env.BRANCH }}` failed with ${{ env.CRASHERS }} crasher(s). See the <${{ env.RUN_URL }}|run details>."
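The `dorny/paths-filter` step that replaces `technote-space/get-diff-action` in several workflows exposes each named filter as a step output, so downstream steps are gated on `steps.filter.outputs.code == 'true'` instead of the old `env.GIT_DIFF` check. A minimal sketch of the gating pattern, using the same `code` filter as the diffs above:

```yaml
# Sketch of the paths-filter gating pattern: the `code` filter matches Go
# sources, the Makefile, and the module files; later steps are skipped when
# none of them changed.
- uses: actions/checkout@v4

- id: filter
  uses: dorny/paths-filter@v3
  with:
    filters: |
      code:
        - '**/*.go'
        - 'Makefile'
        - 'go.*'

- name: Build
  if: steps.filter.outputs.code == 'true'
  working-directory: test/e2e
  run: make -j2 docker runner
```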
diff --git a/.github/workflows/go-version.env b/.github/workflows/go-version.env new file mode 100644 index 00000000000..057ae893412 --- /dev/null +++ b/.github/workflows/go-version.env @@ -0,0 +1,2 @@ +# .github/workflows/go-version.env +GO_VERSION=1.23.1 diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index df8a78f4404..e3811db9769 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -10,24 +10,26 @@ on: push: branches: - main - - release/** + paths: + - "**.go" + - go.mod + - go.sum + - Makefile + - "!test/**" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: govulncheck: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 - with: - go-version: "1.21" - check-latest: true - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - PATTERNS: | - **/*.go - go.mod - go.sum - Makefile + go-version: ${{ env.GO_VERSION }} - name: govulncheck run: make vulncheck - if: "env.GIT_DIFF != ''" diff --git a/.github/workflows/ignore-words.txt b/.github/workflows/ignore-words.txt new file mode 100644 index 00000000000..586e3b62287 --- /dev/null +++ b/.github/workflows/ignore-words.txt @@ -0,0 +1,9 @@ +caf +consequentially +fo +onlyonce +toi +CopyIn +pplication +hain +nterface diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml new file mode 100644 index 00000000000..7ee5271f987 --- /dev/null +++ b/.github/workflows/integration_tests.yml @@ -0,0 +1,41 @@ +name: Integration Tests +on: + pull_request: + merge_group: + push: + paths: + - "**.go" + branches: + - main + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + +jobs: + integration_tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - id: filter + uses: dorny/paths-filter@v3 + with: + filters: | + code: + - '**/*.go' + - 'Makefile' + - 'go.*' + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + if: steps.filter.outputs.code == 'true' + + - uses: actions/setup-go@v5 + if: steps.filter.outputs.code == 'true' + with: + go-version: ${{ env.GO_VERSION }} + + - name: Run tests + if: steps.filter.outputs.code == 'true' + run: | + make test_integrations diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml deleted file mode 100644 index 9c28eb4fd37..00000000000 --- a/.github/workflows/janitor.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: Janitor -# Janitor cleans up previous runs of various workflows -# To add more workflows to cancel visit https://api.github.com/repos/cometbft/cometbft/actions/workflows and find the actions name -on: - pull_request: - -jobs: - cancel: - name: "Cancel Previous Runs" - runs-on: ubuntu-latest - timeout-minutes: 3 - steps: - - uses: styfle/cancel-workflow-action@0.12.0 - with: - workflow_id: 1041851,1401230,2837803 - access_token: ${{ github.token }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b8b8bf3dfc9..1d186412403 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,11 +1,8 @@ -name: Golang Linter -# Lint runs golangci-lint over the entire CometBFT repository. 
+name: Lint, format and check the code for typos +# Lint runs `make lint` # # This workflow is run on every pull request and push to main. # -# The `golangci` job will pass without running if no *.{go, mod, sum} -# files have been modified. -# # To run this locally, simply run `make lint` from the root of the repo. on: @@ -14,25 +11,26 @@ on: push: branches: - main + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: - golangci: - name: golangci-lint + lint: + name: lint runs-on: ubuntu-latest - timeout-minutes: 8 + timeout-minutes: 10 steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - uses: technote-space/get-diff-action@v6 + - uses: actions/setup-python@v5 with: - PATTERNS: | - **/**.go - go.mod - go.sum - - uses: golangci/golangci-lint-action@v3 + python-version: '3.x' + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - version: latest - args: --timeout 10m - github-token: ${{ secrets.github_token }} - if: env.GIT_DIFF + go-version: ${{ env.GO_VERSION }} + + - uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml index b50f10dce7e..cc3fcd477ab 100644 --- a/.github/workflows/markdown-linter.yml +++ b/.github/workflows/markdown-linter.yml @@ -8,10 +8,16 @@ on: - "**.yml" - "**.yaml" pull_request: - branches: [main] + branches: + - main paths: - "**.md" - "**.yml" + - "**.yaml" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: build: diff --git a/.github/workflows/notify-about-breaking-changes.yml b/.github/workflows/notify-about-breaking-changes.yml new file mode 100644 index 00000000000..7aa32bb3ac5 --- /dev/null +++ b/.github/workflows/notify-about-breaking-changes.yml @@ -0,0 +1,65 @@ +name: "Notify about breaking changes" + +on: + push: + branches: + - v1.x + paths: + - ".changelog/unreleased/breaking-changes/**" + +jobs: + notify: + name: Notify about breaking changes + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: 'v1.x' + + - name: Detect new files in directory + id: detect-new-files + run: | + DIRECTORY=".changelog/unreleased/breaking-changes" + + # Get a list of new files in the directory (added in the last commit) + NEW_FILES=$(git diff --name-status HEAD^..HEAD $DIRECTORY | grep "^A" | awk '{print $2}') + + if [ -n "$NEW_FILES" ]; then + echo "New files detected: $NEW_FILES" + echo "::set-output name=new_files::$NEW_FILES" + else + echo "No new files detected" + echo "::set-output name=new_files::none" + fi + + - name: Read the content of new files + if: steps.detect-new-files.outputs.new_files != 'none' + id: read-content + run: | + NEW_FILES="${{ steps.detect-new-files.outputs.new_files }}" + CONTENT="" + + for FILE in $NEW_FILES; do + FILE_CONTENT=$(cat $FILE) + CONTENT="$CONTENT\n---\n$FILE_CONTENT\n" + done + + echo "$CONTENT" + echo "::set-output name=file_content::$CONTENT" + + - name: Post to a Slack channel + id: slack + if: steps.read-content.outputs.file_content != '' + uses: slackapi/slack-github-action@v2.0.0 + with: + channel-id: 'C03Q5J9SXS8' # cometbft-engineering + # channel-id: 'shared-sdk-comet' + payload: | + text: "New breaking changes (pushed in ${{ github.event.pull_request.html_url || github.event.head_commit.url }}):" + blocks: + - type: 
"section" + text: + type: "mrkdwn" + text: "${{ steps.read-content.outputs.file_content }}" + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 7c66cbaa3c7..57bf0666cbd 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -16,9 +16,11 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: ${{ env.GO_VERSION }} # Similar check to ./release-version.yml, but enforces this when pushing # tags. The ./release-version.yml check can be bypassed and is mainly @@ -44,7 +46,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for changes available in this pre-release, but not yet officially released." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: version: latest args: release --clean --release-notes ../release_notes.md @@ -57,21 +59,15 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon pre-release - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":sparkles: New CometBFT pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":sparkles: New CometBFT pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 97d5974f9e3..ec967ee65db 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -10,13 +10,17 @@ on: paths: - 'proto/**' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: lint: runs-on: ubuntu-latest timeout-minutes: 5 steps: - uses: actions/checkout@v4 - - uses: bufbuild/buf-setup-action@v1.27.2 + - uses: bufbuild/buf-setup-action@v1.47.2 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml index 73d5e2f8679..090a1b7a1a9 100644 --- a/.github/workflows/release-version.yml +++ b/.github/workflows/release-version.yml @@ -13,9 +13,11 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: ${{ env.GO_VERSION }} - name: Check version run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7ae9b172295..9e0cdd31851 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,9 +14,11 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + + - uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: ${{ 
env.GO_VERSION }} # Similar check to ./release-version.yml, but enforces this when pushing # tags. The ./release-version.yml check can be bypassed and is mainly @@ -43,7 +45,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for this release." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: version: latest args: release --clean --release-notes ../release_notes.md @@ -56,21 +58,15 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon release - uses: slackapi/slack-github-action@v1.24.0 + uses: slackapi/slack-github-action@v2.0.0 env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}" with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":rocket: New CometBFT release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" - } - } - ] - } + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":rocket: New CometBFT release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 396f41b1aba..35bfb53d77f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had diff --git a/.github/workflows/test-slack-notification.yml b/.github/workflows/test-slack-notification.yml new file mode 100644 index 00000000000..1f0dbfcac2f --- /dev/null +++ b/.github/workflows/test-slack-notification.yml @@ -0,0 +1,23 @@ +# Test if the Slack notification and Github Actions integration is working +# Manually running this workflow should trigger a message that is successfully +# delivered to Slack + +name: Test Slack Notification +on: + workflow_dispatch: + +jobs: + notify: + runs-on: ubuntu-latest + steps: + - name: Notify Slack with Test message + uses: slackapi/slack-github-action@v2.0.0 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + blocks: + - type: "section" + text: + type: "mrkdwn" + text: ":inbox_tray: CometBFT test message from Github Actions" diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index 62c5779cc9a..a3d4ad7b889 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v3.0.0 + uses: docker/setup-buildx-action@v3.7.1 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v3.0.0 + uses: docker/login-action@v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v5.0.0 + uses: docker/build-push-action@v6.9.0 with: context: . 
file: ./test/e2e/docker/Dockerfile diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 9087e705865..75d362df759 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -3,11 +3,12 @@ on: pull_request: merge_group: push: - paths: - - "**.go" branches: - main - - release/** + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: tests: @@ -15,21 +16,28 @@ jobs: strategy: fail-fast: false matrix: - part: ["00", "01", "02", "03", "04", "05"] + part: ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19"] steps: - - uses: actions/setup-go@v4 - with: - go-version: "1.21" - uses: actions/checkout@v4 - - uses: technote-space/get-diff-action@v6 + + - id: filter + uses: dorny/paths-filter@v3 with: - PATTERNS: | - **/**.go - "!test/" - go.mod - go.sum - Makefile + filters: | + code: + - '**/*.go' + - 'Makefile' + - 'go.*' + + - run: echo "GO_VERSION=$(cat .github/workflows/go-version.env | grep GO_VERSION | cut -d '=' -f2)" >> $GITHUB_ENV + if: steps.filter.outputs.code == 'true' + + - uses: actions/setup-go@v5 + if: steps.filter.outputs.code == 'true' + with: + go-version: ${{ env.GO_VERSION }} + - name: Run Go Tests + if: steps.filter.outputs.code == 'true' run: | - make test-group-${{ matrix.part }} NUM_SPLIT=6 - if: env.GIT_DIFF + make test-group-${{ matrix.part }} NUM_SPLIT=20 diff --git a/.gitignore b/.gitignore index 0ae98430ea5..f6d4c6c42b9 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ test/loadtime/build test/e2e/build test/e2e/data/ test/e2e/networks/*/ +test/e2e/networks_regressions/*/ test/logs test/p2p/data/ tools/goleveldb_perf/.ipynb_checkpoints @@ -62,3 +63,11 @@ proto/spec/**/*.pb.go *.dvi # Python virtual environments .venv +go.work.sum +cmd/cometbft/cometbft +# gobenchdata +assets +benchmarks.json +gobenchdata-web.yml +index.html +overrides.css diff --git a/.golangci.yml b/.golangci.yml index ca690083747..fe243d84065 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,49 +1,81 @@ +run: + tests: true + timeout: 10m + linters: - enable: - - asciicheck - - bodyclose - - depguard - - dogsled - - dupl - - errcheck - - exportloopref - - goconst - - gofmt - - goimports - - revive - - gosec - - gosimple - - govet - - ineffassign - - misspell - - nakedret - - nolintlint - - prealloc - - staticcheck - # - structcheck // to be fixed by golangci-lint - - stylecheck - - typecheck - - unconvert - - unused + enable-all: true + disable: + - containedctx + - contextcheck + - cyclop + - dupword + - errorlint + - errname + - err113 + - exhaustive + - exhaustruct + - execinquery + - forbidigo + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - gocyclo + - godox + - gomnd + - interfacebloat + - intrange + - ireturn + - lll + - maintidx + - mnd + - nestif + - nilnil + - nlreturn + - nonamedreturns + - predeclared + - tagliatelle + - testifylint + - varnamelen + - wrapcheck + - wsl issues: exclude-rules: - path: _test\.go linters: + - gocritic + - gofmt + - goimport - gosec - max-same-issues: 50 - + - noctx + - paralleltest + - testpackage + - tparallel + - path: \.pb\.go + linters: + - gofmt + - goimports + - govet + - stylecheck + max-same-issues: 10000 + max-issues-per-linter: 10000 linters-settings: dogsled: max-blank-identifiers: 3 - golint: - min-confidence: 0 goconst: ignore-tests: true - maligned: - suggest-new: true misspell: locale: US + gci: + sections: + - 
standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - blank # blank imports + - dot # dot imports + - prefix(github.com/cometbft/cometbft, github.com/cometbft/cometbft-db, github.com/cometbft/cometbft-load-test) + custom-order: true depguard: rules: main: @@ -52,22 +84,25 @@ linters-settings: - "!$test" allow: - $gostd + - github.com/BurntSushi/toml + - github.com/Masterminds/semver/v3 - github.com/cometbft - github.com/cosmos - - github.com/btcsuite/btcd/btcec/v2 - - github.com/BurntSushi/toml + - github.com/creachadair/atomicfile + - github.com/creachadair/tomledit + - github.com/decred/dcrd/dcrec/secp256k1/v4 + - github.com/dgraph-io/badger/v4 - github.com/go-git/go-git/v5 - - github.com/go-kit - - github.com/go-logfmt/logfmt - - github.com/gofrs/uuid - github.com/google - github.com/gorilla/websocket - - github.com/informalsystems/tm-load-test/pkg/loadtest + - github.com/hashicorp/golang-lru/v2 - github.com/lib/pq - github.com/libp2p/go-buffer-pool - - github.com/Masterminds/semver/v3 + - github.com/lmittmann/tint - github.com/minio/highwayhash + - github.com/mitchellh/mapstructure - github.com/oasisprotocol/curve25519-voi + - github.com/pelletier/go-toml/v2 - github.com/pkg/errors - github.com/prometheus - github.com/rcrowley/go-metrics @@ -81,18 +116,69 @@ linters-settings: - "$test" allow: - $gostd - - github.com/cosmos - - github.com/cometbft - github.com/adlio/schema - github.com/btcsuite/btcd + - github.com/cometbft + - github.com/cosmos + - github.com/dgraph-io/badger/v4 - github.com/fortytw2/leaktest - - github.com/go-kit - github.com/google/uuid - github.com/gorilla/websocket - github.com/lib/pq - github.com/oasisprotocol/curve25519-voi/primitives/merlin - github.com/ory/dockertest - github.com/pkg/errors + - github.com/prometheus/client_golang/prometheus - github.com/prometheus/client_golang/prometheus/promhttp - github.com/spf13 - github.com/stretchr/testify + - github.com/decred/dcrd/dcrec/secp256k1/v4 + + revive: + enable-all-rules: true + rules: + - name: comment-spacings # temporarily disabled + disabled: true + - name: max-public-structs + disabled: true + - name: cognitive-complexity + disabled: true + - name: argument-limit + disabled: true + - name: cyclomatic + disabled: true + - name: deep-exit + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: line-length-limit + disabled: true + - name: flag-parameter + disabled: true + - name: add-constant + disabled: true + - name: empty-lines + disabled: true + - name: import-shadowing + disabled: true + - name: modifies-value-receiver + disabled: true + - name: confusing-naming + disabled: true + - name: defer + disabled: true + - name: unchecked-type-assertion + disabled: true + - name: unhandled-error + disabled: true + arguments: + - "fmt.Printf" + - "fmt.Print" + - "fmt.Println" + gosec: + excludes: + - G115 diff --git a/.goreleaser.yml b/.goreleaser.yml index da0b5e23053..b9a76a3d4e5 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -8,7 +8,7 @@ builds: - id: "cometbft" main: ./cmd/cometbft/main.go ldflags: - - -s -w -X github.com/cometbft/cometbft/version.TMCoreSemVer={{ .Version }} + - -s -w -X github.com/cometbft/cometbft/version.CMTSemVer={{ .Version }} env: - CGO_ENABLED=0 goos: diff --git a/.mockery.yml b/.mockery.yml new file mode 100644 index 00000000000..00251ea8882 --- /dev/null +++ 
b/.mockery.yml @@ -0,0 +1,2 @@ +issue-845-fix: True +resolve-type-alias: False diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..543d58b1ba9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,30 @@ +--- +# Details: https://pre-commit.com/#new-hooks + +repos: + # golangci-lint + - repo: https://github.com/golangci/golangci-lint + rev: v1.60.3 + hooks: + - id: golangci-lint-full + args: [--timeout, 10m] # for CI + + # gofumpt (does not have .pre-commit-hooks.yaml) + - repo: local + hooks: + - id: gofumpt + name: "gofumpt" + additional_dependencies: + - mvdan.cc/gofumpt@v0.7.0 + entry: "gofumpt" + language: golang + args: [-w, .] + types: [go] + pass_filenames: false + + # codespell + - repo: https://github.com/codespell-project/codespell + rev: v2.3.0 + hooks: + - id: codespell + args: [-w] diff --git a/CHANGELOG.md b/CHANGELOG.md index c4214ae6b74..6d5bf782584 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,41 +2,498 @@ ## Unreleased +*July 1, 2024* + +This is a major release of CometBFT that includes several substantial changes +that aim to reduce bandwidth consumption, enable modularity, improve +integrators' experience and increase the velocity of the CometBFT development +team, including: + +1. Validators now proactively communicate the block parts they already have so + others do not resend them, reducing amplification in the network and + bandwidth consumption. +2. An experimental feature in the mempool that allows limiting the number of + peers to which transactions are forwarded, so operators can further optimize + gossip-related bandwidth consumption. +3. An opt-in `nop` mempool, which allows application developers to turn off all + mempool-related functionality in Comet such that they can build their own + transaction dissemination mechanism, for example a standalone mempool-like + process that can be scaled independently of the consensus engine/application. + This requires application developers to implement their own gossip/networking + mechanisms. See [ADR 111](./docs/architecture/adr-111-nop-mempool.md) for + details. +4. The first officially supported release of the [data companion + API](./docs/architecture/adr-101-data-companion-pull-api.md). +5. Versioning of both the Protobuf definitions _and_ RPC. By versioning our + APIs, we aim to provide a level of commitment to API stability while + simultaneously affording ourselves the ability to roll out substantial + changes in non-breaking releases of CometBFT. See [ADR + 103](./docs/architecture/adr-103-proto-versioning.md) and [ADR + 107](./docs/architecture/adr-107-betaize-proto-versions.md). +6. Moving many Go packages that are currently publicly accessible into the + `internal` directory so that the team can roll out substantial changes in the + future without worrying about breaking users' codebases. + The massive surface area of previous versions significantly + hampered the team's ability to roll out impactful new changes, as + such changes previously required a new breaking release (which currently + takes 6 to 12 months to reach production use for many users). See [ADR + 109](./docs/architecture/adr-109-reduce-go-api-surface.md) for more details. +7. Proposer-Based Timestamps (PBTS) support. PBTS is a Byzantine fault-tolerant + algorithm used by CometBFT for computing block times. + When activated on a chain, it replaces the pre-existing BFT-time algorithm.
+ See the [spec](./spec/consensus/proposer-based-timestamp) doc for PBTS. + +None of these changes are state machine-breaking for CometBFT-based networks, +but could be breaking for some users who depend on the Protobuf definitions' type +URLs. See the [upgrading guidelines](./UPGRADING.md) and specific changes below +for more details. + +**NB: This version is still a release candidate, which means that +API-breaking changes, although very unlikely, might still be introduced +before the final release.** See [RELEASES.md](./RELEASES.md) for more information on +the stability guarantees we provide for pre-releases. + ### BREAKING CHANGES + - `[abci/types]` Rename `UpdateValidator` to `NewValidatorUpdate`, remove + `Ed25519ValidatorUpdate` ([\#2843](https://github.com/cometbft/cometbft/pull/2843)) +- `[abci/client]` Deprecate `SetResponseCallback(cb Callback)` in the `Client` interface as it is no +longer used. ([\#3084](https://github.com/cometbft/cometbft/issues/3084)) +- `[abci/types]` Replace `ValidatorUpdate.PubKey` with `PubKeyType` and + `PubKeyBytes` to allow applications to avoid implementing `PubKey` interface. + ([\#2843](https://github.com/cometbft/cometbft/pull/2843)) +- `[abci]` Changed the proto-derived enum type and constant aliases to the + buf-recommended naming conventions adopted in the `abci/v1` proto package. + For example, `ResponseProcessProposal_ACCEPT` is renamed to `PROCESS_PROPOSAL_STATUS_ACCEPT` + ([\#736](https://github.com/cometbft/cometbft/issues/736)). +- `[abci]` The `Type` enum field is now required to be set to a value other + than the default `CHECK_TX_TYPE_UNKNOWN` for a valid `CheckTxRequest` + ([\#736](https://github.com/cometbft/cometbft/issues/736)). +- `[abci]` Deprecates `ABCIParams` field of `ConsensusParam` and + introduces replacement in `FeatureParams` to enable Vote Extensions. + ([\#2322](https://github.com/cometbft/cometbft/pull/2322)) +- `[abci]` Renamed the alias types for gRPC requests, responses, and service + instances to follow the naming changes in the proto-derived + `api/cometbft/abci/v1` package + ([\#1533](https://github.com/cometbft/cometbft/pull/1533)): + * The prefixed naming pattern `RequestFoo`, `ResponseFoo` changed to + suffixed `FooRequest`, `FooResponse`. + * Each method gets its own unique request and response type to allow for + independent evolution with backward compatibility. + * `ABCIClient` renamed to `ABCIServiceClient`. + * `ABCIServer` renamed to `ABCIServiceServer`. +- `[blocksync]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[cmd]` Remove `replay` and `replay-console` subcommands + and corresponding consensus file replay code, such as + `consensus.RunReplayFile` and `consensus.State.ReplayFile` + ([\#1170](https://github.com/cometbft/cometbft/pull/1170)) +- `[comet]` Version variables, in `version/version.go`, have been renamed to reflect the CometBFT rebranding.
+ ([\#1621](https://github.com/cometbft/cometbft/pull/1621)) +- `[config]` Merge `timeout_prevote` and `timeout_precommit`, + `timeout_prevote_delta` and `timeout_precommit_delta` into `timeout_round` + and `timeout_round_delta`, respectively + ([\#2895](https://github.com/cometbft/cometbft/pull/2895)) +- `[config]` Remove `cleveldb` and `boltdb` ([\#2786](https://github.com/cometbft/cometbft/pull/2786)) +- `[config]` Remove `skip_timeout_commit` in favor of `timeout_commit=0` + ([\#2892](https://github.com/cometbft/cometbft/pull/2892)) +- `[consensus]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[consensus]` `Handshaker.Handshake` now requires `context.Context` ([\#857](https://github.com/cometbft/cometbft/pull/857)) +- `[node]` `NewNode` now requires `context.Context` as the first parameter ([\#857](https://github.com/cometbft/cometbft/pull/857)) - `[crypto/merkle]` The public `Proof.ComputeRootHash` function has been deleted. ([\#558](https://github.com/cometbft/cometbft/issues/558)) -- `[rpc/grpc]` Remove the deprecated gRPC broadcast API - ([\#650](https://github.com/cometbft/cometbft/issues/650)) -- `[consensus]` `Handshaker.Handshake` now requires `context.Context` ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) -- `[node]` `NewNode` now requires `context.Context` as the first parameter ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) -`[mempool]` Change the signature of `CheckTx` in the `Mempool` interface to -`CheckTx(tx types.Tx) (*abcicli.ReqRes, error)`. Also, add new method -`SetTxRemovedCallback`. -([\#1010](https://github.com/cometbft/cometbft/issues/1010)) +- `[crypto]` Remove unnecessary `Sha256` wrapper + ([\#3248](https://github.com/cometbft/cometbft/pull/3248)) +- `[crypto]` Remove unnecessary `xchacha20` and `xsalsa20` implementations + ([\#3347](https://github.com/cometbft/cometbft/pull/3347)) +- `[evidence]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[go/runtime]` Bump minimum Go version to v1.22 + ([\#2725](https://github.com/cometbft/cometbft/pull/2725)) +- `[inspect]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[internal/state]` Moved function `MedianTime` to package `types`, + and made it a method of `Commit` so it can be used by external packages.
+ ([\#2397](https://github.com/cometbft/cometbft/pull/2397)) +- `[libs/async]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/autofile]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/bits]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/clist]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/cmap]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/events]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/fail]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/flowrate]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/net]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/os]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/progressbar]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/rand]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/strings]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/tempfile]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[libs/timer]` Move to `internal` + ([\#1485](https://github.com/cometbft/cometbft/pull/1485)) +- `[mempool]` Add to the `Mempool` interface a new method `PreUpdate()`. This method should be + called before acquiring the mempool lock, to signal that a new update is coming. Also add to + `ErrMempoolIsFull` a new field `RecheckFull`. + ([\#3314](https://github.com/cometbft/cometbft/pull/3314)) +- `[mempool]` Change the signature of `CheckTx` in the `Mempool` interface to +`CheckTx(tx types.Tx, sender p2p.ID) (*abcicli.ReqRes, error)`. +([\#1010](https://github.com/cometbft/cometbft/issues/1010), [\#3084](https://github.com/cometbft/cometbft/issues/3084)) - `[mempool]` Remove `mempoolIDs` for internally storing peer ids as `p2p.ID` instead of `uint16`. ([\#1146](https://github.com/cometbft/cometbft/pull/1146)) -- `[cmd]` Remove `replay` and `replay-console` subcommands - and corresponding consensus file replay code, such as - `consensus.RunReplayFile`, and `consensus.State.ReplayFile` - ([\#1170](https://github.com/cometbft/cometbft/pull/1170)) - `[node]` Change the signature of `GenesisDocProvider` to return the checksum of JSON content alongside the parsed genesis data ([\#1287](https://github.com/cometbft/cometbft/issues/1287)). +- `[node]` Go-API breaking: Change the signature of `LoadStateFromDBOrGenesisDocProvider` + to accept an optional operator provided hash of the genesis file + ([\#1324](https://github.com/cometbft/cometbft/pull/1324)). +- `[p2p]` Remove `p2p_peer_send_bytes_total` and `p2p_peer_receive_bytes_total` + metrics as they are costly to track, and not that informative in debugging + ([\#3184](https://github.com/cometbft/cometbft/issues/3184)) +- `[p2p]` Rename `IPeerSet#List` to `Copy`, add `Random`, `ForEach` methods. + Rename `PeerSet#List` to `Copy`, add `Random`, `ForEach` methods. 
+ ([\#2246](https://github.com/cometbft/cometbft/pull/2246)) +- `[privval]` allow privval to sign arbitrary bytes + ([\#2692](https://github.com/cometbft/cometbft/pull/2692)) +- `[proto/api]` Made `/api` a standalone Go module with its own `go.mod` + ([\#1561](https://github.com/cometbft/cometbft/issues/1561)) +- `[proto/privval]` Replace `pub_key` with `pub_key_type` and `pub_key_bytes` in + `PubKeyResponse` ([\#2878](https://github.com/cometbft/cometbft/issues/2878)) +- `[proto/types]` Deprecate `pub_key` in favor of `pub_key_type` and `pub_key_bytes` in + `Validator` ([\#2878](https://github.com/cometbft/cometbft/issues/2878)) +- `[proto]` Remove `abci.ValidatorUpdate.pub_key`, add `pub_key_type` and + `pub_key_bytes` ([\#2843](https://github.com/cometbft/cometbft/pull/2843)) +- `[proto]` Remove stateful block data retrieval methods from the + data companion gRPC API as per + [RFC 106](https://github.com/cometbft/cometbft/blob/main/docs/references/rfc/rfc-106-separate-stateful-methods.md) + ([\#2230](https://github.com/cometbft/cometbft/issues/2230)): + * `GetLatest` from `cometbft.services.block.v1.BlockService`; + * `GetLatestBlockResults` from `cometbft.services.block_results.v1.BlockResultsService`. +- `[rpc/grpc]` Remove support for stateful block data retrieval methods from the + data companion APIs as per [RFC 106](https://github.com/cometbft/cometbft/blob/main/docs/references/rfc/rfc-106-separate-stateful-methods.md) + * `GetLatestBlock` method removed from the `BlockServiceClient` interface. + * `GetLatestBlockResults` method removed from the `BlockResultServiceClient` interface. + * `GetLatest` endpoint is no longer served by `BlockServiceServer` instances. + * `GetLatestBlockResults` endpoint is no longer served by `BlockResultServiceServer` instances. +- `[proto]` Renamed the packages from `tendermint.*` to `cometbft.*` + and introduced versioned packages to distinguish between proto definitions + released in `0.34.x`, `0.37.x`, `0.38.x`, and `1.x` versions. + Prior to the 1.0 release, the versioned packages are suffixed with + `.v1beta1`, `.v1beta2`, and so on; all definitions describing the protocols + as per the 1.0.0 release are in packages suffixed with `.v1`. + Relocated generated Go code into a new `api` folder and changed the import + paths accordingly. + ([\#495](https://github.com/cometbft/cometbft/pull/495), + [\#1504](https://github.com/cometbft/cometbft/issues/1504)) +- `[proto]` The names in the `cometbft.abci.v1` versioned proto package + are changed to satisfy the + [buf guidelines](https://buf.build/docs/best-practices/style-guide/) + ([#736](https://github.com/cometbft/cometbft/issues/736), + [#1504](https://github.com/cometbft/cometbft/issues/1504), + [#1530](https://github.com/cometbft/cometbft/issues/1530)): + * Names of request and response types used in gRPC changed by making + `Request`/`Response` the suffix instead of the prefix, e.g. + `RequestCheckTx` ⭢ `CheckTxRequest`. + * The `Request` and `Response` multiplex messages are redefined accordingly. + * `CheckTxType` values renamed with the `CHECK_TX_TYPE_` prefix. + * `MisbehaviorType` values renamed with the `MISBEHAVIOR_TYPE_` prefix. + * `Result` enum formerly nested in `ResponseOfferSnapshot` replaced with the package-level + `OfferSnapshotResult`, its values named with the + `OFFER_SNAPSHOT_RESULT_` prefix. 
+ * `Result` enum formerly nested in `ResponseApplySnapshotChunk` replaced with the package-level + `ApplySnapshotChunkResult`, its values named with the + `APPLY_SNAPSHOT_CHUNK_RESULT_` prefix. + * `Status` enum formerly nested in `ResponseProcessProposal` replaced with the package-level + `ProcessProposalStatus`, its values named with the + `PROCESS_PROPOSAL_STATUS_` prefix. + * `Status` enum formerly nested in `ResponseVerifyVoteExtension` replaced with the package-level + `VerifyVoteExtensionStatus`, its values named with the + `VERIFY_VOTE_EXTENSION_STATUS_` prefix. + * New definition of `Misbehavior` using the changed `MisbehaviorType`. + * The gRPC service is renamed `ABCIService` and defined using the types listed above. +- `[proto]` In the `cometbft.state.v1` package, the definition for `ABCIResponsesInfo` + is changed, renaming `response_finalize_block` field to `finalize_block`. +- `[proxy]` Expand `ClientCreator` interface to allow + for per-"connection" control of client creation + ([\#1141](https://github.com/cometbft/cometbft/pull/1141)) +- `[rpc/client]` Hard-code the `/websocket` endpoint path such that it is + no longer configurable, removing the related client constructor parameter + ([\#1412](https://github.com/cometbft/cometbft/pull/1412)) +- `[rpc/grpc]` Remove the deprecated gRPC broadcast API + ([\#650](https://github.com/cometbft/cometbft/issues/650)) +- `[rpc]` The endpoints `broadcast_tx_*` now return an error when the node is + performing block sync or state sync. + ([\#785](https://github.com/cometbft/cometbft/issues/785)) +- `[mempool]` When the node is performing block sync or state sync, the mempool + reactor now discards incoming transactions from peers, and does not propagate + transactions to peers. + ([\#785](https://github.com/cometbft/cometbft/issues/785)) +- `[state/indexer/block]` BlockIndexer now has additional methods: `Prune`, `GetRetainHeight`, `SetRetainHeight` ([\#1176](https://github.com/cometbft/cometbft/pull/1176)) +- `[state/txindex]` TxIndexer now has additional methods: `Prune`, `GetRetainHeight`, `SetRetainHeight` ([\#1176](https://github.com/cometbft/cometbft/pull/1176)) +- `[state/store]` go-API breaking change in `PruneABCIResponses`: added parameter to force compaction. ([\#1972](https://github.com/cometbft/cometbft/pull/1972)) +- `[state/store]` go-API breaking change in `PruneStates`: added parameter to pass the number of pruned states and return pruned entries in the current pruning iteration. ([\#1972](https://github.com/cometbft/cometbft/pull/1972)) +- `[state]` The `state.Store` interface has been expanded + to accommodate the data pull companion API of ADR 101 + ([\#1096](https://github.com/cometbft/cometbft/issues/1096)) +- `[store]` Make the `LoadBlock` method also return block metadata + ([\#1556](https://github.com/cometbft/cometbft/issues/1556)) +- `[version]` Bumped the P2P version from 8 to 9, as this release contains new P2P messages.
+  ([\#1411](https://github.com/cometbft/cometbft/pull/1411))

### BUG FIXES

-- `[consensus]` \#1203 consensus now prevotes `nil` when the proposed value
-  does not match the value the local validator has locked on
+- `[bits]` prevent `BitArray.UnmarshalJSON` from crashing on 0 bits
+  ([\#2774](https://github.com/cometbft/cometbft/pull/2774))
+- `[blocksync]` Wait for `poolRoutine` to stop in `(*Reactor).OnStop`
+  ([\#1879](https://github.com/cometbft/cometbft/pull/1879))
+- `[consensus]` Consensus now prevotes `nil` when the proposed value does not
+  match the value the local validator has locked on ([\#1203](https://github.com/cometbft/cometbft/pull/1203))
-- `[consensus]` \#1175 remove logic to unlock block on +2/3 prevote for nil
+- `[consensus]` Fix a race condition in the consensus timeout ticker, caused by two timeouts being scheduled at the same time.
+  ([\#3092](https://github.com/cometbft/cometbft/pull/3092))
+- `[consensus]` Fix for Security Advisory `ASA-2024-001`: Validation of `VoteExtensionsEnableHeight` can cause chain halt
+  ([ASA-2024-001](https://github.com/cometbft/cometbft/security/advisories/GHSA-qr8r-m495-7hc4))
+- `[consensus]` Remove logic to unlock block on +2/3 prevote for nil
+  ([\#1175](https://github.com/cometbft/cometbft/pull/1175): @BrendanChou)
+- `[crypto]` `SupportsBatchVerifier` returns false
+  if public key is nil instead of dereferencing nil.
+  ([\#1825](https://github.com/cometbft/cometbft/pull/1825))
+- `[evidence]` When `VerifyCommitLight` & `VerifyCommitLightTrusting` are called as part
+  of evidence verification, all signatures present in the evidence must be verified
+  ([\#1749](https://github.com/cometbft/cometbft/pull/1749))
+- `[log]` Fix panic when logging a nil value that is a pointer implementing the
+  fmt.Stringer interface
+  ([\#3145](https://github.com/cometbft/cometbft/pull/3145))
+- `[mempool]` Fix data race when rechecking with async ABCI client
+  ([\#1827](https://github.com/cometbft/cometbft/issues/1827))
+- `[mempool]` Fix data races in `CListMempool` by making atomic the types of `height`, `txsBytes`, and
+  `notifiedTxsAvailable`. ([\#642](https://github.com/cometbft/cometbft/pull/642))
+- `[mempool]` Panic when a CheckTx request to the app returns an error
+  ([\#2225](https://github.com/cometbft/cometbft/pull/2225))
+- `[mempool]` Make the tx size calculation returned by calling proxyapp consistent with that of the mempool
+  ([\#1687](https://github.com/cometbft/cometbft/pull/1687))
+- `[p2p/pex]` Gracefully shut down Reactor ([\#2010](https://github.com/cometbft/cometbft/pull/2010))
+- `[privval]` Retry accepting a connection ([\#2047](https://github.com/cometbft/cometbft/pull/2047))
+- `[rpc]` Fix nil pointer error in `/tx` and `/tx_search` when block is
+  absent ([\#3352](https://github.com/cometbft/cometbft/issues/3352))
+- `[state/indexer]` Respect both height params while querying for events
+  ([\#1529](https://github.com/cometbft/cometbft/pull/1529))
+- `[state/pruning]` When no blocks are pruned, do not attempt to prune statestore
+  ([\#1616](https://github.com/cometbft/cometbft/pull/1616))
+- `[state]` Fix rollback to a specific height
+  ([\#2136](https://github.com/cometbft/cometbft/pull/2136))
+- `[types]` Do not batch verify a commit if the validator set keys have different
+  types.
([\#3195](https://github.com/cometbft/cometbft/issues/3195))
+
+### DEPENDENCIES
+
+- Bump api to v1.0.0-rc.1 for v1.0.0 Release Candidate 1
+  ([\#3191](https://github.com/cometbft/cometbft/pull/3191))
+- Bump cometbft-db to v0.9.0, providing support for RocksDB v8
+  ([\#1725](https://github.com/cometbft/cometbft/pull/1725))
+
+### FEATURES
+
+- `[config]` Add [`pebbledb`](https://github.com/cockroachdb/pebble). To use, build with
+  `pebbledb` tag (`go build -tags pebbledb`) ([\#2132](https://github.com/cometbft/cometbft/pull/2132/))
+- `[config]` Add `[grpc.block_results_service]` section to configure the gRPC `BlockResultsService`
+  ([\#1095](https://github.com/cometbft/cometbft/issues/1095))
+- `[config]` Add `[grpc.block_service]` section to configure the gRPC `BlockService`
+  ([\#1094](https://github.com/cometbft/cometbft/issues/1094))
+- `[config]` Add configuration parameters to tweak forced compaction. ([\#1972](https://github.com/cometbft/cometbft/pull/1972))
+- `[config]` Added `[grpc.version_service]` section for configuring the gRPC version service.
+  ([\#816](https://github.com/cometbft/cometbft/issues/816))
+- `[config]` Added `[grpc]` section to configure the gRPC server.
+  ([\#816](https://github.com/cometbft/cometbft/issues/816))
+- `[config]` Added `[storage.experimental_db_key_layout]` storage parameter, set to "v2"
+  for an order-preserving representation
+  ([\#2327](https://github.com/cometbft/cometbft/pull/2327/))
+- `[config]` Move `timeout_commit` into the ABCI `FinalizeBlockResponse`
+  ([\#2655](https://github.com/cometbft/cometbft/issues/2655))
+- `[config]` Removed unused `[mempool.max_batch_bytes]` mempool parameter.
+  ([\#2056](https://github.com/cometbft/cometbft/pull/2056/))
+- `[config]` Update the default value of `mempool.max_txs_bytes` to 64 MiB.
+  ([\#2756](https://github.com/cometbft/cometbft/issues/2756))
+- `[consensus]` Make mempool updates asynchronous from consensus `Commit`,
+  reducing latency for reaching consensus timeouts.
+  ([\#3008](https://github.com/cometbft/cometbft/pull/3008))
+- `[consensus]` Update block validation to no longer require the block timestamp
+  to be the median of the timestamps of the previous commit. (@anca)
+  ([tendermint/tendermint\#7382](https://github.com/tendermint/tendermint/pull/7382))
+- `[consensus]` Update proposal validation logic to Prevote nil
+  if a proposal does not meet the conditions for Timeliness
+  per the proposer-based timestamp specification. (@anca)
+  ([tendermint/tendermint\#7415](https://github.com/tendermint/tendermint/pull/7415))
+- `[consensus]` Update the proposal logic per the proposer-based timestamp specification
+  so that the proposer will wait for the previous block time to occur
+  before proposing the next block. (@williambanfield)
+  ([tendermint/tendermint\#7376](https://github.com/tendermint/tendermint/pull/7376))
+- `[consensus]` Use the proposed block timestamp as the proposal timestamp.
+  Update the block validation logic to ensure that the proposed block's timestamp
+  matches the timestamp in the proposal message. (@williambanfield)
+  ([tendermint/tendermint\#7391](https://github.com/tendermint/tendermint/pull/7391))
+- `[consensus]` Use the proposer timestamp for the first height instead of the genesis time.
+  Chains will still start consensus at the genesis time.
(@anca) + ([tendermint/tendermint\#7711](https://github.com/tendermint/tendermint/pull/7711)) +- `[consensus]` add a new `synchrony` field to the `ConsensusParameter` struct + for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield) + ([tendermint/tendermint\#7354](https://github.com/tendermint/tendermint/pull/7354)) +- `[crypto]` Add support for BLS12-381 keys. Since the implementation needs + `cgo` and brings in new dependencies, we use the `bls12381` build flag to + enable it ([\#2765](https://github.com/cometbft/cometbft/pull/2765)) +- `[docs]` Add report on storage improvements and findings. ([\#2569](https://github.com/cometbft/cometbft/pull/2569)) +- `[e2e]` Add `block_max_bytes` option to the manifest file. + ([\#2362](https://github.com/cometbft/cometbft/pull/2362)) +- `[e2e]` Add new `--testnet-dir` parameter to set a custom directory for the generated testnet files. + ([\#2433](https://github.com/cometbft/cometbft/pull/2433)) +- `[evidence/store]` Added support for a different DB key representation within the evidence store ([\#2327](https://github.com/cometbft/cometbft/pull/2327/)) +- `[grpc]` Add `BlockResultsService` with client to fetch BlockResults + for a given height, or latest. + ([\#1095](https://github.com/cometbft/cometbft/issues/1095)) +- `[grpc]` Add `BlockService` with client to facilitate fetching of blocks and + streaming of the latest committed block height + ([\#1094](https://github.com/cometbft/cometbft/issues/1094)) +- `[light/store]` Added support for a different DB key representation within the light block store ([\#2327](https://github.com/cometbft/cometbft/pull/2327/)) +- `[mempool]` Add `nop` mempool ([\#1643](https://github.com/cometbft/cometbft/pull/1643)). If you want to use it, change mempool's `type` to `nop`: +```toml + [mempool] + + # The type of mempool for this node to use. + # + # Possible types: + # - "flood" : concurrent linked list mempool with flooding gossip protocol + # (default) + # - "nop" : nop-mempool (short for no operation; the ABCI app is responsible + # for storing, disseminating and proposing txs). "create_empty_blocks=false" + # is not supported. + type = "nop" +``` +- `[metrics]` Add metric for mempool size in bytes `SizeBytes`. + ([\#1512](https://github.com/cometbft/cometbft/pull/1512)) +- `[metrics]` Add metrics to monitor pruning and current available data in stores: `PruningServiceBlockRetainHeight`, `PruningServiceBlockResultsRetainHeight`, `ApplicationBlockRetainHeight`, `BlockStoreBaseHeight`, `ABCIResultsBaseHeight`. + ([\#1234](https://github.com/cometbft/cometbft/pull/1234)) +- `[metrics]` Added metrics to monitor block store access. ([\#1974](https://github.com/cometbft/cometbft/pull/1974)) +- `[metrics]` Added metrics to monitor state store access. 
([\#1974](https://github.com/cometbft/cometbft/pull/1974))
+- `[proto]` Add definitions and generated code for
+  [ADR-101](./docs/architecture/adr-101-data-companion-pull-api.md)
+  `PruningService` in the `cometbft.services.pruning.v1` proto package
+  ([\#1097](https://github.com/cometbft/cometbft/issues/1097))
+- `[rpc/grpc]` Add privileged gRPC server and client facilities, in
+  `server/privileged` and `client/privileged` packages respectively, to
+  enable a separate API server within the node which serves trusted clients
+  without authentication and should never be exposed to the public internet
+  ([\#1097](https://github.com/cometbft/cometbft/issues/1097))
+- `[rpc/grpc]` Add a pruning service on top of the privileged gRPC server API to
+  give an [ADR-101](./docs/architecture/adr-101-data-companion-pull-api.md) data
+  companion control over block data retained by the node. The
+  `WithPruningService` option method in `server/privileged` is provided to
+  configure the pruning service
+  ([\#1097](https://github.com/cometbft/cometbft/issues/1097))
+- `[rpc/grpc]` Add `PruningServiceClient` interface
+  for the gRPC client in `client/privileged` along with a configuration option
+  to enable it
+  ([\#1097](https://github.com/cometbft/cometbft/issues/1097))
+- `[config]` Add `[grpc.privileged]` section to configure the privileged
+  gRPC server for the node, and `[grpc.privileged.pruning_service]` section
+  to control the pruning service
+  ([\#1097](https://github.com/cometbft/cometbft/issues/1097))
+- `[proto]` add `syncing_to_height` to `FinalizeBlockRequest` to let the ABCI app
+  know if the node is syncing or not.
+  ([\#1247](https://github.com/cometbft/cometbft/issues/1247))
+- `[rpc/grpc]` Add gRPC client with support for version service
+  ([\#816](https://github.com/cometbft/cometbft/issues/816))
+- `[rpc/grpc]` Add gRPC endpoint for pruning the block and transaction indexes
+  ([\#1327](https://github.com/cometbft/cometbft/pull/1327))
+- `[rpc/grpc]` Add gRPC server to the node, configurable
+  via a new `[grpc]` section in the configuration file
+  ([\#816](https://github.com/cometbft/cometbft/issues/816))
+- `[rpc/grpc]` Add gRPC version service to allow clients to
+  establish the software and protocol versions of the node
+  ([\#816](https://github.com/cometbft/cometbft/issues/816))
+- `[rpc]` Add `unconfirmed_tx` to support querying a mempool transaction by its hash.
+  ([\#3079](https://github.com/cometbft/cometbft/pull/3079))
+- `[state]` Add TxIndexer and BlockIndexer pruning metrics
+  ([\#1334](https://github.com/cometbft/cometbft/issues/1334))
+- `[store]` Added support for a different DB key representation to state and block store ([\#2327](https://github.com/cometbft/cometbft/pull/2327/))
+- `[store]` When pruning, force compaction of the database. ([\#1972](https://github.com/cometbft/cometbft/pull/1972))
+- `[test]` Added monitoring tools and dashboards for local testing with `localnet`. ([\#2107](https://github.com/cometbft/cometbft/issues/2107))
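+The new gRPC-related `[config]` entries above all land in `config.toml`. A
+minimal sketch of the new sections (key names and values are illustrative, not
+authoritative; consult the generated `config.toml` for the actual defaults):
+```toml
+[grpc]
+# TCP or UNIX socket address for the gRPC server to listen on.
+# An empty value disables the server.
+laddr = "tcp://127.0.0.1:26670"
+
+[grpc.version_service]
+enabled = true
+
+[grpc.block_service]
+enabled = true
+
+[grpc.block_results_service]
+enabled = true
+
+# Privileged endpoint for a trusted data companion (ADR 101).
+# It is unauthenticated and must never be exposed to the public internet.
+[grpc.privileged]
+laddr = ""
+
+[grpc.privileged.pruning_service]
+enabled = false
+```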
### IMPROVEMENTS

-- `[mempool]` Add a metric (a counter) to measure whether a tx was received more than once.
-  ([\#634](https://github.com/cometbft/cometbft/pull/634))
+- `[abci/client]` Add consensus-synchronized local client creator,
+  which only imposes a mutex on the consensus "connection", leaving
+  the concurrency of all other "connections" up to the application
+  ([\#1141](https://github.com/cometbft/cometbft/pull/1141))
+- `[abci/client]` Add fully unsynchronized local client creator, which
+  imposes no mutexes on the application, leaving all handling of concurrency up
+  to the application ([\#1141](https://github.com/cometbft/cometbft/pull/1141))
+- `[abci]` Increase ABCI socket message size limit to 2GB ([\#1730](https://github.com/cometbft/cometbft/pull/1730): @troykessler)
+- `[blockstore]` Remove a redundant `Header.ValidateBasic` call in `LoadBlockMeta`, reducing this time by 75%.
+  ([\#2964](https://github.com/cometbft/cometbft/pull/2964))
+- `[blockstore]` Use LRU caches for LoadBlockPart. Make the LoadBlockPart and LoadBlockCommit APIs
+  return shared instances that the caller is expected not to modify. This saves on memory copying.
+  ([\#3342](https://github.com/cometbft/cometbft/issues/3342))
+- `[blockstore]` Use LRU caches in blockstore, significantly improving consensus gossip routine performance
+  ([\#3003](https://github.com/cometbft/cometbft/issues/3003))
+- `[blocksync]` Avoid double-calling `types.BlockFromProto` for performance
+  reasons ([\#2016](https://github.com/cometbft/cometbft/pull/2016))
+- `[blocksync]` Request a block from peer B if we are approaching the pool's height
+  (fewer than 50 blocks away) and the current peer A is slow in sending us the
+  block ([\#2475](https://github.com/cometbft/cometbft/pull/2475))
+- `[blocksync]` Request the block N from peer B immediately after getting
+  `NoBlockResponse` from peer A
+  ([\#2475](https://github.com/cometbft/cometbft/pull/2475))
+- `[blocksync]` Sort peers by download rate (the fastest peer is picked first)
+  ([\#2475](https://github.com/cometbft/cometbft/pull/2475))
+- `[blocksync]` make the max number of downloaded blocks dynamic.
+  Previously it was a constant 600. Now it's `peersCount * maxPendingRequestsPerPeer (20)`
+  ([\#2467](https://github.com/cometbft/cometbft/pull/2467))
+- `[cli/node]` The genesis hash provided with the `--genesis-hash` flag is now
+  forwarded to the node instead of being read from the genesis file.
+  ([\#1324](https://github.com/cometbft/cometbft/pull/1324))
+- `[cmd]` Add support for all key types in the `gen-validator` command. Use
+  `--key-type=` (or `-k`) to specify the key type (e.g., `-k secp256k1`).
+  ([\#1757](https://github.com/cometbft/cometbft/issues/1757))
+- `[config]` Added `[storage.pruning]` and `[storage.pruning.data_companion]`
+  sections to facilitate background pruning and data companion (ADR 101)
+  operations ([\#1096](https://github.com/cometbft/cometbft/issues/1096))
+- `[config]` Added `genesis_hash` storage parameter, which, when set, is checked
+  on node startup
+  ([\#1324](https://github.com/cometbft/cometbft/pull/1324/))
+- `[config]` Added `recheck_timeout` mempool parameter to set how long to wait for recheck
+  responses from the app (only applies to non-local ABCI clients).
+  ([\#1827](https://github.com/cometbft/cometbft/issues/1827/))
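+  A sketch of the new knob in `config.toml` (the value shown is illustrative,
+  not necessarily the default):
+```toml
+[mempool]
+# How long to wait for CheckTx responses when rechecking transactions
+# (only applies to non-local ABCI clients).
+recheck_timeout = "1s"
+```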
+- `[config]` Use the `embed` package for the default template
+  ([\#3057](https://github.com/cometbft/cometbft/pull/3057))
+- `[consensus/state]` Remove a redundant `VerifyBlock` call in `FinalizeCommit`
+  ([\#2928](https://github.com/cometbft/cometbft/pull/2928))
+- `[consensus]` Add `chain_size_bytes` metric for measuring the size of the blockchain in bytes
+  ([\#2093](https://github.com/cometbft/cometbft/pull/2093))
+- `[consensus]` Fix some reactor messages taking write locks instead of read locks.
+  ([\#3159](https://github.com/cometbft/cometbft/issues/3159))
+- `[consensus]` Improve performance of consensus metrics by lowering string operations
+  ([\#3017](https://github.com/cometbft/cometbft/issues/3017))
+- `[consensus]` Log vote validation failures at info level
+  ([\#1022](https://github.com/cometbft/cometbft/pull/1022))
+- `[consensus]` Lower the consensus blocking overhead of broadcasts from `num_peers * process_creation_time` to `process_creation_time`.
+  ([\#3180](https://github.com/cometbft/cometbft/issues/3180))
+- `[consensus]` Make Vote messages only take one peerstate mutex
+  ([\#3156](https://github.com/cometbft/cometbft/issues/3156))
- `[consensus]` New metrics (counters) to track duplicate votes and block parts.
  ([\#896](https://github.com/cometbft/cometbft/pull/896))
- `[consensus]` Optimize vote and block part gossip with new message `HasProposalBlockPartMessage`,
@@ -45,22 +502,143 @@ by default as this is experimental.
 Our scale tests show substantial bandwidth improvement with a value of 50 ms.
  ([\#904](https://github.com/cometbft/cometbft/pull/904))
-- Update Apalache type annotations in the light client spec ([#955](https://github.com/cometbft/cometbft/pull/955))
-- `[node]` Remove genesis persistence in state db, replaced by a hash
-  ([cometbft/cometbft\#1017](https://github.com/cometbft/cometbft/pull/1017),
-  [cometbft/cometbft\#1295](https://github.com/cometbft/cometbft/pull/1295))
-- `[consensus]` Log vote validation failures at info level
-  ([\#1022](https://github.com/cometbft/cometbft/pull/1022))
+- `[consensus]` Reduce the default MaxBytes to 4MB and increase MaxGas to 10 million
+  ([\#1518](https://github.com/cometbft/cometbft/pull/1518))
+- `[consensus]` Reuse an internal buffer for block building to reduce memory allocation overhead.
+  ([\#3162](https://github.com/cometbft/cometbft/issues/3162))
+- `[consensus]` Use an independent rng for gossip threads, reducing mutex contention.
+  ([\#3005](https://github.com/cometbft/cometbft/issues/3005))
- `[consensus]` When prevoting, avoid calling ProcessProposal when we know the
  proposal was already validated by correct nodes.
  ([\#1230](https://github.com/cometbft/cometbft/pull/1230))
+- `[crypto/merkle]` faster calculation of hashes ([\#1921](https://github.com/cometbft/cometbft/pull/1921))
+- `[docs/references]` Added ADR-102: RPC Companion.
+  ([\#658](https://github.com/cometbft/cometbft/pull/658))
+- `[docs]` Merge configuration doc in explanation section with the config.toml document in references.
+  ([\#2769](https://github.com/cometbft/cometbft/pull/2769))
+- `[e2e]` Add manifest option `VoteExtensionsUpdateHeight` to test
+  vote extension activation via `InitChain` and `FinalizeBlock`.
+  Also, extend the manifest generator to produce different values
+  of this new option
+  ([\#2065](https://github.com/cometbft/cometbft/pull/2065))
+- `[e2e]` Add manifest option `load_max_txs` to limit the number of transactions generated by the
+  `load` command.
+  ([\#2094](https://github.com/cometbft/cometbft/pull/2094))
+- `[e2e]` Add new targets `fast` and `clean` to Makefile.
+  ([\#2192](https://github.com/cometbft/cometbft/pull/2192))
+- `[e2e]` Allow disabling the PEX reactor on all nodes in the testnet
+  ([\#1579](https://github.com/cometbft/cometbft/pull/1579))
+- `[e2e]` Allow latency emulation between nodes.
+  ([\#1559](https://github.com/cometbft/cometbft/pull/1559))
+- `[e2e]` Allow latency emulation between nodes.
+  ([\#1560](https://github.com/cometbft/cometbft/pull/1560))
+- `[e2e]` Introduce the possibility in the manifest for some nodes
+  to run with a preconfigured clock skew.
+  ([\#2453](https://github.com/cometbft/cometbft/pull/2453))
+- `[e2e]` Log the number of transactions that were sent successfully and the number that failed.
+  ([\#2328](https://github.com/cometbft/cometbft/pull/2328))
+- `[e2e]` add an option to the `runner logs` command to output logs separately.
+  ([\#3353](https://github.com/cometbft/cometbft/pull/3353))
+- `[event-bus]` Remove the debug logs in PublishEventTx, which were causing noticeable slowdowns in production.
+  ([\#2911](https://github.com/cometbft/cometbft/pull/2911))
+- `[flowrate]` Remove expensive time.Now() calls from flowrate calls.
+  Clock updates now happen in a separate goroutine.
+  ([\#3016](https://github.com/cometbft/cometbft/issues/3016))
+- `[indexer]` Optimized the PSQL indexer
+  ([\#2142](https://github.com/cometbft/cometbft/pull/2142)) thanks to external contributor @k0marov!
+- `[internal/bits]` 10x speedup and remove heap overhead of `bitArray.PickRandom` (used extensively in consensus gossip)
+  ([\#2841](https://github.com/cometbft/cometbft/pull/2841)).
+- `[internal/bits]` 10x speedup creating initialized bitArrays, which speeds up extendedCommit.BitArray(). This is used in consensus vote gossip.
+  ([\#2959](https://github.com/cometbft/cometbft/pull/2959)).
+- `[jsonrpc]` enable HTTP basic auth in websocket client ([\#2434](https://github.com/cometbft/cometbft/pull/2434))
+- `[libs/json]` Lower the memory overhead of JSON encoding by using JSON encoders internally.
+  ([\#2846](https://github.com/cometbft/cometbft/pull/2846))
+- `[light]` Export light package errors ([\#1904](https://github.com/cometbft/cometbft/pull/1904)) (contributes to [\#1140](https://github.com/cometbft/cometbft/issues/1140))
+- `[linting]` Removed undesired linting from `Makefile` and added dependency check for `codespell`.
+  ([\#1958](https://github.com/cometbft/cometbft/pull/1958/))
+- `[mempool]` Add a metric (a counter) to measure whether a tx was received more than once.
+  ([\#634](https://github.com/cometbft/cometbft/pull/634))
+- `[mempool]` Add experimental feature to limit the number of persistent peers and non-persistent
+  peers to which the node gossips transactions (see the config sketch below).
+  ([\#1558](https://github.com/cometbft/cometbft/pull/1558))
+  ([\#1584](https://github.com/cometbft/cometbft/pull/1584))
+- `[config]` Add mempool parameters `experimental_max_gossip_connections_to_persistent_peers` and
+  `experimental_max_gossip_connections_to_non_persistent_peers` for limiting the number of peers to
+  which the node gossips transactions.
+  ([\#1558](https://github.com/cometbft/cometbft/pull/1558))
+  ([\#1584](https://github.com/cometbft/cometbft/pull/1584))
+- `[mempool]` Before updating the mempool, consider it full if rechecking is still in progress.
+  This will stop accepting transactions in the mempool if the node can't keep up with re-CheckTx.
+  ([\#3314](https://github.com/cometbft/cometbft/pull/3314))
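+  A sketch of the experimental gossip caps described above, as they might
+  appear in `config.toml` (values are illustrative; 0 is assumed to mean
+  "no limit"):
+```toml
+[mempool]
+# Experimental: limit how many persistent / non-persistent peers
+# receive gossiped transactions from this node.
+experimental_max_gossip_connections_to_persistent_peers = 0
+experimental_max_gossip_connections_to_non_persistent_peers = 0
+```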
- `[node]` On upgrade, after [\#1296](https://github.com/cometbft/cometbft/pull/1296),
  delete the genesis file existing in the DB.
-  ([cometbft/cometbft\#1297](https://github.com/cometbft/cometbft/pull/1297)
-
-### MINIMUM GO VERSION
-
-- Bump minimum Go version to v1.21
-  ([\#1244](https://github.com/cometbft/cometbft/pull/1244))
+  ([\#1297](https://github.com/cometbft/cometbft/pull/1297))
+- `[node]` Remove genesis persistence in state db, replaced by a hash
+  ([\#1017](https://github.com/cometbft/cometbft/pull/1017),
+  [\#1295](https://github.com/cometbft/cometbft/pull/1295))
+- `[node]` The `node.Node` struct now manages a
+  `state.Pruner` service to facilitate background pruning
+  ([\#1096](https://github.com/cometbft/cometbft/issues/1096))
+- `[node]` export node package errors
+  ([\#3056](https://github.com/cometbft/cometbft/pull/3056))
+- `[p2p/channel]` Speed up `ProtoIO` writer creation time, and thereby speed up channel writing by 5%.
+  ([\#2949](https://github.com/cometbft/cometbft/pull/2949))
+- `[p2p/conn]` Minor speedup (3%) to connection.WritePacketMsgTo, by removing MinInt calls.
+  ([\#2952](https://github.com/cometbft/cometbft/pull/2952))
+- `[p2p/conn]` Speed up connection.WritePacketMsgTo, by reusing internal buffers rather than re-allocating.
+  ([\#2986](https://github.com/cometbft/cometbft/pull/2986))
+- `[p2p]` Export p2p package errors ([\#1901](https://github.com/cometbft/cometbft/pull/1901)) (contributes to [\#1140](https://github.com/cometbft/cometbft/issues/1140))
+- `[p2p]` Lower `flush_throttle_timeout` to 10ms
+  ([\#2988](https://github.com/cometbft/cometbft/issues/2988))
+- `[p2p]` Remove `Switch#Broadcast` unused return channel
+  ([\#3182](https://github.com/cometbft/cometbft/pull/3182))
+- `[p2p]` make `PeerSet.Remove` more efficient (Author: @odeke-em) ([\#2246](https://github.com/cometbft/cometbft/pull/2246))
+- `[privval]` DO NOT require extension signature from privval if vote
+  extensions are disabled. Remote signers can skip signing the extension if
+  the `skip_extension_signing` flag in `SignVoteRequest` is true.
+  ([\#2496](https://github.com/cometbft/cometbft/pull/2496))
+- `[proto]` Add `skip_extension_signing` field to the `SignVoteRequest` message
+  in `cometbft.privval.v1` ([\#2522](https://github.com/cometbft/cometbft/pull/2522)).
+  The `cometbft.privval.v1beta2` package is added to capture the protocol as it was
+  released in CometBFT 0.38.x
+  ([\#2529](https://github.com/cometbft/cometbft/pull/2529)).
+- `[protoio]` Remove one allocation and new object call from `ReadMsg`,
+  leading to a 4% p2p message reading performance gain.
+  ([\#3018](https://github.com/cometbft/cometbft/issues/3018))
+- `[rpc]` Add a configurable maximum batch size for RPC requests (see the sketch below).
+  ([\#2867](https://github.com/cometbft/cometbft/pull/2867)).
+- `[rpc]` Export RPC package errors ([\#2200](https://github.com/cometbft/cometbft/pull/2200)) (contributes to [\#1140](https://github.com/cometbft/cometbft/issues/1140))
+- `[rpc]` Export `MakeHTTPDialer` to allow HTTP client constructors more flexibility.
+  ([\#1594](https://github.com/cometbft/cometbft/pull/1594))
+- `[rpc]` Move the websocket info log for successful replies to debug.
+  ([\#2788](https://github.com/cometbft/cometbft/pull/2788))
+- `[rpc]` Support setting a proxy from the environment in `DefaultHttpClient`.
+  ([\#1900](https://github.com/cometbft/cometbft/pull/1900))
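+  A sketch of the RPC batch-size limit and the new p2p flush default in
+  `config.toml`. The `max_request_batch_size` key name and the values shown
+  are assumptions; verify them against your node's generated `config.toml`:
+```toml
+[rpc]
+# Maximum number of requests in a single JSON-RPC batch (0 = no limit).
+max_request_batch_size = 10
+
+[p2p]
+# New, lower default flush interval for batched writes.
+flush_throttle_timeout = "10ms"
+```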
+- `[rpc]` The RPC API is now versioned, with all existing endpoints accessible
+  via `/v1/*` as well as `/*`
+  ([\#1412](https://github.com/cometbft/cometbft/pull/1412))
+- `[rpc]` Use default port for HTTP(S) URLs when there is no explicit port ([\#1903](https://github.com/cometbft/cometbft/pull/1903))
+- `[spec]` Update Apalache type annotations in the light client spec ([\#955](https://github.com/cometbft/cometbft/pull/955))
+- `[state/execution]` Cache the block hash computation inside of the `Block` type, so we only compute it once.
+  ([\#2924](https://github.com/cometbft/cometbft/pull/2924))
+- `[state/indexer]` Add transaction and block index pruning
+  ([\#1176](https://github.com/cometbft/cometbft/pull/1176))
+- `[state/indexer]` Fix txSearch performance issue
+  ([\#2855](https://github.com/cometbft/cometbft/pull/2855))
+- `[state/indexer]` Lower the heap allocation of transaction searches
+  ([\#2839](https://github.com/cometbft/cometbft/pull/2839))
+- `[state]` ABCI response pruning has been added for use by the data companion
+  ([\#1096](https://github.com/cometbft/cometbft/issues/1096))
+- `[state]` Block pruning has been moved from the block executor into a
+  background process ([\#1096](https://github.com/cometbft/cometbft/issues/1096))
+- `[state]` Save the state using a single DB batch ([\#1735](https://github.com/cometbft/cometbft/pull/1735))
+- `[state]` avoid double-saving `FinalizeBlockResponse` for performance reasons
+  ([\#2017](https://github.com/cometbft/cometbft/pull/2017))
+- `[store]` Save block using a single DB batch if block is less than 640kB, otherwise each block part is saved individually
+  ([\#1755](https://github.com/cometbft/cometbft/pull/1755))
+- `[types]` Make a new method `GetByAddressMut` for `ValSet`, which does not copy the returned validator.
+  ([\#3119](https://github.com/cometbft/cometbft/issues/3119))
+- `[types]` Significantly speed up types.MakePartSet and types.AddPart, which are used in creating a block proposal
+  ([\#3117](https://github.com/cometbft/cometbft/issues/3117))
+- `[types]` Validate `Validator#Address` in `ValidateBasic` ([\#1715](https://github.com/cometbft/cometbft/pull/1715))

## v0.38.0

@@ -80,23 +658,18 @@ See the [specification](./spec/abci/) for more details on ABCI 2.0.

### BREAKING CHANGES

-- `[mempool]` Remove priority mempool.
-  ([\#260](https://github.com/cometbft/cometbft/issues/260))
+- `[abci]` Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx`
+  and `EndBlock` into a single method call
+  ([\#9468](https://github.com/tendermint/tendermint/pull/9468))
+- `[abci]` Move `app_hash` parameter from `Commit` to `FinalizeBlock`
+  ([\#8664](https://github.com/tendermint/tendermint/pull/8664))
- `[config]` Remove `Version` field from `MempoolConfig`.
  ([\#260](https://github.com/cometbft/cometbft/issues/260))
-- `[protobuf]` Remove fields `sender`, `priority`, and `mempool_error` from
-  `ResponseCheckTx`. ([\#260](https://github.com/cometbft/cometbft/issues/260))
- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root).
  `Proof.ComputeRootHash` now panics when it encounters an error,
  but `Proof.Verify` does not panic
  ([\#558](https://github.com/cometbft/cometbft/issues/558))
-- `[state/kvindexer]` Remove the function type from the event key stored in the database. This should be breaking only
-for people who forked CometBFT and interact directly with the indexers kvstore.
-  ([\#774](https://github.com/cometbft/cometbft/pull/774))
-- `[rpc]` Removed `begin_block_events` and `end_block_events` from `BlockResultsResponse`.
-  The events are merged into one field called `finalize_block_events`.
-  ([\#9427](https://github.com/tendermint/tendermint/issues/9427))
-- `[pubsub]` Added support for big integers and big floats in the pubsub event query system.
-  Breaking changes: function `Number` in package `libs/pubsub/query/syntax` changed its return value.
-  ([\#797](https://github.com/cometbft/cometbft/pull/797))
+- `[inspect]` Add a new `inspect` command for introspecting
+  the state and block store of a crashed tendermint node.
+  ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
- `[kvindexer]` Added support for big integers and big floats in the kvindexer.
  Breaking changes: function `Number` in package `libs/pubsub/query/syntax` changed its return value.
  ([\#797](https://github.com/cometbft/cometbft/pull/797))
@@ -109,63 +682,68 @@ for people who forked CometBFT and interact directly with the indexers kvstore.
  returned in `ResponsePrepareProposal.txs` does not exceed `RequestPrepareProposal.max_tx_bytes`,
  otherwise CometBFT will panic. ([\#980](https://github.com/cometbft/cometbft/issues/980))
+- `[mempool]` Remove priority mempool.
+  ([\#260](https://github.com/cometbft/cometbft/issues/260))
+- `[metrics]` Move state-syncing and block-syncing metrics to
+  their respective packages. Move labels from block_syncing
+  -> blocksync_syncing and state_syncing -> statesync_syncing
+  ([\#9682](https://github.com/tendermint/tendermint/pull/9682))
- `[node/state]` Add Go API to bootstrap block store and state store to a height. Make sure block sync starts syncing from bootstrapped height.
  ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang)
- `[state/store]` Added Go functions to save height at which offline state sync is performed.
  ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@jmalicevic)
-- `[p2p]` Remove UPnP functionality
-  ([\#1113](https://github.com/cometbft/cometbft/issues/1113))
+- `[node]` Move DBContext and DBProvider from the node package to the config
+  package. ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
- `[node]` Removed `ConsensusState()` accessor from `Node`
  struct - all access to consensus state should go via the reactor
  ([\#1120](https://github.com/cometbft/cometbft/pull/1120))
-- `[state]` Signature of `ExtendVote` changed in `BlockExecutor`.
-  It now includes the block whose precommit will be extended, an the state object.
-  ([\#1270](https://github.com/cometbft/cometbft/pull/1270))
-- `[state]` Move pruneBlocks from node/state to state/execution.
-  ([\#6541](https://github.com/tendermint/tendermint/pull/6541))
-- `[abci]` Move `app_hash` parameter from `Commit` to `FinalizeBlock`
-  ([\#8664](https://github.com/tendermint/tendermint/pull/8664))
-- `[abci]` Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx`
-  and `EndBlock` into a single method call
-  ([\#9468](https://github.com/tendermint/tendermint/pull/9468))
+- `[p2p]` Remove UPnP functionality
+  ([\#1113](https://github.com/cometbft/cometbft/issues/1113))
- `[p2p]` Remove unused p2p/trust package
  ([\#9625](https://github.com/tendermint/tendermint/pull/9625))
+- `[protobuf]` Remove fields `sender`, `priority`, and `mempool_error` from
+  `ResponseCheckTx`. ([\#260](https://github.com/cometbft/cometbft/issues/260))
+- `[pubsub]` Added support for big integers and big floats in the pubsub event query system.
+  Breaking changes: function `Number` in package `libs/pubsub/query/syntax` changed its return value.
+  ([\#797](https://github.com/cometbft/cometbft/pull/797))
- `[rpc]` Remove global environment and replace with constructor
  ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
-- `[node]` Move DBContext and DBProvider from the node package to the config
-  package. ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
-- `[inspect]` Add a new `inspect` command for introspecting
-  the state and block store of a crashed tendermint node.
-  ([\#9655](https://github.com/tendermint/tendermint/pull/9655))
-- `[metrics]` Move state-syncing and block-syncing metrics to
-  their respective packages. Move labels from block_syncing
-  -> blocksync_syncing and state_syncing -> statesync_syncing
-  ([\#9682](https://github.com/tendermint/tendermint/pull/9682))
+- `[rpc]` Removed `begin_block_events` and `end_block_events` from `BlockResultsResponse`.
+  The events are merged into one field called `finalize_block_events`.
+  ([\#9427](https://github.com/tendermint/tendermint/issues/9427))
+- `[state/kvindexer]` Remove the function type from the event key stored in the database. This should be breaking only
+for people who forked CometBFT and interact directly with the indexers kvstore.
+  ([\#774](https://github.com/cometbft/cometbft/pull/774))
+- `[state]` Move pruneBlocks from node/state to state/execution.
+  ([\#6541](https://github.com/tendermint/tendermint/pull/6541))
+- `[state]` Signature of `ExtendVote` changed in `BlockExecutor`.
+  It now includes the block whose precommit will be extended, and the state object.
+  ([\#1270](https://github.com/cometbft/cometbft/pull/1270))

### BUG FIXES

-- `[kvindexer]` Forward porting the fixes done to the kvindexer in 0.37 in PR \#77
-  ([\#423](https://github.com/cometbft/cometbft/pull/423))
-- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`.
-  ([\#496](https://github.com/cometbft/cometbft/pull/496))
+- `[abci-cli]` Fix broken abci-cli help command.
+  ([\#9717](https://github.com/tendermint/tendermint/pull/9717))
+- `[abci]` Restore the snake_case naming in JSON serialization of
+  `ExecTxResult` ([\#855](https://github.com/cometbft/cometbft/issues/855)).
+- `[consensus]` Avoid recursive call after rename to (*PeerState).MarshalJSON
+  ([\#863](https://github.com/cometbft/cometbft/pull/863))
- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race
  ([\#524](https://github.com/cometbft/cometbft/pull/524))
+- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`.
+  ([\#496](https://github.com/cometbft/cometbft/pull/496))
+- `[docker]` Ensure Docker image uses consistent version of Go.
+  ([\#9462](https://github.com/tendermint/tendermint/pull/9462))
+- `[kvindexer]` Forward porting the fixes done to the kvindexer in 0.37 in PR \#77
+  ([\#423](https://github.com/cometbft/cometbft/pull/423))
- `[light]` Fixed an edge case where a light client would panic when attempting
  to query a node that (1) has started from a non-zero height and (2) does
  not yet have any data.
The light client will now, correctly, not panic
  _and_ keep the node in its list of providers in the same way it would if
  it queried a node starting from height zero that does not yet have data
  ([\#575](https://github.com/cometbft/cometbft/issues/575))
-- `[abci]` Restore the snake_case naming in JSON serialization of
-  `ExecTxResult` ([\#855](https://github.com/cometbft/cometbft/issues/855)).
-- `[consensus]` Avoid recursive call after rename to (*PeerState).MarshalJSON
-  ([\#863](https://github.com/cometbft/cometbft/pull/863))
- `[mempool/clist_mempool]` Prevent a transaction from appearing twice in the mempool
  ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack)
-- `[docker]` Ensure Docker image uses consistent version of Go.
-  ([\#9462](https://github.com/tendermint/tendermint/pull/9462))
-- `[abci-cli]` Fix broken abci-cli help command.
-  ([\#9717](https://github.com/tendermint/tendermint/pull/9717))

### DEPRECATIONS

@@ -176,6 +754,7 @@ for people who forked CometBFT and interact directly with the indexers kvstore.

### FEATURES

+- `[abci]` New ABCI methods `VerifyVoteExtension` and `ExtendVote` allow validators to validate the vote extension data attached to a pre-commit message and allow applications to let their validators do more than just validate within consensus ([\#9836](https://github.com/tendermint/tendermint/pull/9836))
- `[node/state]` Add Go API to bootstrap block store and state store to a height
  ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang)
- `[proxy]` Introduce `NewConnSyncLocalClientCreator`, which allows local ABCI
@@ -187,109 +766,37 @@ for people who forked CometBFT and interact directly with the indexers kvstore.
  clients to have the same concurrency model as remote clients (i.e. one
  mutex per client "connection", for each of the four ABCI "connections").
  ([\#9830](https://github.com/tendermint/tendermint/pull/9830))
-- `[abci]` New ABCI methods `VerifyVoteExtension` and `ExtendVote` allow validators to validate the vote extension data attached to a pre-commit message and allow applications to let their validators do more than just validate within consensus ([\#9836](https://github.com/tendermint/tendermint/pull/9836))

### IMPROVEMENTS

- `[blocksync]` Generate new metrics during BlockSync
  ([\#543](https://github.com/cometbft/cometbft/pull/543))
+- `[crypto/merkle]` Improve HashAlternatives performance
+  ([\#6443](https://github.com/tendermint/tendermint/pull/6443))
+- `[crypto/merkle]` Improve HashAlternatives performance
+  ([\#6513](https://github.com/tendermint/tendermint/pull/6513))
- `[jsonrpc/client]` Improve the error message for client errors stemming from
  bad HTTP responses.
  ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638))
-- `[rpc]` Remove response data from response failure logs in order
-  to prevent large quantities of log data from being produced
-  ([\#654](https://github.com/cometbft/cometbft/issues/654))
-- `[pubsub/kvindexer]` Numeric query conditions and event values are represented as big floats with default precision of 125.
-  Integers are read as "big ints" and represented with as many bits as they need when converting to floats.
-  ([\#797](https://github.com/cometbft/cometbft/pull/797))
-- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857))
- `[mempool]` Application can now set `ConsensusParams.Block.MaxBytes` to -1
  to gain more control on the max size of transactions in a block.
It also allows the application to have visibility on all transactions in the mempool at `PrepareProposal` time. ([\#980](https://github.com/cometbft/cometbft/pull/980)) - `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee) -- `[state]` Make logging `block_app_hash` and `app_hash` consistent by logging them both as hex. - ([\#1264](https://github.com/cometbft/cometbft/pull/1264)) -- `[crypto/merkle]` Improve HashAlternatives performance - ([\#6443](https://github.com/tendermint/tendermint/pull/6443)) +- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) - `[p2p/pex]` Improve addrBook.hash performance ([\#6509](https://github.com/tendermint/tendermint/pull/6509)) -- `[crypto/merkle]` Improve HashAlternatives performance - ([\#6513](https://github.com/tendermint/tendermint/pull/6513)) +- `[pubsub/kvindexer]` Numeric query conditions and event values are represented as big floats with default precision of 125. + Integers are read as "big ints" and represented with as many bits as they need when converting to floats. + ([\#797](https://github.com/cometbft/cometbft/pull/797)) - `[pubsub]` Performance improvements for the event query API ([\#7319](https://github.com/tendermint/tendermint/pull/7319)) - -## v0.37.2 - -*June 14, 2023* - -Provides several minor bug fixes, as well as fixes for several low-severity -security issues. - -### BUG FIXES - -- `[state/kvindex]` Querying event attributes that are bigger than int64 is now - enabled. We are not supporting reading floats from the db into the indexer - nor parsing them into BigFloats to not introduce breaking changes in minor - releases. ([\#771](https://github.com/cometbft/cometbft/pull/771)) -- `[pubsub]` Pubsub queries are now able to parse big integers (larger than - int64). Very big floats are also properly parsed into very big integers - instead of being truncated to int64. - ([\#771](https://github.com/cometbft/cometbft/pull/771)) - -### IMPROVEMENTS - - `[rpc]` Remove response data from response failure logs in order to prevent large quantities of log data from being produced ([\#654](https://github.com/cometbft/cometbft/issues/654)) - -### SECURITY FIXES - -- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC - client credentials from being inadvertently dumped to logs - ([\#787](https://github.com/cometbft/cometbft/pull/787)) -- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in - `debug kill` command ([\#793](https://github.com/cometbft/cometbft/pull/793)) -- `[consensus]` **Low severity** - Avoid recursive call after rename to - `(*PeerState).MarshalJSON` - ([\#863](https://github.com/cometbft/cometbft/pull/863)) -- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from - appearing twice in the mempool - ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) - -## v0.37.1 - -*April 26, 2023* - -This release fixes several bugs, and has had to introduce one small Go -API-breaking change in the `crypto/merkle` package in order to address what -could be a security issue for some users who directly and explicitly make use of -that code. - -### BREAKING CHANGES - -- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root). 
`Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic
-  ([\#558](https://github.com/cometbft/cometbft/issues/558))
-
-### BUG FIXES
-
-- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`.
-  ([\#496](https://github.com/cometbft/cometbft/pull/496))
-- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race
-  ([\#524](https://github.com/cometbft/cometbft/pull/524))
-- `[light]` Fixed an edge case where a light client would panic when attempting
-  to query a node that (1) has started from a non-zero height and (2) does
-  not yet have any data. The light client will now, correctly, not panic
-  _and_ keep the node in its list of providers in the same way it would if
-  it queried a node starting from height zero that does not yet have data
-  ([\#575](https://github.com/cometbft/cometbft/issues/575))
-
-### IMPROVEMENTS
-
-- `[jsonrpc/client]` Improve the error message for client errors stemming from
-  bad HTTP responses.
-  ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638))
+- `[state]` Make logging `block_app_hash` and `app_hash` consistent by logging them both as hex.
+  ([\#1264](https://github.com/cometbft/cometbft/pull/1264))

## v0.37.0

@@ -321,58 +828,77 @@ See below for more details.

### BREAKING CHANGES

-- The `TMHOME` environment variable was renamed to `CMTHOME`, and all environment variables starting with `TM_` are instead prefixed with `CMT_`
-  ([\#211](https://github.com/cometbft/cometbft/issues/211))
-- `[p2p]` Reactor `Send`, `TrySend` and `Receive` renamed to `SendEnvelope`,
-  `TrySendEnvelope` and `ReceiveEnvelope` to allow metrics to be appended to
-  messages and measure bytes sent/received.
-  ([\#230](https://github.com/cometbft/cometbft/pull/230))
- Bump minimum Go version to 1.20
  ([\#385](https://github.com/cometbft/cometbft/issues/385))
-- `[abci]` Make length delimiter encoding consistent
-  (`uint64`) between ABCI and P2P wire-level protocols
-  ([\#5783](https://github.com/tendermint/tendermint/pull/5783))
-- `[abci]` Change the `key` and `value` fields from
-  `[]byte` to `string` in the `EventAttribute` type.
-  ([\#6403](https://github.com/tendermint/tendermint/pull/6403))
+- Change spelling from British English to American. Rename
+  `Subscription.Cancelled()` to `Subscription.Canceled()` in `libs/pubsub`
+  ([\#9144](https://github.com/tendermint/tendermint/pull/9144))
+- The `TMHOME` environment variable was renamed to `CMTHOME`, and all environment variables starting with `TM_` are instead prefixed with `CMT_`
+  ([\#211](https://github.com/cometbft/cometbft/issues/211))
+- `[config]` The boolean key `fastsync` is deprecated and replaced by
+  `block_sync`. ([\#9259](https://github.com/tendermint/tendermint/pull/9259))
+  At the same time, `block_sync` is also deprecated. In the next release,
+  BlockSync will always be enabled and `block_sync` will be removed.
+  ([\#409](https://github.com/cometbft/cometbft/issues/409))
- `[abci/counter]` Delete counter example app
  ([\#6684](https://github.com/tendermint/tendermint/pull/6684))
-- `[abci]` Renamed `EvidenceType` to `MisbehaviorType` and `Evidence`
-  to `Misbehavior` as a more accurate label of their contents.
-  ([\#8216](https://github.com/tendermint/tendermint/pull/8216))
+- `[abci/params]` Deduplicate `ConsensusParams` and `BlockParams` so
+  only `types` proto definitions are used. Remove `TimeIotaMs` and use
+  a hard-coded 1 millisecond value to ensure monotonically increasing
+  block times. Rename `AppVersion` to `App` so as to not stutter.
+  ([\#9287](https://github.com/tendermint/tendermint/pull/9287))
- `[abci]` Added cli commands for `PrepareProposal` and `ProcessProposal`.
  ([\#8656](https://github.com/tendermint/tendermint/pull/8656))
- `[abci]` Added cli commands for `PrepareProposal` and `ProcessProposal`.
  ([\#8901](https://github.com/tendermint/tendermint/pull/8901))
-- `[abci]` Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote
-  extensions. ([\#9122](https://github.com/tendermint/tendermint/pull/9122))
-- Change spelling from British English to American. Rename
-  `Subscription.Cancelled()` to `Subscription.Canceled()` in `libs/pubsub`
-  ([\#9144](https://github.com/tendermint/tendermint/pull/9144))
+- `[abci]` Change the `key` and `value` fields from
+  `[]byte` to `string` in the `EventAttribute` type.
+  ([\#6403](https://github.com/tendermint/tendermint/pull/6403))
+- `[abci]` Make length delimiter encoding consistent
+  (`uint64`) between ABCI and P2P wire-level protocols
+  ([\#5783](https://github.com/tendermint/tendermint/pull/5783))
+- `[abci]` New ABCI methods `PrepareProposal` and `ProcessProposal` which give
+  the app control over transactions proposed and allow for verification of
+  proposed blocks. ([\#9301](https://github.com/tendermint/tendermint/pull/9301))
- `[abci]` Removes unused Response/Request `SetOption` from ABCI
  ([\#9145](https://github.com/tendermint/tendermint/pull/9145))
+- `[abci]` Renamed `EvidenceType` to `MisbehaviorType` and `Evidence`
+  to `Misbehavior` as a more accurate label of their contents.
+  ([\#8216](https://github.com/tendermint/tendermint/pull/8216))
+- `[abci]` Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote
+  extensions. ([\#9122](https://github.com/tendermint/tendermint/pull/9122))
- `[config]` Rename the fastsync section and the fast\_sync key blocksync and
  block\_sync respectively
  ([\#9259](https://github.com/tendermint/tendermint/pull/9259))
+- `[p2p]` Reactor `Send`, `TrySend` and `Receive` renamed to `SendEnvelope`,
+  `TrySendEnvelope` and `ReceiveEnvelope` to allow metrics to be appended to
+  messages and measure bytes sent/received.
+  ([\#230](https://github.com/cometbft/cometbft/pull/230))
- `[types]` Reduce the use of protobuf types in core logic. `ConsensusParams`,
  `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have
  become native types. They still utilize protobuf when being sent over
  the wire or written to disk. Moved `ValidateConsensusParams` inside
  (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
  ([\#9287](https://github.com/tendermint/tendermint/pull/9287))
-- `[abci/params]` Deduplicate `ConsensusParams` and `BlockParams` so
-  only `types` proto definitions are use. Remove `TimeIotaMs` and use
-  a hard-coded 1 millisecond value to ensure monotonically increasing
-  block times. Rename `AppVersion` to `App` so as to not stutter.
-  ([\#9287](https://github.com/tendermint/tendermint/pull/9287))
-- `[abci]` New ABCI methods `PrepareProposal` and `ProcessProposal` which give
-  the app control over transactions proposed and allows for verification of
-  proposed blocks.
([\#9301](https://github.com/tendermint/tendermint/pull/9301)) ### BUG FIXES +- `[blocksync]` handle the case when the sending + queue is full: retry block request after a timeout + ([\#9518](https://github.com/tendermint/tendermint/pull/9518)) +- `[consensus]` ([\#386](https://github.com/cometbft/cometbft/pull/386)) Short-term fix for the case when `needProofBlock` cannot find previous block meta by defaulting to the creation of a new proof block. (@adizere) + - Special thanks to the [Vega.xyz](https://vega.xyz/) team, and in particular to Zohar (@ze97286), for reporting the problem and working with us to get to a fix. - `[consensus]` Fixed a busy loop that happened when sending of a block part failed by sleeping in case of error. ([\#4](https://github.com/informalsystems/tendermint/pull/4)) +- `[consensus]` fix round number of `enterPropose` + when handling `RoundStepNewRound` timeout. + ([\#9229](https://github.com/tendermint/tendermint/pull/9229)) +- `[docker]` enable cross platform build using docker buildx + ([\#9073](https://github.com/tendermint/tendermint/pull/9073)) +- `[docker]` ensure Docker image uses consistent version of Go + ([\#9462](https://github.com/tendermint/tendermint/pull/9462)) +- `[p2p]` prevent peers who have errored from being added to `peer_set` + ([\#9500](https://github.com/tendermint/tendermint/pull/9500)) - `[state/kvindexer]` Fixed the default behaviour of the kvindexer to index and query attributes by events in which they occur. In 0.34.25 this was mitigated by a separated RPC flag. @jmalicevic @@ -381,20 +907,6 @@ See below for more details. introduced after adding event sequences in [\#77](https://github.com/cometbft/cometbft/pull/77). @jmalicevic ([\#382](https://github.com/cometbft/cometbft/pull/382)) -- `[consensus]` ([\#386](https://github.com/cometbft/cometbft/pull/386)) Short-term fix for the case when `needProofBlock` cannot find previous block meta by defaulting to the creation of a new proof block. (@adizere) - - Special thanks to the [Vega.xyz](https://vega.xyz/) team, and in particular to Zohar (@ze97286), for reporting the problem and working with us to get to a fix. -- `[docker]` enable cross platform build using docker buildx - ([\#9073](https://github.com/tendermint/tendermint/pull/9073)) -- `[consensus]` fix round number of `enterPropose` - when handling `RoundStepNewRound` timeout. - ([\#9229](https://github.com/tendermint/tendermint/pull/9229)) -- `[docker]` ensure Docker image uses consistent version of Go - ([\#9462](https://github.com/tendermint/tendermint/pull/9462)) -- `[p2p]` prevent peers who have errored from being added to `peer_set` - ([\#9500](https://github.com/tendermint/tendermint/pull/9500)) -- `[blocksync]` handle the case when the sending - queue is full: retry block request after a timeout - ([\#9518](https://github.com/tendermint/tendermint/pull/9518)) ### FEATURES @@ -404,14 +916,6 @@ See below for more details. ### IMPROVEMENTS -- `[e2e]` Add functionality for uncoordinated (minor) upgrades - ([\#56](https://github.com/tendermint/tendermint/pull/56)) -- `[tools/tm-signer-harness]` Remove the folder as it is unused - ([\#136](https://github.com/cometbft/cometbft/issues/136)) -- `[p2p]` Reactor `Send`, `TrySend` and `Receive` renamed to `SendEnvelope`, - `TrySendEnvelope` and `ReceiveEnvelope` to allow metrics to be appended to - messages and measure bytes sent/received. 
- ([\#230](https://github.com/cometbft/cometbft/pull/230)) - `[abci]` Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to CometBFT. ([\#5706](https://github.com/tendermint/tendermint/pull/5706)) @@ -419,86 +923,24 @@ See below for more details. state and remove the last block. This command can be triggered multiple times. The application must also rollback state to the same height. ([\#9171](https://github.com/tendermint/tendermint/pull/9171)) +- `[consensus]` Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. + ([\#9760](https://github.com/tendermint/tendermint/pull/9760)) - `[crypto]` Update to use btcec v2 and the latest btcutil. ([\#9250](https://github.com/tendermint/tendermint/pull/9250)) -- `[rpc]` Added `header` and `header_by_hash` queries to the RPC client - ([\#9276](https://github.com/tendermint/tendermint/pull/9276)) +- `[e2e]` Add functionality for uncoordinated (minor) upgrades + ([\#56](https://github.com/tendermint/tendermint/pull/56)) +- `[p2p]` Reactor `Send`, `TrySend` and `Receive` renamed to `SendEnvelope`, + `TrySendEnvelope` and `ReceiveEnvelope` to allow metrics to be appended to + messages and measure bytes sent/received. + ([\#230](https://github.com/cometbft/cometbft/pull/230)) - `[proto]` Migrate from `gogo/protobuf` to `cosmos/gogoproto` ([\#9356](https://github.com/tendermint/tendermint/pull/9356)) +- `[rpc]` Added `header` and `header_by_hash` queries to the RPC client + ([\#9276](https://github.com/tendermint/tendermint/pull/9276)) - `[rpc]` Enable caching of RPC responses ([\#9650](https://github.com/tendermint/tendermint/pull/9650)) -- `[consensus]` Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. - ([\#9760](https://github.com/tendermint/tendermint/pull/9760)) - -## v0.34.29 - -*June 14, 2023* - -Provides several minor bug fixes, as well as fixes for several low-severity -security issues. - -### BUG FIXES - -- `[state/kvindex]` Querying event attributes that are bigger than int64 is now - enabled. ([\#771](https://github.com/cometbft/cometbft/pull/771)) -- `[pubsub]` Pubsub queries are now able to parse big integers (larger than - int64). Very big floats are also properly parsed into very big integers - instead of being truncated to int64. 
- ([\#771](https://github.com/cometbft/cometbft/pull/771)) - -### IMPROVEMENTS - -- `[rpc]` Remove response data from response failure logs in order - to prevent large quantities of log data from being produced - ([\#654](https://github.com/cometbft/cometbft/issues/654)) - -### SECURITY FIXES - -- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC - client credentials from being inadvertently dumped to logs - ([\#788](https://github.com/cometbft/cometbft/pull/788)) -- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in - `debug kill` command ([\#794](https://github.com/cometbft/cometbft/pull/794)) -- `[consensus]` **Low severity** - Avoid recursive call after rename to - `(*PeerState).MarshalJSON` - ([\#863](https://github.com/cometbft/cometbft/pull/863)) -- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from - appearing twice in the mempool - ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) - -## v0.34.28 - -*April 26, 2023* - -This release fixes several bugs, and has had to introduce one small Go -API-breaking change in the `crypto/merkle` package in order to address what -could be a security issue for some users who directly and explicitly make use of -that code. - -### BREAKING CHANGES - -- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root). `Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic - ([\#558](https://github.com/cometbft/cometbft/issues/558)) - -### BUG FIXES - -- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`. - ([\#496](https://github.com/cometbft/cometbft/pull/496)) -- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race - ([\#524](https://github.com/cometbft/cometbft/pull/524)) -- `[light]` Fixed an edge case where a light client would panic when attempting - to query a node that (1) has started from a non-zero height and (2) does - not yet have any data. The light client will now, correctly, not panic - _and_ keep the node in its list of providers in the same way it would if - it queried a node starting from height zero that does not yet have data - ([\#575](https://github.com/cometbft/cometbft/issues/575)) - -### IMPROVEMENTS - -- `[crypto/sr25519]` Upgrade to go-schnorrkel@v1.0.0 ([\#475](https://github.com/cometbft/cometbft/issues/475)) -- `[jsonrpc/client]` Improve the error message for client errors stemming from - bad HTTP responses. - ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638)) +- `[tools/tm-signer-harness]` Remove the folder as it is unused + ([\#136](https://github.com/cometbft/cometbft/issues/136)) ## v0.34.27 @@ -535,9 +977,6 @@ to this release! - `[consensus]` Fixed a busy loop that happened when sending of a block part failed by sleeping in case of error. ([\#4](https://github.com/informalsystems/tendermint/pull/4)) -- `[state/kvindexer]` Resolved crashes when event values contained slashes, - introduced after adding event sequences. - (\#[383](https://github.com/cometbft/cometbft/pull/383): @jmalicevic) - `[consensus]` Short-term fix for the case when `needProofBlock` cannot find previous block meta by defaulting to the creation of a new proof block. ([\#386](https://github.com/cometbft/cometbft/pull/386): @adizere) @@ -549,14 +988,17 @@ to this release! 
whether this has a meaningful impact on P2P performance, but this patch does correct the underlying behaviour to what it should be ([tendermint/tendermint\#9936](https://github.com/tendermint/tendermint/pull/9936)) +- `[state/kvindexer]` Resolved crashes when event values contained slashes, + introduced after adding event sequences. + (\#[383](https://github.com/cometbft/cometbft/pull/383): @jmalicevic) ### DEPENDENCIES +- Bump tm-load-test to v1.3.0 to remove implicit dependency on Tendermint Core + ([\#165](https://github.com/cometbft/cometbft/pull/165)) - Replace [tm-db](https://github.com/tendermint/tm-db) with [cometbft-db](https://github.com/cometbft/cometbft-db) ([\#160](https://github.com/cometbft/cometbft/pull/160)) -- Bump tm-load-test to v1.3.0 to remove implicit dependency on Tendermint Core - ([\#165](https://github.com/cometbft/cometbft/pull/165)) - `[crypto]` Update to use btcec v2 and the latest btcutil ([tendermint/tendermint\#9787](https://github.com/tendermint/tendermint/pull/9787): @wcsiu) @@ -569,29 +1011,29 @@ to this release! ### IMPROVEMENTS -- `[e2e]` Add functionality for uncoordinated (minor) upgrades - ([\#56](https://github.com/tendermint/tendermint/pull/56)) -- `[tools/tm-signer-harness]` Remove the folder as it is unused - ([\#136](https://github.com/cometbft/cometbft/issues/136)) - Append the commit hash to the version of CometBFT being built ([\#204](https://github.com/cometbft/cometbft/pull/204)) -- `[mempool/v1]` Suppress "rejected bad transaction" in priority mempool logs by - reducing log level from info to debug - ([\#314](https://github.com/cometbft/cometbft/pull/314): @JayT106) - `[consensus]` Add `consensus_block_gossip_parts_received` and `consensus_step_duration_seconds` metrics in order to aid in investigating the impact of database compaction on consensus performance ([tendermint/tendermint\#9733](https://github.com/tendermint/tendermint/pull/9733)) -- `[state/kvindexer]` Add `match.event` keyword to support condition evaluation - based on the event the attributes belong to - ([tendermint/tendermint\#9759](https://github.com/tendermint/tendermint/pull/9759)) +- `[consensus]` Reduce bandwidth consumption of consensus votes by roughly 50% + through fixing a small logic bug + ([tendermint/tendermint\#9776](https://github.com/tendermint/tendermint/pull/9776)) +- `[e2e]` Add functionality for uncoordinated (minor) upgrades + ([\#56](https://github.com/tendermint/tendermint/pull/56)) +- `[mempool/v1]` Suppress "rejected bad transaction" in priority mempool logs by + reducing log level from info to debug + ([\#314](https://github.com/cometbft/cometbft/pull/314): @JayT106) - `[p2p]` Reduce log spam through reducing log level of "Dialing peer" and "Added peer" messages from info to debug ([tendermint/tendermint\#9764](https://github.com/tendermint/tendermint/pull/9764): @faddat) -- `[consensus]` Reduce bandwidth consumption of consensus votes by roughly 50% - through fixing a small logic bug - ([tendermint/tendermint\#9776](https://github.com/tendermint/tendermint/pull/9776)) +- `[state/kvindexer]` Add `match.event` keyword to support condition evaluation + based on the event the attributes belong to + ([tendermint/tendermint\#9759](https://github.com/tendermint/tendermint/pull/9759)) +- `[tools/tm-signer-harness]` Remove the folder as it is unused + ([\#136](https://github.com/cometbft/cometbft/issues/136)) --- diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index c25964180e6..3f93f1e5e83 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ 
-1,13 +1,13 @@ # The CometBFT Code of Conduct -This code of conduct applies to all projects run by the CometBFT/Cosmos team and +This code of conduct applies to all projects run by the CometBFT team and hence to CometBFT. ---- # Conduct -## Contact: conduct@interchain.io +## Contact: conduct@informal.systems * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and @@ -35,7 +35,7 @@ hence to CometBFT. * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community - member, please contact one of the channel admins or the person mentioned above + member, please get in touch with one of the channel admins or the contact address above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3f050cb8665..a5111ea8fbc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -54,6 +54,15 @@ release? automatically generated code changes should occur within separate commits, so they are easily distinguishable from manual code changes. +3. Make sure that your pull request addresses a particular issue and that its +description starts with the issue number: If it fully closes the issue, +please start with "Closes #XXX" (where "XXX" is the issue number); otherwise, +use "Partially closes #XXX" or "Addresses #XXX". + +If the work in a PR is not aligned with the team's current priorities, please +be advised that it may take some time before it is merged - especially if it has +not been previously discussed with the team. + ## Workflow The following diagram summarizes the general workflow used by the core team to @@ -126,7 +135,7 @@ problems and help structure conversations around trade-offs. When the problem is well understood but the solution leads to large/complex/risky structural changes to the code base, these changes should be proposed in the form of an [Architectural Decision Record -(ADR)](./docs/architecture/). The ADR will help build consensus on an overall +(ADR)](docs/references/architecture/). The ADR will help build consensus on an overall strategy to ensure the code base maintains coherence in the larger context. If you are not comfortable with writing an ADR, you can open a less-formal issue and the maintainers will help you turn it into an ADR. Sometimes the best way to @@ -207,6 +216,9 @@ When updating dependencies, please only update the particular dependencies you need. Instead of running `go get -u=patch`, which will update anything, specify exactly the dependency you want to update. +Do not bump the major Go version in a patch release (namely, `v0.34.x`, `v0.37.x`, +`v0.38.x` branches) unless there's a pressing reason to do so (e.g., known security vulnerabilities). + ## Logging Operators, consensus engine and application developers all need information from @@ -227,12 +239,12 @@ Nth message, or a summary message every minute or hour). ### Log levels -Different log levels should target different groups of users. At present, only -**Debug**, **Info** and **Error** levels are supported. +Different log levels should target different groups of users. CometBFT supports +**Debug**, **Info**, **Warn** and **Error** levels. - **Debug**: Should primarily target consensus engine developers (i.e. core team members and developers working on CometBFT forks).
-- **Info** and **Error**: Should primarily target operators and application +- **Info**, **Warn** and **Error**: Should primarily target operators and application developers. ### Sensitive information @@ -382,20 +394,23 @@ title of the PR _very_ clearly explains the benefit of a change to a user. Some good examples of changelog entry descriptions: ```md -- [consensus] \#1111 Small transaction throughput improvement (approximately - 3-5\% from preliminary tests) through refactoring the way we use channels -- [mempool] \#1112 Refactor Go API to be able to easily swap out the current - mempool implementation in CometBFT forks -- [p2p] \#1113 Automatically ban peers when their messages are unsolicited or - are received too frequently +- `[consensus]` Small transaction throughput improvement (approximately 3-5\% + from preliminary tests) through refactoring the way we use channels + ([\#1111](https://github.com/cometbft/cometbft/issues/1111)) +- `[mempool]` Refactor Go API to be able to easily swap out the current mempool + implementation in CometBFT forks + ([\#1112](https://github.com/cometbft/cometbft/issues/1112)) +- `[p2p]` Automatically ban peers when their messages are unsolicited or are + received too frequently + ([\#1113](https://github.com/cometbft/cometbft/issues/1113)) ``` Some bad examples of changelog entry descriptions: ```md -- [consensus] \#1111 Refactor channel usage -- [mempool] \#1112 Make API generic -- [p2p] \#1113 Ban for PEX message abuse +- `[consensus]` Refactor channel usage +- `[mempool]` Make API generic +- `[p2p]` Ban for PEX message abuse ``` For more on how to write good changelog entries, see: @@ -409,24 +424,24 @@ For more on how to write good changelog entries, see: Changelog entries should be formatted as follows: ```md -- [module] \#xxx Some description of the change (@contributor) +- `[module]` Some description of the change + ([\#1234](https://github.com/cometbft/cometbft/issues/1234): @contributor) ``` Here, `module` is the part of the code that changed (typically a top-level Go -package), `xxx` is the pull-request number, and `contributor` is the author/s of -the change. +package), `1234` is the pull-request number, and `contributor` is the author/s +of the change (only necessary if you are not a member of the CometBFT core +team). -It's also acceptable for `xxx` to refer to the relevant issue number, but +It's also acceptable for `1234` to refer to the relevant issue number, but pull-request numbers are preferred. Note this means pull-requests should be opened first so the changelog can then be updated with the pull-request's -number. There is no need to include the full link, as this will be added -automatically during release. But please include the backslash and pound, eg. -`\#2313`. +number. Changelog entries should be ordered alphabetically according to the `module`, and numerically according to the pull-request number. -Changes with multiple classifications should be doubly included (eg. a bug fix +Changes with multiple classifications should be doubly included (e.g. a bug fix that is also a breaking change should be recorded under both). Breaking changes are further subdivided according to the APIs/users they impact. @@ -439,7 +454,8 @@ removed from the header in RPC responses as well. The main development branch is `main`. -Every release is maintained in a release branch named `vX.Y.Z`. +Every release is maintained in a release branch named according to its major +release number (e.g. `v0.38.x` or `v1.x`). 
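To make the branch layout concrete, here is a minimal, illustrative sketch (assuming a local clone whose `origin` remote points at the CometBFT repository; the branch names follow the convention just described):

```bash
# List the long-lived release branches on the remote.
git fetch origin
git branch -r --list 'origin/v*'   # e.g. origin/v0.38.x, origin/v1.x

# Check out a specific release line, e.g. to build or inspect it.
git checkout v0.38.x
```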
Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same @@ -454,8 +470,8 @@ the feature is complete, the feature branch is merged back (merge commit) into different features in different releases. Note, all pull requests should be squash merged except for merging to a release -branch (named `vX.Y`). This keeps the commit history clean and makes it easy to -reference the pull request where a change was introduced. +branch. This keeps the commit history clean and makes it easy to reference the +pull request where a change was introduced. ### Development Procedure @@ -481,6 +497,29 @@ means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!) +### Formatting & Linting + +When submitting a change, please make sure to: + +1. Format the code using [gofumpt](https://github.com/mvdan/gofumpt) +2. Lint the code using [golangci-lint](https://golangci-lint.run/) +3. Check the code and docs for spelling errors using [codespell](https://github.com/codespell-project/codespell). + +It's recommended to install a Git pre-commit hook: `make pre-commit`. The hook will +automatically run the above steps for you every time you commit something. You +can also do this manually with `make lint`. + +The pre-commit hook uses [the pre-commit framework](https://pre-commit.com/). +If you have Python 3 installed, you don't need to do anything else. Otherwise, +please refer to [the installation guide](https://pre-commit.com/#install). + +In rare cases, you may want to skip the pre-commit hook. You can do so by adding +the `-n` (or `--no-verify`) flag to `git commit`: + +```bash +git commit -n -m "add X" +``` + #### Merging Pull Requests It is also our convention that authors merge their own pull requests, when @@ -495,39 +534,26 @@ Before merging a pull request: - Run `make test` to ensure that all tests pass - [Squash][git-squash] merge pull request -#### Pull Requests for Minor Releases - -If your change should be included in a minor release, please also open a PR -against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) -_immediately after your change has been merged to main_. - -You can do this by cherry-picking your commit off `main`: - -```sh -$ git checkout rc1/v0.33.5 -$ git checkout -b {new branch name} -$ git cherry-pick {commit SHA from main} -# may need to fix conflicts, and then use git add and git cherry-pick --continue -$ git push origin {new branch name} -``` +### Git Commit Style -After this, you can open a PR. Please note in the PR body if there were merge -conflicts so that reviewers can be sure to take a thorough look. +We follow the [Conventional Commits][conventional-commits] spec. Write concise +commits that start with a type (`fix`, `feat`, `chore`, `ci`, `docs`, etc.) and +an optional scope, i.e. the package name (e.g., `feat(internal/consensus)`), followed +by a description that finishes the sentence "This change modifies CometBFT +to...". -### Git Commit Style +If the commit introduces a breaking change, append the `!` after the scope +(e.g., `feat(internal/consensus)!`). -We follow the [Go style guide on commit messages][go-git-commit-style]. Write -concise commits that start with the package name and have a description that -finishes the sentence "This change modifies CometBFT to...".
For example, +For example, ```sh -cmd/debug: execute p.Signal only when p is not nil +fix(cmd/cometbft/commands/debug): execute p.Signal only when p is not nil [potentially longer description in the body] Fixes #nnnn ``` - Each PR should have one commit once it lands on `main`; this can be accomplished by using the "squash and merge" button on GitHub. Be sure to edit your commit message, though! @@ -591,8 +617,8 @@ in the [OpenAPI file](./rpc/openapi/openapi.yaml)**. [`clang-format`]: https://clang.llvm.org/docs/ClangFormat.html [unclog]: https://github.com/informalsystems/unclog [git-squash]: https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git -[go-git-commit-style]: https://tip.golang.org/doc/contribute.html#commit_messages [go-testing]: https://golang.org/pkg/testing/ [Fuzz tests]: https://en.wikipedia.org/wiki/Fuzzing [delve]: https://github.com/go-delve/delve [log-lazy]: https://github.com/cometbft/cometbft/blob/main/libs/log/lazy.go +[conventional-commits]: https://www.conventionalcommits.org/en/v1.0.0/ diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 3649cc5a0b3..52b2144488c 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,9 +1,9 @@ # Use a build arg to ensure that both stages use the same, # hopefully current, go version. -ARG GOLANG_BASE_IMAGE=golang:1.21-alpine +ARG GOLANG_BASE_IMAGE=golang:1.23.1-alpine # stage 1 Generate CometBFT Binary -FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder +FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE AS builder RUN apk update && \ apk upgrade && \ apk --no-cache add make git diff --git a/DOCKER/Dockerfile.build_c-amazonlinux b/DOCKER/Dockerfile.build_c-amazonlinux deleted file mode 100644 index 633b2a99775..00000000000 --- a/DOCKER/Dockerfile.build_c-amazonlinux +++ /dev/null @@ -1,28 +0,0 @@ -FROM amazonlinux:2 - -RUN yum -y update && \ - yum -y install wget - -RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ - rpm -ivh epel-release-latest-7.noarch.rpm - -RUN yum -y groupinstall "Development Tools" -RUN yum -y install leveldb-devel which - -ENV GOVERSION=1.12.9 - -RUN cd /tmp && \ - wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \ - tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \ - mkdir -p /go/src && \ - mkdir -p /go/bin - -ENV PATH=$PATH:/usr/local/go/bin:/go/bin -ENV GOBIN=/go/bin -ENV GOPATH=/go/src - -RUN mkdir -p /cometbft -WORKDIR /cometbft - -CMD ["/usr/bin/make", "build", "COMETBFT_BUILD_OPTIONS=cleveldb"] - diff --git a/DOCKER/Makefile b/DOCKER/Makefile index 103d70c4d08..0bf457e7a4d 100644 --- a/DOCKER/Makefile +++ b/DOCKER/Makefile @@ -7,7 +7,4 @@ push: build_testing: docker build --tag cometbft/testing -f ./Dockerfile.testing . -build_amazonlinux_buildimage: - docker build -t "cometbft/cometbft:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux . - -.PHONY: build push build_testing build_amazonlinux_buildimage +.PHONY: build push build_testing diff --git a/DOCKER/README.md b/DOCKER/README.md index 21f3dd2000b..f66f99fe93c 100644 --- a/DOCKER/README.md +++ b/DOCKER/README.md @@ -20,9 +20,9 @@ Respective versioned files can be found at `https://raw.githubusercontent.com/co CometBFT is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines. -For more background, see the [the docs](https://docs.cometbft.com/main/introduction/#quick-start). 
+For more background, see [the docs](https://docs.cometbft.com/v1.0/explanation/introduction/). -To get started developing applications, see the [application developers guide](https://docs.cometbft.com/main/introduction/quick-start.html). +To get started developing applications, see the [application developers guide](https://docs.cometbft.com/v1.0/tutorials/quick-start). ## How to use this image diff --git a/DOCKER/build.sh b/DOCKER/build.sh index 3d8a6a01b97..35eb27d7032 100755 --- a/DOCKER/build.sh +++ b/DOCKER/build.sh @@ -3,7 +3,7 @@ set -e # Get the tag from the version, or try to figure it out. if [ -z "$TAG" ]; then - TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go) + TAG=$(awk -F\" '/CMTSemVer =/ { print $2; exit }' < ../version/version.go) fi if [ -z "$TAG" ]; then echo "Please specify a tag." diff --git a/DOCKER/push.sh b/DOCKER/push.sh index 3ceeeeba958..6948c6761ef 100755 --- a/DOCKER/push.sh +++ b/DOCKER/push.sh @@ -3,7 +3,7 @@ set -e # Get the tag from the version, or try to figure it out. if [ -z "$TAG" ]; then - TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go) + TAG=$(awk -F\" '/CMTSemVer =/ { print $2; exit }' < ../version/version.go) fi if [ -z "$TAG" ]; then echo "Please specify a tag." diff --git a/Makefile b/Makefile index 5ba60576fbf..5ef969cfa23 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ OUTPUT?=$(BUILDDIR)/cometbft HTTPS_GIT := https://github.com/cometbft/cometbft.git CGO_ENABLED ?= 0 -# Process Docker environment varible TARGETPLATFORM +# Process Docker environment variable TARGETPLATFORM # in order to build binary with correspondent ARCH # by default will always build for linux/amd64 TARGETPLATFORM ?= @@ -68,7 +68,8 @@ ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM))) GOARCH=riscv64 endif -all: check build test install +#? all: Run the build, test and install targets +all: build test install .PHONY: all include tests.mk @@ -77,18 +78,21 @@ include tests.mk ### Build CometBFT ### ############################################################################### +#? build: Build CometBFT build: CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/cometbft/ .PHONY: build +#? install: Install CometBFT to GOBIN install: - CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/cometbft + CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/cometbft .PHONY: install ############################################################################### ### Metrics ### ############################################################################### +#? metrics: Generate metrics metrics: testdata-metrics go generate -run="scripts/metricsgen" ./... .PHONY: metrics @@ -96,6 +100,7 @@ metrics: testdata-metrics # By convention, the go tool ignores subdirectories of directories named # 'testdata'. This command invokes the generate command on the folder directly # to avoid this. +#? testdata-metrics: Generate test data for metrics testdata-metrics: ls ./scripts/metricsgen/testdata | xargs -I{} go generate -v -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{} .PHONY: testdata-metrics @@ -104,6 +109,7 @@ testdata-metrics: ### Mocks ### ############################################################################### +#? mockery: Generate test mocks mockery: go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery @@ -112,56 +118,64 @@ mockery: ### Protobuf ### ############################################################################### +#? check-proto-deps: Check protobuf deps check-proto-deps: -ifeq (,$(shell which protoc-gen-gogofaster)) - @go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest +ifeq (,$(shell which protoc-gen-gocosmos)) + @go install github.com/cosmos/gogoproto/protoc-gen-gocosmos@latest endif .PHONY: check-proto-deps +#? check-proto-format-deps: Check protobuf format deps check-proto-format-deps: ifeq (,$(shell which clang-format)) $(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.") endif .PHONY: check-proto-format-deps +#? proto-gen: Generate protobuf files proto-gen: check-proto-deps @echo "Generating Protobuf files" - @go run github.com/bufbuild/buf/cmd/buf generate - @mv ./proto/tendermint/abci/types.pb.go ./abci/types/ + @go run github.com/bufbuild/buf/cmd/buf@latest generate --path proto/cometbft .PHONY: proto-gen # These targets are provided for convenience and are intended for local # execution only. +#? proto-lint: Lint protobuf files proto-lint: check-proto-deps @echo "Linting Protobuf files" - @go run github.com/bufbuild/buf/cmd/buf lint + @go run github.com/bufbuild/buf/cmd/buf@latest lint .PHONY: proto-lint +#? proto-format: Format protobuf files proto-format: check-proto-format-deps @echo "Formatting Protobuf files" @find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \; .PHONY: proto-format +#? proto-check-breaking: Check for breaking changes in Protobuf files against local branch. This is only useful if your changes have not yet been committed proto-check-breaking: check-proto-deps @echo "Checking for breaking changes in Protobuf files against local branch" @echo "Note: This is only useful if your changes have not yet been committed." @echo " Otherwise read up on buf's \"breaking\" command usage:" @echo " https://docs.buf.build/breaking/usage" - @go run github.com/bufbuild/buf/cmd/buf breaking --against ".git" + @go run github.com/bufbuild/buf/cmd/buf@latest breaking --against ".git" .PHONY: proto-check-breaking +#? proto-check-breaking-ci: Check for breaking changes in Protobuf files against v0.34.x. This is only useful if your changes have not yet been committed proto-check-breaking-ci: - @go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x + @go run github.com/bufbuild/buf/cmd/buf@latest breaking --against $(HTTPS_GIT)#branch=v0.34.x .PHONY: proto-check-breaking-ci ############################################################################### ### Build ABCI ### ############################################################################### +#? build_abci: Build abci build_abci: @go build -mod=readonly -i ./abci/cmd/... .PHONY: build_abci +#? install_abci: Install abci install_abci: @go install -mod=readonly ./abci/cmd/... .PHONY: install_abci @@ -172,20 +186,25 @@ install_abci: # dist builds binaries for all platforms and packages them for distribution # TODO add abci to these scripts +#? dist: Build binaries for all platforms and package them for distribution dist: @BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'" .PHONY: dist +#? go-mod-cache: Download go modules to local cache go-mod-cache: go.sum @echo "--> Download go modules to local cache" @go mod download .PHONY: go-mod-cache +#? 
go.sum: Ensure dependencies have not been modified go.sum: go.mod @echo "--> Ensure dependencies have not been modified" @go mod verify @go mod tidy +.PHONY: go.sum +#? draw_deps: Generate deps graph draw_deps: @# requires brew install graphviz or apt-get install graphviz go get github.com/RobotsAndPencils/goviz @@ -203,7 +222,7 @@ get_deps_bin_size: ### Libs ### ############################################################################### -# generates certificates for TLS testing in remotedb and RPC server +#? gen_certs: Generate certificates for TLS testing in remotedb and RPC server gen_certs: clean_certs certstrap init --common-name "cometbft.com" --passphrase "" certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase "" @@ -213,7 +232,7 @@ gen_certs: clean_certs rm -rf out .PHONY: gen_certs -# deletes generated certificates +#? clean_certs: Delete generated certificates clean_certs: rm -f rpc/jsonrpc/server/test.crt rm -f rpc/jsonrpc/server/test.key @@ -223,28 +242,29 @@ clean_certs: ### Formatting, linting, and vetting ### ############################################################################### -format: - find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s - find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/cometbft/cometbft -.PHONY: format - -lint: - @echo "--> Running linter" - @go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run +#? lint: Lint, format and fix typos +lint: pre-commit + @pre-commit run .PHONY: lint +#? vulncheck: Run latest govulncheck vulncheck: @go run golang.org/x/vuln/cmd/govulncheck@latest ./... .PHONY: vulncheck -DESTINATION = ./index.html.md +#? pre-commit: Create pre-commit hook using the pre-commit framework. +pre-commit: + @which pre-commit || pip3 install pre-commit + @pre-commit install +.PHONY: pre-commit +DESTINATION = ./index.html.md ############################################################################### ### Documentation ### ############################################################################### -# Verify that important design docs have ToC entries. +#? check-docs-toc: Verify that important design docs have ToC entries. check-docs-toc: @./docs/presubmit.sh .PHONY: check-docs-toc @@ -255,6 +275,7 @@ check-docs-toc: # On Linux, you may need to run `DOCKER_BUILDKIT=1 make build-docker` for this # to work. +#? build-docker: Build docker image cometbft/cometbft build-docker: docker build \ --label=cometbft \ @@ -266,35 +287,12 @@ build-docker: ### Local testnet using docker ### ############################################################################### -# Build linux binary on other platforms +#? build-linux: Build linux binary on other platforms build-linux: GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) $(MAKE) build .PHONY: build-linux -build-docker-localnode: - @cd networks/local && make -.PHONY: build-docker-localnode - -# Runs `make build COMETBFT_BUILD_OPTIONS=cleveldb` from within an Amazon -# Linux (v2)-based Docker build container in order to build an Amazon -# Linux-compatible binary. Produces a compatible binary at ./build/cometbft -build_c-amazonlinux: - $(MAKE) -C ./DOCKER build_amazonlinux_buildimage - docker run --rm -it -v `pwd`:/cometbft cometbft/cometbft:build_c-amazonlinux -.PHONY: build_c-amazonlinux - -# Run a 4-node testnet locally -localnet-start: localnet-stop build-docker-localnode - @if ! 
[ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/cometbft:Z cometbft/localnode testnet --config /etc/cometbft/config-template.toml --o . --starting-ip-address 192.167.10.2; fi - docker-compose up -.PHONY: localnet-start - -# Stop testnet -localnet-stop: - docker-compose down -.PHONY: localnet-stop - -# Build hooks for dredd, to skip or add information on some steps +#? build-contract-tests-hooks: Build hooks for dredd, to skip or add information on some steps build-contract-tests-hooks: ifeq ($(OS),Windows_NT) go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests @@ -303,33 +301,33 @@ else endif .PHONY: build-contract-tests-hooks -# Run a nodejs tool to test endpoints against a localnet +#? contract-tests: Run a nodejs tool to test endpoints against a localnet # The command takes care of starting and stopping the network -# prerequisits: build-contract-tests-hooks build-linux +# prerequisites: build-contract-tests-hooks build-linux # the two build commands were not added to let this command run from generic containers or machines. # The binaries should be built beforehand contract-tests: dredd .PHONY: contract-tests -# Implements test splitting and running. This is pulled directly from -# the github action workflows for better local reproducibility. - -GO_TEST_FILES != find $(CURDIR) -name "*_test.go" - -# default to four splits by default -NUM_SPLIT ?= 4 - $(BUILDDIR): mkdir -p $@ -# The format statement filters out all packages that don't have tests. -# Note we need to check for both in-package tests (.TestGoFiles) and -# out-of-package tests (.XTestGoFiles). -$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR) - go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@ +#? help: Get more info on make commands. +help: Makefile + @echo " Choose a command to run in cometbft:" + @sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /' +.PHONY: help + +############################################################################### +### Benchmarking ### ############################################################################### -split-test-packages:$(BUILDDIR)/packages.txt - split -d -n l/$(NUM_SPLIT) $< $<. -test-group-%:split-test-packages - cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out +#? bench: Run benchmarks +bench: + @echo "--> Running benchmarks (this might take a while)" + @go install go.bobheadxi.dev/gobenchdata@latest + @go test -bench . -benchmem ./... | gobenchdata --json benchmarks.json + @gobenchdata web generate . 
+ @echo "--> Serving results at http://localhost:8080" + @gobenchdata web serve diff --git a/README.md b/README.md index adf53b6cbb5..33ae2a26535 100644 --- a/README.md +++ b/README.md @@ -6,30 +6,38 @@ [![Version][version-badge]][version-url] [![API Reference][api-badge]][api-url] [![Go version][go-badge]][go-url] -[![Discord chat][discord-badge]][discord-url] [![License][license-badge]][license-url] [![Sourcegraph][sg-badge]][sg-url] +[![Discord chat][discord-badge]][discord-url] + | Branch | Tests | Linting | |---------|------------------------------------------------|---------------------------------------------| | main | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] | +| v1.x | [![Tests][tests-badge-v1x]][tests-url-v1x] | [![Lint][lint-badge-v1x]][lint-url-v1x] | | v0.38.x | [![Tests][tests-badge-v038x]][tests-url-v038x] | [![Lint][lint-badge-v038x]][lint-url-v038x] | | v0.37.x | [![Tests][tests-badge-v037x]][tests-url-v037x] | [![Lint][lint-badge-v037x]][lint-url-v037x] | | v0.34.x | [![Tests][tests-badge-v034x]][tests-url-v034x] | [![Lint][lint-badge-v034x]][lint-url-v034x] | CometBFT is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely -replicates it on many machines. +replicates it on many machines. In modular blockchain terminology, +CometBFT can be thought of as a sequencer layer and is indeed used in +modern decentralized (shared) sequencer implementations. -It is a fork of [Tendermint Core][tm-core] and implements the Tendermint -consensus algorithm. +CometBFT is the canonical implementation of the Tendermint consensus algorithm and is a +primary building block for the [Interchain Stack](https://interchain.io/). Historically, +CometBFT originated as a fork of [Tendermint Core][tm-core] in early 2023 +(announcement [here][comet-announcement]) and since then it diverged significantly by adopting modern features such as [PBTS][pbts] or [ABCI v2][abci-v2]. CometBFT provides [optimistic responsiveness][optimistic-responsive] guarantees. For protocol details, refer to the [CometBFT Specification](./spec/README.md). -For detailed analysis of the consensus protocol, including safety and liveness +For detailed analysis of the Tendermint consensus protocol, including safety and liveness proofs, read our paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)". +For general links, including communications and announcements: [![Linktree][linktree-badge] linktr.ee/cometbft][linktree-url] + ## Documentation Complete documentation can be found on the @@ -37,43 +45,51 @@ Complete documentation can be found on the ## Releases -Please do not depend on `main` as your production branch. Use +Please do not depend on `main` as your production branch, as it may receive +significant breaking changes at any time. Use [releases](https://github.com/cometbft/cometbft/releases) instead. -We haven't released v1.0 yet -since we are making breaking changes to the protocol and the APIs. See below for -more details about [versioning](#versioning). - -In any case, if you intend to run CometBFT in production, we're happy to help. +If you intend to run CometBFT in production, we're happy to help. To contact us, +in order of preference: -To contact us, you can also -[join the chat](https://discord.com/channels/669268347736686612/669283915743232011). 
+- [Create a new discussion on + GitHub](https://github.com/cometbft/cometbft/discussions) +- Reach out to us via [Telegram](https://t.me/CometBFT) +- [Join the Cosmos Network Discord](https://discord.gg/interchain) and + discuss in + [`#cometbft`](https://discord.com/channels/669268347736686612/1069933855307472906) More on how releases are conducted can be found [here](./RELEASES.md). +## Support Policy + +CometBFT aligns with other components of the [Interchain Stack](https://interchain.io/) +and we offer long-term support (LTS) guarantees for certain releases. The +complete End of Life (EOL) schedule, LTS plans, and the general support policy are +documented and regularly updated in the +discussion [Support policy for CometBFT releases #590](https://github.com/cometbft/cometbft/discussions/590). + ## Security -To report a security vulnerability, see our [bug bounty -program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're -looking for, see [our security policy](SECURITY.md). +Please see [SECURITY.md](./SECURITY.md). ## Minimum requirements -| CometBFT version | Requirement | Notes | -|------------------|-------------|-------------------| -| main | Go version | Go 1.21 or higher | -| v0.38.x | Go version | Go 1.20 or higher | -| v0.37.x | Go version | Go 1.20 or higher | -| v0.34.x | Go version | Go 1.20 or higher | +| CometBFT version | Requirement | Version | Tested with | +|------------------|-------------|----------------|--------------| +| main | Go version | 1.23 or higher | up to 1.23.1 | +| v1.x | Go version | 1.23 or higher | up to 1.23.1 | +| v0.38.x | Go version | 1.22 or higher | up to 1.22 | +| v0.37.x | Go version | 1.22 or higher | up to 1.22 | +| v0.34.x | Go version | 1.22 or higher | up to 1.22 | ### Install -See the [install guide](./docs/guides/install.md). +See the [install guide](docs/tutorials/install.md). ### Quick Start -- [Single node](./docs/guides/quick-start.md) -- [Local cluster using docker-compose](./docs/networks/docker-compose.md) +- [Single node](docs/tutorials/quick-start.md) ## Contributing @@ -83,35 +99,35 @@ Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the [specifications](./spec/README.md), and familiarize yourself with our [Architectural Decision Records -(ADRs)](./docs/architecture/README.md) and [Request For Comments -(RFCs)](./docs/rfc/README.md). +(ADRs)](docs/references/architecture/README.md) and [Request For Comments +(RFCs)](docs/references/rfc/README.md). ## Versioning -### Semantic Versioning - -CometBFT uses [Semantic Versioning](http://semver.org/) to determine when and -how the version changes. According to SemVer, anything in the public API can -change at any time before version 1.0.0 +As of v1, CometBFT uses the following approach to versioning: -To provide some stability to users of 0.X.X versions of CometBFT, the MINOR -version is used to signal breaking changes across CometBFT's API. This API -includes all publicly exposed types, functions, and methods in non-internal Go -packages as well as the types and methods accessible via the CometBFT RPC -interface. - -Breaking changes to these public APIs will be documented in the CHANGELOG. +- **Major version** bumps, such as v1.0.0 to v2.0.0, would generally involve + changes that _force_ users to perform a coordinated upgrade in order to use + the new version, such as protocol-breaking changes (e.g. 
changes to how block + hashes are computed and thus what the network considers to be "valid blocks", + or how the consensus protocol works, or changes that affect network-level + compatibility between nodes, etc.). +- **Minor version** bumps, such as v1.1.0 to v1.2.0, are reserved for rolling + out new features or substantial changes that do not force a coordinated + upgrade (i.e. not protocol-breaking), but could potentially break Go APIs. +- **Patch version** bumps, such as v1.0.0 to v1.0.1, are reserved for + bug/security fixes that are not protocol- or Go API-breaking. ### Upgrades -In an effort to avoid accumulating technical debt prior to 1.0.0, we do not -guarantee that breaking changes (i.e. bumps in the MINOR version) will work with -existing CometBFT blockchains. In these cases you will have to start a new -blockchain, or write something custom to get the old data into the new chain. -However, any bump in the PATCH version should be compatible with existing -blockchain histories. +We do not guarantee compatibility between major releases of CometBFT. Minor +releases of the same major release series (v1.1, v1.2, etc.) should, unless +otherwise specified, be compatible with each other. Patch releases of the same +minor release series (v1.0.1, v1.0.2, etc.) are guaranteed to be compatible with +each other. -For more information on upgrading, see [UPGRADING.md](./UPGRADING.md). +For more detailed information on upgrading from one version to another, see +[UPGRADING.md](./UPGRADING.md). ### Supported Versions @@ -122,6 +138,10 @@ CometBFT up-to-date. Upgrading instructions can be found in Currently supported versions include: +- v1.x: Currently in pre-release with no guarantees as to API stability until a + release candidate is cut. See [RELEASES.md](./RELEASES.md) for details on our + process and the API stability guarantees that can be expected of CometBFT + pre-releases. - v0.38.x: CometBFT v0.38 introduces ABCI 2.0, which implements the entirety of ABCI++ - v0.37.x: CometBFT v0.37 introduces ABCI 1.0, which is the first major step @@ -133,23 +153,22 @@ Currently supported versions include: ### Libraries -- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building - applications in Golang +- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk): A framework for building + high-value public blockchain applications in Go - [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs) - [ABCI Tower](https://github.com/penumbra-zone/tower-abci) ### Applications - [Cosmos Hub](https://hub.cosmos.network/) -- [Terra](https://www.terra.money/) - [Celestia](https://celestia.org/) - [Anoma](https://anoma.network/) -- [Vocdoni](https://docs.vocdoni.io/) +- [Vocdoni](https://developer.vocdoni.io/) ### Research Below are links to the original Tendermint consensus algorithm and relevant -whitepapers which CometBFT will continue to build on. +whitepapers, which CometBFT will continue to build on. - [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938) - [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) @@ -167,24 +186,27 @@ maintains [cometbft.com](https://cometbft.com). 
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance [smr]: https://en.wikipedia.org/wiki/State_machine_replication +[optimistic-responsive]: https://informal.systems/blog/tendermint-responsiveness [Blockchain]: https://en.wikipedia.org/wiki/Blockchain [version-badge]: https://img.shields.io/github/v/release/cometbft/cometbft.svg [version-url]: https://github.com/cometbft/cometbft/releases/latest -[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 +[api-badge]: https://pkg.go.dev/badge/github.com/cometbft/cometbft.svg [api-url]: https://pkg.go.dev/github.com/cometbft/cometbft [go-badge]: https://img.shields.io/badge/go-1.21-blue.svg [go-url]: https://github.com/moovweb/gvm [discord-badge]: https://img.shields.io/discord/669268347736686612.svg -[discord-url]: https://discord.gg/cosmosnetwork +[discord-url]: https://discord.gg/interchain [license-badge]: https://img.shields.io/github/license/cometbft/cometbft.svg [license-url]: https://github.com/cometbft/cometbft/blob/main/LICENSE [sg-badge]: https://sourcegraph.com/github.com/cometbft/cometbft/-/badge.svg [sg-url]: https://sourcegraph.com/github.com/cometbft/cometbft?badge [tests-url]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml +[tests-url-v1x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av1.x [tests-url-v038x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av0.38.x [tests-url-v037x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av0.37.x [tests-url-v034x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml?query=branch%3Av0.34.x [tests-badge]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=main +[tests-badge-v1x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v1.x [tests-badge-v038x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v0.38.x [tests-badge-v037x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v0.37.x [tests-badge-v034x]: https://github.com/cometbft/cometbft/actions/workflows/tests.yml/badge.svg?branch=v0.34.x @@ -192,8 +214,15 @@ maintains [cometbft.com](https://cometbft.com). 
[lint-badge-v034x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v0.34.x [lint-badge-v037x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v0.37.x [lint-badge-v038x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v0.38.x +[lint-badge-v1x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml/badge.svg?branch=v1.x [lint-url]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml [lint-url-v034x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av0.34.x [lint-url-v037x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av0.37.x [lint-url-v038x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av0.38.x +[lint-url-v1x]: https://github.com/cometbft/cometbft/actions/workflows/lint.yml?query=branch%3Av1.x [tm-core]: https://github.com/tendermint/tendermint +[pbts]: https://docs.cometbft.com/v1.0/explanation/core/proposer-based-timestamps +[abci-v2]: https://docs.cometbft.com/v1.0/spec/abci/ +[comet-announcement]: https://informal.systems/blog/cosmos-meet-cometbft +[linktree-url]: https://linktr.ee/cometbft +[linktree-badge]: https://www.google.com/s2/favicons?domain=https://linktr.ee/ diff --git a/RELEASES.md b/RELEASES.md index b7a12b02194..6e95edee6bd 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,17 +1,17 @@ # Releases CometBFT uses modified [semantic versioning](https://semver.org/) with each -release following a `vX.Y.Z` format. CometBFT is currently on major version 0 -and uses the minor version to signal breaking changes. The `main` branch is -used for active development and thus it is not advisable to build against it. +release following a `vX.Y.Z` format. The versioning approach used by CometBFT +v1.x onwards differs from that of the v0.x series, and is documented in +[README.md](./README.md#versioning). The `main` branch is used for active +development and thus it is not advisable to build against it. The latest changes are always initially merged into `main`. Releases are specified using tags and are built from long-lived "backport" branches that are -cut from `main` when the release process begins. Each release "line" (e.g. -0.34 or 0.33) has its own long-lived backport branch, and the backport branches -have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder -in this case). CometBFT only maintains the last two releases at a time (the -oldest release is predominantly just security patches). +cut from `main` when the release process begins. Each release "line" (e.g. 1.0 +or 0.38) has its own long-lived backport branch, and the backport branches have +names like `v1.x` or `v0.38.x` (literally, `x`; it is not a placeholder in this +case). ## Backporting @@ -22,44 +22,41 @@ We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. There should be a label for any backport branch that you'll be targeting. To notify the bot to backport a pull request, mark the pull request with the label corresponding to the correct -backport branch. For example, to backport to v0.38.x, add the label -`S:backport-to-v0.38.x`. Once the original pull request is merged, the bot will +backport branch. For example, to backport to v1.x, add the label +`S:backport-to-v1.x`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. 
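As an illustration, the backport label can be applied from the command line with the GitHub CLI (a minimal sketch; the PR number 1234 is hypothetical, and the label must already exist in the repository as described above):

```bash
# Hypothetical example: mark PR #1234 for backporting to the v1.x branch.
# Once the PR is merged, Mergify will attempt the cherry-pick automatically.
gh pr edit 1234 --add-label "S:backport-to-v1.x"
```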
If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request. ### Creating a backport branch -If this is the first release candidate for a minor version release, e.g. -v0.25.0, you get to have the honor of creating the backport branch! +If this is the first release candidate for a major version release, e.g. v2.0.0, +you get to have the honor of creating the backport branch! Note that, after creating the backport branch, you'll also need to update the -tags on `main` so that `go mod` is able to order the branches correctly. You -should tag `main` with a "dev" tag that is "greater than" the backport -branches tags. Otherwise, `go mod` does not 'know' whether commits on `main` -come before or after the release. +tags on `main` so that `go mod` is able to order the branches correctly. In the following example, we'll assume that we're making a backport branch for -the 0.38.x line. +the 2.x line. 1. Start on `main` 2. Ensure that there is a [branch protection - rule](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/managing-a-branch-protection-rule) for the - branch you are about to create (you will need admin access to the repository - in order to do this). + rule](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/managing-a-branch-protection-rule) + for the branch you are about to create (you will need admin access to the + repository in order to do this). 3. Create and push the backport branch: ```sh - git checkout -b v0.38.x - git push origin v0.38.x + git checkout -b v2.x + git push origin v2.x ``` 4. Create a PR to update the documentation directory for the backport branch. - We rewrite any URLs pointing to `main` to point to the backport branch, - so that generated documentation will link to the correct versions of files + We rewrite any URLs pointing to `main` to point to the backport branch, so + that generated documentation will link to the correct versions of files elsewhere in the repository. The following files are to be excluded from this search: @@ -73,21 +70,21 @@ the 0.38.x line. * `https://github.com/cometbft/cometbft/blob/main/LICENSE` Be sure to search for all of the following links and replace `main` with your - corresponding branch label or version (e.g. `v0.38.x` or `v0.38`): + corresponding branch label or version (e.g. `v2.x` or `v2.0`): * `github.com/cometbft/cometbft/blob/main` -> - `github.com/cometbft/cometbft/blob/v0.38.x` + `github.com/cometbft/cometbft/blob/v2.x` * `github.com/cometbft/cometbft/tree/main` -> - `github.com/cometbft/cometbft/tree/v0.38.x` - * `docs.cometbft.com/main` -> `docs.cometbft.com/v0.38` + `github.com/cometbft/cometbft/tree/v2.x` + * `docs.cometbft.com/main` -> `docs.cometbft.com/v2` Once you have updated all of the relevant documentation: ```sh # Create and push the PR. - git checkout -b update-docs-v038x - git commit -m "Update docs for v0.38.x backport branch." - git push -u origin update-docs-v038x + git checkout -b update-docs-v2.x + git commit -m "Update docs for v2.x backport branch." + git push -u origin update-docs-v2.x ``` Be sure to merge this PR before making other changes on the newly-created @@ -104,11 +101,11 @@ the 0.38.x line. After doing these steps, go back to `main` and do the following: -1. 
Create a new workflow to run e2e nightlies for the new backport branch. (See +1. Create a new workflow to run E2E nightlies for the new backport branch. (See [e2e-nightly-main.yml][e2e] for an example.) 2. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the - backport bot to work on this branch, and add a corresponding `backport-to-v0.38.x` + backport bot to work on this branch, and add a corresponding `backport-to-v2.x` [label](https://github.com/cometbft/cometbft/labels) so the bot can be triggered. 3. Add a new section to the Dependabot config (`.github/dependabot.yml`) to @@ -122,28 +119,28 @@ After doing these steps, go back to `main` and do the following: ## Pre-releases -Before creating an official release, especially a minor release, we may want to +Before creating an official release, especially a major release, we may want to create an alpha or beta version, or release candidate (RC) for our friends and partners to test out. We use git tags to create pre-releases, and we build them off of backport branches, for example: -* `v0.38.0-alpha.1` - The first alpha release of `v0.38.0`. Subsequent alpha - releases will be numbered `v0.38.0-alpha.2`, `v0.38.0-alpha.3`, etc. +* `v2.0.0-alpha.1` - The first alpha release of `v2.0.0`. Subsequent alpha + releases will be numbered `v2.0.0-alpha.2`, `v2.0.0-alpha.3`, etc. Alpha releases are to be considered the _most_ unstable of pre-releases, and are most likely not yet properly QA'd. These are made available to allow early adopters to start integrating and testing new functionality before we're done with QA. -* `v0.38.0-beta.1` - The first beta release of `v0.38.0`. Subsequent beta - releases will be numbered `v0.38.0-beta.2`, `v0.38.0-beta.3`, etc. +* `v2.0.0-beta.1` - The first beta release of `v2.0.0`. Subsequent beta + releases will be numbered `v2.0.0-beta.2`, `v2.0.0-beta.3`, etc. Beta releases can be considered more stable than alpha releases in that we will have QA'd them better than alpha releases, but there still may be minor breaking API changes if users have strong demands for such changes. -* `v0.38.0-rc1` - The first release candidate (RC) of `v0.38.0`. Subsequent RCs - will be numbered `v0.38.0-rc2`, `v0.38.0-rc3`, etc. +* `v2.0.0-rc1` - The first release candidate (RC) of `v2.0.0`. Subsequent RCs + will be numbered `v2.0.0-rc2`, `v2.0.0-rc3`, etc. RCs are considered more stable than beta releases in that we will have completed our QA on them. APIs will most likely be stable at this point. The @@ -154,97 +151,142 @@ off of backport branches, for example: (Note that branches and tags _cannot_ have the same names, so it's important that these branches have distinct names from the tags/release names.) -If this is the first pre-release for a minor release, you'll have to make a new +If this is the first pre-release for a major release, you'll have to make a new backport branch (see above). Otherwise: -1. Start from the backport branch (e.g. `v0.38.x`). -2. Run the integration tests and the E2E nightlies - (which can be triggered from the GitHub UI; - e.g., ). +1. Start from the backport branch (e.g. `v2.x`). +2. Run the E2E nightlies (which can be triggered from the GitHub UI; e.g., + ). 3. Prepare the pre-release documentation: - * Build the changelog with [unclog] _without_ doing an unclog release, and + * Build the changelog with [unclog] _without_ doing an unclog release (`unclog build -a > CHANGELOG.md`), and commit the built changelog. 
This ensures that all changelog entries appear under an "Unreleased" heading in the pre-release's changelog. The changes are only considered officially "released" once we cut a regular (final) release. * Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes or other upgrading flows. -4. Prepare the versioning: - * Bump TMVersionDefault version in `version.go` - * Bump P2P and block protocol versions in `version.go`, if necessary. +4. Check the dependency to `github.com/cometbft/cometbft/api` in the `go.mod` + file. If it does not point to an official api version, run `go get github.com/cometbft/cometbft/api` + and `go mod tidy` (to update `go.sum`) so that it points to one. You may need to tag a new version of the api + if the last version is too old (i.e., it does not contain the latest + changes to the protos). If that is the case: + * `git tag -a api/v2.0.0-rc1 -s -m "Release api module v2.0.0-rc1" origin/v2.x` + * `git push origin api/v2.0.0-rc1` + * Notice the prefix `api/`, which denotes that the version refers to the `api` module. +5. Prepare the versioning: + * Bump CometBFT version in `version.go` + * Bump P2P and block protocol versions in `version.go`, if necessary. Check the changelog for breaking changes in these components. * Bump ABCI protocol version in `version.go`, if necessary -5. Open a PR with these changes against the backport branch. -6. Once these changes have landed on the backport branch, be sure to pull them back down locally. -7. Once you have the changes locally, create the new tag, specifying a name and a tag "message": - `git tag -a v0.38.0-rc1 -s -m "Release Candidate v0.38.0-rc1` -8. Push the tag back up to origin: - `git push origin v0.38.0-rc1` +6. Open a PR with these changes against the backport branch. +7. Once these changes have landed on the backport branch, be sure to pull them + back down locally. +8. Once you have the changes locally, create the new tag, specifying a name and + a tag "message": + `git tag -a v2.0.0-rc1 -s -m "Release Candidate v2.0.0-rc1"` +9. Push the tag back up to origin: + `git push origin v2.0.0-rc1` Now the tag should be available on the repo's releases page. -9. Future pre-releases will continue to be built off of this branch. +10. Future pre-releases will continue to be built off of this branch. +11. [Publish the proto changes](#publish-protos-to-the-buf-schema-registry) to the `Buf` schema registry `comet/comet` +repository (if necessary). -## Minor release +## Major release -This minor release process assumes that this release was preceded by release +This major release process assumes that this release was preceded by release candidates. If there were no release candidates, begin by creating a backport branch, as described above. Before performing these steps, be sure the -[Minor Release Checklist](#minor-release-checklist) has been completed. +[Major Release Checklist](#major-release-checklist) has been completed. -1. Start on the backport branch (e.g. `v0.38.x`) -2. Run integration tests (`make test_integrations`) and the e2e nightlies. +1. Start on the backport branch (e.g. `v2.x`) +2. Run the E2E nightlies (which can be triggered from the GitHub UI; e.g., + ). 3. Prepare the release: + * Check the dependency to `github.com/cometbft/cometbft/api` in the `go.mod` + file. If it does not point to an official api version, run `go get github.com/cometbft/cometbft/api` + and `go mod tidy` (to update `go.sum`) so that it points to one. 
You may need to tag a new version of the `api` module
+     if the last released version is too old (i.e., it does not contain the latest
+     changes to the protos). If that is the case:
+   * `git tag -a api/v2.0.0 -s -m "Release api module v2.0.0" origin/v2.x`
+   * `git push origin api/v2.0.0`
+   * Notice the prefix `api/`, which denotes that the version refers to the `api` module.
   * Do a [release][unclog-release] with [unclog] for the desired version,
     ensuring that you write up a good summary of the major highlights of the
     release that users would be interested in.
   * Build the changelog using unclog, and commit the built changelog.
   * Ensure that `UPGRADING.md` is up-to-date and includes notes on any
     breaking changes or other upgrading flows.
-   * Bump TMVersionDefault version in `version.go`
+   * Bump CometBFT version in `version.go`
   * Bump P2P and block protocol versions in `version.go`, if necessary
   * Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
-5. Once these changes are on the backport branch, push a tag with prepared release details. This will trigger the actual release `v0.38.0`.
-   * `git tag -a v0.38.0 -s -m 'Release v0.38.0'`
-   * `git push origin v0.38.0`
-6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
-
-## Patch release
-
-Patch releases are done differently from minor releases: They are built off of
-long-lived backport branches, rather than from main. As non-breaking changes
-land on `main`, they should also be backported into these backport branches.
+5. Once these changes are on the backport branch, push a tag with prepared
+   release details. This will trigger the actual release `v2.0.0`.
+   * `git tag -a v2.0.0 -s -m 'Release v2.0.0'`
+   * `git push origin v2.0.0`
+6. Make sure that `main` is updated with the latest `CHANGELOG.md`,
+   `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
+7. [Publish the proto changes](#publish-protos-to-the-buf-schema-registry) to the `Buf` schema registry `cometbft/cometbft`
+repository (if necessary).
+
+## Minor and patch releases
+
+Minor and patch releases are done differently from major releases: they are
+built off of long-lived backport branches, rather than from `main`. As
+non-breaking changes land on `main`, they should also be backported into these
+backport branches.

 Patch releases don't have release candidates by default, although any tricky
 changes may merit a release candidate.

 To create a patch release:

-1. Checkout the long-lived backport branch: `git checkout v0.38.x`
-2. Run integration tests (`make test_integrations`) and the nightlies.
+1. Check out the long-lived backport branch: `git checkout v2.x`
+2. Run the E2E nightlies (which can be triggered from the GitHub UI; e.g.,
+   ).
 3. Check out a new branch and prepare the release:
+   * Check the dependency on `github.com/cometbft/cometbft/api` in the `go.mod`
+     file. If it does not point to an official version of the `api` module, run `go get github.com/cometbft/cometbft/api`
+     and `go mod tidy` (to update `go.sum`) so that it points to one. You may need to tag a new version of the `api` module
+     if the last released version is too old (i.e., it does not contain the latest
+     changes to the protos). If that is the case:
+   * `git tag -a api/v2.0.1 -s -m "Release api module v2.0.1" origin/v2.x`
+   * `git push origin api/v2.0.1`
+   * Notice the prefix `api/`, which denotes that the version refers to the `api` module.
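+   * As an illustration only (the version number here is hypothetical), an
+     official version of the `api` module shows up in `go.mod` as a tagged
+     release rather than a pseudo-version:
+
+     ```
+     require github.com/cometbft/cometbft/api v1.0.0
+     ```
+
+     If instead you see a pseudo-version such as
+     `v1.0.1-0.20240101000000-0123456789ab`, the dependency points at a bare
+     commit and a new `api` tag is needed.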
* Do a [release][unclog-release] with [unclog] for the desired version,
     ensuring that you write up a good summary of the major highlights of the
     release that users would be interested in.
   * Build the changelog using unclog, and commit the built changelog.
-   * Bump the TMDefaultVersion in `version.go`
+   * Bump the CometBFT version in `version.go`
   * Bump the ABCI version number, if necessary. (Note that ABCI follows semver,
     and that ABCI versions are the only versions which can change during patch
     releases, and only field additions are valid patch changes.)
-4. Open a PR with these changes that will land them back on `v0.38.x`
+4. Open a PR with these changes that will land them back on `v2.x`
5. Once this change has landed on the backport branch, make sure to pull it
   locally, then push a tag.
-   * `git tag -a v0.38.1 -s -m 'Release v0.38.1'`
-   * `git push origin v0.38.1`
-6. Create a pull request back to main with the CHANGELOG & version changes from the latest release.
-   * Remove all `R:patch` labels from the pull requests that were included in the release.
+   * `git tag -a v2.0.1 -s -m 'Release v2.0.1'`
+   * `git push origin v2.0.1`
+
+   The process for minor releases is similar to that for patch releases:
+   * `git tag -a v2.1.0 -s -m 'Release v2.1.0'`
+   * `git push origin v2.1.0`
+6. Create a pull request back to main with the CHANGELOG and version changes
+   from the latest release.
   * Do not merge the backport branch into main.
+7. [Publish the proto changes](#publish-protos-to-the-buf-schema-registry) to the `Buf` schema registry `cometbft/cometbft`
+   repository (if necessary).
+
+After the release of `v1.0.0`, the backport branch is named `v1.x`. Any future minor or patch releases will be cut
+from the `v1.x` branch. There won't be a separate backport branch for minor releases, so there won't be a `v1.1.x` backport branch.

-## Minor Release Checklist
+For more details about versioning guarantees after the `v1.0.0` release, please check
+the [versioning](https://github.com/cometbft/cometbft/blob/main/UPGRADING.md#versioning) section in the `UPGRADING.md` document.
+
+## Major Release Checklist

 The following set of steps are performed on all releases that increment the
-_minor_ version, e.g. v0.25 to v0.26. These steps ensure that CometBFT is well
+_major_ version, e.g. v1.0 to v2.0. These steps ensure that CometBFT is well
 tested, stable, and suitable for adoption by the various diverse projects that
 rely on CometBFT.

@@ -336,28 +378,36 @@ critical systems, testnets of larger sizes should be considered.

 #### Rotating Node Testnet

-Real-world deployments of CometBFT frequently see new nodes arrive and old
-nodes exit the network. The rotating node testnet ensures that CometBFT is
-able to handle this reliably. In this test, a network with 10 validators and
-3 seed nodes is started. A rolling set of 25 full nodes are started and each
-connects to the network by dialing one of the seed nodes. Once the node is able
-to blocksync to the head of the chain and begins producing blocks using
-consensus it is stopped. Once stopped, a new node is started and
-takes its place. This network is run for several days.
+Real-world deployments of CometBFT frequently see new nodes arrive and old nodes
+exit the network. The rotating node testnet ensures that CometBFT is able to
+handle this reliably. In this test, a network with 10 validators and 3 seed
+nodes is started. A rolling set of 25 full nodes is started and each connects
+to the network by dialing one of the seed nodes.
Once the node is able to
+blocksync to the head of the chain and begins producing blocks using consensus,
+it is stopped. Once stopped, a new node is started and takes its place. This
+network is run for several days.

 #### Vote-extension Testnet

-CometBFT v0.38.0 introduced **vote-extensions**, which are added as the name suggests, to precommit votes sent by validators.
-The Vote-extension Testnet is used to determine how vote-extensions affect the performance of CometBFT, under various settings.
-The application used in the experiment is the same used on the (#200-node-testnet), but is configured differently to gauge de effects of varying vote extension sizes.
-In the (#200-node-testnet) the application extends pre-commit votes with a 64 bit number encoded with variable compression.
-In the Vote-extension Testnet, pre-commit votes are extended with a non-compressed extension of configurable size.
-Experiments are run with multiple sizes to determine their impact and, for comparison sake, we include a run with the same settings as in the (#200-node-testnet).
-
-The testnet consists of 175 validators, 20 non-validator full-nodes, and 5 seed nodes.
-All 195 full-nodes begin by dialing a subset of the seed nodes to discover peers.
-Once all full-nodes are started, a 5 minute period is waited before starting an experiment.
-For each experiment, the load generators issue requests at a constant rate during 150 seconds, then wait for 5 minutes to allow the system to quiesce, then repeat the load generation; the load generation step is repeated 5 times for each experiment.
+CometBFT v0.38.0 introduced **vote-extensions**, which are added, as the name
+suggests, to precommit votes sent by validators. The Vote-extension Testnet is
+used to determine how vote-extensions affect the performance of CometBFT, under
+various settings. The application used in the experiment is the same as the one used in the
+[200 node testnet](#200-node-testnet), but is configured differently to gauge the effects of
+varying vote extension sizes. In the [200 node testnet](#200-node-testnet), the application extends
+pre-commit votes with a 64-bit number encoded with variable compression. In the
+Vote-extension Testnet, pre-commit votes are extended with a non-compressed
+extension of configurable size. Experiments are run with multiple sizes to
+determine their impact and, for comparison's sake, we include a run with the same
+settings as in the [200 node testnet](#200-node-testnet).
+
+The testnet consists of 175 validators, 20 non-validator full-nodes, and 5 seed
+nodes. All 195 full-nodes begin by dialing a subset of the seed nodes to
+discover peers. Once all full-nodes are started, we wait 5 minutes
+before starting an experiment. For each experiment, the load generators issue
+requests at a constant rate for 150 seconds, then wait for 5 minutes to allow
+the system to quiesce, then repeat the load generation; the load generation step
+is repeated 5 times for each experiment.

 #### Network Partition Testnet

@@ -365,25 +415,62 @@ CometBFT is expected to recover from network partitions. A partition where no
 subset of the nodes is left with the super-majority of the stake is expected
 to stop making blocks. Upon alleviation of the partition, the network is
 expected to once again become fully connected and capable of producing blocks. The
-network partition testnet ensures that CometBFT is able to handle this
-reliably at scale. In this test, a network with 100 validators and 95 full
-nodes is started. All validators have equal stake.
Once the network is
-producing blocks, a set of firewall rules is deployed to create a partitioned
-network with 50% of the stake on one side and 50% on the other. Once the
-network stops producing blocks, the firewall rules are removed and the nodes
-are monitored to ensure they reconnect and that the network again begins
-producing blocks.
+network partition testnet ensures that CometBFT is able to handle this reliably
+at scale. In this test, a network with 100 validators and 95 full nodes is
+started. All validators have equal stake. Once the network is producing blocks,
+a set of firewall rules is deployed to create a partitioned network with 50% of
+the stake on one side and 50% on the other. Once the network stops producing
+blocks, the firewall rules are removed and the nodes are monitored to ensure
+they reconnect and that the network again begins producing blocks.

 #### Absent Stake Testnet

-CometBFT networks often run with _some_ portion of the voting power offline.
-The absent stake testnet ensures that large networks are able to handle this
+CometBFT networks often run with _some_ portion of the voting power offline. The
+absent stake testnet ensures that large networks are able to handle this
 reliably. A set of 150 validator nodes and three seed nodes is started. The set
-of 150 validators is configured to only possess a cumulative stake of 67% of
-the total stake. The remaining 33% of the stake is configured to belong to
-a validator that is never actually run in the test network. The network is run
-for multiple days, ensuring that it is able to produce blocks without issue.
-
+of 150 validators is configured to only possess a cumulative stake of 67% of the
+total stake. The remaining 33% of the stake is configured to belong to a
+validator that is never actually run in the test network. The network is run for
+multiple days, ensuring that it is able to produce blocks without issue.
+
+## Publish protos to the Buf schema registry
+
+For each release, if necessary, publish the proto changes to the `Buf` schema registry `cometbft/cometbft` repository.
+
+* Install the `buf` tool:
+  * If you don't have the `buf` tool installed on your machine, please refer to the [Buf docs](https://buf.build/docs/installation)
+    in order to learn how to install it.
+* Ensure you have access to `Buf` and that you can log in and publish files:
+  * Go to [Buf schema registry](https://buf.build/)
+  * Click on `Sign in`. If you don't have an account, please create one.
+  * Once you sign in, click on your username (top right), and in the dropdown menu select `Organizations`.
+  * Ensure that you can see `cometbft` under Organizations and that it shows the `Writer` role on the right side.
+  * If you can't see `cometbft` or you don't see the `Writer` role, please reach out to one of the CometBFT team members who have
+    admin access in Buf for the `cometbft` organization so they can grant you the right permissions.
+  * If you see `cometbft` and have the `Writer` role, log in to Buf via the terminal:
+    - `buf registry login`
+  * When prompted for the username, type it and hit enter. It will ask for a `token` next.
+  * Go back to the Buf website and click on your username, and select `Settings`.
+  * Click on `Create new token`, add a name and select an expiry date.
+  * Once the token is created, copy the token code from the website and paste it into the terminal and hit enter.
You
+    should see a message saying `Credentials saved ...`
+* Publish the files:
+  * Check out the new release that was tagged:
+    - `git checkout v2.0.0-rc1`
+  * Change to the proto directory:
+    - `cd proto`
+  * Lint the protos:
+    - `buf lint`
+  * Update the dependencies:
+    - `buf dep update`
+  * Build the files:
+    - `buf build`
+  * Push the files to the registry. This will publish a commit to the Buf registry using the last
+    commit checksum from the release.
+    - `buf push --tag "$(git rev-parse HEAD)"`
+  * Go to the `Commits` section for the `cometbft/cometbft` repository and ensure that you
+    can see the commit just [published there](https://buf.build/cometbft/cometbft/commits).
+  * All set: the Buf registry now hosts the [latest proto files](https://buf.build/cometbft/cometbft) in the `cometbft/cometbft` repository.

[unclog]: https://github.com/informalsystems/unclog
[unclog-release]: https://github.com/informalsystems/unclog#releasing-a-new-versions-change-set
diff --git a/SECURITY.md b/SECURITY.md
index f8ba537a63b..2a5c5666415 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1,79 +1,33 @@
-# Coordinated Vulnerability Disclosure Policy
+# How to Report a Security Bug

-The Cosmos ecosystem believes that strong security is a blend of highly
-technical security researchers who care about security and the forward
-progression of the ecosystem and the attentiveness and openness of Cosmos core
-contributors to help continually secure our operations.
+If you believe you have found a security vulnerability in the Interchain Stack,
+you can report it to our primary vulnerability disclosure channel, the [Cosmos
+HackerOne Bug Bounty program][h1].

-> **IMPORTANT**: *DO NOT* open public issues on this repository for security
-> vulnerabilities.
+If you prefer to report an issue via email, you may send a bug report to
+ with the issue details, reproduction, impact, and other
+information. Please submit only one unique email thread per vulnerability. Any
+issues reported via email are ineligible for bounty rewards.

-## Scope
+Artifacts from an email report are saved at the time the email is triaged.
+Please note: our team is not able to monitor dynamic content (e.g. a Google Docs
+link that is edited after receipt) throughout the lifecycle of a report. If you
+would like to share additional information or modify previous information,
+please include it in an additional reply as an attachment.

-| Scope                 |
-|-----------------------|
-| last release (tagged) |
-| main branch           |
+Please **DO NOT** file a public issue in this repository to report a security
+vulnerability.

-The latest **release tag** of this repository is supported for security updates
-as well as the **main** branch. Security vulnerabilities should be reported if
-the vulnerability can be reproduced on either one of those.
+## Coordinated Vulnerability Disclosure Policy and Safe Harbor

-## Reporting a Vulnerability
+For the most up-to-date version of the policies that govern vulnerability
+disclosure, please consult the [HackerOne program page][h1-policy].
-| Reporting methods                                             |
-|---------------------------------------------------------------|
-| [GitHub Private Vulnerability Reporting][gh-private-advisory] |
-| [HackerOne bug bounty program][h1]                            |
+The policy hosted on HackerOne is the official Coordinated Vulnerability
+Disclosure policy and Safe Harbor for the Interchain Stack, and the teams and
+infrastructure it supports, and it supersedes the security policies previously
+used by individual teams and projects with targets in
+scope of the program.

-All security vulnerabilities can be reported under GitHub's [Private
-vulnerability reporting][gh-private-advisory] system. This will open a private
-issue for the developers. Try to fill in as much of the questions as possible.
-If you are not familiar with the CVSS system for assessing vulnerabilities, just
-use the Low/High/Critical severity ratings. A partially filled in report for a
-critical vulnerability is still better than no report at all.
-
-Vulnerabilities associated with the **Go, Rust or Protobuf code** of the
-repository may be eligible for a [bug bounty][h1]. Please see the bug bounty
-page for more details on submissions and rewards. If you think the vulnerability
-is eligible for a payout, **report on HackerOne first**.
-
-Vulnerabilities in services and their source codes (JavaScript, web page, Google
-Workspace) are not in scope for the bug bounty program, but they are welcome to
-be reported in GitHub.
-
-### Guidelines
-
-We require that all researchers:
-
-* Abide by this policy to disclose vulnerabilities, and avoid posting
-  vulnerability information in public places, including GitHub, Discord,
-  Telegram, and Twitter.
-* Make every effort to avoid privacy violations, degradation of user experience,
-  disruption to production systems (including but not limited to the Cosmos
-  Hub), and destruction of data.
-* Keep any information about vulnerabilities that you’ve discovered confidential
-  between yourself and the Cosmos engineering team until the issue has been
-  resolved and disclosed.
-* Avoid posting personally identifiable information, privately or publicly.
-
-If you follow these guidelines when reporting an issue to us, we commit to:
-
-* Not pursue or support any legal action related to your research on this
-  vulnerability
-* Work with you to understand, resolve and ultimately disclose the issue in a
-  timely fashion
-
-### More information
-
-* See [TIMELINE.md] for an example timeline of a disclosure.
-* See [DISCLOSURE.md] to see more into the inner workings of the disclosure
-  process.
-* See [EXAMPLES.md] for some of the examples that we are interested in for the
-  bug bounty program.
-
-[gh-private-advisory]: https://github.com/cometbft/cometbft/security/advisories/new
-[h1]: https://hackerone.com/cosmos
-[TIMELINE.md]: https://github.com/cosmos/security/blob/main/TIMELINE.md
-[DISCLOSURE.md]: https://github.com/cosmos/security/blob/main/DISCLOSURE.md
-[EXAMPLES.md]: https://github.com/cosmos/security/blob/main/EXAMPLES.md
+[h1]: https://hackerone.com/cosmos?type=team
+[h1-policy]: https://hackerone.com/cosmos?type=team&view_policy=true
diff --git a/UPGRADING.md b/UPGRADING.md
index 9e17b7d0f3e..bdff8ded239 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -4,62 +4,43 @@ This guide provides instructions for upgrading to specific versions of CometBFT.

 ## Unreleased

+CometBFT `v1.0` includes some substantial breaking API changes that will hopefully
+allow future changes to be rolled out more quickly.
+
+### Versioning
+
+As of v1.0, the CometBFT team provides the following guarantees relating to
+versioning:
+
+- **Major version** bumps, such as v1.0.0 to v2.0.0, would generally involve
+  changes that _force_ users to perform a coordinated upgrade in order to use
+  the new version, such as protocol-breaking changes (e.g. changes to how block
+  hashes are computed and thus what the network considers to be "valid blocks",
+  or how the consensus protocol works, or changes that affect network-level
+  compatibility between nodes, etc.).
+- **Minor version** bumps, such as v1.1.0 to v1.2.0, are reserved for rolling
+  out new features or substantial changes that do not force a coordinated
+  upgrade (i.e. not protocol-breaking), but could potentially break Go APIs.
+- **Patch version** bumps, such as v1.0.0 to v1.0.1, are reserved for
+  bug/security fixes that are not protocol- or Go API-breaking.
+
 ### Building CometBFT

-The minimum Go version has been bumped to [v1.21][go121].
+The minimum Go version has been bumped to [v1.23][go123].

-### Mempool Changes
+### Upgrading Guide (`v0.38` -> `v1.0`)
+
+Starting with the `v1.0` release, instead of providing detailed information
+about new features, changes, and other relevant details for upgrading to CometBFT `v1.0` in this document,
+we have created a comprehensive upgrading guide from the previous `v0.38.x` release line to this new `v1.0` release.
+This guide is a valuable resource when upgrading to the CometBFT `v1.0` release.
+
+The upgrading guide includes detailed information about major new features in CometBFT `v1.0`, such as PBTS and the
+Data Companion API, as well as several enhancements and the configuration and genesis updates needed for a smoother
+transition to the new `v1.0` version.

-* The `Mempool` interface was modified on the following methods. Note that this
-  interface is meant for internal use only, so you should be aware of these
-  changes only if you happen to call these methods directly.
-  * `CheckTx`'s signature changed from
-    `CheckTx(tx types.Tx, cb func(*abci.ResponseCheckTx), txInfo TxInfo) error`
-    to `CheckTx(tx types.Tx) (abcicli.ReqRes, error)`.
-  * The method used to take a callback function `cb` to be applied to the ABCI
-    `CheckTx` response. Now `CheckTx` returns the ABCI response of type
-    `abcicli.ReqRes`, on which the callback must be applied manually. For
-    example:
-
-    ```golang
-    reqRes, err := CheckTx(tx)
-    cb(reqRes.Response.GetCheckTx())
-    ```
-
-  * The second parameter was `txInfo`, which essentially contained information
-    about the sender of the transaction. Now that information is stored in the
-    mempool reactor instead of the data structure, so it is no longer needed in
-    this method.
-
-### Consensus Changes
-
-* Removed the `consensus.State.ReplayFile` and `consensus.RunReplayFile`
-  methods, as these were exclusively used by the `replay` and `replay-console`
-  subcommands, which were also removed
-  ([\#1170](https://github.com/cometbft/cometbft/pull/1170))
-
-### Command Line Subcommands
-
-* Removed the `replay` and `replay-console` subcommands
-  ([\#1170](https://github.com/cometbft/cometbft/pull/1170))
-
-### RPC API
-
-* The RPC API is now versioned.
-  Although invoking methods without specifying the version is still supported for now,
-  support will be dropped in future releases and users are urged to use the versioned
-  approach.
-  For example, instead of `curl localhost:26657/block?height=5`, use
-  `curl localhost:26657/v1/block?height=5`.
-
-* The `/websocket` endpoint path is no longer configurable in the client or
-  server. Creating an RPC client now takes the form:
-
-  ```golang
-  // The WebSocket endpoint in the following example is assumed to be available
-  // at http://localhost:26657/v1/websocket
-  rpcClient, err := client.New("http://localhost:26657/v1")
-  ```
+Please see the [Upgrading from CometBFT v0.38 to v1.0](/docs/guides/upgrades/v0.38-to-v1.0.md)
+guide for more information.

 ## v0.38.0

@@ -69,44 +50,44 @@ coordinated upgrade.

 ### Config Changes

-* The field `Version` in the mempool section has been removed. The priority
+- The field `Version` in the mempool section has been removed. The priority
   mempool (what was called version `v1`) has been removed (see below), thus
   there is only one implementation of the mempool available (what was called
   `v0`).
-* Config fields `TTLDuration` and `TTLNumBlocks`, which were only used by the
+- Config fields `TTLDuration` and `TTLNumBlocks`, which were only used by the
   priority mempool, have been removed.

 ### Mempool Changes

-* The priority mempool (what was referred in the code as version `v1`) has been
+- The priority mempool (what was referred to in the code as version `v1`) has been
   removed. There is now only one mempool (what was called version `v0`), that
   is, the default implementation as a queue of transactions.
-* In the protobuf message `ResponseCheckTx`, fields `sender`, `priority`, and
+- In the protobuf message `ResponseCheckTx`, fields `sender`, `priority`, and
   `mempool_error`, which were only used by the priority mempool, were removed
   but still kept in the message as "reserved".

 ### ABCI Changes

-* The `ABCIVersion` is now `2.0.0`.
-* Added new ABCI methods `ExtendVote`, and `VerifyVoteExtension`.
+- The `ABCIVersion` is now `2.0.0`.
+- Added new ABCI methods `ExtendVote` and `VerifyVoteExtension`.
   Applications upgrading to v0.38.0 must implement these methods as described
   [here](./spec/abci/abci%2B%2B_comet_expected_behavior.md#adapting-existing-applications-that-use-abci)
-* Removed methods `BeginBlock`, `DeliverTx`, `EndBlock`, and replaced them by
+- Removed methods `BeginBlock`, `DeliverTx`, `EndBlock`, and replaced them by
   method `FinalizeBlock`. Applications upgrading to `v0.38.0` must refactor
   the logic handling the methods removed to handle `FinalizeBlock`.
-* The Application's hash (or any data representing the Application's current state)
+- The Application's hash (or any data representing the Application's current state)
   is known by the time `FinalizeBlock` finishes its execution.
   Accordingly, the `app_hash` parameter has been moved from `ResponseCommit`
   to `ResponseFinalizeBlock`.
-* Field `signed_last_block` in structure `VoteInfo` has been replaced by the
+- Field `signed_last_block` in structure `VoteInfo` has been replaced by the
   more expressive `block_id_flag`. Applications willing to keep the semantics
   of `signed_last_block` can now use the following predicate
-  * `voteInfo.block_id_flag != BlockIDFlagAbsent`
-* For further details, please see the updated [specification](spec/abci/README.md)
+  - `voteInfo.block_id_flag != BlockIDFlagAbsent`
+- For further details, please see the updated [specification](spec/abci/README.md)

 ### `block_results` RPC endpoint - query result display change (breaking)

-* When returning a block, all block events are displayed within the `finalize_block_events` field.
+- When returning a block, all block events are displayed within the `finalize_block_events` field.
For blocks generated with older versions of CometBFT, that means that block results that appeared as `begin_block_events` and `end_block_events` are merged into `finalize_block_events`. For users who rely on the events to be grouped by the function they were generated by, this change @@ -117,7 +98,7 @@ coordinated upgrade. The changes described here are internal to the implementation of the kvindexer, and they are transparent to the user. However, if you own a fork with a modified version of the indexer, you should be aware of these changes. -* Indexer key for block events will not contain information about the function that returned the event. +- Indexer key for block events will not contain information about the function that returned the event. The events were indexed by their attributes, event type, the function that returned them, the height and event sequence. The functions returning events in old (pre `v0.38.0`) versions of CometBFT were `BeginBlock` or `EndBlock`. As events are returned now only via `FinalizeBlock`, the value of this field has no use, and has been removed. @@ -140,25 +121,25 @@ now changed to `github.com/cometbft/cometbft`. ### ABCI Changes -* The `ABCIVersion` is now `1.0.0`. -* Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details, +- The `ABCIVersion` is now `1.0.0`. +- Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details, please see the [spec](spec/abci/README.md). Applications upgrading to v0.37.0 must implement these methods, at the very minimum, as described [here](./spec/abci/abci++_app_requirements.md) -* Deduplicated `ConsensusParams` and `BlockParams`. +- Deduplicated `ConsensusParams` and `BlockParams`. In the v0.34 branch they are defined both in `abci/types.proto` and `types/params.proto`. The definitions in `abci/types.proto` have been removed. In-process applications should make sure they are not using the deleted version of those structures. -* In v0.34, messages on the wire used to be length-delimited with `int64` varint +- In v0.34, messages on the wire used to be length-delimited with `int64` varint values, which was inconsistent with the `uint64` varint length delimiters used in the P2P layer. Both now consistently use `uint64` varint length delimiters. -* Added `AbciVersion` to `RequestInfo`. +- Added `AbciVersion` to `RequestInfo`. Applications should check that CometBFT's ABCI version matches the one they expect in order to ensure compatibility. -* The `SetOption` method has been removed from the ABCI `Client` interface. +- The `SetOption` method has been removed from the ABCI `Client` interface. The corresponding Protobuf types have been deprecated. -* The `key` and `value` fields in the `EventAttribute` type have been changed +- The `key` and `value` fields in the `EventAttribute` type have been changed from type `bytes` to `string`. As per the [Protocol Buffers updating guidelines](https://developers.google.com/protocol-buffers/docs/proto3#updating), this should have no effect on the wire-level encoding for UTF8-encoded @@ -232,4 +213,5 @@ please see the [Tendermint Core upgrading instructions][tmupgrade]. 
[discussions]: https://github.com/cometbft/cometbft/discussions
[tmupgrade]: https://github.com/tendermint/tendermint/blob/35581cf54ec436b8c37fabb43fdaa3f48339a170/UPGRADING.md
[go120]: https://go.dev/blog/go1.20
-[go121]: https://go.dev/blog/go1.21
+[go123]: https://go.dev/blog/go1.23
+[pbts-spec]: ./spec/consensus/proposer-based-timestamp/README.md
diff --git a/abci/README.md b/abci/README.md
index e83e61d42a8..fdbe05ac579 100644
--- a/abci/README.md
+++ b/abci/README.md
@@ -12,14 +12,14 @@ Previously, the ABCI was referred to as TMSP.

 ## Installation & Usage

-To get up and running quickly, see the [getting started guide](../docs/app-dev/getting-started.md) along with the [abci-cli documentation](../docs/app-dev/abci-cli.md) which will go through the examples found in the [examples](./example/) directory.
+To get up and running quickly, see the [getting started guide](../docs/guides/app-dev/getting-started.md) along with the [abci-cli documentation](../docs/guides/app-dev/abci-cli.md) which will go through the examples found in the [examples](./example/) directory.

 ## Specification

 A detailed description of the ABCI methods and message types is contained in:

 - [The main spec](https://github.com/cometbft/cometbft/blob/main/spec/abci/README.md)
-- [A protobuf file](../proto/tendermint/types/types.proto)
+- [A protobuf file](../proto/cometbft/types/v1/types.proto)
 - [A Go interface](./types/application.go)

 ## Protocol Buffers

diff --git a/abci/client/client.go b/abci/client/client.go
index b274cd3c233..60f3e7fc681 100644
--- a/abci/client/client.go
+++ b/abci/client/client.go
@@ -28,22 +28,29 @@ type Client interface {
 	// TODO: remove as each method now returns an error
 	Error() error
 	// TODO: remove as this is not implemented
-	Flush(context.Context) error
-	Echo(context.Context, string) (*types.ResponseEcho, error)
+	Flush(ctx context.Context) error
+	Echo(ctx context.Context, echo string) (*types.EchoResponse, error)

 	// FIXME: All other operations are run synchronously and rely
 	// on the caller to dictate concurrency (i.e. run a go routine),
 	// with the exception of `CheckTxAsync` which we maintain
 	// for the v0 mempool. We should explore refactoring the
 	// mempool to remove this vestige behavior.
-	SetResponseCallback(Callback)
-	CheckTxAsync(context.Context, *types.RequestCheckTx) (*ReqRes, error)
+	//
+	// SetResponseCallback is not used anymore. The callback was invoked only by the mempool on
+	// CheckTx responses, only during rechecking. Now the responses are handled by the callback of
+	// the *ReqRes struct returned by CheckTxAsync. This callback is more flexible as it allows
+	// passing other information, such as the sender.
+	//
+	// Deprecated: Do not use.
+	SetResponseCallback(cb Callback)
+	CheckTxAsync(ctx context.Context, req *types.CheckTxRequest) (*ReqRes, error)
 }

-//----------------------------------------
+// ----------------------------------------

 // NewClient returns a new ABCI client of the specified transport type.
-// It returns an error if the transport is not "socket" or "grpc"
+// It returns an error if the transport is not "socket" or "grpc".
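+//
+// A minimal usage sketch, illustrative only (the address is hypothetical and
+// error handling is elided); it shows the new CheckTxAsync/ReqRes flow, in
+// which the callback itself returns an error that can later be read back via
+// ReqRes.Error():
+//
+//	cli, _ := NewClient("tcp://127.0.0.1:26658", "socket", true)
+//	reqRes, _ := cli.CheckTxAsync(ctx, &types.CheckTxRequest{
+//		Tx:   tx,
+//		Type: types.CHECK_TX_TYPE_CHECK,
+//	})
+//	reqRes.SetCallback(func(res *types.Response) error {
+//		_ = res.GetCheckTx() // inspect the CheckTx result here
+//		return nil
+//	})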
func NewClient(addr, transport string, mustConnect bool) (client Client, err error) { switch transport { case "socket": @@ -53,7 +60,7 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err default: err = ErrUnknownAbciTransport{Transport: transport} } - return + return client, err } type Callback func(*types.Request, *types.Response) @@ -71,7 +78,8 @@ type ReqRes struct { // invoking the callback twice by accident, once when 'SetCallback' is // called and once during the normal request. callbackInvoked bool - cb func(*types.Response) // A single callback that may be set. + cb func(*types.Response) error // A single callback that may be set. + cbErr error } func NewReqRes(req *types.Request) *ReqRes { @@ -85,15 +93,15 @@ func NewReqRes(req *types.Request) *ReqRes { } } -// Sets sets the callback. If reqRes is already done, it will call the cb +// SetCallback sets the callback. If reqRes is already done, it will call the cb // immediately. Note, reqRes.cb should not change if reqRes.done and only one // callback is supported. -func (r *ReqRes) SetCallback(cb func(res *types.Response)) { +func (r *ReqRes) SetCallback(cb func(res *types.Response) error) { r.mtx.Lock() if r.callbackInvoked { r.mtx.Unlock() - cb(r.Response) + r.cbErr = cb(r.Response) return } @@ -107,26 +115,19 @@ func (r *ReqRes) InvokeCallback() { r.mtx.Lock() defer r.mtx.Unlock() - if r.cb != nil { - r.cb(r.Response) + if r.cb != nil && r.Response != nil { + r.cbErr = r.cb(r.Response) } r.callbackInvoked = true } -// GetCallback returns the configured callback of the ReqRes object which may be -// nil. Note, it is not safe to concurrently call this in cases where it is -// marked done and SetCallback is called before calling GetCallback as that -// will invoke the callback twice and create a potential race condition. -// -// ref: https://github.com/tendermint/tendermint/issues/5439 -func (r *ReqRes) GetCallback() func(*types.Response) { - r.mtx.Lock() - defer r.mtx.Unlock() - return r.cb +// Error returns the error returned by the callback, if any. +func (r *ReqRes) Error() error { + return r.cbErr } func waitGroup1() (wg *sync.WaitGroup) { wg = &sync.WaitGroup{} wg.Add(1) - return + return wg } diff --git a/abci/client/errors.go b/abci/client/errors.go index f8bfb1ec8bc..a7f3edc4d61 100644 --- a/abci/client/errors.go +++ b/abci/client/errors.go @@ -6,13 +6,13 @@ import ( "github.com/cometbft/cometbft/abci/types" ) -// ErrUnknownAbciTransport is returned when trying to create a client with an invalid transport option +// ErrUnknownAbciTransport is returned when trying to create a client with an invalid transport option. type ErrUnknownAbciTransport struct { Transport string } func (e ErrUnknownAbciTransport) Error() string { - return fmt.Sprintf("unknown abci transport: %s", e.Transport) + return "unknown abci transport: " + e.Transport } type ErrUnexpectedResponse struct { diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 926e679d687..02ef0118ebd 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -11,26 +11,26 @@ import ( "google.golang.org/grpc/credentials/insecure" "github.com/cometbft/cometbft/abci/types" - cmtnet "github.com/cometbft/cometbft/libs/net" + cmtnet "github.com/cometbft/cometbft/internal/net" "github.com/cometbft/cometbft/libs/service" ) var _ Client = (*grpcClient)(nil) // A stripped copy of the remoteClient that makes -// synchronous calls using grpc +// synchronous calls using grpc. 
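+//
+// A construction sketch (the address is hypothetical; mustConnect has the
+// same meaning as in NewClient above):
+//
+//	cli := NewGRPCClient("127.0.0.1:26658", true)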
type grpcClient struct { service.BaseService mustConnect bool - client types.ABCIClient + client types.ABCIServiceClient conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool mtx sync.Mutex addr string err error - resCb func(*types.Request, *types.Response) // listens to all callbacks + resCb Callback // listens to all callbacks } func NewGRPCClient(addr string, mustConnect bool) Client { @@ -87,7 +87,7 @@ func (cli *grpcClient) OnStart() error { RETRY_LOOP: for { - conn, err := grpc.Dial(cli.addr, + conn, err := grpc.NewClient(cli.addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc), ) @@ -106,7 +106,7 @@ RETRY_LOOP: ENSURE_CONNECTED: for { - _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true)) + _, err := client.Echo(context.Background(), &types.EchoRequest{Message: "hello"}, grpc.WaitForReady(true)) if err == nil { break ENSURE_CONNECTED } @@ -151,23 +151,23 @@ func (cli *grpcClient) Error() error { return cli.err } -// Set listener for all responses -// NOTE: callback may get internally generated flush responses. +// SetResponseCallback sets a listener for all responses. +// NOTE: The callback may receive internally generated flush responses. func (cli *grpcClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() cli.resCb = resCb cli.mtx.Unlock() } -//---------------------------------------- +// ---------------------------------------- -func (cli *grpcClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) { +func (cli *grpcClient) CheckTxAsync(ctx context.Context, req *types.CheckTxRequest) (*ReqRes, error) { res, err := cli.client.CheckTx(ctx, req, grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) return nil, err } - return cli.finishAsyncCall(types.ToRequestCheckTx(req), &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}), nil + return cli.finishAsyncCall(types.ToCheckTxRequest(req), &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}), nil } // finishAsyncCall creates a ReqRes for an async call, and immediately populates it @@ -179,69 +179,69 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) return reqres } -//---------------------------------------- +// ---------------------------------------- func (cli *grpcClient) Flush(ctx context.Context) error { - _, err := cli.client.Flush(ctx, types.ToRequestFlush().GetFlush(), grpc.WaitForReady(true)) + _, err := cli.client.Flush(ctx, types.ToFlushRequest().GetFlush(), grpc.WaitForReady(true)) return err } -func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { - return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true)) +func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.EchoResponse, error) { + return cli.client.Echo(ctx, types.ToEchoRequest(msg).GetEcho(), grpc.WaitForReady(true)) } -func (cli *grpcClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { +func (cli *grpcClient) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) { return cli.client.Info(ctx, req, grpc.WaitForReady(true)) } -func (cli *grpcClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (cli *grpcClient) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { return 
cli.client.CheckTx(ctx, req, grpc.WaitForReady(true)) } -func (cli *grpcClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { - return cli.client.Query(ctx, types.ToRequestQuery(req).GetQuery(), grpc.WaitForReady(true)) +func (cli *grpcClient) Query(ctx context.Context, req *types.QueryRequest) (*types.QueryResponse, error) { + return cli.client.Query(ctx, types.ToQueryRequest(req).GetQuery(), grpc.WaitForReady(true)) } -func (cli *grpcClient) Commit(ctx context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { - return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true)) +func (cli *grpcClient) Commit(ctx context.Context, _ *types.CommitRequest) (*types.CommitResponse, error) { + return cli.client.Commit(ctx, types.ToCommitRequest().GetCommit(), grpc.WaitForReady(true)) } -func (cli *grpcClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { - return cli.client.InitChain(ctx, types.ToRequestInitChain(req).GetInitChain(), grpc.WaitForReady(true)) +func (cli *grpcClient) InitChain(ctx context.Context, req *types.InitChainRequest) (*types.InitChainResponse, error) { + return cli.client.InitChain(ctx, types.ToInitChainRequest(req).GetInitChain(), grpc.WaitForReady(true)) } -func (cli *grpcClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - return cli.client.ListSnapshots(ctx, types.ToRequestListSnapshots(req).GetListSnapshots(), grpc.WaitForReady(true)) +func (cli *grpcClient) ListSnapshots(ctx context.Context, req *types.ListSnapshotsRequest) (*types.ListSnapshotsResponse, error) { + return cli.client.ListSnapshots(ctx, types.ToListSnapshotsRequest(req).GetListSnapshots(), grpc.WaitForReady(true)) } -func (cli *grpcClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - return cli.client.OfferSnapshot(ctx, types.ToRequestOfferSnapshot(req).GetOfferSnapshot(), grpc.WaitForReady(true)) +func (cli *grpcClient) OfferSnapshot(ctx context.Context, req *types.OfferSnapshotRequest) (*types.OfferSnapshotResponse, error) { + return cli.client.OfferSnapshot(ctx, types.ToOfferSnapshotRequest(req).GetOfferSnapshot(), grpc.WaitForReady(true)) } -func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - return cli.client.LoadSnapshotChunk(ctx, types.ToRequestLoadSnapshotChunk(req).GetLoadSnapshotChunk(), grpc.WaitForReady(true)) +func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, req *types.LoadSnapshotChunkRequest) (*types.LoadSnapshotChunkResponse, error) { + return cli.client.LoadSnapshotChunk(ctx, types.ToLoadSnapshotChunkRequest(req).GetLoadSnapshotChunk(), grpc.WaitForReady(true)) } -func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - return cli.client.ApplySnapshotChunk(ctx, types.ToRequestApplySnapshotChunk(req).GetApplySnapshotChunk(), grpc.WaitForReady(true)) +func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, req *types.ApplySnapshotChunkRequest) (*types.ApplySnapshotChunkResponse, error) { + return cli.client.ApplySnapshotChunk(ctx, types.ToApplySnapshotChunkRequest(req).GetApplySnapshotChunk(), grpc.WaitForReady(true)) } -func (cli *grpcClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) 
(*types.ResponsePrepareProposal, error) { - return cli.client.PrepareProposal(ctx, types.ToRequestPrepareProposal(req).GetPrepareProposal(), grpc.WaitForReady(true)) +func (cli *grpcClient) PrepareProposal(ctx context.Context, req *types.PrepareProposalRequest) (*types.PrepareProposalResponse, error) { + return cli.client.PrepareProposal(ctx, types.ToPrepareProposalRequest(req).GetPrepareProposal(), grpc.WaitForReady(true)) } -func (cli *grpcClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - return cli.client.ProcessProposal(ctx, types.ToRequestProcessProposal(req).GetProcessProposal(), grpc.WaitForReady(true)) +func (cli *grpcClient) ProcessProposal(ctx context.Context, req *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error) { + return cli.client.ProcessProposal(ctx, types.ToProcessProposalRequest(req).GetProcessProposal(), grpc.WaitForReady(true)) } -func (cli *grpcClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { - return cli.client.ExtendVote(ctx, types.ToRequestExtendVote(req).GetExtendVote(), grpc.WaitForReady(true)) +func (cli *grpcClient) ExtendVote(ctx context.Context, req *types.ExtendVoteRequest) (*types.ExtendVoteResponse, error) { + return cli.client.ExtendVote(ctx, types.ToExtendVoteRequest(req).GetExtendVote(), grpc.WaitForReady(true)) } -func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { - return cli.client.VerifyVoteExtension(ctx, types.ToRequestVerifyVoteExtension(req).GetVerifyVoteExtension(), grpc.WaitForReady(true)) +func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, req *types.VerifyVoteExtensionRequest) (*types.VerifyVoteExtensionResponse, error) { + return cli.client.VerifyVoteExtension(ctx, types.ToVerifyVoteExtensionRequest(req).GetVerifyVoteExtension(), grpc.WaitForReady(true)) } -func (cli *grpcClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { - return cli.client.FinalizeBlock(ctx, types.ToRequestFinalizeBlock(req).GetFinalizeBlock(), grpc.WaitForReady(true)) +func (cli *grpcClient) FinalizeBlock(ctx context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) { + return cli.client.FinalizeBlock(ctx, types.ToFinalizeBlockRequest(req).GetFinalizeBlock(), grpc.WaitForReady(true)) } diff --git a/abci/client/grpc_client_test.go b/abci/client/grpc_client_test.go index 95d6e481cd6..26f23146460 100644 --- a/abci/client/grpc_client_test.go +++ b/abci/client/grpc_client_test.go @@ -9,17 +9,14 @@ import ( "time" "github.com/stretchr/testify/require" - - "google.golang.org/grpc" - "golang.org/x/net/context" - - "github.com/cometbft/cometbft/libs/log" - cmtnet "github.com/cometbft/cometbft/libs/net" + "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" abciserver "github.com/cometbft/cometbft/abci/server" "github.com/cometbft/cometbft/abci/types" + cmtnet "github.com/cometbft/cometbft/internal/net" + "github.com/cometbft/cometbft/libs/log" ) func TestGRPC(t *testing.T) { @@ -42,7 +39,7 @@ func TestGRPC(t *testing.T) { }) // Connect to the socket - conn, err := grpc.Dial(socket, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.NewClient(socket, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) 
require.NoError(t, err) t.Cleanup(func() { @@ -56,7 +53,12 @@ func TestGRPC(t *testing.T) { // Write requests for counter := 0; counter < numCheckTxs; counter++ { // Send request - response, err := client.CheckTx(context.Background(), &types.RequestCheckTx{Tx: []byte("test")}) + response, err := client.CheckTx( + context.Background(), + &types.CheckTxRequest{ + Tx: []byte("test"), + Type: types.CHECK_TX_TYPE_CHECK, + }) require.NoError(t, err) counter++ if response.Code != 0 { @@ -71,7 +73,6 @@ func TestGRPC(t *testing.T) { time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow }() } - } } diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 95648b4bf8f..8edb0fc1fba 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -3,7 +3,7 @@ package abcicli import ( "context" - types "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" ) @@ -46,7 +46,7 @@ func (app *localClient) SetResponseCallback(cb Callback) { app.mtx.Unlock() } -func (app *localClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) { +func (app *localClient) CheckTxAsync(ctx context.Context, req *types.CheckTxRequest) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -55,13 +55,15 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req *types.RequestChec return nil, err } return app.callback( - types.ToRequestCheckTx(req), - types.ToResponseCheckTx(res), + types.ToCheckTxRequest(req), + types.ToCheckTxResponse(res), ), nil } func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes { - app.Callback(req, res) + if app.Callback != nil { + app.Callback(req, res) + } rr := newLocalReqRes(req, res) rr.callbackInvoked = true return rr @@ -70,66 +72,67 @@ func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRe func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes { reqRes := NewReqRes(req) reqRes.Response = res + reqRes.Done() // release waiters on response return reqRes } -//------------------------------------------------------- +// ------------------------------------------------------- -func (app *localClient) Error() error { +func (*localClient) Error() error { return nil } -func (app *localClient) Flush(context.Context) error { +func (*localClient) Flush(context.Context) error { return nil } -func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) { - return &types.ResponseEcho{Message: msg}, nil +func (*localClient) Echo(_ context.Context, msg string) (*types.EchoResponse, error) { + return &types.EchoResponse{Message: msg}, nil } -func (app *localClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { +func (app *localClient) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.Info(ctx, req) } -func (app *localClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (app *localClient) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.CheckTx(ctx, req) } -func (app *localClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { +func (app *localClient) Query(ctx context.Context, req 
*types.QueryRequest) (*types.QueryResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.Query(ctx, req) } -func (app *localClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) { +func (app *localClient) Commit(ctx context.Context, req *types.CommitRequest) (*types.CommitResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.Commit(ctx, req) } -func (app *localClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { +func (app *localClient) InitChain(ctx context.Context, req *types.InitChainRequest) (*types.InitChainResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.InitChain(ctx, req) } -func (app *localClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +func (app *localClient) ListSnapshots(ctx context.Context, req *types.ListSnapshotsRequest) (*types.ListSnapshotsResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.ListSnapshots(ctx, req) } -func (app *localClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +func (app *localClient) OfferSnapshot(ctx context.Context, req *types.OfferSnapshotRequest) (*types.OfferSnapshotResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -137,8 +140,8 @@ func (app *localClient) OfferSnapshot(ctx context.Context, req *types.RequestOff } func (app *localClient) LoadSnapshotChunk(ctx context.Context, - req *types.RequestLoadSnapshotChunk, -) (*types.ResponseLoadSnapshotChunk, error) { + req *types.LoadSnapshotChunkRequest, +) (*types.LoadSnapshotChunkResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -146,43 +149,43 @@ func (app *localClient) LoadSnapshotChunk(ctx context.Context, } func (app *localClient) ApplySnapshotChunk(ctx context.Context, - req *types.RequestApplySnapshotChunk, -) (*types.ResponseApplySnapshotChunk, error) { + req *types.ApplySnapshotChunkRequest, +) (*types.ApplySnapshotChunkResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.ApplySnapshotChunk(ctx, req) } -func (app *localClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { +func (app *localClient) PrepareProposal(ctx context.Context, req *types.PrepareProposalRequest) (*types.PrepareProposalResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.PrepareProposal(ctx, req) } -func (app *localClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { +func (app *localClient) ProcessProposal(ctx context.Context, req *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.ProcessProposal(ctx, req) } -func (app *localClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { +func (app *localClient) ExtendVote(ctx context.Context, req *types.ExtendVoteRequest) (*types.ExtendVoteResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.ExtendVote(ctx, req) } -func (app *localClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { +func (app *localClient) VerifyVoteExtension(ctx context.Context, req *types.VerifyVoteExtensionRequest) 
(*types.VerifyVoteExtensionResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.Application.VerifyVoteExtension(ctx, req) } -func (app *localClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { +func (app *localClient) FinalizeBlock(ctx context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) { app.mtx.Lock() defer app.mtx.Unlock() diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 9035b33d143..e18372bde4e 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -11,7 +11,7 @@ import ( mock "github.com/stretchr/testify/mock" - types "github.com/cometbft/cometbft/abci/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // Client is an autogenerated mock type for the Client type @@ -19,25 +19,29 @@ type Client struct { mock.Mock } -// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - ret := _m.Called(_a0, _a1) +// ApplySnapshotChunk provides a mock function with given fields: ctx, req +func (_m *Client) ApplySnapshotChunk(ctx context.Context, req *v1.ApplySnapshotChunkRequest) (*v1.ApplySnapshotChunkResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseApplySnapshotChunk + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + + var r0 *v1.ApplySnapshotChunkResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ApplySnapshotChunkRequest) (*v1.ApplySnapshotChunkResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ApplySnapshotChunkRequest) *v1.ApplySnapshotChunkResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + r0 = ret.Get(0).(*v1.ApplySnapshotChunkResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ApplySnapshotChunkRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -45,25 +49,29 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl return r0, r1 } -// CheckTx provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { - ret := _m.Called(_a0, _a1) +// CheckTx provides a mock function with given fields: ctx, req +func (_m *Client) CheckTx(ctx context.Context, req *v1.CheckTxRequest) (*v1.CheckTxResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } - var r0 *types.ResponseCheckTx + var r0 *v1.CheckTxResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) (*v1.CheckTxResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, 
*types.RequestCheckTx) *types.ResponseCheckTx); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) *v1.CheckTxResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCheckTx) + r0 = ret.Get(0).(*v1.CheckTxResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.CheckTxRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -71,25 +79,29 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type return r0, r1 } -// CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (*abcicli.ReqRes, error) { - ret := _m.Called(_a0, _a1) +// CheckTxAsync provides a mock function with given fields: ctx, req +func (_m *Client) CheckTxAsync(ctx context.Context, req *v1.CheckTxRequest) (*abcicli.ReqRes, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for CheckTxAsync") + } var r0 *abcicli.ReqRes var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*abcicli.ReqRes, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) (*abcicli.ReqRes, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *abcicli.ReqRes); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) *abcicli.ReqRes); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.CheckTxRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -97,25 +109,29 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) ( return r0, r1 } -// Commit provides a mock function with given fields: _a0, _a1 -func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) { - ret := _m.Called(_a0, _a1) +// Commit provides a mock function with given fields: ctx, req +func (_m *Client) Commit(ctx context.Context, req *v1.CommitRequest) (*v1.CommitResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseCommit + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 *v1.CommitResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) (*types.ResponseCommit, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CommitRequest) (*v1.CommitResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) *types.ResponseCommit); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CommitRequest) *v1.CommitResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCommit) + r0 = ret.Get(0).(*v1.CommitResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCommit) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.CommitRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -123,25 +139,29 @@ func (_m *Client) Commit(_a0 context.Context, _a1 
*types.RequestCommit) (*types. return r0, r1 } -// Echo provides a mock function with given fields: _a0, _a1 -func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { - ret := _m.Called(_a0, _a1) +// Echo provides a mock function with given fields: ctx, echo +func (_m *Client) Echo(ctx context.Context, echo string) (*v1.EchoResponse, error) { + ret := _m.Called(ctx, echo) + + if len(ret) == 0 { + panic("no return value specified for Echo") + } - var r0 *types.ResponseEcho + var r0 *v1.EchoResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*types.ResponseEcho, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, string) (*v1.EchoResponse, error)); ok { + return rf(ctx, echo) } - if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, string) *v1.EchoResponse); ok { + r0 = rf(ctx, echo) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEcho) + r0 = ret.Get(0).(*v1.EchoResponse) } } if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(_a0, _a1) + r1 = rf(ctx, echo) } else { r1 = ret.Error(1) } @@ -153,6 +173,10 @@ func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, er func (_m *Client) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -163,25 +187,29 @@ func (_m *Client) Error() error { return r0 } -// ExtendVote provides a mock function with given fields: _a0, _a1 -func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { - ret := _m.Called(_a0, _a1) +// ExtendVote provides a mock function with given fields: ctx, req +func (_m *Client) ExtendVote(ctx context.Context, req *v1.ExtendVoteRequest) (*v1.ExtendVoteResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } - var r0 *types.ResponseExtendVote + var r0 *v1.ExtendVoteResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ExtendVoteRequest) (*v1.ExtendVoteResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ExtendVoteRequest) *v1.ExtendVoteResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseExtendVote) + r0 = ret.Get(0).(*v1.ExtendVoteResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ExtendVoteRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -189,25 +217,29 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) return r0, r1 } -// FinalizeBlock provides a mock function with given fields: _a0, _a1 -func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { - ret := _m.Called(_a0, _a1) +// FinalizeBlock provides a mock function with given fields: ctx, req +func (_m *Client) FinalizeBlock(ctx context.Context, req 
*v1.FinalizeBlockRequest) (*v1.FinalizeBlockResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } - var r0 *types.ResponseFinalizeBlock + var r0 *v1.FinalizeBlockResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.FinalizeBlockRequest) (*v1.FinalizeBlockResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.FinalizeBlockRequest) *v1.FinalizeBlockResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseFinalizeBlock) + r0 = ret.Get(0).(*v1.FinalizeBlockResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.FinalizeBlockRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -215,13 +247,17 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeB return r0, r1 } -// Flush provides a mock function with given fields: _a0 -func (_m *Client) Flush(_a0 context.Context) error { - ret := _m.Called(_a0) +// Flush provides a mock function with given fields: ctx +func (_m *Client) Flush(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Flush") + } var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(_a0) + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -229,25 +265,29 @@ func (_m *Client) Flush(_a0 context.Context) error { return r0 } -// Info provides a mock function with given fields: _a0, _a1 -func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { - ret := _m.Called(_a0, _a1) +// Info provides a mock function with given fields: ctx, req +func (_m *Client) Info(ctx context.Context, req *v1.InfoRequest) (*v1.InfoResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for Info") + } - var r0 *types.ResponseInfo + var r0 *v1.InfoResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InfoRequest) (*v1.InfoResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InfoRequest) *v1.InfoResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInfo) + r0 = ret.Get(0).(*v1.InfoResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.InfoRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -255,25 +295,29 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp return r0, r1 } -// InitChain provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { - ret := _m.Called(_a0, _a1) +// InitChain 
provides a mock function with given fields: ctx, req +func (_m *Client) InitChain(ctx context.Context, req *v1.InitChainRequest) (*v1.InitChainResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseInitChain + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + + var r0 *v1.InitChainResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InitChainRequest) (*v1.InitChainResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InitChainRequest) *v1.InitChainResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInitChain) + r0 = ret.Get(0).(*v1.InitChainResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.InitChainRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -285,6 +329,10 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (* func (_m *Client) IsRunning() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsRunning") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -295,25 +343,29 @@ func (_m *Client) IsRunning() bool { return r0 } -// ListSnapshots provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - ret := _m.Called(_a0, _a1) +// ListSnapshots provides a mock function with given fields: ctx, req +func (_m *Client) ListSnapshots(ctx context.Context, req *v1.ListSnapshotsRequest) (*v1.ListSnapshotsResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } - var r0 *types.ResponseListSnapshots + var r0 *v1.ListSnapshotsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ListSnapshotsRequest) (*v1.ListSnapshotsResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ListSnapshotsRequest) *v1.ListSnapshotsResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseListSnapshots) + r0 = ret.Get(0).(*v1.ListSnapshotsResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ListSnapshotsRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -321,25 +373,29 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps return r0, r1 } -// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - ret := _m.Called(_a0, _a1) +// LoadSnapshotChunk provides a mock function 
with given fields: ctx, req +func (_m *Client) LoadSnapshotChunk(ctx context.Context, req *v1.LoadSnapshotChunkRequest) (*v1.LoadSnapshotChunkResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseLoadSnapshotChunk + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } + + var r0 *v1.LoadSnapshotChunkResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.LoadSnapshotChunkRequest) (*v1.LoadSnapshotChunkResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.LoadSnapshotChunkRequest) *v1.LoadSnapshotChunkResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + r0 = ret.Get(0).(*v1.LoadSnapshotChunkResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.LoadSnapshotChunkRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -347,25 +403,29 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS return r0, r1 } -// OfferSnapshot provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - ret := _m.Called(_a0, _a1) +// OfferSnapshot provides a mock function with given fields: ctx, req +func (_m *Client) OfferSnapshot(ctx context.Context, req *v1.OfferSnapshotRequest) (*v1.OfferSnapshotResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } - var r0 *types.ResponseOfferSnapshot + var r0 *v1.OfferSnapshotResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.OfferSnapshotRequest) (*v1.OfferSnapshotResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.OfferSnapshotRequest) *v1.OfferSnapshotResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + r0 = ret.Get(0).(*v1.OfferSnapshotResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.OfferSnapshotRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -377,6 +437,10 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap func (_m *Client) OnReset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnReset") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -391,6 +455,10 @@ func (_m *Client) OnReset() error { func (_m *Client) OnStart() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnStart") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { 
r0 = rf() @@ -406,25 +474,29 @@ func (_m *Client) OnStop() { _m.Called() } -// PrepareProposal provides a mock function with given fields: _a0, _a1 -func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - ret := _m.Called(_a0, _a1) +// PrepareProposal provides a mock function with given fields: ctx, req +func (_m *Client) PrepareProposal(ctx context.Context, req *v1.PrepareProposalRequest) (*v1.PrepareProposalResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } - var r0 *types.ResponsePrepareProposal + var r0 *v1.PrepareProposalResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.PrepareProposalRequest) (*v1.PrepareProposalResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.PrepareProposalRequest) *v1.PrepareProposalResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponsePrepareProposal) + r0 = ret.Get(0).(*v1.PrepareProposalResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.PrepareProposalRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -432,25 +504,29 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare return r0, r1 } -// ProcessProposal provides a mock function with given fields: _a0, _a1 -func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - ret := _m.Called(_a0, _a1) +// ProcessProposal provides a mock function with given fields: ctx, req +func (_m *Client) ProcessProposal(ctx context.Context, req *v1.ProcessProposalRequest) (*v1.ProcessProposalResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseProcessProposal + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + + var r0 *v1.ProcessProposalResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ProcessProposalRequest) (*v1.ProcessProposalResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ProcessProposalRequest) *v1.ProcessProposalResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseProcessProposal) + r0 = ret.Get(0).(*v1.ProcessProposalResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ProcessProposalRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -458,25 +534,29 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcess return r0, r1 } -// Query provides a mock function with given fields: _a0, _a1 
-func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { - ret := _m.Called(_a0, _a1) +// Query provides a mock function with given fields: ctx, req +func (_m *Client) Query(ctx context.Context, req *v1.QueryRequest) (*v1.QueryResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for Query") + } - var r0 *types.ResponseQuery + var r0 *v1.QueryResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.QueryRequest) (*v1.QueryResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.QueryRequest) *v1.QueryResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseQuery) + r0 = ret.Get(0).(*v1.QueryResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.QueryRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -488,6 +568,10 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re func (_m *Client) Quit() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Quit") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -504,6 +588,10 @@ func (_m *Client) Quit() <-chan struct{} { func (_m *Client) Reset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Reset") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -514,20 +602,24 @@ func (_m *Client) Reset() error { return r0 } -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) +// SetLogger provides a mock function with given fields: l +func (_m *Client) SetLogger(l log.Logger) { + _m.Called(l) } -// SetResponseCallback provides a mock function with given fields: _a0 -func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) { - _m.Called(_a0) +// SetResponseCallback provides a mock function with given fields: cb +func (_m *Client) SetResponseCallback(cb abcicli.Callback) { + _m.Called(cb) } // Start provides a mock function with given fields: func (_m *Client) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -542,6 +634,10 @@ func (_m *Client) Start() error { func (_m *Client) Stop() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Stop") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -556,6 +652,10 @@ func (_m *Client) Stop() error { func (_m *Client) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -566,25 +666,29 @@ func (_m *Client) String() string { return r0 } -// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 -func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { - ret := 
_m.Called(_a0, _a1) +// VerifyVoteExtension provides a mock function with given fields: ctx, req +func (_m *Client) VerifyVoteExtension(ctx context.Context, req *v1.VerifyVoteExtensionRequest) (*v1.VerifyVoteExtensionResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } - var r0 *types.ResponseVerifyVoteExtension + var r0 *v1.VerifyVoteExtensionResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.VerifyVoteExtensionRequest) (*v1.VerifyVoteExtensionResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.VerifyVoteExtensionRequest) *v1.VerifyVoteExtensionResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension) + r0 = ret.Get(0).(*v1.VerifyVoteExtensionResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.VerifyVoteExtensionRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index f25691401a6..874699f7309 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -12,9 +12,9 @@ import ( "time" "github.com/cometbft/cometbft/abci/types" - cmtnet "github.com/cometbft/cometbft/libs/net" + cmtnet "github.com/cometbft/cometbft/internal/net" + "github.com/cometbft/cometbft/internal/timer" "github.com/cometbft/cometbft/libs/service" - "github.com/cometbft/cometbft/libs/timer" ) const ( @@ -40,8 +40,8 @@ type socketClient struct { mtx sync.Mutex err error - reqSent *list.List // list of requests sent, waiting for response - resCb func(*types.Request, *types.Response) // called on all requests, if set. + reqSent *list.List // list of requests sent, waiting for response + resCb Callback // called on all requests, if set. } var _ Client = (*socketClient)(nil) @@ -108,7 +108,7 @@ func (cli *socketClient) Error() error { return cli.err } -//---------------------------------------- +// ---------------------------------------- // SetResponseCallback sets a callback, which will be executed for each // non-error & non-empty response from the server. @@ -120,11 +120,11 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) { cli.mtx.Unlock() } -func (cli *socketClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) { - return cli.queueRequest(ctx, types.ToRequestCheckTx(req)) +func (cli *socketClient) CheckTxAsync(ctx context.Context, req *types.CheckTxRequest) (*ReqRes, error) { + return cli.queueRequest(ctx, types.ToCheckTxRequest(req)) } -//---------------------------------------- +// ---------------------------------------- func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { w := bufio.NewWriter(conn) @@ -152,7 +152,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { } case <-cli.flushTimer.Ch: // flush queue select { - case cli.reqQueue <- NewReqRes(types.ToRequestFlush()): + case cli.reqQueue <- NewReqRes(types.ToFlushRequest()): default: // Probably will fill the buffer, or retry later. 
} @@ -236,10 +236,10 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { return nil } -//---------------------------------------- +// ---------------------------------------- func (cli *socketClient) Flush(ctx context.Context) error { - reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush()) + reqRes, err := cli.queueRequest(ctx, types.ToFlushRequest()) if err != nil { return err } @@ -247,8 +247,8 @@ func (cli *socketClient) Flush(ctx context.Context) error { return nil } -func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestEcho(msg)) +func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.EchoResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToEchoRequest(msg)) if err != nil { return nil, err } @@ -258,8 +258,8 @@ func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseE return reqRes.Response.GetEcho(), cli.Error() } -func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestInfo(req)) +func (cli *socketClient) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToInfoRequest(req)) if err != nil { return nil, err } @@ -269,8 +269,8 @@ func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*typ return reqRes.Response.GetInfo(), cli.Error() } -func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestCheckTx(req)) +func (cli *socketClient) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToCheckTxRequest(req)) if err != nil { return nil, err } @@ -280,8 +280,8 @@ func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) return reqRes.Response.GetCheckTx(), cli.Error() } -func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestQuery(req)) +func (cli *socketClient) Query(ctx context.Context, req *types.QueryRequest) (*types.QueryResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToQueryRequest(req)) if err != nil { return nil, err } @@ -291,8 +291,8 @@ func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*t return reqRes.Response.GetQuery(), cli.Error() } -func (cli *socketClient) Commit(ctx context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit()) +func (cli *socketClient) Commit(ctx context.Context, _ *types.CommitRequest) (*types.CommitResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToCommitRequest()) if err != nil { return nil, err } @@ -302,8 +302,8 @@ func (cli *socketClient) Commit(ctx context.Context, _ *types.RequestCommit) (*t return reqRes.Response.GetCommit(), cli.Error() } -func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestInitChain(req)) +func (cli *socketClient) InitChain(ctx context.Context, req *types.InitChainRequest) (*types.InitChainResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToInitChainRequest(req)) if err != 
nil { return nil, err } @@ -313,8 +313,8 @@ func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitCh return reqRes.Response.GetInitChain(), cli.Error() } -func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestListSnapshots(req)) +func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.ListSnapshotsRequest) (*types.ListSnapshotsResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToListSnapshotsRequest(req)) if err != nil { return nil, err } @@ -324,8 +324,8 @@ func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestLi return reqRes.Response.GetListSnapshots(), cli.Error() } -func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestOfferSnapshot(req)) +func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.OfferSnapshotRequest) (*types.OfferSnapshotResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToOfferSnapshotRequest(req)) if err != nil { return nil, err } @@ -335,8 +335,8 @@ func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOf return reqRes.Response.GetOfferSnapshot(), cli.Error() } -func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestLoadSnapshotChunk(req)) +func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.LoadSnapshotChunkRequest) (*types.LoadSnapshotChunkResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToLoadSnapshotChunkRequest(req)) if err != nil { return nil, err } @@ -346,8 +346,8 @@ func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.Reque return reqRes.Response.GetLoadSnapshotChunk(), cli.Error() } -func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestApplySnapshotChunk(req)) +func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.ApplySnapshotChunkRequest) (*types.ApplySnapshotChunkResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToApplySnapshotChunkRequest(req)) if err != nil { return nil, err } @@ -357,8 +357,8 @@ func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.Requ return reqRes.Response.GetApplySnapshotChunk(), cli.Error() } -func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestPrepareProposal(req)) +func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.PrepareProposalRequest) (*types.PrepareProposalResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToPrepareProposalRequest(req)) if err != nil { return nil, err } @@ -368,8 +368,8 @@ func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.Request return reqRes.Response.GetPrepareProposal(), cli.Error() } -func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestProcessProposal(req)) +func (cli 
*socketClient) ProcessProposal(ctx context.Context, req *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToProcessProposalRequest(req)) if err != nil { return nil, err } @@ -379,8 +379,8 @@ func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.Request return reqRes.Response.GetProcessProposal(), cli.Error() } -func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestExtendVote(req)) +func (cli *socketClient) ExtendVote(ctx context.Context, req *types.ExtendVoteRequest) (*types.ExtendVoteResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToExtendVoteRequest(req)) if err != nil { return nil, err } @@ -390,8 +390,8 @@ func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExten return reqRes.Response.GetExtendVote(), cli.Error() } -func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestVerifyVoteExtension(req)) +func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.VerifyVoteExtensionRequest) (*types.VerifyVoteExtensionResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToVerifyVoteExtensionRequest(req)) if err != nil { return nil, err } @@ -401,8 +401,8 @@ func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.Req return reqRes.Response.GetVerifyVoteExtension(), cli.Error() } -func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { - reqRes, err := cli.queueRequest(ctx, types.ToRequestFinalizeBlock(req)) +func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) { + reqRes, err := cli.queueRequest(ctx, types.ToFinalizeBlockRequest(req)) if err != nil { return nil, err } @@ -457,7 +457,7 @@ LOOP: } } -//---------------------------------------- +// ---------------------------------------- func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { switch req.Value.(type) { diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index f4bade22934..8b53d6cd595 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -9,13 +9,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abcicli "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/server" "github.com/cometbft/cometbft/abci/types" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/service" ) @@ -39,7 +38,7 @@ func TestCalls(t *testing.T) { require.Fail(t, "No response arrived") case err, ok := <-resp: require.True(t, ok, "Must not close channel") - assert.NoError(t, err, "This should return success") + require.NoError(t, err) } } @@ -51,7 +50,9 @@ func TestHangingAsyncCalls(t *testing.T) { resp := make(chan error, 1) go func() { // Call CheckTx - reqres, err := c.CheckTxAsync(context.Background(), &types.RequestCheckTx{}) + reqres, err := c.CheckTxAsync(context.Background(), &types.CheckTxRequest{ + Type: types.CHECK_TX_TYPE_CHECK, + }) require.NoError(t, err) // wait 50 ms for all events to travel socket, but // 
no response yet from server @@ -70,7 +71,7 @@ func TestHangingAsyncCalls(t *testing.T) { require.Fail(t, "No response arrived") case err, ok := <-resp: require.True(t, ok, "Must not close channel") - assert.Error(t, err, "We should get EOF error") + require.Error(t, err, "We should get EOF error") } } @@ -104,14 +105,14 @@ func TestBulk(t *testing.T) { require.NoError(t, err) // Construct request - rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numTxs)} + rfb := &types.FinalizeBlockRequest{Txs: make([][]byte, numTxs)} for counter := 0; counter < numTxs; counter++ { rfb.Txs[counter] = []byte("test") } // Send bulk request res, err := client.FinalizeBlock(context.Background(), rfb) require.NoError(t, err) - require.Equal(t, numTxs, len(res.TxResults), "Number of txs doesn't match") + require.Len(t, res.TxResults, numTxs, "Number of txs doesn't match") for _, tx := range res.TxResults { require.Equal(t, uint32(0), tx.Code, "Tx failed") } @@ -157,12 +158,12 @@ type slowApp struct { types.BaseApplication } -func (slowApp) CheckTx(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (slowApp) CheckTx(context.Context, *types.CheckTxRequest) (*types.CheckTxResponse, error) { time.Sleep(time.Second) - return &types.ResponseCheckTx{}, nil + return &types.CheckTxResponse{}, nil } -// TestCallbackInvokedWhenSetLaet ensures that the callback is invoked when +// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when // set after the client completes the call into the app. Currently this // test relies on the callback being allowed to be invoked twice if set multiple // times, once when set early and once when set late. @@ -176,20 +177,24 @@ func TestCallbackInvokedWhenSetLate(t *testing.T) { wg: wg, } _, c := setupClientServer(t, app) - reqRes, err := c.CheckTxAsync(ctx, &types.RequestCheckTx{}) + reqRes, err := c.CheckTxAsync(ctx, &types.CheckTxRequest{ + Type: types.CHECK_TX_TYPE_CHECK, + }) require.NoError(t, err) done := make(chan struct{}) - cb := func(_ *types.Response) { + cb := func(_ *types.Response) error { close(done) + return nil } reqRes.SetCallback(cb) app.wg.Done() <-done var called bool - cb = func(_ *types.Response) { + cb = func(_ *types.Response) error { called = true + return nil } reqRes.SetCallback(cb) require.True(t, called) @@ -200,7 +205,7 @@ type blockedABCIApplication struct { types.BaseApplication } -func (b blockedABCIApplication) CheckTxAsync(ctx context.Context, r *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (b blockedABCIApplication) CheckTxAsync(ctx context.Context, r *types.CheckTxRequest) (*types.CheckTxResponse, error) { b.wg.Wait() return b.BaseApplication.CheckTx(ctx, r) } @@ -217,12 +222,15 @@ func TestCallbackInvokedWhenSetEarly(t *testing.T) { wg: wg, } _, c := setupClientServer(t, app) - reqRes, err := c.CheckTxAsync(ctx, &types.RequestCheckTx{}) + reqRes, err := c.CheckTxAsync(ctx, &types.CheckTxRequest{ + Type: types.CHECK_TX_TYPE_CHECK, + }) require.NoError(t, err) done := make(chan struct{}) - cb := func(_ *types.Response) { + cb := func(_ *types.Response) error { close(done) + return nil } reqRes.SetCallback(cb) app.wg.Done() diff --git a/abci/client/unsync_local_client.go b/abci/client/unsync_local_client.go index 0adccb1db13..cd8bab11050 100644 --- a/abci/client/unsync_local_client.go +++ b/abci/client/unsync_local_client.go @@ -4,7 +4,7 @@ import ( "context" "sync" - types "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/abci/types" 
"github.com/cometbft/cometbft/libs/service" ) @@ -43,94 +43,96 @@ func (app *unsyncLocalClient) SetResponseCallback(cb Callback) { app.mtx.Unlock() } -func (app *unsyncLocalClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) { +func (app *unsyncLocalClient) CheckTxAsync(ctx context.Context, req *types.CheckTxRequest) (*ReqRes, error) { res, err := app.Application.CheckTx(ctx, req) if err != nil { return nil, err } return app.callback( - types.ToRequestCheckTx(req), - types.ToResponseCheckTx(res), + types.ToCheckTxRequest(req), + types.ToCheckTxResponse(res), ), nil } func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes { - app.Callback(req, res) + if app.Callback != nil { + app.Callback(req, res) + } rr := newLocalReqRes(req, res) rr.callbackInvoked = true return rr } -//------------------------------------------------------- +// ------------------------------------------------------- -func (app *unsyncLocalClient) Error() error { +func (*unsyncLocalClient) Error() error { return nil } -func (app *unsyncLocalClient) Flush(context.Context) error { +func (*unsyncLocalClient) Flush(context.Context) error { return nil } -func (app *unsyncLocalClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) { - return &types.ResponseEcho{Message: msg}, nil +func (*unsyncLocalClient) Echo(_ context.Context, msg string) (*types.EchoResponse, error) { + return &types.EchoResponse{Message: msg}, nil } -func (app *unsyncLocalClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { +func (app *unsyncLocalClient) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) { return app.Application.Info(ctx, req) } -func (app *unsyncLocalClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (app *unsyncLocalClient) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { return app.Application.CheckTx(ctx, req) } -func (app *unsyncLocalClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { +func (app *unsyncLocalClient) Query(ctx context.Context, req *types.QueryRequest) (*types.QueryResponse, error) { return app.Application.Query(ctx, req) } -func (app *unsyncLocalClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) { +func (app *unsyncLocalClient) Commit(ctx context.Context, req *types.CommitRequest) (*types.CommitResponse, error) { return app.Application.Commit(ctx, req) } -func (app *unsyncLocalClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { +func (app *unsyncLocalClient) InitChain(ctx context.Context, req *types.InitChainRequest) (*types.InitChainResponse, error) { return app.Application.InitChain(ctx, req) } -func (app *unsyncLocalClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +func (app *unsyncLocalClient) ListSnapshots(ctx context.Context, req *types.ListSnapshotsRequest) (*types.ListSnapshotsResponse, error) { return app.Application.ListSnapshots(ctx, req) } -func (app *unsyncLocalClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +func (app *unsyncLocalClient) OfferSnapshot(ctx context.Context, req *types.OfferSnapshotRequest) (*types.OfferSnapshotResponse, error) { return app.Application.OfferSnapshot(ctx, req) } func (app 
*unsyncLocalClient) LoadSnapshotChunk(ctx context.Context, - req *types.RequestLoadSnapshotChunk, -) (*types.ResponseLoadSnapshotChunk, error) { + req *types.LoadSnapshotChunkRequest, +) (*types.LoadSnapshotChunkResponse, error) { return app.Application.LoadSnapshotChunk(ctx, req) } func (app *unsyncLocalClient) ApplySnapshotChunk(ctx context.Context, - req *types.RequestApplySnapshotChunk, -) (*types.ResponseApplySnapshotChunk, error) { + req *types.ApplySnapshotChunkRequest, +) (*types.ApplySnapshotChunkResponse, error) { return app.Application.ApplySnapshotChunk(ctx, req) } -func (app *unsyncLocalClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { +func (app *unsyncLocalClient) PrepareProposal(ctx context.Context, req *types.PrepareProposalRequest) (*types.PrepareProposalResponse, error) { return app.Application.PrepareProposal(ctx, req) } -func (app *unsyncLocalClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { +func (app *unsyncLocalClient) ProcessProposal(ctx context.Context, req *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error) { return app.Application.ProcessProposal(ctx, req) } -func (app *unsyncLocalClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { +func (app *unsyncLocalClient) ExtendVote(ctx context.Context, req *types.ExtendVoteRequest) (*types.ExtendVoteResponse, error) { return app.Application.ExtendVote(ctx, req) } -func (app *unsyncLocalClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { +func (app *unsyncLocalClient) VerifyVoteExtension(ctx context.Context, req *types.VerifyVoteExtensionRequest) (*types.VerifyVoteExtensionResponse, error) { return app.Application.VerifyVoteExtension(ctx, req) } -func (app *unsyncLocalClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { +func (app *unsyncLocalClient) FinalizeBlock(ctx context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) { return app.Application.FinalizeBlock(ctx, req) } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 17d9230105d..daca0fa86e8 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -11,38 +11,37 @@ import ( "github.com/spf13/cobra" - "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" - abcicli "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/abci/server" servertest "github.com/cometbft/cometbft/abci/tests/server" "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/abci/version" - "github.com/cometbft/cometbft/proto/tendermint/crypto" + crypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + cmtos "github.com/cometbft/cometbft/internal/os" + "github.com/cometbft/cometbft/libs/log" ) -// client is a global variable so it can be reused by the console +// client is a global variable so it can be reused by the console. var ( client abcicli.Client logger log.Logger ) -// flags +// flags. var ( - // global + // global. flagAddress string flagAbci string flagVerbose bool // for the println output flagLogLevel string // for the logger - // query + // query. flagPath string flagHeight int flagProve bool - // kvstore + // kvstore. 
flagPersist string ) @@ -50,7 +49,7 @@ var RootCmd = &cobra.Command{ Use: "abci-cli", Short: "the ABCI CLI tool wraps an ABCI client", Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { switch cmd.Use { case "kvstore", "version", "help [command]": return nil @@ -61,7 +60,7 @@ var RootCmd = &cobra.Command{ if err != nil { return err } - logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel) + logger = log.NewFilter(log.NewLogger(os.Stdout), allowLevel) } if client == nil { var err error @@ -233,7 +232,7 @@ var versionCmd = &cobra.Command{ Short: "print ABCI console version", Long: "print ABCI console version", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { fmt.Println(version.Version) return nil }, @@ -279,7 +278,7 @@ var testCmd = &cobra.Command{ RunE: cmdTest, } -// Generates new Args array based off of previous call args to maintain flag persistence +// Generates new Args array based off of previous call args to maintain flag persistence. func persistentArgs(line []byte) []string { // generate the arguments to run from original os.Args // to maintain flag arguments @@ -292,7 +291,7 @@ func persistentArgs(line []byte) []string { return args } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- func compose(fs []func() error) error { if len(fs) == 0 { @@ -355,7 +354,7 @@ func cmdTest(cmd *cobra.Command, _ []string) error { func() error { return servertest.ProcessProposal(ctx, client, [][]byte{ {0x01}, - }, types.ResponseProcessProposal_ACCEPT) + }, types.PROCESS_PROPOSAL_STATUS_ACCEPT) }, }) } @@ -364,12 +363,11 @@ func cmdBatch(cmd *cobra.Command, _ []string) error { bufReader := bufio.NewReader(os.Stdin) LOOP: for { - line, more, err := bufReader.ReadLine() switch { case more: return errors.New("input line is too long") - case err == io.EOF: + case errors.Is(err, io.EOF): break LOOP case len(line) == 0: continue @@ -492,7 +490,7 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error { return nil } -// Have the application echo a message +// Have the application echo a message. func cmdEcho(cmd *cobra.Command, args []string) error { msg := "" if len(args) > 0 { @@ -510,13 +508,13 @@ func cmdEcho(cmd *cobra.Command, args []string) error { return nil } -// Get some info from the application +// Get some info from the application. func cmdInfo(cmd *cobra.Command, args []string) error { var version string if len(args) == 1 { version = args[0] } - res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version}) + res, err := client.Info(cmd.Context(), &types.InfoRequest{Version: version}) if err != nil { return err } @@ -528,7 +526,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { const codeBad uint32 = 10 -// Append new txs to application +// Append new txs to application. 
func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ @@ -545,7 +543,7 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { } txs[i] = txBytes } - res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs}) + res, err := client.FinalizeBlock(cmd.Context(), &types.FinalizeBlockRequest{Txs: txs}) if err != nil { return err } @@ -565,7 +563,7 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { return nil } -// Validate a tx +// Validate a tx. func cmdCheckTx(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ @@ -578,7 +576,10 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTx(cmd.Context(), &types.CheckTxRequest{ + Tx: txBytes, + Type: types.CHECK_TX_TYPE_CHECK, + }) if err != nil { return err } @@ -591,9 +592,9 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { return nil } -// Get application Merkle root hash +// Get application Merkle root hash. func cmdCommit(cmd *cobra.Command, args []string) error { - _, err := client.Commit(cmd.Context(), &types.RequestCommit{}) + _, err := client.Commit(cmd.Context(), &types.CommitRequest{}) if err != nil { return err } @@ -601,7 +602,7 @@ func cmdCommit(cmd *cobra.Command, args []string) error { return nil } -// Query application state +// Query application state. func cmdQuery(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ @@ -616,7 +617,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{ + resQuery, err := client.Query(cmd.Context(), &types.QueryRequest{ Data: queryBytes, Path: flagPath, Height: int64(flagHeight), @@ -650,7 +651,7 @@ func cmdPrepareProposal(cmd *cobra.Command, args []string) error { txsBytesArray[i] = txBytes } - res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{ + res, err := client.PrepareProposal(cmd.Context(), &types.PrepareProposalRequest{ Txs: txsBytesArray, // kvstore has to have this parameter in order not to reject a tx as the default value is 0 MaxTxBytes: 65536, @@ -681,7 +682,7 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error { txsBytesArray[i] = txBytes } - res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{ + res, err := client.ProcessProposal(cmd.Context(), &types.ProcessProposalRequest{ Txs: txsBytesArray, }) if err != nil { @@ -695,7 +696,7 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error { } func cmdKVStore(*cobra.Command, []string) error { - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger := log.NewLogger(os.Stdout) // Create the application - in memory or persisted to disk var app types.Application @@ -730,7 +731,7 @@ func cmdKVStore(*cobra.Command, []string) error { select {} } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- func printResponse(cmd *cobra.Command, args []string, rsps ...response) { if flagVerbose { @@ -757,7 +758,7 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) { fmt.Printf("-> log: %s\n", rsp.Log) } if cmd.Use == "process_proposal" { - fmt.Printf("-> status: %s\n", 
types.ResponseProcessProposal_ProposalStatus_name[rsp.Status]) + fmt.Printf("-> status: %s\n", types.ProcessProposalStatus(rsp.Status).String()) } if rsp.Query != nil { @@ -777,7 +778,7 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) { } } -// NOTE: s is interpreted as a string unless prefixed with 0x +// NOTE: s is interpreted as a string unless prefixed with 0x. func stringOrHexToBytes(s string) ([]byte, error) { if len(s) > 2 && strings.ToLower(s[:2]) == "0x" { b, err := hex.DecodeString(s[2:]) diff --git a/abci/example/kvstore/README.md b/abci/example/kvstore/README.md index e9e38b53c1e..55c13572366 100644 --- a/abci/example/kvstore/README.md +++ b/abci/example/kvstore/README.md @@ -8,9 +8,10 @@ The app has no replay protection (other than what the mempool provides). Validator set changes are effected using the following transaction format: ```md -"val:pubkey1!power1,pubkey2!power2,pubkey3!power3" +"val:pubkeytype1!pubkey1!power1,pubkeytype2!pubkey2!power2,pubkeytype3!pubkey3!power3" ``` -where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). +where `pubkeyN` is a base64-encoded 32-byte key, `pubkeytypeN` is a string representing the key type, +and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). To remove a validator from the validator set, set power to `0`. There is no sybil protection against new validators joining. diff --git a/abci/example/kvstore/code.go b/abci/example/kvstore/code.go index f58cfb83c77..0c3d6bafad6 100644 --- a/abci/example/kvstore/code.go +++ b/abci/example/kvstore/code.go @@ -1,10 +1,11 @@ package kvstore -// Return codes for the examples +// Return codes for the examples. const ( CodeTypeOK uint32 = 0 CodeTypeEncodingError uint32 = 1 CodeTypeInvalidTxFormat uint32 = 2 CodeTypeUnauthorized uint32 = 3 - CodeTypeExecuted uint32 = 5 + CodeTypeExecuted uint32 = 4 + CodeTypeExpired uint32 = 5 ) diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 6dc818aeda9..23c8cc917d5 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -7,24 +7,22 @@ import ( "strings" "github.com/cometbft/cometbft/abci/types" - cryptoencoding "github.com/cometbft/cometbft/crypto/encoding" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/proto/tendermint/crypto" + "github.com/cometbft/cometbft/crypto/ed25519" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) // RandVal creates one random validator, with a key derived -// from the input value +// from the input value. func RandVal() types.ValidatorUpdate { - pubkey := cmtrand.Bytes(32) + pubkey := ed25519.GenPrivKey().PubKey() power := cmtrand.Uint16() + 1 - v := types.UpdateValidator(pubkey, int64(power), "") - return v + return types.ValidatorUpdate{Power: int64(power), PubKeyType: pubkey.Type(), PubKeyBytes: pubkey.Bytes()} } // RandVals returns a list of cnt validators for initializing // the application. Note that the keys are deterministically // derived from the index in the array, while the power is -// random (Change this if not desired) +// random (change this if not desired). 
func RandVals(cnt int) []types.ValidatorUpdate { res := make([]types.ValidatorUpdate, cnt) for i := 0; i < cnt; i++ { @@ -35,19 +33,20 @@ func RandVals(cnt int) []types.ValidatorUpdate { // InitKVStore initializes the kvstore app with some data, // which allows tests to pass and is fine as long as you -// don't make any tx that modify the validator state +// don't make any tx that modify the validator state. func InitKVStore(ctx context.Context, app *Application) error { - _, err := app.InitChain(ctx, &types.RequestInitChain{ + _, err := app.InitChain(ctx, &types.InitChainRequest{ Validators: RandVals(1), }) return err } -// Create a new transaction +// NewTx creates a new transaction. func NewTx(key, value string) []byte { return []byte(strings.Join([]string{key, value}, "=")) } +// NewRandomTx creates a new random transaction. func NewRandomTx(size int) []byte { if size < 4 { panic("random tx size must be greater than 3") @@ -55,6 +54,7 @@ func NewRandomTx(size int) []byte { return NewTx(cmtrand.Str(2), cmtrand.Str(size-3)) } +// NewRandomTxs creates n transactions. func NewRandomTxs(n int) [][]byte { txs := make([][]byte, n) for i := 0; i < n; i++ { @@ -63,17 +63,14 @@ func NewRandomTxs(n int) [][]byte { return txs } +// NewTxFromID creates a new transaction using the given ID. func NewTxFromID(i int) []byte { return []byte(fmt.Sprintf("%d=%d", i, i)) } -// Create a transaction to add/remove/update a validator +// MakeValSetChangeTx creates a transaction to add/remove/update a validator. // To remove, set power to 0. -func MakeValSetChangeTx(pubkey crypto.PublicKey, power int64) []byte { - pk, err := cryptoencoding.PubKeyFromProto(pubkey) - if err != nil { - panic(err) - } - pubStr := base64.StdEncoding.EncodeToString(pk.Bytes()) - return []byte(fmt.Sprintf("%s%s!%d", ValidatorPrefix, pubStr, power)) +func MakeValSetChangeTx(v types.ValidatorUpdate) []byte { + pubStr := base64.StdEncoding.EncodeToString(v.PubKeyBytes) + return []byte(fmt.Sprintf("%s%s!%s!%d", ValidatorPrefix, v.PubKeyType, pubStr, v.Power)) } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 1909c2906fd..bfb07a2a0e6 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -6,16 +6,17 @@ import ( "encoding/base64" "encoding/binary" "encoding/json" + "errors" "fmt" "strconv" "strings" + "time" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/abci/types" - cryptoencoding "github.com/cometbft/cometbft/crypto/encoding" + "github.com/cometbft/cometbft/crypto" + cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/libs/log" - cryptoproto "github.com/cometbft/cometbft/proto/tendermint/crypto" "github.com/cometbft/cometbft/version" ) @@ -27,95 +28,146 @@ var ( const ( ValidatorPrefix = "val=" AppVersion uint64 = 1 + defaultLane string = "default" ) var _ types.Application = (*Application)(nil) // Application is the kvstore state machine. It complies with the abci.Application interface. // It takes transactions in the form of key=value and saves them in a database. This is -// a somewhat trivial example as there is no real state execution +// a somewhat trivial example as there is no real state execution. 
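With the new signature, `MakeValSetChangeTx` takes a full `ValidatorUpdate` and encodes the key type alongside the key itself. A minimal sketch of building such a transaction with the helpers above (the power value is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/abci/example/kvstore"
)

func main() {
	// RandVal returns a random ed25519 validator update; MakeValSetChangeTx
	// encodes it in the new "val=pubkeytype!pubkey!power" wire format.
	v := kvstore.RandVal()
	v.Power = 10 // arbitrary power for this example
	tx := kvstore.MakeValSetChangeTx(v)
	fmt.Println(string(tx)) // e.g. val=ed25519!<base64 key>!10
}
```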
type Application struct {
	types.BaseApplication

	state        State
-	RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
+	RetainBlocks int64 // blocks to retain after commit (via CommitResponse.RetainHeight)
	stagedTxs    [][]byte
	logger       log.Logger

	// validator set
	valUpdates         []types.ValidatorUpdate
-	valAddrToPubKeyMap map[string]cryptoproto.PublicKey
+	valAddrToPubKeyMap map[string]crypto.PubKey

	// If true, the app will generate block events in BeginBlock. Used to test the event indexer
	// Should be false by default to avoid generating too much data.
	genBlockEvents bool
+
+	// Map from lane IDs to their priorities.
+	lanePriorities map[string]uint32
+
+	nextBlockDelay time.Duration
}

-// NewApplication creates an instance of the kvstore from the provided database
-func NewApplication(db dbm.DB) *Application {
+// NewApplication creates an instance of the kvstore from the provided database,
+// with the given lanes and priorities.
+func NewApplication(db dbm.DB, lanePriorities map[string]uint32) *Application {
	return &Application{
		logger:             log.NewNopLogger(),
		state:              loadState(db),
-		valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
+		valAddrToPubKeyMap: make(map[string]crypto.PubKey),
+		lanePriorities:     lanePriorities,
+		nextBlockDelay:     0, // zero by default because kvstore is mostly used for testing
	}
}

-// NewPersistentApplication creates a new application using the goleveldb database engine
-func NewPersistentApplication(dbDir string) *Application {
+// newDB creates a DB engine for persisting the application state.
+func newDB(dbDir string) *dbm.PebbleDB {
	name := "kvstore"
-	db, err := dbm.NewGoLevelDB(name, dbDir)
+	db, err := dbm.NewPebbleDB(name, dbDir)
	if err != nil {
		panic(fmt.Errorf("failed to create persistent app at %s: %w", dbDir, err))
	}
-	return NewApplication(db)
+	return db
+}
+
+// NewPersistentApplication creates a new application using the pebbledb
+// database engine and default lanes.
+func NewPersistentApplication(dbDir string) *Application {
+	return NewApplication(newDB(dbDir), DefaultLanes())
}

-// NewInMemoryApplication creates a new application from an in memory database.
-// Nothing will be persisted.
+// NewPersistentApplicationWithoutLanes creates a new application using the
+// pebbledb database engine and without lanes.
+func NewPersistentApplicationWithoutLanes(dbDir string) *Application {
+	return NewApplication(newDB(dbDir), nil)
+}
+
+// NewInMemoryApplication creates a new application from an in-memory database
+// that uses default lanes. Nothing will be persisted.
func NewInMemoryApplication() *Application {
-	return NewApplication(dbm.NewMemDB())
+	return NewApplication(dbm.NewMemDB(), DefaultLanes())
+}
+
+// NewInMemoryApplicationWithoutLanes creates a new application from an in-memory
+// database and without lanes. Nothing will be persisted.
+func NewInMemoryApplicationWithoutLanes() *Application {
+	return NewApplication(dbm.NewMemDB(), nil)
+}
+
+// DefaultLanes returns a map from lane names to their priorities. Priority 0 is
+// reserved. The higher the value, the higher the priority.
+func DefaultLanes() map[string]uint32 {
+	return map[string]uint32{
+		"val":       9, // for validator updates
+		"foo":       7,
+		defaultLane: 3,
+		"bar":       1,
+	}
}

func (app *Application) SetGenBlockEvents() {
	app.genBlockEvents = true
}

-// Info returns information about the state of the application. This is generally used everytime a Tendermint instance
+// SetNextBlockDelay sets the delay for the next finalized block.
+// Default is 0 here because kvstore is mostly used for testing. In production, the
+// default is 1s, mimicking the default for the deprecated `timeout_commit` parameter.
+func (app *Application) SetNextBlockDelay(delay time.Duration) {
+	app.nextBlockDelay = delay
+}
+
+// Info returns information about the state of the application. This is generally used every time a Tendermint instance
// begins and lets the application know what Tendermint versions it's interacting with. Based on this information,
// Tendermint will ensure it is in sync with the application by potentially replaying the blocks it has. If the
-// Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus related data
-func (app *Application) Info(context.Context, *types.RequestInfo) (*types.ResponseInfo, error) {
+// Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus-related data.
+func (app *Application) Info(context.Context, *types.InfoRequest) (*types.InfoResponse, error) {
	// Tendermint expects the application to persist validators; on start-up we need to reload them into memory if they exist
	if len(app.valAddrToPubKeyMap) == 0 && app.state.Height > 0 {
		validators := app.getValidators()
		for _, v := range validators {
-			pubkey, err := cryptoencoding.PubKeyFromProto(v.PubKey)
+			pubkey, err := cryptoenc.PubKeyFromTypeAndBytes(v.PubKeyType, v.PubKeyBytes)
			if err != nil {
-				panic(fmt.Errorf("can't decode public key: %w", err))
+				panic(err)
			}
-			app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
+			app.valAddrToPubKeyMap[string(pubkey.Address())] = pubkey
		}
	}
-	return &types.ResponseInfo{
+	var defLane string
+	if len(app.lanePriorities) != 0 {
+		defLane = defaultLane
+	}
+	return &types.InfoResponse{
		Data:             fmt.Sprintf("{\"size\":%v}", app.state.Size),
		Version:          version.ABCIVersion,
		AppVersion:       AppVersion,
		LastBlockHeight:  app.state.Height,
		LastBlockAppHash: app.state.Hash(),
+		LanePriorities:   app.lanePriorities,
+		DefaultLane:      defLane,
	}, nil
}

// InitChain takes the genesis validators and stores them in the kvstore. It returns the application hash in the
// case that the application starts prepopulated with values. This method is called whenever a new instance of the application
// starts (i.e. app height = 0).
-func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
+func (app *Application) InitChain(_ context.Context, req *types.InitChainRequest) (*types.InitChainResponse, error) {
	for _, v := range req.Validators {
		app.updateValidator(v)
	}
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, app.state.Size)
-	return &types.ResponseInitChain{
+	return &types.InitChainResponse{
		AppHash: appHash,
	}, nil
}
@@ -126,23 +178,72 @@ func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain
// For the KVStore we check that each transaction has the valid tx format:
// - Contains one and only one `=`
// - `=` is not the first or last byte.
-// - if key is `val` that the validator update transaction is also valid
-func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+// - if the key is `val`, the validator update transaction must also be valid.
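Taken together, the constructors above let callers opt in or out of mempool lanes at construction time; a minimal usage sketch:

```go
package main

import "github.com/cometbft/cometbft/abci/example/kvstore"

func main() {
	// With lanes: Info advertises DefaultLanes() (val=9, foo=7, default=3, bar=1;
	// a higher value means a higher priority) plus "default" as the default lane,
	// and CheckTx responses carry a LaneId.
	withLanes := kvstore.NewInMemoryApplication()

	// Without lanes: lanePriorities is nil, so Info advertises no lanes and
	// CheckTx leaves the lane unset.
	withoutLanes := kvstore.NewInMemoryApplicationWithoutLanes()

	_, _ = withLanes, withoutLanes
}
```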
+func (app *Application) CheckTx(_ context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { // If it is a validator update transaction, check that it is correctly formatted if isValidatorTx(req.Tx) { - if _, _, err := parseValidatorTx(req.Tx); err != nil { - return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil + if _, _, _, err := parseValidatorTx(req.Tx); err != nil { + return &types.CheckTxResponse{Code: CodeTypeInvalidTxFormat}, nil //nolint:nilerr // error is not nil but it returns nil } } else if !isValidTx(req.Tx) { - return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil + return &types.CheckTxResponse{Code: CodeTypeInvalidTxFormat}, nil + } + + if len(app.lanePriorities) == 0 { + return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1}, nil } + laneID := assignLane(req.Tx) + return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1, LaneId: laneID}, nil +} + +// assignLane deterministically computes a lane for the given tx. +func assignLane(tx []byte) string { + lane := defaultLane + if isValidatorTx(tx) { + return "val" // priority 9 + } + key, _, err := parseTx(tx) + if err != nil { + return lane + } + + // If the transaction key is an integer (for example, a transaction of the + // form 2=2), we will assign a lane. Any other type of transaction will go + // to the default lane. + keyInt, err := strconv.Atoi(key) + if err != nil { + return lane + } + + // Since a key is usually a numerical value, we assign lanes by computing + // the key modulo some pre-selected divisors. As a result, some lanes will + // be assigned less frequently than others, and we will be able to compute + // in advance the lane assigned to a transaction (useful for testing). + switch { + case keyInt%11 == 0: + return "foo" // priority 7 + case keyInt%3 == 0: + return "bar" // priority 1 + default: + return lane // priority 3 + } +} - return &types.ResponseCheckTx{Code: CodeTypeOK, GasWanted: 1}, nil +// parseTx parses a tx in 'key=value' format into a key and value. +func parseTx(tx []byte) (key, value string, err error) { + parts := bytes.Split(tx, []byte("=")) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid tx format: %q", string(tx)) + } + if len(parts[0]) == 0 { + return "", "", errors.New("key cannot be empty") + } + return string(parts[0]), string(parts[1]), nil } // Tx must have a format like key:value or key=value. That is: // - it must have one and only one ":" or "=" -// - It must not begin or end with these special characters +// - It must not begin or end with these special characters. func isValidTx(tx []byte) bool { if bytes.Count(tx, []byte(":")) == 1 && bytes.Count(tx, []byte("=")) == 0 { if !bytes.HasPrefix(tx, []byte(":")) && !bytes.HasSuffix(tx, []byte(":")) { @@ -156,20 +257,24 @@ func isValidTx(tx []byte) bool { return false } -// PrepareProposal is called when the node is a proposer. Tendermint stages a set of transactions to the application. As the +// PrepareProposal is called when the node is a proposer. CometBFT stages a set of transactions to the application. As the // KVStore has two accepted formats, `:` and `=`, we modify all instances of `:` with `=` to make it consistent. Note: this is // quite a trivial example of transaction modification. -// NOTE: we assume that Tendermint will never provide more transactions than can fit in a block. 
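To make the modulo rules in `assignLane` concrete, a few worked assignments (the `%11` case wins when both divisors match because it is checked first):

```go
// key "22": 22%11 == 0                  -> lane "foo" (priority 7)
// key  "9": 9%11 != 0, 9%3 == 0         -> lane "bar" (priority 1)
// key  "7": neither divisor matches     -> lane "default" (priority 3)
// key "33": divisible by both 11 and 3  -> lane "foo", since %11 is checked first
// "val=..." txs never reach the modulo rules: they go to lane "val" (priority 9)
// non-numeric keys (e.g. "abc=123") fall through to "default" as well
```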
-func (app *Application) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - return &types.ResponsePrepareProposal{Txs: app.formatTxs(ctx, req.Txs)}, nil +// NOTE: we assume that CometBFT will never provide more transactions than can fit in a block. +func (app *Application) PrepareProposal(ctx context.Context, req *types.PrepareProposalRequest) (*types.PrepareProposalResponse, error) { + return &types.PrepareProposalResponse{Txs: app.formatTxs(ctx, req.Txs)}, nil } // formatTxs validates and excludes invalid transactions -// also substitutes all the transactions with x:y to x=y +// also substitutes all the transactions with x:y to x=y. func (app *Application) formatTxs(ctx context.Context, blockData [][]byte) [][]byte { txs := make([][]byte, 0, len(blockData)) for _, tx := range blockData { - if resp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx}); err == nil && resp.Code == CodeTypeOK { + resp, err := app.CheckTx(ctx, &types.CheckTxRequest{Tx: tx, Type: types.CHECK_TX_TYPE_CHECK}) + if err != nil { + panic(fmt.Sprintln("formatTxs: CheckTx call had an unrecoverable error", err)) + } + if resp.Code == CodeTypeOK { txs = append(txs, bytes.Replace(tx, []byte(":"), []byte("="), 1)) } } @@ -178,33 +283,39 @@ func (app *Application) formatTxs(ctx context.Context, blockData [][]byte) [][]b // ProcessProposal is called whenever a node receives a complete proposal. It allows the application to validate the proposal. // Only validators who can vote will have this method called. For the KVstore we reuse CheckTx. -func (app *Application) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { +func (app *Application) ProcessProposal(ctx context.Context, req *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error) { for _, tx := range req.Txs { // As CheckTx is a full validity check we can simply reuse this - if resp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx}); err != nil || resp.Code != CodeTypeOK { - return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil + resp, err := app.CheckTx(ctx, &types.CheckTxRequest{Tx: tx, Type: types.CHECK_TX_TYPE_CHECK}) + if err != nil { + panic(fmt.Sprintln("ProcessProposal: CheckTx call had an unrecoverable error", err)) + } + if resp.Code != CodeTypeOK { + return &types.ProcessProposalResponse{Status: types.PROCESS_PROPOSAL_STATUS_REJECT}, nil } } - return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil + return &types.ProcessProposalResponse{Status: types.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil } // FinalizeBlock executes the block against the application state. It punishes validators who equivocated and // updates validators according to transactions in a block. The rest of the transactions are regular key value // updates and are cached in memory and will be persisted once Commit is called. // ConsensusParams are never changed. -func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { +func (app *Application) FinalizeBlock(_ context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) { // reset valset changes app.valUpdates = make([]types.ValidatorUpdate, 0) app.stagedTxs = make([][]byte, 0) // Punish validators who committed equivocation. 
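A worked example of the filtering and `:`-to-`=` rewrite performed by `formatTxs`, consistent with the `ex1.abci.out` expectations later in this diff:

```go
// proposer input:   ["abc:123", "abc==456", "key=value"]
// CheckTx verdicts: "abc:123" OK (single ':'), "abc==456" rejected (two '='),
//                   "key=value" OK
// formatTxs output: ["abc=123", "key=value"] // ':' rewritten to '='
```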
for _, ev := range req.Misbehavior { - if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE { + if ev.Type == types.MISBEHAVIOR_TYPE_DUPLICATE_VOTE { addr := string(ev.Validator.Address) + //nolint:revive // this is a false positive from early-return if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok { app.valUpdates = append(app.valUpdates, types.ValidatorUpdate{ - PubKey: pubKey, - Power: ev.Validator.Power - 1, + Power: ev.Validator.Power - 1, + PubKeyType: pubKey.Type(), + PubKeyBytes: pubKey.Bytes(), }) app.logger.Info("Decreased val power by 1 because of the equivocation", "val", addr) @@ -217,11 +328,11 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal respTxs := make([]*types.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { if isValidatorTx(tx) { - pubKey, power, err := parseValidatorTx(tx) + keyType, pubKey, power, err := parseValidatorTx(tx) if err != nil { panic(err) } - app.valUpdates = append(app.valUpdates, types.UpdateValidator(pubKey, power, "")) + app.valUpdates = append(app.valUpdates, types.ValidatorUpdate{Power: power, PubKeyType: keyType, PubKeyBytes: pubKey}) } else { app.stagedTxs = append(app.stagedTxs, tx) } @@ -262,7 +373,13 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal app.state.Height = req.Height - response := &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.valUpdates, AppHash: app.state.Hash()} + response := &types.FinalizeBlockResponse{ + TxResults: respTxs, + ValidatorUpdates: app.valUpdates, + AppHash: app.state.Hash(), + NextBlockDelay: app.nextBlockDelay, + } + if !app.genBlockEvents { return response, nil } @@ -323,8 +440,8 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal // Commit is called after FinalizeBlock and after Tendermint state which includes the updates to // AppHash, ConsensusParams and ValidatorSet has occurred. -// The KVStore persists the validator updates and the new key values -func (app *Application) Commit(context.Context, *types.RequestCommit) (*types.ResponseCommit, error) { +// The KVStore persists the validator updates and the new key values. +func (app *Application) Commit(context.Context, *types.CommitRequest) (*types.CommitResponse, error) { // apply the validator updates to state (note this is really the validator set at h + 2) for _, valUpdate := range app.valUpdates { app.updateValidator(valUpdate) @@ -346,16 +463,16 @@ func (app *Application) Commit(context.Context, *types.RequestCommit) (*types.Re // persist the state (i.e. size and height) saveState(app.state) - resp := &types.ResponseCommit{} + resp := &types.CommitResponse{} if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 } return resp, nil } -// Returns an associated value or nil if missing. -func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) { - resQuery := &types.ResponseQuery{} +// Query returns an associated value or nil if missing. 
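The retain-height rule in `Commit` is easy to get off by one, so here is a small self-contained sketch (the helper name is ours, not part of the app):

```go
package main

import "fmt"

// retainHeight mirrors the rule in Commit: once at least retainBlocks blocks
// exist, ask CometBFT to keep only the most recent retainBlocks of them.
func retainHeight(height, retainBlocks int64) int64 {
	if retainBlocks > 0 && height >= retainBlocks {
		return height - retainBlocks + 1
	}
	return 0 // zero means "do not prune"
}

func main() {
	fmt.Println(retainHeight(100, 10)) // 91: blocks 91..100 are retained
}
```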
+func (app *Application) Query(_ context.Context, reqQuery *types.QueryRequest) (*types.QueryResponse, error) { + resQuery := &types.QueryResponse{} if reqQuery.Path == "/val" { key := []byte(ValidatorPrefix + string(reqQuery.Data)) @@ -364,7 +481,7 @@ func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) ( panic(err) } - return &types.ResponseQuery{ + return &types.QueryResponse{ Key: reqQuery.Data, Value: value, }, nil @@ -413,40 +530,40 @@ func isValidatorTx(tx []byte) bool { return strings.HasPrefix(string(tx), ValidatorPrefix) } -func parseValidatorTx(tx []byte) ([]byte, int64, error) { +func parseValidatorTx(tx []byte) (string, []byte, int64, error) { tx = tx[len(ValidatorPrefix):] // get the pubkey and power - pubKeyAndPower := strings.Split(string(tx), "!") - if len(pubKeyAndPower) != 2 { - return nil, 0, fmt.Errorf("expected 'pubkey!power'. Got %v", pubKeyAndPower) + typePubKeyAndPower := strings.Split(string(tx), "!") + if len(typePubKeyAndPower) != 3 { + return "", nil, 0, fmt.Errorf("expected 'pubkeytype!pubkey!power'. Got %v", typePubKeyAndPower) } - pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1] + keyType, pubkeyS, powerS := typePubKeyAndPower[0], typePubKeyAndPower[1], typePubKeyAndPower[2] // decode the pubkey pubkey, err := base64.StdEncoding.DecodeString(pubkeyS) if err != nil { - return nil, 0, fmt.Errorf("pubkey (%s) is invalid base64", pubkeyS) + return "", nil, 0, fmt.Errorf("pubkey (%s) is invalid base64", pubkeyS) } // decode the power power, err := strconv.ParseInt(powerS, 10, 64) if err != nil { - return nil, 0, fmt.Errorf("power (%s) is not an int", powerS) + return "", nil, 0, fmt.Errorf("power (%s) is not an int", powerS) } if power < 0 { - return nil, 0, fmt.Errorf("power can not be less than 0, got %d", power) + return "", nil, 0, fmt.Errorf("power can not be less than 0, got %d", power) } - return pubkey, power, nil + return keyType, pubkey, power, nil } -// add, update, or remove a validator +// add, update, or remove a validator. func (app *Application) updateValidator(v types.ValidatorUpdate) { - pubkey, err := cryptoencoding.PubKeyFromProto(v.PubKey) + pubkey, err := cryptoenc.PubKeyFromTypeAndBytes(v.PubKeyType, v.PubKeyBytes) if err != nil { - panic(fmt.Errorf("can't decode public key: %w", err)) + panic(err) } key := []byte(ValidatorPrefix + string(pubkey.Bytes())) @@ -473,7 +590,7 @@ func (app *Application) updateValidator(v types.ValidatorUpdate) { if err = app.state.db.Set(key, value.Bytes()); err != nil { panic(err) } - app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey + app.valAddrToPubKeyMap[string(pubkey.Address())] = pubkey } } @@ -495,7 +612,7 @@ func (app *Application) getValidators() (validators []types.ValidatorUpdate) { if err = itr.Error(); err != nil { panic(err) } - return + return validators } // ----------------------------- @@ -540,7 +657,7 @@ func saveState(state State) { // as the size or number of transactions processed within the state. Note that this isn't // a strong guarantee of state machine replication because states could // have different kv values but still have the same size. -// This function is used as the "AppHash" +// This function is used as the "AppHash". 
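Because `Hash` is just a varint of the state size, the resulting "app hash" can be computed by hand; a tiny sketch (the size value is hypothetical):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, 3) // a state holding 3 key/value pairs
	// PutVarint zigzag-encodes 3 as the single byte 0x06; the rest is zero padding.
	fmt.Printf("%x\n", appHash) // 0600000000000000
}
```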
func (s State) Hash() []byte { appHash := make([]byte, 8) binary.PutVarint(appHash, s.Size) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 60ef73fe1b8..2521564e4d0 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -1,6 +1,7 @@ package kvstore import ( + "bytes" "context" "fmt" "sort" @@ -8,12 +9,10 @@ import ( "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/libs/service" - abcicli "github.com/cometbft/cometbft/abci/client" abciserver "github.com/cometbft/cometbft/abci/server" "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" ) const ( @@ -27,34 +26,38 @@ func TestKVStoreKV(t *testing.T) { kvstore := NewInMemoryApplication() tx := []byte(testKey + ":" + testValue) - testKVStore(ctx, t, kvstore, tx, testKey, testValue) + testKVStore(ctx, t, kvstore, tx) tx = []byte(testKey + "=" + testValue) - testKVStore(ctx, t, kvstore, tx, testKey, testValue) + testKVStore(ctx, t, kvstore, tx) } -func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) { - checkTxResp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx}) +func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte) { + t.Helper() + + value := "def" + key := "abc" + checkTxResp, err := app.CheckTx(ctx, &types.CheckTxRequest{Tx: tx, Type: types.CHECK_TX_TYPE_CHECK}) require.NoError(t, err) require.Equal(t, uint32(0), checkTxResp.Code) - ppResp, err := app.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: [][]byte{tx}}) + ppResp, err := app.PrepareProposal(ctx, &types.PrepareProposalRequest{Txs: [][]byte{tx}}) require.NoError(t, err) require.Len(t, ppResp.Txs, 1) - req := &types.RequestFinalizeBlock{Height: 1, Txs: ppResp.Txs} + req := &types.FinalizeBlockRequest{Height: 1, Txs: ppResp.Txs} ar, err := app.FinalizeBlock(ctx, req) require.NoError(t, err) - require.Equal(t, 1, len(ar.TxResults)) + require.Len(t, ar.TxResults, 1) require.False(t, ar.TxResults[0].IsErr()) // commit - _, err = app.Commit(ctx, &types.RequestCommit{}) + _, err = app.Commit(ctx, &types.CommitRequest{}) require.NoError(t, err) - info, err := app.Info(ctx, &types.RequestInfo{}) + info, err := app.Info(ctx, &types.InfoRequest{}) require.NoError(t, err) require.NotZero(t, info.LastBlockHeight) // make sure query is fine - resQuery, err := app.Query(ctx, &types.RequestQuery{ + resQuery, err := app.Query(ctx, &types.QueryRequest{ Path: "/store", Data: []byte(key), }) @@ -65,7 +68,7 @@ func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx [] require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery, err = app.Query(ctx, &types.RequestQuery{ + resQuery, err = app.Query(ctx, &types.QueryRequest{ Path: "/store", Data: []byte(key), Prove: true, @@ -83,17 +86,17 @@ func TestPersistentKVStoreEmptyTX(t *testing.T) { kvstore := NewPersistentApplication(t.TempDir()) tx := []byte("") - reqCheck := types.RequestCheckTx{Tx: tx} + reqCheck := types.CheckTxRequest{Tx: tx, Type: types.CHECK_TX_TYPE_CHECK} resCheck, err := kvstore.CheckTx(ctx, &reqCheck) require.NoError(t, err) - require.Equal(t, resCheck.Code, CodeTypeInvalidTxFormat) + require.Equal(t, CodeTypeInvalidTxFormat, resCheck.Code) txs := make([][]byte, 0, 4) txs = append(txs, []byte("key=value"), []byte("key:val"), []byte(""), []byte("kee=value")) - reqPrepare := 
types.RequestPrepareProposal{Txs: txs, MaxTxBytes: 10 * 1024} + reqPrepare := types.PrepareProposalRequest{Txs: txs, MaxTxBytes: 10 * 1024} resPrepare, err := kvstore.PrepareProposal(ctx, &reqPrepare) require.NoError(t, err) - require.Equal(t, len(reqPrepare.Txs)-1, len(resPrepare.Txs), "Empty transaction not properly removed") + require.Len(t, resPrepare.Txs, len(reqPrepare.Txs)-1, "Empty transaction not properly removed") } func TestPersistentKVStoreKV(t *testing.T) { @@ -103,7 +106,7 @@ func TestPersistentKVStoreKV(t *testing.T) { kvstore := NewPersistentApplication(t.TempDir()) key := testKey value := testValue - testKVStore(ctx, t, kvstore, NewTx(key, value), key, value) + testKVStore(ctx, t, kvstore, NewTx(key, value)) } func TestPersistentKVStoreInfo(t *testing.T) { @@ -114,7 +117,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { require.NoError(t, InitKVStore(ctx, kvstore)) height := int64(0) - resInfo, err := kvstore.Info(ctx, &types.RequestInfo{}) + resInfo, err := kvstore.Info(ctx, &types.InfoRequest{}) require.NoError(t, err) if resInfo.LastBlockHeight != height { t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight) @@ -123,19 +126,19 @@ func TestPersistentKVStoreInfo(t *testing.T) { // make and apply block height = int64(1) hash := []byte("foo") - if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil { + if _, err := kvstore.FinalizeBlock(ctx, &types.FinalizeBlockRequest{Hash: hash, Height: height}); err != nil { t.Fatal(err) } - _, err = kvstore.Commit(ctx, &types.RequestCommit{}) + _, err = kvstore.Commit(ctx, &types.CommitRequest{}) require.NoError(t, err) - resInfo, err = kvstore.Info(ctx, &types.RequestInfo{}) + resInfo, err = kvstore.Info(ctx, &types.InfoRequest{}) require.NoError(t, err) require.Equal(t, height, resInfo.LastBlockHeight) } -// add a validator, remove a validator, update a validator +// add a validator, remove a validator, update a validator. 
func TestValUpdates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -147,7 +150,7 @@ func TestValUpdates(t *testing.T) { nInit := 5 vals := RandVals(total) // initialize with the first nInit - _, err := kvstore.InitChain(ctx, &types.RequestInitChain{ + _, err := kvstore.InitChain(ctx, &types.InitChainRequest{ Validators: vals[:nInit], }) require.NoError(t, err) @@ -160,8 +163,8 @@ func TestValUpdates(t *testing.T) { // add some validators v1, v2 = vals[nInit], vals[nInit+1] diff := []types.ValidatorUpdate{v1, v2} - tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power) - tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power) + tx1 := MakeValSetChangeTx(v1) + tx2 := MakeValSetChangeTx(v2) makeApplyBlock(ctx, t, kvstore, 1, diff, tx1, tx2) @@ -174,13 +177,13 @@ func TestValUpdates(t *testing.T) { v2.Power = 0 v3.Power = 0 diff = []types.ValidatorUpdate{v1, v2, v3} - tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power) - tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power) - tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power) + tx1 = MakeValSetChangeTx(v1) + tx2 = MakeValSetChangeTx(v2) + tx3 := MakeValSetChangeTx(v3) makeApplyBlock(ctx, t, kvstore, 2, diff, tx1, tx2, tx3) - vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic + vals1 = append(vals[:nInit-2], vals[nInit+1]) vals2 = kvstore.getValidators() valsEqual(t, vals1, vals2) @@ -192,7 +195,7 @@ func TestValUpdates(t *testing.T) { v1.Power = 5 } diff = []types.ValidatorUpdate{v1} - tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power) + tx1 = MakeValSetChangeTx(v1) makeApplyBlock(ctx, t, kvstore, 3, diff, tx1) @@ -207,6 +210,7 @@ func TestCheckTx(t *testing.T) { kvstore := NewInMemoryApplication() val := RandVal() + val.Power = 10 testCases := []struct { expCode uint32 @@ -220,29 +224,60 @@ func TestCheckTx(t *testing.T) { {CodeTypeOK, []byte("a=b")}, {CodeTypeInvalidTxFormat, []byte("val=hello")}, {CodeTypeInvalidTxFormat, []byte("val=hi!5")}, - {CodeTypeOK, MakeValSetChangeTx(val.PubKey, 10)}, + {CodeTypeOK, MakeValSetChangeTx(val)}, } for idx, tc := range testCases { - resp, err := kvstore.CheckTx(ctx, &types.RequestCheckTx{Tx: tc.tx}) + resp, err := kvstore.CheckTx(ctx, &types.CheckTxRequest{ + Tx: tc.tx, + Type: types.CHECK_TX_TYPE_CHECK, + }) require.NoError(t, err, idx) fmt.Println(string(tc.tx)) require.Equal(t, tc.expCode, resp.Code, idx) } } +func TestClientAssignLane(t *testing.T) { + val := RandVal() + + testCases := []struct { + lane string + tx []byte + }{ + {"foo", NewTx("0", "0")}, + {defaultLane, NewTx("1", "1")}, + {defaultLane, NewTx("2", "2")}, + {"bar", NewTx("3", "3")}, + {defaultLane, NewTx("4", "4")}, + {defaultLane, NewTx("5", "5")}, + {"bar", NewTx("6", "6")}, + {defaultLane, NewTx("7", "7")}, + {defaultLane, NewTx("8", "8")}, + {"bar", NewTx("9", "9")}, + {defaultLane, NewTx("10", "10")}, + {"foo", NewTx("11", "11")}, + {"bar", NewTx("12", "12")}, + {"val", MakeValSetChangeTx(val)}, + } + + for idx, tc := range testCases { + require.Equal(t, tc.lane, assignLane(tc.tx), idx) + } +} + func TestClientServer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // set up socket app kvstore := NewInMemoryApplication() - client, _, err := makeClientServer(t, kvstore, "kvstore-socket", "socket") + client, err := makeClientServer(t, kvstore, "kvstore-socket", "socket") require.NoError(t, err) runClientTests(ctx, t, client) // set up grpc app kvstore = NewInMemoryApplication() - gclient, _, err := makeClientServer(t, kvstore, t.TempDir(), "grpc") + gclient, err := 
makeClientServer(t, kvstore, t.TempDir(), "grpc") require.NoError(t, err) runClientTests(ctx, t, gclient) } @@ -255,23 +290,24 @@ func makeApplyBlock( diff []types.ValidatorUpdate, txs ...[]byte, ) { + t.Helper() // make and apply block height := int64(heightInt) hash := []byte("foo") - resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{ + resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.FinalizeBlockRequest{ Hash: hash, Height: height, Txs: txs, }) require.NoError(t, err) - _, err = kvstore.Commit(ctx, &types.RequestCommit{}) + _, err = kvstore.Commit(ctx, &types.CommitRequest{}) require.NoError(t, err) valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates) } -// order doesn't matter +// order doesn't matter. func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { t.Helper() if len(vals1) != len(vals2) { @@ -281,14 +317,17 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { sort.Sort(types.ValidatorUpdates(vals2)) for i, v1 := range vals1 { v2 := vals2[i] - if !v1.PubKey.Equal(v2.PubKey) || + if v1.PubKeyType != v2.PubKeyType || + !bytes.Equal(v1.PubKeyBytes, v2.PubKeyBytes) || v1.Power != v2.Power { - t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, v2.PubKey, v2.Power, v1.PubKey, v1.Power) + t.Fatalf("vals dont match at index %d. got %s/%X/%d , expected %s/%X/%d", i, + v2.PubKeyType, v2.PubKeyBytes, v2.Power, v1.PubKeyType, v1.PubKeyBytes, v1.Power) } } } -func makeClientServer(t *testing.T, app types.Application, name, transport string) (abcicli.Client, service.Service, error) { +func makeClientServer(t *testing.T, app types.Application, name, transport string) (abcicli.Client, error) { + t.Helper() // Start the listener addr := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() @@ -297,7 +336,7 @@ func makeClientServer(t *testing.T, app types.Application, name, transport strin require.NoError(t, err) server.SetLogger(logger.With("module", "abci-server")) if err := server.Start(); err != nil { - return nil, nil, err + return nil, err } t.Cleanup(func() { @@ -311,7 +350,7 @@ func makeClientServer(t *testing.T, app types.Application, name, transport strin require.NoError(t, err) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { - return nil, nil, err + return nil, err } t.Cleanup(func() { @@ -320,15 +359,16 @@ func makeClientServer(t *testing.T, app types.Application, name, transport strin } }) - return client, server, nil + return client, nil } func runClientTests(ctx context.Context, t *testing.T, client abcicli.Client) { + t.Helper() // run some tests.... tx := []byte(testKey + ":" + testValue) - testKVStore(ctx, t, client, tx, testKey, testValue) + testKVStore(ctx, t, client, tx) tx = []byte(testKey + "=" + testValue) - testKVStore(ctx, t, client, tx, testKey, testValue) + testKVStore(ctx, t, client, tx) } func TestTxGeneration(t *testing.T) { diff --git a/abci/server/errors.go b/abci/server/errors.go index d60eacb2a82..0681c185891 100644 --- a/abci/server/errors.go +++ b/abci/server/errors.go @@ -12,10 +12,10 @@ type ErrUnknownServerType struct { } func (e ErrUnknownServerType) Error() string { - return fmt.Sprintf("unknown server type %s", e.ServerType) + return "unknown server type " + e.ServerType } -// ErrConnectionDoesNotExist is returned when trying to access non-existent network connection +// ErrConnectionDoesNotExist is returned when trying to access non-existent network connection. 
type ErrConnectionDoesNotExist struct {
	ConnID int
}
diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go
index e0eaefa648e..f0d79d3a734 100644
--- a/abci/server/grpc_server.go
+++ b/abci/server/grpc_server.go
@@ -7,7 +7,7 @@ import (
	"google.golang.org/grpc"

	"github.com/cometbft/cometbft/abci/types"
-	cmtnet "github.com/cometbft/cometbft/libs/net"
+	cmtnet "github.com/cometbft/cometbft/internal/net"
	"github.com/cometbft/cometbft/libs/service"
)
@@ -22,7 +22,7 @@ type GRPCServer struct {
	app types.Application
}
-// NewGRPCServer returns a new gRPC ABCI server
+// NewGRPCServer returns a new gRPC ABCI server.
func NewGRPCServer(protoAddr string, app types.Application) service.Service {
	proto, addr := cmtnet.ProtocolAndAddress(protoAddr)
	s := &GRPCServer{
@@ -60,17 +60,17 @@ func (s *GRPCServer) OnStop() {
	s.server.Stop()
}

-//-------------------------------------------------------
+// -------------------------------------------------------

-// gRPCApplication is a gRPC shim for Application
+// gRPCApplication is a gRPC shim for Application.
type gRPCApplication struct {
	types.Application
}

-func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) {
-	return &types.ResponseEcho{Message: req.Message}, nil
+func (*gRPCApplication) Echo(_ context.Context, req *types.EchoRequest) (*types.EchoResponse, error) {
+	return &types.EchoResponse{Message: req.Message}, nil
}

-func (app *gRPCApplication) Flush(context.Context, *types.RequestFlush) (*types.ResponseFlush, error) {
-	return &types.ResponseFlush{}, nil
+func (*gRPCApplication) Flush(context.Context, *types.FlushRequest) (*types.FlushResponse, error) {
+	return &types.FlushResponse{}, nil
}
diff --git a/abci/server/server.go b/abci/server/server.go
index 010a8948abe..b7b14961e4e 100644
--- a/abci/server/server.go
+++ b/abci/server/server.go
@@ -13,7 +13,7 @@ import (
)

// NewServer is a utility function for out of process applications to set up either a socket or
-// grpc server that can listen to requests from the equivalent Tendermint client
+// grpc server that can listen to requests from the equivalent Tendermint client.
func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
	var s service.Service
	var err error
diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go
index 91a65c32bc8..f4f53f63362 100644
--- a/abci/server/socket_server.go
+++ b/abci/server/socket_server.go
@@ -11,8 +11,8 @@ import (
	"runtime"

	"github.com/cometbft/cometbft/abci/types"
+	cmtnet "github.com/cometbft/cometbft/internal/net"
	cmtlog "github.com/cometbft/cometbft/libs/log"
-	cmtnet "github.com/cometbft/cometbft/libs/net"
	"github.com/cometbft/cometbft/libs/service"
	cmtsync "github.com/cometbft/cometbft/libs/sync"
)
@@ -21,7 +21,7 @@ import (
// for out-of-process go applications. Note, in the case of an application written in golang,
// the developer may also run both Tendermint and the application within the same process.
//
-// The socket server deliver
+// The socket server delivers requests from the client to the application and routes the responses back.
type SocketServer struct {
	service.BaseService
	isLoggerSet bool
@@ -97,7 +97,7 @@ func (s *SocketServer) addConn(conn net.Conn) int {
	return connID
}

-// deletes conn even if close errs
+// deletes conn even if close errs.
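As a usage sketch for `NewServer` above, wiring the kvstore app to a socket server and a matching client (the address is illustrative; "grpc" is the other supported transport):

```go
package main

import (
	"log"

	abciclient "github.com/cometbft/cometbft/abci/client"
	"github.com/cometbft/cometbft/abci/example/kvstore"
	abciserver "github.com/cometbft/cometbft/abci/server"
)

func main() {
	app := kvstore.NewInMemoryApplication()

	server, err := abciserver.NewServer("unix://example.sock", "socket", app)
	if err != nil {
		log.Fatal(err)
	}
	if err := server.Start(); err != nil {
		log.Fatal(err)
	}

	// The final argument (mustConnect) makes Start fail fast if it cannot connect.
	client, err := abciclient.NewClient("unix://example.sock", "socket", true)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}

	defer func() {
		_ = client.Stop()
		_ = server.Stop()
	}()
}
```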
func (s *SocketServer) rmConn(connID int) error {
	s.connsMtx.Lock()
	defer s.connsMtx.Unlock()
@@ -144,7 +144,7 @@ func (s *SocketServer) acceptConnectionsRoutine() {
func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
	err := <-closeConn
	switch {
-	case err == io.EOF:
+	case errors.Is(err, io.EOF):
		s.Logger.Error("Connection was closed by client")
	case err != nil:
		s.Logger.Error("Connection error", "err", err)
@@ -159,15 +159,15 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
	}
}

-// Read requests from conn and deal with them
+// Read requests from conn and deal with them.
func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
	var count int
-	var bufReader = bufio.NewReader(conn)
+	bufReader := bufio.NewReader(conn)

	defer func() {
		// make sure to recover from any app-related panics to allow proper socket cleanup.
		// In the case of a panic, we do not notify the client by passing an exception so
-		// presume that the client is still running and retying to connect
+		// presume that the client is still running and retrying to connect
		r := recover()
		if r != nil {
			const size = 64 << 10
@@ -183,11 +183,10 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
	}()

	for {
-
-		var req = &types.Request{}
+		req := &types.Request{}
		err := types.ReadMessage(bufReader, req)
		if err != nil {
-			if err == io.EOF {
+			if errors.Is(err, io.EOF) {
				closeConn <- err
			} else {
				closeConn <- fmt.Errorf("error reading message: %w", err)
@@ -201,7 +200,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
			// any error either from the application or because of an unknown request
			// throws an exception back to the client. This will stop the server and
			// should also halt the client.
-			responses <- types.ToResponseException(err.Error())
+			responses <- types.ToExceptionResponse(err.Error())
		} else {
			responses <- resp
		}
@@ -209,108 +208,108 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
	}
}

-// handleRequests takes a request and calls the application passing the returned
+// handleRequest takes a request, calls the application, and returns the response.
func (s *SocketServer) handleRequest(ctx context.Context, req *types.Request) (*types.Response, error) { switch r := req.Value.(type) { case *types.Request_Echo: - return types.ToResponseEcho(r.Echo.Message), nil + return types.ToEchoResponse(r.Echo.Message), nil case *types.Request_Flush: - return types.ToResponseFlush(), nil + return types.ToFlushResponse(), nil case *types.Request_Info: res, err := s.app.Info(ctx, r.Info) if err != nil { return nil, err } - return types.ToResponseInfo(res), nil + return types.ToInfoResponse(res), nil case *types.Request_CheckTx: res, err := s.app.CheckTx(ctx, r.CheckTx) if err != nil { return nil, err } - return types.ToResponseCheckTx(res), nil + return types.ToCheckTxResponse(res), nil case *types.Request_Commit: res, err := s.app.Commit(ctx, r.Commit) if err != nil { return nil, err } - return types.ToResponseCommit(res), nil + return types.ToCommitResponse(res), nil case *types.Request_Query: res, err := s.app.Query(ctx, r.Query) if err != nil { return nil, err } - return types.ToResponseQuery(res), nil + return types.ToQueryResponse(res), nil case *types.Request_InitChain: res, err := s.app.InitChain(ctx, r.InitChain) if err != nil { return nil, err } - return types.ToResponseInitChain(res), nil + return types.ToInitChainResponse(res), nil case *types.Request_FinalizeBlock: res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock) if err != nil { return nil, err } - return types.ToResponseFinalizeBlock(res), nil + return types.ToFinalizeBlockResponse(res), nil case *types.Request_ListSnapshots: res, err := s.app.ListSnapshots(ctx, r.ListSnapshots) if err != nil { return nil, err } - return types.ToResponseListSnapshots(res), nil + return types.ToListSnapshotsResponse(res), nil case *types.Request_OfferSnapshot: res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot) if err != nil { return nil, err } - return types.ToResponseOfferSnapshot(res), nil + return types.ToOfferSnapshotResponse(res), nil case *types.Request_PrepareProposal: res, err := s.app.PrepareProposal(ctx, r.PrepareProposal) if err != nil { return nil, err } - return types.ToResponsePrepareProposal(res), nil + return types.ToPrepareProposalResponse(res), nil case *types.Request_ProcessProposal: res, err := s.app.ProcessProposal(ctx, r.ProcessProposal) if err != nil { return nil, err } - return types.ToResponseProcessProposal(res), nil + return types.ToProcessProposalResponse(res), nil case *types.Request_LoadSnapshotChunk: res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk) if err != nil { return nil, err } - return types.ToResponseLoadSnapshotChunk(res), nil + return types.ToLoadSnapshotChunkResponse(res), nil case *types.Request_ApplySnapshotChunk: res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk) if err != nil { return nil, err } - return types.ToResponseApplySnapshotChunk(res), nil + return types.ToApplySnapshotChunkResponse(res), nil case *types.Request_ExtendVote: res, err := s.app.ExtendVote(ctx, r.ExtendVote) if err != nil { return nil, err } - return types.ToResponseExtendVote(res), nil + return types.ToExtendVoteResponse(res), nil case *types.Request_VerifyVoteExtension: res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension) if err != nil { return nil, err } - return types.ToResponseVerifyVoteExtension(res), nil + return types.ToVerifyVoteExtensionResponse(res), nil default: return nil, ErrUnknownRequest{Request: *req} } } // Pull responses from 'responses' and write them to conn. 
-func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { +func (*SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { var count int - var bufWriter = bufio.NewWriter(conn) + bufWriter := bufio.NewWriter(conn) for { - var res = <-responses + res := <-responses err := types.WriteMessage(res, bufWriter) if err != nil { closeConn <- fmt.Errorf("error writing message: %w", err) diff --git a/abci/tests/benchmarks/parallel/parallel.go b/abci/tests/benchmarks/parallel/parallel.go index 974ba381538..cb2f2ca055c 100644 --- a/abci/tests/benchmarks/parallel/parallel.go +++ b/abci/tests/benchmarks/parallel/parallel.go @@ -6,11 +6,10 @@ import ( "log" "github.com/cometbft/cometbft/abci/types" - cmtnet "github.com/cometbft/cometbft/libs/net" + cmtnet "github.com/cometbft/cometbft/internal/net" ) func main() { - conn, err := cmtnet.Connect("unix://test.sock") if err != nil { log.Fatal(err.Error()) @@ -20,7 +19,7 @@ func main() { go func() { counter := 0 for { - var res = &types.Response{} + res := &types.Response{} err := types.ReadMessage(conn, res) if err != nil { log.Fatal(err.Error()) @@ -35,8 +34,8 @@ func main() { // Write a bunch of requests counter := 0 for i := 0; ; i++ { - var bufWriter = bufio.NewWriter(conn) - var req = types.ToRequestEcho("foobar") + bufWriter := bufio.NewWriter(conn) + req := types.ToEchoRequest("foobar") err := types.WriteMessage(req, bufWriter) if err != nil { diff --git a/abci/tests/benchmarks/simple/simple.go b/abci/tests/benchmarks/simple/simple.go index 2aaa056d068..79ecd391090 100644 --- a/abci/tests/benchmarks/simple/simple.go +++ b/abci/tests/benchmarks/simple/simple.go @@ -8,11 +8,10 @@ import ( "reflect" "github.com/cometbft/cometbft/abci/types" - cmtnet "github.com/cometbft/cometbft/libs/net" + cmtnet "github.com/cometbft/cometbft/internal/net" ) func main() { - conn, err := cmtnet.Connect("unix://test.sock") if err != nil { log.Fatal(err.Error()) @@ -21,7 +20,7 @@ func main() { // Make a bunch of requests counter := 0 for i := 0; ; i++ { - req := types.ToRequestEcho("foobar") + req := types.ToEchoRequest("foobar") _, err := makeRequest(conn, req) if err != nil { log.Fatal(err.Error()) @@ -34,14 +33,14 @@ func main() { } func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error) { - var bufWriter = bufio.NewWriter(conn) + bufWriter := bufio.NewWriter(conn) // Write desired request err := types.WriteMessage(req, bufWriter) if err != nil { return nil, err } - err = types.WriteMessage(types.ToRequestFlush(), bufWriter) + err = types.WriteMessage(types.ToFlushRequest(), bufWriter) if err != nil { return nil, err } @@ -51,12 +50,12 @@ func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error } // Read desired response - var res = &types.Response{} + res := &types.Response{} err = types.ReadMessage(conn, res) if err != nil { return nil, err } - var resFlush = &types.Response{} + resFlush := &types.Response{} err = types.ReadMessage(conn, resFlush) if err != nil { return nil, err diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 79737a203c0..b1af64dec48 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -3,7 +3,7 @@ package tests import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" abciclient "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/example/kvstore" @@ -18,9 +18,9 
@@ func TestClientServerNoAddrPrefix(t *testing.T) { app := kvstore.NewInMemoryApplication() server, err := abciserver.NewServer(addr, transport, app) - assert.NoError(t, err, "expected no error on NewServer") + require.NoError(t, err) err = server.Start() - assert.NoError(t, err, "expected no error on server.Start") + require.NoError(t, err) t.Cleanup(func() { if err := server.Stop(); err != nil { t.Error(err) @@ -28,9 +28,9 @@ func TestClientServerNoAddrPrefix(t *testing.T) { }) client, err := abciclient.NewClient(addr, transport, true) - assert.NoError(t, err, "expected no error on NewClient") + require.NoError(t, err) err = client.Start() - assert.NoError(t, err, "expected no error on client.Start") + require.NoError(t, err) t.Cleanup(func() { if err := client.Stop(); err != nil { t.Error(err) diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 9c33c8eb7d2..ef81914ddc0 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -8,7 +8,7 @@ import ( abcicli "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/types" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func InitChain(ctx context.Context, client abcicli.Client) error { @@ -17,9 +17,9 @@ func InitChain(ctx context.Context, client abcicli.Client) error { for i := 0; i < total; i++ { pubkey := cmtrand.Bytes(33) power := cmtrand.Int() - vals[i] = types.UpdateValidator(pubkey, int64(power), "") + vals[i] = types.ValidatorUpdate{Power: int64(power), PubKeyType: "", PubKeyBytes: pubkey} } - _, err := client.InitChain(ctx, &types.RequestInitChain{ + _, err := client.InitChain(ctx, &types.InitChainRequest{ Validators: vals, }) if err != nil { @@ -31,7 +31,7 @@ func InitChain(ctx context.Context, client abcicli.Client) error { } func Commit(ctx context.Context, client abcicli.Client) error { - _, err := client.Commit(ctx, &types.RequestCommit{}) + _, err := client.Commit(ctx, &types.CommitRequest{}) if err != nil { fmt.Println("Failed test: Commit") fmt.Printf("error while committing: %v\n", err) @@ -42,7 +42,7 @@ func Commit(ctx context.Context, client abcicli.Client) error { } func FinalizeBlock(ctx context.Context, client abcicli.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error { - res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes}) + res, _ := client.FinalizeBlock(ctx, &types.FinalizeBlockRequest{Txs: txBytes}) appHash := res.AppHash for i, tx := range res.TxResults { code, data, log := tx.Code, tx.Data, tx.Log @@ -69,7 +69,7 @@ func FinalizeBlock(ctx context.Context, client abcicli.Client, txBytes [][]byte, } func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, _ []byte) error { - res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes}) + res, _ := client.PrepareProposal(ctx, &types.PrepareProposalRequest{Txs: txBytes}) for i, tx := range res.Txs { if !bytes.Equal(tx, txExpected[i]) { fmt.Println("Failed test: PrepareProposal") @@ -82,8 +82,8 @@ func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byt return nil } -func ProcessProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error { - res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes}) +func ProcessProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, statusExp 
types.ProcessProposalStatus) error { + res, _ := client.ProcessProposal(ctx, &types.ProcessProposalRequest{Txs: txBytes}) if res.Status != statusExp { fmt.Println("Failed test: ProcessProposal") fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.", @@ -95,7 +95,7 @@ func ProcessProposal(ctx context.Context, client abcicli.Client, txBytes [][]byt } func CheckTx(ctx context.Context, client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes}) + res, _ := client.CheckTx(ctx, &types.CheckTxRequest{Tx: txBytes, Type: types.CHECK_TX_TYPE_CHECK}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: CheckTx") diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 39b7f874bef..8b5d53badad 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -14,11 +14,11 @@ > process_proposal "abc==456" -> code: OK --> status: REJECT +-> status: PROCESS_PROPOSAL_STATUS_REJECT > process_proposal "abc=123" -> code: OK --> status: ACCEPT +-> status: PROCESS_PROPOSAL_STATUS_ACCEPT > finalize_block "abc=123" -> code: OK diff --git a/abci/tests/test_cli/test.sh b/abci/tests/test_cli/test.sh index e905ec0740d..883a0f5c5ec 100755 --- a/abci/tests/test_cli/test.sh +++ b/abci/tests/test_cli/test.sh @@ -1,8 +1,10 @@ #! /bin/bash -set -e + +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes # Get the root directory. -export PATH="$GOBIN:$PATH" SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )" diff --git a/abci/tutorials/abci-v2-forum-app/.gitignore b/abci/tutorials/abci-v2-forum-app/.gitignore new file mode 100644 index 00000000000..bf003718b66 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/.gitignore @@ -0,0 +1,2 @@ +abci-v2-forum-app +tmp/forum-app diff --git a/abci/tutorials/abci-v2-forum-app/README.md b/abci/tutorials/abci-v2-forum-app/README.md new file mode 100644 index 00000000000..034ba9709a3 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/README.md @@ -0,0 +1,28 @@ +## Forum Application + +The **ABCI 2.0 Forum Application** is a demo application where users can come and post messages in a forum running on a +blockchain powered by [CometBFT](https://github.com/cometbft/cometbft) state machine replication engine. + +- **Users** + + - Can post messages (by submitting transactions) + - Can view all the message history (querying the blockchain) + - Are banned if they post messages that contain curse words (curse words are tracked with vote extensions) + +## ABCI 2.0 + +**This application demonstrates the use of various [ABCI 2.0](https://docs.cometbft.com/v1.0/spec/abci/) methods such as:** + +- PrepareProposal +- ProcessProposal +- FinalizeBlock +- ExtendVote +- VerifyVoteExtension +- Commit +- CheckTx +- Query + +To follow this tutorial, please check the [Introduction to ABCI 2.0](../../../docs/tutorials/forum-application/1.abci-intro.md) document. + +> Many thanks to the original team for brainstorming and bringing forth this idea. 
Their original repo can be found [here](https://github.com/interchainio/forum).
+
diff --git a/abci/tutorials/abci-v2-forum-app/abci/app.go b/abci/tutorials/abci-v2-forum-app/abci/app.go
new file mode 100644
index 00000000000..845f7501d8c
--- /dev/null
+++ b/abci/tutorials/abci-v2-forum-app/abci/app.go
@@ -0,0 +1,427 @@
+package abci
+
+import (
+	"context"
+	"crypto"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/dgraph-io/badger/v4"
+
+	"github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model"
+	abci "github.com/cometbft/cometbft/abci/types"
+	cryptoencoding "github.com/cometbft/cometbft/crypto/encoding"
+	"github.com/cometbft/cometbft/libs/log"
+	"github.com/cometbft/cometbft/version"
+)
+
+const (
+	ApplicationVersion = 1
+	CurseWordsLimitVE  = 10
+)
+
+type ForumApp struct {
+	abci.BaseApplication
+	valAddrToPubKeyMap map[string]crypto.PublicKey
+	CurseWords         string
+	state              AppState
+	onGoingBlock       *badger.Txn
+	logger             log.Logger
+}
+
+func NewForumApp(dbDir string, appConfigPath string, logger log.Logger) (*ForumApp, error) {
+	db, err := model.NewDB(dbDir)
+	if err != nil {
+		return nil, fmt.Errorf("error initializing database: %w", err)
+	}
+	cfg, err := LoadConfig(appConfigPath)
+	if err != nil {
+		return nil, fmt.Errorf("error loading config file: %w", err)
+	}
+
+	cfg.CurseWords = DeduplicateCurseWords(cfg.CurseWords)
+
+	state, err := loadState(db)
+	if err != nil {
+		return nil, err
+	}
+
+	// Reading the validators from the DB because CometBFT expects the application to have them in memory
+	valMap := make(map[string]crypto.PublicKey)
+	validators, err := state.DB.GetValidators()
+	if err != nil {
+		return nil, fmt.Errorf("can't load validators: %w", err)
+	}
+	for _, v := range validators {
+		pubKey, err := cryptoencoding.PubKeyFromTypeAndBytes(v.PubKeyType, v.PubKeyBytes)
+		if err != nil {
+			return nil, fmt.Errorf("can't decode public key: %w", err)
+		}
+
+		valMap[string(pubKey.Address())] = pubKey
+	}
+
+	return &ForumApp{
+		state:              state,
+		valAddrToPubKeyMap: valMap,
+		CurseWords:         cfg.CurseWords,
+		logger:             logger,
+	}, nil
+}
+
+// Info returns application information.
+func (app *ForumApp) Info(_ context.Context, _ *abci.InfoRequest) (*abci.InfoResponse, error) {
+	return &abci.InfoResponse{
+		Version:         version.ABCIVersion,
+		AppVersion:      ApplicationVersion,
+		LastBlockHeight: app.state.Height,
+
+		LastBlockAppHash: app.state.Hash(),
+	}, nil
+}
+
+// Query the application state for specific information.
+func (app *ForumApp) Query(_ context.Context, query *abci.QueryRequest) (*abci.QueryResponse, error) {
+	app.logger.Info("Executing Application Query")
+
+	resp := abci.QueryResponse{Key: query.Data}
+
+	// Parse sender from query data
+	sender := string(query.Data)
+
+	if sender == "history" {
+		messages, err := model.FetchHistory(app.state.DB)
+		if err != nil {
+			return nil, err
+		}
+		resp.Log = messages
+		resp.Value = []byte(messages)
+
+		return &resp, nil
+	}
+	// Retrieve all messages sent by the sender
+	messages, err := model.GetMessagesBySender(app.state.DB, sender)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert the messages to JSON and return as query result
+	resultBytes, err := json.Marshal(messages)
+	if err != nil {
+		return nil, err
+	}
+
+	resp.Log = string(resultBytes)
+	resp.Value = resultBytes
+
+	return &resp, nil
+}
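Before the transaction-validation code below, a hypothetical client-side sketch of the `Query` handler above (paths are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"os"

	forum "github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/abci"
	abci "github.com/cometbft/cometbft/abci/types"
	"github.com/cometbft/cometbft/libs/log"
)

func main() {
	// NewForumApp opens the badger DB and loads the curse-word config.
	app, err := forum.NewForumApp("./tmp/forum-db", "./config.toml", log.NewLogger(os.Stdout))
	if err != nil {
		panic(err)
	}

	// "history" is the magic key for the full message history; any other
	// value is treated as a sender name and returns that user's messages.
	resp, err := app.Query(context.Background(), &abci.QueryRequest{Data: []byte("history")})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Log)
}
```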
+func (app *ForumApp) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*abci.CheckTxResponse, error) { + app.logger.Info("Executing Application CheckTx") + + // Parse the tx message + msg, err := model.ParseMessage(req.Tx) + if err != nil { + app.logger.Info("CheckTx: failed to parse transaction message", "message", msg, "error", err) + return &abci.CheckTxResponse{Code: CodeTypeInvalidTxFormat, Log: "Invalid transaction", Info: err.Error()}, nil + } + + // Check for invalid sender + if len(msg.Sender) == 0 { + app.logger.Info("CheckTx: failed to parse transaction message", "message", msg, "error", "Sender is missing") + return &abci.CheckTxResponse{Code: CodeTypeInvalidTxFormat, Log: "Invalid transaction", Info: "Sender is missing"}, nil + } + + app.logger.Debug("searching for sender", "sender", msg.Sender) + u, err := app.state.DB.FindUserByName(msg.Sender) + + if err != nil { + if !errors.Is(err, badger.ErrKeyNotFound) { + app.logger.Error("CheckTx: Error in check tx", "tx", string(req.Tx), "error", err) + return &abci.CheckTxResponse{Code: CodeTypeEncodingError, Log: "Invalid transaction", Info: err.Error()}, nil + } + app.logger.Info("CheckTx: Sender not found", "sender", msg.Sender) + } else if u != nil && u.Banned { + return &abci.CheckTxResponse{Code: CodeTypeBanned, Log: "Invalid transaction", Info: "User is banned"}, nil + } + app.logger.Info("CheckTx: success checking tx", "message", msg.Message, "sender", msg.Sender) + return &abci.CheckTxResponse{Code: CodeTypeOK, Log: "Valid transaction", Info: "Transaction validation succeeded"}, nil +} + +// Consensus Connection + +// InitChain initializes the blockchain with information sent from CometBFT such as validators or consensus parameters. +func (app *ForumApp) InitChain(_ context.Context, req *abci.InitChainRequest) (*abci.InitChainResponse, error) { + app.logger.Info("Executing Application InitChain") + + for _, v := range req.Validators { + err := app.updateValidator(v) + if err != nil { + return nil, err + } + } + appHash := app.state.Hash() + + // This parameter can also be set in the genesis file + req.ConsensusParams.Feature.VoteExtensionsEnableHeight.Value = 1 + return &abci.InitChainResponse{ConsensusParams: req.ConsensusParams, AppHash: appHash}, nil +} + +// PrepareProposal is used to prepare a proposal for the next block in the blockchain. The application can re-order, remove +// or add transactions. +func (app *ForumApp) PrepareProposal(_ context.Context, req *abci.PrepareProposalRequest) (*abci.PrepareProposalResponse, error) { + app.logger.Info("Executing Application PrepareProposal") + + // Get the curse words from for all vote extensions received at the end of last height. 
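+	// Vote extensions are opaque bytes to CometBFT; this application encodes a
+	// set of curse words as a single '|'-separated string, for example
+	// "bad|apple|muggles" (the default used by LoadConfig). getWordsFromVe
+	// folds the extensions attached to the last commit into one such string.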
+	voteExtensionCurseWords := app.getWordsFromVe(req.LocalLastCommit.Votes)
+
+	curseWords := strings.Split(voteExtensionCurseWords, "|")
+	if hasDuplicateWords(curseWords) {
+		return nil, errors.New("duplicate words found")
+	}
+
+	// PrepareProposal puts the BanTxs first and then adds the other transactions;
+	// ProcessProposal should verify this ordering.
+	proposedTxs := make([][]byte, 0)
+	finalProposal := make([][]byte, 0)
+	bannedUsersString := make(map[string]struct{})
+	for _, tx := range req.Txs {
+		msg, err := model.ParseMessage(tx)
+		if err != nil {
+			// this should never happen since the tx should have been validated by CheckTx
+			return nil, fmt.Errorf("failed to parse tx in PrepareProposal: %w", err)
+		}
+		// Check the message against the curse words, including the ones gathered from vote extensions
+		if !hasCurseWord(msg.Message, voteExtensionCurseWords) {
+			proposedTxs = append(proposedTxs, tx)
+			continue
+		}
+		// If the message contains curse words then ban the user by
+		// creating a "ban transaction" and adding it to the final proposal
+		banTx := model.BanTx{UserName: msg.Sender}
+		bannedUsersString[msg.Sender] = struct{}{}
+		resultBytes, err := json.Marshal(banTx)
+		if err != nil {
+			// this should never happen since the ban tx is built from a tx validated by CheckTx
+			return nil, fmt.Errorf("failed to marshal ban tx in PrepareProposal: %w", err)
+		}
+		finalProposal = append(finalProposal, resultBytes)
+	}
+
+	// Loop again through the proposed txs to make sure none is left from a user
+	// who was banned after their tx was accepted
+	for _, tx := range proposedTxs {
+		// there should be no error here as these are just transactions we have checked and added
+		msg, err := model.ParseMessage(tx)
+		if err != nil {
+			// this should never happen since the tx should have been validated by CheckTx
+			return nil, fmt.Errorf("failed to parse tx in PrepareProposal: %w", err)
+		}
+		// Only include the transaction if its sender was not banned in this block
+		if _, ok := bannedUsersString[msg.Sender]; !ok {
+			finalProposal = append(finalProposal, tx)
+		}
+	}
+	return &abci.PrepareProposalResponse{Txs: finalProposal}, nil
+}
+
+// ProcessProposal validates the proposed block and its transactions, and returns
+// a status indicating whether the proposal was accepted or rejected.
+func (app *ForumApp) ProcessProposal(_ context.Context, req *abci.ProcessProposalRequest) (*abci.ProcessProposalResponse, error) {
+	app.logger.Info("Executing Application ProcessProposal")
+
+	bannedUsers := make(map[string]struct{})
+
+	finishedBanTxIdx := len(req.Txs)
+	for i, tx := range req.Txs {
+		if !isBanTx(tx) {
+			finishedBanTxIdx = i
+			break
+		}
+		var parsedBan model.BanTx
+		err := json.Unmarshal(tx, &parsedBan)
+		if err != nil {
+			return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, err
+		}
+		bannedUsers[parsedBan.UserName] = struct{}{}
+	}
+
+	for _, tx := range req.Txs[finishedBanTxIdx:] {
+		// From this point on, there should be no BanTxs anymore.
+		// If there is one, ParseMessage will return an error, as the
+		// format of the two transaction types is different.
+		msg, err := model.ParseMessage(tx)
+		if err != nil {
+			return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, err
+		}
+		if _, ok := bannedUsers[msg.Sender]; ok {
+			// sending us a tx from a banned user
+			return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil
+		}
+	}
+	return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil
+}
+
+// FinalizeBlock delivers the decided block to the Application.
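+// It relies on the transaction layout established by PrepareProposal and
+// checked by ProcessProposal: any BanTxs come first, followed by the ordinary
+// messages. A hypothetical decided block could therefore carry:
+//
+//	{"username":"alice"}            // BanTx, applied first
+//	sender:bob,message:have a nice day
+//
+// All writes are staged in a Badger transaction (app.onGoingBlock) and are
+// only persisted when Commit is called.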
+func (app *ForumApp) FinalizeBlock(_ context.Context, req *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) { + app.logger.Info("Executing Application FinalizeBlock") + + // Iterate over Tx in current block + app.onGoingBlock = app.state.DB.GetDB().NewTransaction(true) + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) + finishedBanTxIdx := len(req.Txs) + for i, tx := range req.Txs { + var err error + + if !isBanTx(tx) { + finishedBanTxIdx = i + break + } + banTx := new(model.BanTx) + err = json.Unmarshal(tx, &banTx) + if err != nil { + // since we did this in ProcessProposal this should never happen here + return nil, err + } + err = UpdateOrSetUser(app.state.DB, banTx.UserName, true, app.onGoingBlock) + if err != nil { + return nil, err + } + respTxs[i] = &abci.ExecTxResult{Code: CodeTypeOK} + } + + for idx, tx := range req.Txs[finishedBanTxIdx:] { + // From this point on, there should be no BanTxs anymore + // If there is one, ParseMessage will return an error as the + // format of the two transactions is different. + msg, err := model.ParseMessage(tx) + i := idx + finishedBanTxIdx + if err != nil { + // since we did this in ProcessProposal this should never happen here + return nil, err + } + + // Check if this sender already existed; if not, add the user too + err = UpdateOrSetUser(app.state.DB, msg.Sender, false, app.onGoingBlock) + if err != nil { + return nil, err + } + // Add the message for this sender + message, err := model.AppendToExistingMessages(app.state.DB, *msg) + if err != nil { + return nil, err + } + err = app.onGoingBlock.Set([]byte(msg.Sender+"msg"), []byte(message)) + if err != nil { + return nil, err + } + chatHistory, err := model.AppendToChat(app.state.DB, *msg) + if err != nil { + return nil, err + } + // Append messages to chat history + err = app.onGoingBlock.Set([]byte("history"), []byte(chatHistory)) + if err != nil { + return nil, err + } + // This adds the user to the DB, but the data is not committed nor persisted until Commit is called + respTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} + app.state.Size++ + } + app.state.Height = req.Height + + response := &abci.FinalizeBlockResponse{TxResults: respTxs, AppHash: app.state.Hash(), NextBlockDelay: 1 * time.Second} + return response, nil +} + +// Commit the application state. +func (app *ForumApp) Commit(_ context.Context, _ *abci.CommitRequest) (*abci.CommitResponse, error) { + app.logger.Info("Executing Application Commit") + + if err := app.onGoingBlock.Commit(); err != nil { + return nil, err + } + err := saveState(&app.state) + if err != nil { + return nil, err + } + return &abci.CommitResponse{}, nil +} + +// ExtendVote returns curse words as vote extensions. +func (app *ForumApp) ExtendVote(_ context.Context, _ *abci.ExtendVoteRequest) (*abci.ExtendVoteResponse, error) { + app.logger.Info("Executing Application ExtendVote") + + return &abci.ExtendVoteResponse{VoteExtension: []byte(app.CurseWords)}, nil +} + +// VerifyVoteExtension verifies the vote extensions and ensure they include the curse words +// It will not be called for extensions generated by this validator. 
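+// Concretely, verification only checks that the extension is well-formed: it
+// is rejected if it contains duplicate words or more than CurseWordsLimitVE
+// words. For example (hypothetical extensions):
+//
+//	"bad|apple" -> ACCEPT
+//	"bad|bad"   -> REJECT (duplicate word)
+//	an extension with 11+ words -> REJECT (CurseWordsLimitVE is 10)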
+func (app *ForumApp) VerifyVoteExtension(_ context.Context, req *abci.VerifyVoteExtensionRequest) (*abci.VerifyVoteExtensionResponse, error) {
+	app.logger.Info("Executing Application VerifyVoteExtension")
+
+	if _, ok := app.valAddrToPubKeyMap[string(req.ValidatorAddress)]; !ok {
+		// we do not have a validator with this address mapped; this should never happen
+		return nil, errors.New("unknown validator")
+	}
+
+	curseWords := strings.Split(string(req.VoteExtension), "|")
+	if hasDuplicateWords(curseWords) {
+		return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT}, nil
+	}
+
+	// ensure the vote extension curse words limit has not been exceeded
+	if len(curseWords) > CurseWordsLimitVE {
+		return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT}, nil
+	}
+	return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT}, nil
+}
+
+// getWordsFromVe gathers the curse words from the given vote extensions into a
+// single string, with the words concatenated using '|'. It also ensures that
+// the returned set contains no duplicate curse words.
+func (app *ForumApp) getWordsFromVe(voteExtensions []abci.ExtendedVoteInfo) string {
+	curseWordMap := make(map[string]int)
+	for _, vote := range voteExtensions {
+		// Count how many validators proposed each curse word. VerifyVoteExtension
+		// rejects extensions with duplicates, so each validator contributes at
+		// most one count per word.
+		curseWords := strings.Split(string(vote.GetVoteExtension()), "|")
+
+		for _, word := range curseWords {
+			if count, ok := curseWordMap[word]; !ok {
+				curseWordMap[word] = 1
+			} else {
+				curseWordMap[word] = count + 1
+			}
+		}
+	}
+	app.logger.Info("Processed vote extensions", "curse_words", curseWordMap)
+	majority := len(app.valAddrToPubKeyMap) / 3 // a word is included only if it is backed by more than 1/3 of the validators
+
+	voteExtensionCurseWords := ""
+	for word, count := range curseWordMap {
+		if count > majority {
+			if voteExtensionCurseWords == "" {
+				voteExtensionCurseWords = word
+			} else {
+				voteExtensionCurseWords += "|" + word
+			}
+		}
+	}
+	return voteExtensionCurseWords
+}
+
+// hasDuplicateWords detects if there are duplicate words in the slice.
+func hasDuplicateWords(words []string) bool {
+	wordMap := make(map[string]struct{})
+
+	for _, word := range words {
+		wordMap[word] = struct{}{}
+	}
+
+	return len(words) != len(wordMap)
+}
diff --git a/abci/tutorials/abci-v2-forum-app/abci/config.go b/abci/tutorials/abci-v2-forum-app/abci/config.go
new file mode 100644
index 00000000000..527cabb75ad
--- /dev/null
+++ b/abci/tutorials/abci-v2-forum-app/abci/config.go
@@ -0,0 +1,36 @@
+package abci
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/BurntSushi/toml"
+)
+
+type Config struct {
+	ChainID    string `toml:"chain_id"`
+	CurseWords string `toml:"curse_words"`
+}
+
+func LoadConfig(file string) (*Config, error) {
+	cfg := &Config{
+		ChainID:    "forum_chain",
+		CurseWords: "bad|apple|muggles",
+	}
+	_, err := toml.DecodeFile(file, &cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load config from %q: %w", file, err)
+	}
+	return cfg, cfg.Validate()
+}
+
+// Validate performs basic validation of the configuration; only the required
+// fields are checked here.
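+// For reference, the tutorial ships an app.toml that only overrides the curse
+// words, so chain_id keeps the default set in LoadConfig:
+//
+//	curse_words="bad|rain|cry|bloodmagic|muggle"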
+func (cfg Config) Validate() error {
+	switch {
+	case cfg.ChainID == "":
+		return errors.New("chain_id parameter is required")
+	default:
+		return nil
+	}
+}
diff --git a/abci/tutorials/abci-v2-forum-app/abci/state.go b/abci/tutorials/abci-v2-forum-app/abci/state.go
new file mode 100644
index 00000000000..a814d3c6b10
--- /dev/null
+++ b/abci/tutorials/abci-v2-forum-app/abci/state.go
@@ -0,0 +1,55 @@
+package abci
+
+import (
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+
+	"github.com/dgraph-io/badger/v4"
+
+	"github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model"
+)
+
+type AppState struct {
+	DB     *model.DB `json:"db"`
+	Size   int64     `json:"size"`
+	Height int64     `json:"height"`
+}
+
+var stateKey = "appstate"
+
+// Hash derives a deterministic app hash from the number of processed messages.
+func (s AppState) Hash() []byte {
+	appHash := make([]byte, 8)
+	binary.PutVarint(appHash, s.Size)
+	return appHash
+}
+
+// loadState restores the application state from the database; a missing state
+// key yields a fresh, empty state.
+func loadState(db *model.DB) (AppState, error) {
+	var state AppState
+	state.DB = db
+	stateBytes, err := db.Get([]byte(stateKey))
+	if err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
+		return state, err
+	}
+	if len(stateBytes) == 0 {
+		return state, nil
+	}
+	err = json.Unmarshal(stateBytes, &state)
+	if err != nil {
+		return state, err
+	}
+	state.DB = db
+	return state, nil
+}
+
+func saveState(state *AppState) error {
+	stateBytes, err := json.Marshal(state)
+	if err != nil {
+		return err
+	}
+	err = state.DB.Set([]byte(stateKey), stateBytes)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/abci/tutorials/abci-v2-forum-app/abci/util.go b/abci/tutorials/abci-v2-forum-app/abci/util.go
new file mode 100644
index 00000000000..517d9888710
--- /dev/null
+++ b/abci/tutorials/abci-v2-forum-app/abci/util.go
@@ -0,0 +1,85 @@
+package abci
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/dgraph-io/badger/v4"
+
+	"github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model"
+	"github.com/cometbft/cometbft/abci/types"
+	cryptoencoding "github.com/cometbft/cometbft/crypto/encoding"
+)
+
+// isBanTx detects a ban transaction by the presence of the JSON "username" field.
+func isBanTx(tx []byte) bool {
+	return strings.Contains(string(tx), "username")
+}
+
+func (app *ForumApp) updateValidator(v types.ValidatorUpdate) error {
+	pubKey, err := cryptoencoding.PubKeyFromTypeAndBytes(v.PubKeyType, v.PubKeyBytes)
+	if err != nil {
+		return fmt.Errorf("can't decode public key: %w", err)
+	}
+	key := []byte("val" + string(pubKey.Bytes()))
+
+	// add or update validator
+	value := bytes.NewBuffer(make([]byte, 0))
+	if err := types.WriteMessage(&v, value); err != nil {
+		return err
+	}
+	if err = app.state.DB.Set(key, value.Bytes()); err != nil {
+		return err
+	}
+	app.valAddrToPubKeyMap[string(pubKey.Address())] = pubKey
+	return nil
+}
+
+// hasCurseWord reports whether the lowercased message appears in the
+// '|'-separated curse word list. This is a simple substring check kept for
+// tutorial purposes: the message as a whole is matched against the list.
+func hasCurseWord(word string, curseWords string) bool {
+	return strings.Contains(curseWords, strings.ToLower(word))
+}
+
+const (
+	CodeTypeOK              uint32 = 0
+	CodeTypeEncodingError   uint32 = 1
+	CodeTypeInvalidTxFormat uint32 = 2
+	CodeTypeBanned          uint32 = 3
+)
+
+func UpdateOrSetUser(db *model.DB, uname string, toBan bool, txn *badger.Txn) error {
+	u, err := db.FindUserByName(uname)
+	if errors.Is(err, badger.ErrKeyNotFound) {
+		// The user does not exist yet; create it
+		u = new(model.User)
+		u.Name = uname
+		u.Banned = toBan
+	} else if err != nil {
+		return fmt.Errorf("not able to process user: %w", err)
+	} else {
+		u.Banned = toBan
+	}
+	userBytes, err := json.Marshal(u)
+	if err != nil {
+		return fmt.Errorf("error marshaling user: %w", err)
+	}
+	return txn.Set([]byte(uname), userBytes)
+}
+
+// DeduplicateCurseWords removes duplicate words from a '|'-separated list of
+// curse words.
+func DeduplicateCurseWords(inWords string) string {
+	curseWordMap := make(map[string]struct{})
+	for _, word := range strings.Split(inWords, "|") {
+		curseWordMap[word] = struct{}{}
+	}
+	deduplicatedWords := ""
+	for word := range curseWordMap {
+		if deduplicatedWords == "" {
+			deduplicatedWords = word
+		} else {
+			deduplicatedWords += "|" + word
+		}
+	}
+	return deduplicatedWords
+}
diff --git a/abci/tutorials/abci-v2-forum-app/app.toml b/abci/tutorials/abci-v2-forum-app/app.toml
new file mode 100644
index 00000000000..aa513ecb7d2
--- /dev/null
+++ b/abci/tutorials/abci-v2-forum-app/app.toml
@@ -0,0 +1 @@
+curse_words="bad|rain|cry|bloodmagic|muggle"
diff --git a/abci/tutorials/abci-v2-forum-app/forum.go b/abci/tutorials/abci-v2-forum-app/forum.go
new file mode 100644
index 00000000000..5183fadd4cf
--- /dev/null
+++ b/abci/tutorials/abci-v2-forum-app/forum.go
@@ -0,0 +1,111 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/spf13/viper"
+
+	"github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/abci"
+	cfg "github.com/cometbft/cometbft/config"
+	cmtflags "github.com/cometbft/cometbft/libs/cli/flags"
+	cmtlog "github.com/cometbft/cometbft/libs/log"
+	nm "github.com/cometbft/cometbft/node"
+	"github.com/cometbft/cometbft/p2p/nodekey"
+	"github.com/cometbft/cometbft/privval"
+	"github.com/cometbft/cometbft/proxy"
+)
+
+var homeDir string
+
+func init() {
+	flag.StringVar(&homeDir, "home", "", "Path to the CometBFT config directory (if empty, uses $HOME/.forumapp)")
+}
+
+func main() {
+	flag.Parse()
+	if homeDir == "" {
+		homeDir = os.ExpandEnv("$HOME/.forumapp")
+	}
+
+	config := cfg.DefaultConfig()
+	config.SetRoot(homeDir)
+	viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml"))
+
+	if err := viper.ReadInConfig(); err != nil {
+		log.Fatalf("failed to read config: %v", err)
+	}
+
+	logger := cmtlog.NewLogger(os.Stdout)
+	logger, err := cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
+	if err != nil {
+		panic(fmt.Errorf("failed to parse log level: %w", err))
+	}
+
+	dbPath := filepath.Join(homeDir, "forum-db")
+	appConfigPath := "app.toml"
+	app, err := abci.NewForumApp(dbPath, appConfigPath, logger)
+	if err != nil {
+		panic(fmt.Errorf("failed to create Forum Application: %w", err))
+	}
+
+	nodeKey, err := nodekey.Load(config.NodeKeyFile())
+	if err != nil {
+		panic(fmt.Errorf("failed to load node key: %w", err))
+	}
+
+	pv := privval.LoadFilePV(
+		config.PrivValidatorKeyFile(),
+		config.PrivValidatorStateFile(),
+	)
+
+	node, err := nm.NewNode(
+		context.Background(),
+		config,
+		pv,
+		nodeKey,
+		proxy.NewLocalClientCreator(app),
+		nm.DefaultGenesisDocProviderFunc(config),
+		cfg.DefaultDBProvider,
+		nm.DefaultMetricsProvider(config.Instrumentation),
+		logger,
+	)
+	if err != nil {
+		panic(fmt.Errorf("failed to create CometBFT node: %w", err))
+	}
+
+	// Register the shutdown hook only after the node was successfully created,
+	// so a nil node is never stopped.
+	defer func() {
+		_ = node.Stop()
+		node.Wait()
+	}()
+
+	if err := node.Start(); err != nil {
+		panic(fmt.Errorf("failed to start CometBFT node: %w", err))
+	}
+
+	httpAddr := "127.0.0.1:8080"
+
+	server := &http.Server{
+		Addr:              httpAddr,
+		ReadHeaderTimeout: 5 * time.Second,
+	}
+
+	// Serve in the background so that the signal handler below is reached.
+	go func() {
+		if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+			panic(fmt.Errorf("failed to start HTTP server: %w", err))
+		}
+	}()
+
+	sigCh := make(chan os.Signal, 1)
+	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
+	<-sigCh
+
+	log.Println("Forum application stopped")
+}
diff --git a/abci/tutorials/abci-v2-forum-app/model/db.go b/abci/tutorials/abci-v2-forum-app/model/db.go
new file mode
100644 index 00000000000..cdbe56ba0bd --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/model/db.go @@ -0,0 +1,184 @@ +package model + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/dgraph-io/badger/v4" + + "github.com/cometbft/cometbft/abci/types" +) + +type DB struct { + db *badger.DB +} + +func (db *DB) Init(database *badger.DB) { + db.db = database +} + +func (db *DB) Commit() error { + return db.db.Update(func(txn *badger.Txn) error { + return txn.Commit() + }) +} + +func NewDB(dbPath string) (*DB, error) { + // Open badger DB + opts := badger.DefaultOptions(dbPath) + db, err := badger.Open(opts) + if err != nil { + return nil, err + } + + // Create a new DB instance and initialize with badger DB + dbInstance := &DB{} + dbInstance.Init(db) + + return dbInstance, nil +} + +func (db *DB) GetDB() *badger.DB { + return db.db +} + +func (db *DB) Size() int64 { + lsm, vlog := db.GetDB().Size() + return lsm + vlog +} + +func (db *DB) CreateUser(user *User) error { + // Check if the user already exists + err := db.db.View(func(txn *badger.Txn) error { + _, err := txn.Get([]byte(user.Name)) + return err + }) + if err == nil { + return errors.New("user already exists") + } + + // Save the user to the database + err = db.db.Update(func(txn *badger.Txn) error { + userBytes, err := json.Marshal(user) + if err != nil { + return fmt.Errorf("failed to marshal user to JSON: %w", err) + } + err = txn.Set([]byte(user.Name), userBytes) + if err != nil { + return err + } + return nil + }) + return err +} + +func (db *DB) FindUserByName(name string) (*User, error) { + // Read the user from the database + var user *User + err := db.db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(name)) + if err != nil { + return err + } + err = item.Value(func(val []byte) error { + return json.Unmarshal(val, &user) + }) + return err + }) + if err != nil { + return nil, fmt.Errorf("error in retrieving user: %w", err) + } + return user, nil +} + +func (db *DB) UpdateOrSetUser(uname string, toBan bool, txn *badger.Txn) error { + user, err := db.FindUserByName(uname) + // If user is not in the db, then add it + if errors.Is(err, badger.ErrKeyNotFound) { + u := new(User) + u.Name = uname + u.Banned = toBan + user = u + } else { + if err != nil { + return errors.New("not able to process user") + } + user.Banned = toBan + } + userBytes, err := json.Marshal(user) + if err != nil { + return fmt.Errorf("error marshaling user: %w", err) + } + return txn.Set([]byte(user.Name), userBytes) +} + +func (db *DB) Set(key, value []byte) error { + return db.db.Update(func(txn *badger.Txn) error { + return txn.Set(key, value) + }) +} + +func ViewDB(db *badger.DB, key []byte) ([]byte, error) { + var value []byte + err := db.View(func(txn *badger.Txn) error { + item, err := txn.Get(key) + if err != nil { + if !errors.Is(err, badger.ErrKeyNotFound) { + return err + } + return nil + } + value, err = item.ValueCopy(nil) + return err + }) + if err != nil { + return nil, err + } + return value, nil +} + +func (db *DB) Close() error { + return db.db.Close() +} + +func (db *DB) Get(key []byte) ([]byte, error) { + return ViewDB(db.db, key) +} + +func (db *DB) GetValidators() (validators []types.ValidatorUpdate, err error) { + err = db.db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchSize = 10 + it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + var err error + item := it.Item() + k := item.Key() + if isValidatorTx(k) { + 
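+				// Validator records are stored under keys of the form
+				// "val" + <pubkey bytes> (see updateValidator in the abci
+				// package), so the value is a protobuf-encoded ValidatorUpdate.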
err := item.Value(func(v []byte) error { + validator := new(types.ValidatorUpdate) + err = types.ReadMessage(bytes.NewBuffer(v), validator) + if err == nil { + validators = append(validators, *validator) + } + return err + }) + if err != nil { + return err + } + } + } + return nil + }) + if err != nil { + return nil, err + } + return validators, nil +} + +func isValidatorTx(tx []byte) bool { + return bytes.HasPrefix(tx, []byte("val")) +} diff --git a/abci/tutorials/abci-v2-forum-app/model/messages.go b/abci/tutorials/abci-v2-forum-app/model/messages.go new file mode 100644 index 00000000000..44cc4e08705 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/model/messages.go @@ -0,0 +1,119 @@ +package model + +import ( + "errors" + "fmt" + "strings" + + "github.com/dgraph-io/badger/v4" +) + +type BanTx struct { + UserName string `json:"username"` +} + +// Message represents a message sent by a user. +type Message struct { + Sender string `json:"sender"` + Message string `json:"message"` +} + +type MsgHistory struct { + Msg string `json:"history"` +} + +func AppendToChat(db *DB, message Message) (string, error) { + historyBytes, err := db.Get([]byte("history")) + if err != nil { + return "", fmt.Errorf("error fetching history: %w", err) + } + msgBytes := string(historyBytes) + msgBytes = msgBytes + "{sender:" + message.Sender + ",message:" + message.Message + "}" + return msgBytes, nil +} + +func FetchHistory(db *DB) (string, error) { + historyBytes, err := db.Get([]byte("history")) + if err != nil { + return "", fmt.Errorf("error fetching history: %w", err) + } + msgHistory := string(historyBytes) + return msgHistory, nil +} + +func AppendToExistingMessages(db *DB, message Message) (string, error) { + existingMessages, err := GetMessagesBySender(db, message.Sender) + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + return "", err + } + if errors.Is(err, badger.ErrKeyNotFound) { + return message.Message, nil + } + return existingMessages + ";" + message.Message, nil +} + +// GetMessagesBySender retrieves all messages sent by a specific sender +// Get Message using String. +func GetMessagesBySender(db *DB, sender string) (string, error) { + var messages string + err := db.db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(sender + "msg")) + if err != nil { + return err + } + value, err := item.ValueCopy(nil) + if err != nil { + return err + } + messages = string(value) + return nil + }) + if err != nil { + return "", err + } + return messages, nil +} + +// ParseMessage parse messages. 
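+// A transaction must contain exactly a sender and a message, encoded as
+// "sender:<name>,message:<text>". For example:
+//
+//	msg, err := ParseMessage([]byte("sender:alice,message:hello"))
+//	// err == nil, msg.Sender == "alice", msg.Message == "hello"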
+func ParseMessage(tx []byte) (*Message, error) { + msg := &Message{} + + // Parse the message into key-value pairs + pairs := strings.Split(string(tx), ",") + + if len(pairs) != 2 { + return nil, errors.New("invalid number of key-value pairs in message") + } + + for _, pair := range pairs { + kv := strings.Split(pair, ":") + + if len(kv) != 2 { + return nil, fmt.Errorf("invalid key-value pair in message: %s", pair) + } + + key := kv[0] + value := kv[1] + + switch strings.ToLower(key) { + case "sender": + msg.Sender = value + case "message": + msg.Message = value + case "history": + return nil, fmt.Errorf("reserved key name: %s", key) + default: + return nil, fmt.Errorf("unknown key in message: %s", key) + } + } + + // Check if the message contains a sender and message + if msg.Sender == "" { + return nil, errors.New("message is missing sender") + } + if msg.Message == "" { + return nil, errors.New("message is missing message") + } + + return msg, nil +} diff --git a/abci/tutorials/abci-v2-forum-app/model/user.go b/abci/tutorials/abci-v2-forum-app/model/user.go new file mode 100644 index 00000000000..29239274e03 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/model/user.go @@ -0,0 +1,10 @@ +package model + +type User struct { + Name string `json:"name"` + Moderator bool `json:"moderator"` + Banned bool `json:"banned"` + NumMessages int64 `json:"numMessages"` + Version uint64 `json:"version"` + SchemaVersion int `json:"schemaVersion"` +} diff --git a/abci/tutorials/abci-v2-forum-app/test/create_user_test.go b/abci/tutorials/abci-v2-forum-app/test/create_user_test.go new file mode 100644 index 00000000000..5b371d814d7 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/test/create_user_test.go @@ -0,0 +1,69 @@ +package test + +import ( + "encoding/json" + "testing" + + "github.com/dgraph-io/badger/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model" +) + +func TestCreateUser(t *testing.T) { + // Open a temporary database for testing + opts := badger.DefaultOptions("").WithInMemory(true) + db, err := badger.Open(opts) + require.NoError(t, err) + defer db.Close() + + // Create a new DB instance for testing + testDB := &model.DB{} + testDB.Init(db) + + // Create a new user + user := &model.User{ + Name: "testuser", + Moderator: false, + Banned: false, + } + + err = testDB.CreateUser(user) + require.NoError(t, err) + + // Check that the user was saved to the database + err = testDB.GetDB().View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(user.Name)) + if err != nil { + return err + } + var userBytes []byte + err = item.Value(func(val []byte) error { + userBytes = append(userBytes, val...) 
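+			// val is only valid while this callback runs, hence the copy above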
+ return nil + }) + if err != nil { + return err + } + var savedUser model.User + err = json.Unmarshal(userBytes, &savedUser) + if err != nil { + return err + } + assert.Equal(t, user, &savedUser) + return nil + }) + require.NoError(t, err) + + // Try to create the user again + err = testDB.CreateUser(user) + assert.Error(t, err) + + // Find user by Name + user2, err := testDB.FindUserByName(user.Name) + if err != nil { + t.Fatal(err) + } + println("User retrieved is", user2) +} diff --git a/abci/tutorials/abci-v2-forum-app/test/find_user_test.go b/abci/tutorials/abci-v2-forum-app/test/find_user_test.go new file mode 100644 index 00000000000..d28a1ea6e47 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/test/find_user_test.go @@ -0,0 +1,55 @@ +package test + +import ( + "testing" + + "github.com/dgraph-io/badger/v4" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model" +) + +func TestFindUserByName(t *testing.T) { + // Initialize the database + opts := badger.DefaultOptions("").WithInMemory(true) + db, err := badger.Open(opts) + require.NoError(t, err) + defer db.Close() + if err != nil { + t.Fatalf("Failed to create database: %v", err) + } + defer db.Close() + + // Create a new DB instance for testing + testDB := &model.DB{} + testDB.Init(db) + + // Create some test users + println("User being created") + users := []*model.User{ + {Name: "user1", Moderator: false, Banned: false}, + {Name: "user2", Moderator: false, Banned: false}, + {Name: "user3", Moderator: false, Banned: false}, + } + println("User is defined") + for _, user := range users { + err := testDB.CreateUser(user) + if err != nil { + t.Fatalf("Failed to create user: %v", err) + } + } + + // Verify that the correct user was returned + println("Trying to find user") + foundUser1, err1 := testDB.FindUserByName("user2") + if err1 != nil { + t.Fatalf("Failed to find user by name: %v", err1) + } + + if foundUser1 != nil { + require.Equal(t, foundUser1.Name, "user2", "Expected user2, but got %s", foundUser1.Name) + println("Voila! 
User found") + } else { + t.Fatalf("User not found") + } +} diff --git a/abci/tutorials/abci-v2-forum-app/test/message_test.go b/abci/tutorials/abci-v2-forum-app/test/message_test.go new file mode 100644 index 00000000000..d02fe0aacf7 --- /dev/null +++ b/abci/tutorials/abci-v2-forum-app/test/message_test.go @@ -0,0 +1,52 @@ +package test + +import ( + "reflect" + "testing" + + "github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model" +) + +func TestParseMessage(t *testing.T) { + // Test valid message + tx := []byte("sender:alice,message:hello") + expected := &model.Message{ + Sender: "alice", + Message: "hello", + } + msg, err := model.ParseMessage(tx) + if err != nil { + t.Errorf("ParseMessage returned error: %v", err) + } + if !reflect.DeepEqual(msg, expected) { + t.Errorf("ParseMessage returned incorrect result, got: %v, want: %v", msg, expected) + } + + // Test message with missing sender + tx = []byte("message:hello") + _, err = model.ParseMessage(tx) + if err == nil { + t.Errorf("ParseMessage did not return error for message with missing sender") + } + + // Test message with missing message + tx = []byte("sender:alice") + _, err = model.ParseMessage(tx) + if err == nil { + t.Errorf("ParseMessage did not return error for message with missing message") + } + + // Test message with invalid key-value pair + tx = []byte("sender:alice,invalid_key:hello") + _, err = model.ParseMessage(tx) + if err == nil { + t.Errorf("ParseMessage did not return error for message with invalid key-value pair") + } + + // Test message with invalid number of key-value pairs + tx = []byte("sender:alice") + _, err = model.ParseMessage(tx) + if err == nil { + t.Errorf("ParseMessage did not return error for message with invalid number of key-value pairs") + } +} diff --git a/abci/types/application.go b/abci/types/application.go index 4ccfd229ebc..bd8394f649e 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -1,6 +1,8 @@ package types -import "context" +import ( + "context" +) //go:generate ../../scripts/mockery_generate.sh Application @@ -8,33 +10,37 @@ import "context" // to be driven by a blockchain-based replication engine via the ABCI. 
type Application interface { // Info/Query Connection - Info(context.Context, *RequestInfo) (*ResponseInfo, error) // Return application info - Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state + + Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) // Return application info + Query(ctx context.Context, req *QueryRequest) (*QueryResponse, error) // Query for state // Mempool Connection - CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool + + CheckTx(ctx context.Context, req *CheckTxRequest) (*CheckTxResponse, error) // Validate a tx for the mempool // Consensus Connection - InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain w validators/other info from CometBFT - PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) - ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) - // Deliver the decided block with its txs to the Application - FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) - // Create application specific vote extension - ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) - // Verify application's vote extension data - VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) + + InitChain(ctx context.Context, req *InitChainRequest) (*InitChainResponse, error) // Initialize blockchain w validators/other info from CometBFT + PrepareProposal(ctx context.Context, req *PrepareProposalRequest) (*PrepareProposalResponse, error) + ProcessProposal(ctx context.Context, req *ProcessProposalRequest) (*ProcessProposalResponse, error) + // FinalizeBlock delivers the decided block with its txs to the Application + FinalizeBlock(ctx context.Context, req *FinalizeBlockRequest) (*FinalizeBlockResponse, error) + // ExtendVote extends the vote with application specific data + ExtendVote(ctx context.Context, req *ExtendVoteRequest) (*ExtendVoteResponse, error) + // VerifyVoteExtension verifies the application's vote extension data for correctness. 
+ VerifyVoteExtension(ctx context.Context, req *VerifyVoteExtensionRequest) (*VerifyVoteExtensionResponse, error) // Commit the state and return the application Merkle root hash - Commit(context.Context, *RequestCommit) (*ResponseCommit, error) + Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) // State Sync Connection - ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) // List available snapshots - OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) // Offer a snapshot to the application - LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) // Load a snapshot chunk - ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a shapshot chunk + + ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) // List available snapshots + OfferSnapshot(ctx context.Context, req *OfferSnapshotRequest) (*OfferSnapshotResponse, error) // Offer a snapshot to the application + LoadSnapshotChunk(ctx context.Context, req *LoadSnapshotChunkRequest) (*LoadSnapshotChunkResponse, error) // Load a snapshot chunk + ApplySnapshotChunk(ctx context.Context, req *ApplySnapshotChunkRequest) (*ApplySnapshotChunkResponse, error) // Apply a snapshot chunk } -//------------------------------------------------------- +// ------------------------------------------------------- // BaseApplication is a base form of Application var _ Application = (*BaseApplication)(nil) @@ -45,43 +51,43 @@ func NewBaseApplication() *BaseApplication { return &BaseApplication{} } -func (BaseApplication) Info(context.Context, *RequestInfo) (*ResponseInfo, error) { - return &ResponseInfo{}, nil +func (BaseApplication) Info(context.Context, *InfoRequest) (*InfoResponse, error) { + return &InfoResponse{}, nil } -func (BaseApplication) CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) { - return &ResponseCheckTx{Code: CodeTypeOK}, nil +func (BaseApplication) CheckTx(context.Context, *CheckTxRequest) (*CheckTxResponse, error) { + return &CheckTxResponse{Code: CodeTypeOK}, nil } -func (BaseApplication) Commit(context.Context, *RequestCommit) (*ResponseCommit, error) { - return &ResponseCommit{}, nil +func (BaseApplication) Commit(context.Context, *CommitRequest) (*CommitResponse, error) { + return &CommitResponse{}, nil } -func (BaseApplication) Query(context.Context, *RequestQuery) (*ResponseQuery, error) { - return &ResponseQuery{Code: CodeTypeOK}, nil +func (BaseApplication) Query(context.Context, *QueryRequest) (*QueryResponse, error) { + return &QueryResponse{Code: CodeTypeOK}, nil } -func (BaseApplication) InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) { - return &ResponseInitChain{}, nil +func (BaseApplication) InitChain(context.Context, *InitChainRequest) (*InitChainResponse, error) { + return &InitChainResponse{}, nil } -func (BaseApplication) ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) { - return &ResponseListSnapshots{}, nil +func (BaseApplication) ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return &ListSnapshotsResponse{}, nil } -func (BaseApplication) OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { - return &ResponseOfferSnapshot{}, nil +func (BaseApplication) OfferSnapshot(context.Context, *OfferSnapshotRequest) (*OfferSnapshotResponse, 
error) { + return &OfferSnapshotResponse{}, nil } -func (BaseApplication) LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { - return &ResponseLoadSnapshotChunk{}, nil +func (BaseApplication) LoadSnapshotChunk(context.Context, *LoadSnapshotChunkRequest) (*LoadSnapshotChunkResponse, error) { + return &LoadSnapshotChunkResponse{}, nil } -func (BaseApplication) ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { - return &ResponseApplySnapshotChunk{}, nil +func (BaseApplication) ApplySnapshotChunk(context.Context, *ApplySnapshotChunkRequest) (*ApplySnapshotChunkResponse, error) { + return &ApplySnapshotChunkResponse{}, nil } -func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { +func (BaseApplication) PrepareProposal(_ context.Context, req *PrepareProposalRequest) (*PrepareProposalResponse, error) { txs := make([][]byte, 0, len(req.Txs)) var totalBytes int64 for _, tx := range req.Txs { @@ -91,29 +97,29 @@ func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPreparePro } txs = append(txs, tx) } - return &ResponsePrepareProposal{Txs: txs}, nil + return &PrepareProposalResponse{Txs: txs}, nil } -func (BaseApplication) ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) { - return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil +func (BaseApplication) ProcessProposal(context.Context, *ProcessProposalRequest) (*ProcessProposalResponse, error) { + return &ProcessProposalResponse{Status: PROCESS_PROPOSAL_STATUS_ACCEPT}, nil } -func (BaseApplication) ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) { - return &ResponseExtendVote{}, nil +func (BaseApplication) ExtendVote(context.Context, *ExtendVoteRequest) (*ExtendVoteResponse, error) { + return &ExtendVoteResponse{}, nil } -func (BaseApplication) VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { - return &ResponseVerifyVoteExtension{ - Status: ResponseVerifyVoteExtension_ACCEPT, +func (BaseApplication) VerifyVoteExtension(context.Context, *VerifyVoteExtensionRequest) (*VerifyVoteExtensionResponse, error) { + return &VerifyVoteExtensionResponse{ + Status: VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil } -func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { +func (BaseApplication) FinalizeBlock(_ context.Context, req *FinalizeBlockRequest) (*FinalizeBlockResponse, error) { txs := make([]*ExecTxResult, len(req.Txs)) for i := range req.Txs { txs[i] = &ExecTxResult{Code: CodeTypeOK} } - return &ResponseFinalizeBlock{ + return &FinalizeBlockResponse{ TxResults: txs, }, nil } diff --git a/abci/types/messages.go b/abci/types/messages.go index b081098d0bd..4c3a27d99a8 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -2,14 +2,16 @@ package types import ( "io" + "math" "github.com/cosmos/gogoproto/proto" + pb "github.com/cometbft/cometbft/api/cometbft/abci/v1" "github.com/cometbft/cometbft/libs/protoio" ) const ( - maxMsgSize = 104857600 // 100MB + maxMsgSize = math.MaxInt32 // 2GB ) // WriteMessage writes a varint length-delimited protobuf message. 
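+// A round trip through a buffer, as exercised in messages_test.go:
+//
+//	buf := new(bytes.Buffer)
+//	_ = WriteMessage(&EchoRequest{Message: "Hello"}, buf)
+//	msg := new(EchoRequest)
+//	_ = ReadMessage(buf, msg) // msg.Message == "Hello"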
@@ -25,204 +27,204 @@ func ReadMessage(r io.Reader, msg proto.Message) error { return err } -//---------------------------------------- +// ---------------------------------------- -func ToRequestEcho(message string) *Request { +func ToEchoRequest(message string) *Request { return &Request{ - Value: &Request_Echo{&RequestEcho{Message: message}}, + Value: &pb.Request_Echo{Echo: &EchoRequest{Message: message}}, } } -func ToRequestFlush() *Request { +func ToFlushRequest() *Request { return &Request{ - Value: &Request_Flush{&RequestFlush{}}, + Value: &pb.Request_Flush{Flush: &FlushRequest{}}, } } -func ToRequestInfo(req *RequestInfo) *Request { +func ToInfoRequest(req *InfoRequest) *Request { return &Request{ - Value: &Request_Info{req}, + Value: &pb.Request_Info{Info: req}, } } -func ToRequestCheckTx(req *RequestCheckTx) *Request { +func ToCheckTxRequest(req *CheckTxRequest) *Request { return &Request{ - Value: &Request_CheckTx{req}, + Value: &pb.Request_CheckTx{CheckTx: req}, } } -func ToRequestCommit() *Request { +func ToCommitRequest() *Request { return &Request{ - Value: &Request_Commit{&RequestCommit{}}, + Value: &pb.Request_Commit{Commit: &CommitRequest{}}, } } -func ToRequestQuery(req *RequestQuery) *Request { +func ToQueryRequest(req *QueryRequest) *Request { return &Request{ - Value: &Request_Query{req}, + Value: &pb.Request_Query{Query: req}, } } -func ToRequestInitChain(req *RequestInitChain) *Request { +func ToInitChainRequest(req *InitChainRequest) *Request { return &Request{ - Value: &Request_InitChain{req}, + Value: &pb.Request_InitChain{InitChain: req}, } } -func ToRequestListSnapshots(req *RequestListSnapshots) *Request { +func ToListSnapshotsRequest(req *ListSnapshotsRequest) *Request { return &Request{ - Value: &Request_ListSnapshots{req}, + Value: &pb.Request_ListSnapshots{ListSnapshots: req}, } } -func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request { +func ToOfferSnapshotRequest(req *OfferSnapshotRequest) *Request { return &Request{ - Value: &Request_OfferSnapshot{req}, + Value: &pb.Request_OfferSnapshot{OfferSnapshot: req}, } } -func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request { +func ToLoadSnapshotChunkRequest(req *LoadSnapshotChunkRequest) *Request { return &Request{ - Value: &Request_LoadSnapshotChunk{req}, + Value: &pb.Request_LoadSnapshotChunk{LoadSnapshotChunk: req}, } } -func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request { +func ToApplySnapshotChunkRequest(req *ApplySnapshotChunkRequest) *Request { return &Request{ - Value: &Request_ApplySnapshotChunk{req}, + Value: &pb.Request_ApplySnapshotChunk{ApplySnapshotChunk: req}, } } -func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request { +func ToPrepareProposalRequest(req *PrepareProposalRequest) *Request { return &Request{ - Value: &Request_PrepareProposal{req}, + Value: &pb.Request_PrepareProposal{PrepareProposal: req}, } } -func ToRequestProcessProposal(req *RequestProcessProposal) *Request { +func ToProcessProposalRequest(req *ProcessProposalRequest) *Request { return &Request{ - Value: &Request_ProcessProposal{req}, + Value: &pb.Request_ProcessProposal{ProcessProposal: req}, } } -func ToRequestExtendVote(req *RequestExtendVote) *Request { +func ToExtendVoteRequest(req *ExtendVoteRequest) *Request { return &Request{ - Value: &Request_ExtendVote{req}, + Value: &pb.Request_ExtendVote{ExtendVote: req}, } } -func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request { +func ToVerifyVoteExtensionRequest(req 
*VerifyVoteExtensionRequest) *Request { return &Request{ - Value: &Request_VerifyVoteExtension{req}, + Value: &pb.Request_VerifyVoteExtension{VerifyVoteExtension: req}, } } -func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request { +func ToFinalizeBlockRequest(req *FinalizeBlockRequest) *Request { return &Request{ - Value: &Request_FinalizeBlock{req}, + Value: &pb.Request_FinalizeBlock{FinalizeBlock: req}, } } -//---------------------------------------- +// ---------------------------------------- -func ToResponseException(errStr string) *Response { +func ToExceptionResponse(errStr string) *Response { return &Response{ - Value: &Response_Exception{&ResponseException{Error: errStr}}, + Value: &pb.Response_Exception{Exception: &ExceptionResponse{Error: errStr}}, } } -func ToResponseEcho(message string) *Response { +func ToEchoResponse(message string) *Response { return &Response{ - Value: &Response_Echo{&ResponseEcho{Message: message}}, + Value: &pb.Response_Echo{Echo: &EchoResponse{Message: message}}, } } -func ToResponseFlush() *Response { +func ToFlushResponse() *Response { return &Response{ - Value: &Response_Flush{&ResponseFlush{}}, + Value: &pb.Response_Flush{Flush: &FlushResponse{}}, } } -func ToResponseInfo(res *ResponseInfo) *Response { +func ToInfoResponse(res *InfoResponse) *Response { return &Response{ - Value: &Response_Info{res}, + Value: &pb.Response_Info{Info: res}, } } -func ToResponseCheckTx(res *ResponseCheckTx) *Response { +func ToCheckTxResponse(res *CheckTxResponse) *Response { return &Response{ - Value: &Response_CheckTx{res}, + Value: &pb.Response_CheckTx{CheckTx: res}, } } -func ToResponseCommit(res *ResponseCommit) *Response { +func ToCommitResponse(res *CommitResponse) *Response { return &Response{ - Value: &Response_Commit{res}, + Value: &pb.Response_Commit{Commit: res}, } } -func ToResponseQuery(res *ResponseQuery) *Response { +func ToQueryResponse(res *QueryResponse) *Response { return &Response{ - Value: &Response_Query{res}, + Value: &pb.Response_Query{Query: res}, } } -func ToResponseInitChain(res *ResponseInitChain) *Response { +func ToInitChainResponse(res *InitChainResponse) *Response { return &Response{ - Value: &Response_InitChain{res}, + Value: &pb.Response_InitChain{InitChain: res}, } } -func ToResponseListSnapshots(res *ResponseListSnapshots) *Response { +func ToListSnapshotsResponse(res *ListSnapshotsResponse) *Response { return &Response{ - Value: &Response_ListSnapshots{res}, + Value: &pb.Response_ListSnapshots{ListSnapshots: res}, } } -func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response { +func ToOfferSnapshotResponse(res *OfferSnapshotResponse) *Response { return &Response{ - Value: &Response_OfferSnapshot{res}, + Value: &pb.Response_OfferSnapshot{OfferSnapshot: res}, } } -func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response { +func ToLoadSnapshotChunkResponse(res *LoadSnapshotChunkResponse) *Response { return &Response{ - Value: &Response_LoadSnapshotChunk{res}, + Value: &pb.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}, } } -func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response { +func ToApplySnapshotChunkResponse(res *ApplySnapshotChunkResponse) *Response { return &Response{ - Value: &Response_ApplySnapshotChunk{res}, + Value: &pb.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}, } } -func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response { +func ToPrepareProposalResponse(res *PrepareProposalResponse) *Response { return &Response{ - Value: 
&Response_PrepareProposal{res}, + Value: &pb.Response_PrepareProposal{PrepareProposal: res}, } } -func ToResponseProcessProposal(res *ResponseProcessProposal) *Response { +func ToProcessProposalResponse(res *ProcessProposalResponse) *Response { return &Response{ - Value: &Response_ProcessProposal{res}, + Value: &pb.Response_ProcessProposal{ProcessProposal: res}, } } -func ToResponseExtendVote(res *ResponseExtendVote) *Response { +func ToExtendVoteResponse(res *ExtendVoteResponse) *Response { return &Response{ - Value: &Response_ExtendVote{res}, + Value: &pb.Response_ExtendVote{ExtendVote: res}, } } -func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response { +func ToVerifyVoteExtensionResponse(res *VerifyVoteExtensionResponse) *Response { return &Response{ - Value: &Response_VerifyVoteExtension{res}, + Value: &pb.Response_VerifyVoteExtension{VerifyVoteExtension: res}, } } -func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response { +func ToFinalizeBlockResponse(res *FinalizeBlockResponse) *Response { return &Response{ - Value: &Response_FinalizeBlock{res}, + Value: &pb.Response_FinalizeBlock{FinalizeBlock: res}, } } diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 624096cbec8..e70ebc7c8cc 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -8,16 +8,17 @@ import ( "github.com/cosmos/gogoproto/proto" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" ) func TestMarshalJSON(t *testing.T) { b, err := json.Marshal(&ExecTxResult{Code: 1}) - assert.NoError(t, err) + require.NoError(t, err) // include empty fields. assert.True(t, strings.Contains(string(b), "code")) - r1 := ResponseCheckTx{ + r1 := CheckTxResponse{ Code: 1, Data: []byte("hello"), GasWanted: 43, @@ -31,17 +32,17 @@ func TestMarshalJSON(t *testing.T) { }, } b, err = json.Marshal(&r1) - assert.Nil(t, err) + require.NoError(t, err) - var r2 ResponseCheckTx + var r2 CheckTxResponse err = json.Unmarshal(b, &r2) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, r1, r2) } func TestWriteReadMessageSimple(t *testing.T) { cases := []proto.Message{ - &RequestEcho{ + &EchoRequest{ Message: "Hello", }, } @@ -49,11 +50,11 @@ func TestWriteReadMessageSimple(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + require.NoError(t, err) - msg := new(RequestEcho) + msg := new(EchoRequest) err = ReadMessage(buf, msg) - assert.Nil(t, err) + require.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } @@ -71,11 +72,11 @@ func TestWriteReadMessage(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + require.NoError(t, err) msg := new(cmtproto.Header) err = ReadMessage(buf, msg) - assert.Nil(t, err) + require.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } @@ -84,7 +85,7 @@ func TestWriteReadMessage(t *testing.T) { func TestWriteReadMessage2(t *testing.T) { phrase := "hello-world" cases := []proto.Message{ - &ResponseCheckTx{ + &CheckTxResponse{ Data: []byte(phrase), Log: phrase, GasWanted: 10, @@ -103,11 +104,11 @@ func TestWriteReadMessage2(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + require.NoError(t, err) - msg := new(ResponseCheckTx) + msg := new(CheckTxResponse) err = ReadMessage(buf, msg) - 
assert.Nil(t, err) + require.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go index 86b30d2404c..aca7044e016 100644 --- a/abci/types/mocks/application.go +++ b/abci/types/mocks/application.go @@ -14,25 +14,29 @@ type Application struct { mock.Mock } -// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 -func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - ret := _m.Called(_a0, _a1) +// ApplySnapshotChunk provides a mock function with given fields: ctx, req +func (_m *Application) ApplySnapshotChunk(ctx context.Context, req *types.ApplySnapshotChunkRequest) (*types.ApplySnapshotChunkResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseApplySnapshotChunk + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + + var r0 *types.ApplySnapshotChunkResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ApplySnapshotChunkRequest) (*types.ApplySnapshotChunkResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ApplySnapshotChunkRequest) *types.ApplySnapshotChunkResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + r0 = ret.Get(0).(*types.ApplySnapshotChunkResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.ApplySnapshotChunkRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -40,25 +44,29 @@ func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.Reques return r0, r1 } -// CheckTx provides a mock function with given fields: _a0, _a1 -func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { - ret := _m.Called(_a0, _a1) +// CheckTx provides a mock function with given fields: ctx, req +func (_m *Application) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } - var r0 *types.ResponseCheckTx + var r0 *types.CheckTxResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.CheckTxRequest) (*types.CheckTxResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.CheckTxRequest) *types.CheckTxResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCheckTx) + r0 = ret.Get(0).(*types.CheckTxResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.CheckTxRequest) error); ok { + r1 = rf(ctx, 
req) } else { r1 = ret.Error(1) } @@ -66,25 +74,29 @@ func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) ( return r0, r1 } -// Commit provides a mock function with given fields: _a0, _a1 -func (_m *Application) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) { - ret := _m.Called(_a0, _a1) +// Commit provides a mock function with given fields: ctx, req +func (_m *Application) Commit(ctx context.Context, req *types.CommitRequest) (*types.CommitResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseCommit + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 *types.CommitResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) (*types.ResponseCommit, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.CommitRequest) (*types.CommitResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) *types.ResponseCommit); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.CommitRequest) *types.CommitResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCommit) + r0 = ret.Get(0).(*types.CommitResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCommit) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.CommitRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -92,25 +104,29 @@ func (_m *Application) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*t return r0, r1 } -// ExtendVote provides a mock function with given fields: _a0, _a1 -func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { - ret := _m.Called(_a0, _a1) +// ExtendVote provides a mock function with given fields: ctx, req +func (_m *Application) ExtendVote(ctx context.Context, req *types.ExtendVoteRequest) (*types.ExtendVoteResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } - var r0 *types.ResponseExtendVote + var r0 *types.ExtendVoteResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ExtendVoteRequest) (*types.ExtendVoteResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ExtendVoteRequest) *types.ExtendVoteResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseExtendVote) + r0 = ret.Get(0).(*types.ExtendVoteResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.ExtendVoteRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -118,25 +134,29 @@ func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendV return r0, r1 } -// FinalizeBlock provides a mock function with given fields: _a0, _a1 -func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { - ret := _m.Called(_a0, _a1) +// 
FinalizeBlock provides a mock function with given fields: ctx, req +func (_m *Application) FinalizeBlock(ctx context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } - var r0 *types.ResponseFinalizeBlock + var r0 *types.FinalizeBlockResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.FinalizeBlockRequest) *types.FinalizeBlockResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseFinalizeBlock) + r0 = ret.Get(0).(*types.FinalizeBlockResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.FinalizeBlockRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -144,25 +164,29 @@ func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFina return r0, r1 } -// Info provides a mock function with given fields: _a0, _a1 -func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { - ret := _m.Called(_a0, _a1) +// Info provides a mock function with given fields: ctx, req +func (_m *Application) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseInfo + if len(ret) == 0 { + panic("no return value specified for Info") + } + + var r0 *types.InfoResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.InfoRequest) (*types.InfoResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.InfoRequest) *types.InfoResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInfo) + r0 = ret.Get(0).(*types.InfoResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.InfoRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -170,25 +194,29 @@ func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types return r0, r1 } -// InitChain provides a mock function with given fields: _a0, _a1 -func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { - ret := _m.Called(_a0, _a1) +// InitChain provides a mock function with given fields: ctx, req +func (_m *Application) InitChain(ctx context.Context, req *types.InitChainRequest) (*types.InitChainResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for InitChain") + } - var r0 *types.ResponseInitChain + var r0 *types.InitChainResponse var r1 error - if rf, 
ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.InitChainRequest) (*types.InitChainResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.InitChainRequest) *types.InitChainResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInitChain) + r0 = ret.Get(0).(*types.InitChainResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.InitChainRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -196,25 +224,29 @@ func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChai return r0, r1 } -// ListSnapshots provides a mock function with given fields: _a0, _a1 -func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - ret := _m.Called(_a0, _a1) +// ListSnapshots provides a mock function with given fields: ctx, req +func (_m *Application) ListSnapshots(ctx context.Context, req *types.ListSnapshotsRequest) (*types.ListSnapshotsResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseListSnapshots + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } + + var r0 *types.ListSnapshotsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ListSnapshotsRequest) (*types.ListSnapshotsResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ListSnapshotsRequest) *types.ListSnapshotsResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseListSnapshots) + r0 = ret.Get(0).(*types.ListSnapshotsResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.ListSnapshotsRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -222,25 +254,29 @@ func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestList return r0, r1 } -// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 -func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - ret := _m.Called(_a0, _a1) +// LoadSnapshotChunk provides a mock function with given fields: ctx, req +func (_m *Application) LoadSnapshotChunk(ctx context.Context, req *types.LoadSnapshotChunkRequest) (*types.LoadSnapshotChunkResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } - var r0 *types.ResponseLoadSnapshotChunk + var r0 *types.LoadSnapshotChunkResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { - return rf(_a0, _a1) + if rf, ok := 
ret.Get(0).(func(context.Context, *types.LoadSnapshotChunkRequest) (*types.LoadSnapshotChunkResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.LoadSnapshotChunkRequest) *types.LoadSnapshotChunkResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + r0 = ret.Get(0).(*types.LoadSnapshotChunkResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.LoadSnapshotChunkRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -248,25 +284,29 @@ func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.Request return r0, r1 } -// OfferSnapshot provides a mock function with given fields: _a0, _a1 -func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - ret := _m.Called(_a0, _a1) +// OfferSnapshot provides a mock function with given fields: ctx, req +func (_m *Application) OfferSnapshot(ctx context.Context, req *types.OfferSnapshotRequest) (*types.OfferSnapshotResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseOfferSnapshot + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } + + var r0 *types.OfferSnapshotResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.OfferSnapshotRequest) (*types.OfferSnapshotResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.OfferSnapshotRequest) *types.OfferSnapshotResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + r0 = ret.Get(0).(*types.OfferSnapshotResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.OfferSnapshotRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -274,25 +314,29 @@ func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOffe return r0, r1 } -// PrepareProposal provides a mock function with given fields: _a0, _a1 -func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - ret := _m.Called(_a0, _a1) +// PrepareProposal provides a mock function with given fields: ctx, req +func (_m *Application) PrepareProposal(ctx context.Context, req *types.PrepareProposalRequest) (*types.PrepareProposalResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } - var r0 *types.ResponsePrepareProposal + var r0 *types.PrepareProposalResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.PrepareProposalRequest) 
(*types.PrepareProposalResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.PrepareProposalRequest) *types.PrepareProposalResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponsePrepareProposal) + r0 = ret.Get(0).(*types.PrepareProposalResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.PrepareProposalRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -300,25 +344,29 @@ func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPr return r0, r1 } -// ProcessProposal provides a mock function with given fields: _a0, _a1 -func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - ret := _m.Called(_a0, _a1) +// ProcessProposal provides a mock function with given fields: ctx, req +func (_m *Application) ProcessProposal(ctx context.Context, req *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } - var r0 *types.ResponseProcessProposal + var r0 *types.ProcessProposalResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ProcessProposalRequest) (*types.ProcessProposalResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.ProcessProposalRequest) *types.ProcessProposalResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseProcessProposal) + r0 = ret.Get(0).(*types.ProcessProposalResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.ProcessProposalRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -326,25 +374,29 @@ func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestPr return r0, r1 } -// Query provides a mock function with given fields: _a0, _a1 -func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { - ret := _m.Called(_a0, _a1) +// Query provides a mock function with given fields: ctx, req +func (_m *Application) Query(ctx context.Context, req *types.QueryRequest) (*types.QueryResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseQuery + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 *types.QueryResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.QueryRequest) (*types.QueryResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok { - r0 = rf(_a0, _a1) + if rf, ok := 
ret.Get(0).(func(context.Context, *types.QueryRequest) *types.QueryResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseQuery) + r0 = ret.Get(0).(*types.QueryResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.QueryRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -352,25 +404,29 @@ func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*typ return r0, r1 } -// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 -func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { - ret := _m.Called(_a0, _a1) +// VerifyVoteExtension provides a mock function with given fields: ctx, req +func (_m *Application) VerifyVoteExtension(ctx context.Context, req *types.VerifyVoteExtensionRequest) (*types.VerifyVoteExtensionResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } - var r0 *types.ResponseVerifyVoteExtension + var r0 *types.VerifyVoteExtensionResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.VerifyVoteExtensionRequest) (*types.VerifyVoteExtensionResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *types.VerifyVoteExtensionRequest) *types.VerifyVoteExtensionResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension) + r0 = ret.Get(0).(*types.VerifyVoteExtensionResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *types.VerifyVoteExtensionRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go index 8341c0898d3..ad1718f4f6f 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -1,44 +1,15 @@ package types import ( - fmt "fmt" - - "github.com/cometbft/cometbft/crypto/ed25519" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" - "github.com/cometbft/cometbft/crypto/secp256k1" + "github.com/cometbft/cometbft/crypto" ) -func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { - pke := ed25519.PubKey(pk) - - pkp, err := cryptoenc.PubKeyToProto(pke) - if err != nil { - panic(err) - } - +// NewValidatorUpdate creates a new ValidatorUpdate from the given public +// key. 
+func NewValidatorUpdate(pubKey crypto.PubKey, power int64) ValidatorUpdate { return ValidatorUpdate{ - // Address: - PubKey: pkp, - Power: power, - } -} - -func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { - switch keyType { - case "", ed25519.KeyType: - return Ed25519ValidatorUpdate(pk, power) - case secp256k1.KeyType: - pke := secp256k1.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) - if err != nil { - panic(err) - } - return ValidatorUpdate{ - // Address: - PubKey: pkp, - Power: power, - } - default: - panic(fmt.Sprintf("key type %s not supported", keyType)) + Power: power, + PubKeyType: pubKey.Type(), + PubKeyBytes: pubKey.Bytes(), } } diff --git a/abci/types/types.go b/abci/types/types.go index 4e42241fb1b..4c3f15cf63d 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -1,144 +1,202 @@ +//nolint:stylecheck,revive package types import ( - "bytes" "encoding/json" - "github.com/cosmos/gogoproto/jsonpb" -) + "github.com/cosmos/gogoproto/grpc" -const ( - CodeTypeOK uint32 = 0 + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) -// IsOK returns true if Code is OK. -func (r ResponseCheckTx) IsOK() bool { - return r.Code == CodeTypeOK -} +type ( + Request = v1.Request + EchoRequest = v1.EchoRequest + FlushRequest = v1.FlushRequest + InfoRequest = v1.InfoRequest + InitChainRequest = v1.InitChainRequest + QueryRequest = v1.QueryRequest + CheckTxRequest = v1.CheckTxRequest + CommitRequest = v1.CommitRequest + ListSnapshotsRequest = v1.ListSnapshotsRequest + OfferSnapshotRequest = v1.OfferSnapshotRequest + LoadSnapshotChunkRequest = v1.LoadSnapshotChunkRequest + ApplySnapshotChunkRequest = v1.ApplySnapshotChunkRequest + PrepareProposalRequest = v1.PrepareProposalRequest + ProcessProposalRequest = v1.ProcessProposalRequest + ExtendVoteRequest = v1.ExtendVoteRequest + VerifyVoteExtensionRequest = v1.VerifyVoteExtensionRequest + FinalizeBlockRequest = v1.FinalizeBlockRequest +) -// IsErr returns true if Code is something other than OK. -func (r ResponseCheckTx) IsErr() bool { - return r.Code != CodeTypeOK -} +// Discriminated Request variants are defined in the latest proto package. +type ( + Request_Echo = v1.Request_Echo + Request_Flush = v1.Request_Flush + Request_Info = v1.Request_Info + Request_InitChain = v1.Request_InitChain + Request_Query = v1.Request_Query + Request_CheckTx = v1.Request_CheckTx + Request_Commit = v1.Request_Commit + Request_ListSnapshots = v1.Request_ListSnapshots + Request_OfferSnapshot = v1.Request_OfferSnapshot + Request_LoadSnapshotChunk = v1.Request_LoadSnapshotChunk + Request_ApplySnapshotChunk = v1.Request_ApplySnapshotChunk + Request_PrepareProposal = v1.Request_PrepareProposal + Request_ProcessProposal = v1.Request_ProcessProposal + Request_ExtendVote = v1.Request_ExtendVote + Request_VerifyVoteExtension = v1.Request_VerifyVoteExtension + Request_FinalizeBlock = v1.Request_FinalizeBlock +) -// IsOK returns true if Code is OK. 
-func (r ExecTxResult) IsOK() bool { - return r.Code == CodeTypeOK -} +type ( + Response = v1.Response + ExceptionResponse = v1.ExceptionResponse + EchoResponse = v1.EchoResponse + FlushResponse = v1.FlushResponse + InfoResponse = v1.InfoResponse + InitChainResponse = v1.InitChainResponse + QueryResponse = v1.QueryResponse + CheckTxResponse = v1.CheckTxResponse + CommitResponse = v1.CommitResponse + ListSnapshotsResponse = v1.ListSnapshotsResponse + OfferSnapshotResponse = v1.OfferSnapshotResponse + LoadSnapshotChunkResponse = v1.LoadSnapshotChunkResponse + ApplySnapshotChunkResponse = v1.ApplySnapshotChunkResponse + PrepareProposalResponse = v1.PrepareProposalResponse + ProcessProposalResponse = v1.ProcessProposalResponse + ExtendVoteResponse = v1.ExtendVoteResponse + VerifyVoteExtensionResponse = v1.VerifyVoteExtensionResponse + FinalizeBlockResponse = v1.FinalizeBlockResponse +) -// IsErr returns true if Code is something other than OK. -func (r ExecTxResult) IsErr() bool { - return r.Code != CodeTypeOK -} +// Discriminated Response variants are defined in the latest proto package. +type ( + Response_Exception = v1.Response_Exception + Response_Echo = v1.Response_Echo + Response_Flush = v1.Response_Flush + Response_Info = v1.Response_Info + Response_InitChain = v1.Response_InitChain + Response_Query = v1.Response_Query + Response_CheckTx = v1.Response_CheckTx + Response_Commit = v1.Response_Commit + Response_ListSnapshots = v1.Response_ListSnapshots + Response_OfferSnapshot = v1.Response_OfferSnapshot + Response_LoadSnapshotChunk = v1.Response_LoadSnapshotChunk + Response_ApplySnapshotChunk = v1.Response_ApplySnapshotChunk + Response_PrepareProposal = v1.Response_PrepareProposal + Response_ProcessProposal = v1.Response_ProcessProposal + Response_ExtendVote = v1.Response_ExtendVote + Response_VerifyVoteExtension = v1.Response_VerifyVoteExtension + Response_FinalizeBlock = v1.Response_FinalizeBlock +) -// IsOK returns true if Code is OK. -func (r ResponseQuery) IsOK() bool { - return r.Code == CodeTypeOK -} +type ( + CommitInfo = v1.CommitInfo + ExecTxResult = v1.ExecTxResult + ExtendedCommitInfo = v1.ExtendedCommitInfo + ExtendedVoteInfo = v1.ExtendedVoteInfo + Event = v1.Event + EventAttribute = v1.EventAttribute + Misbehavior = v1.Misbehavior + Snapshot = v1.Snapshot + TxResult = v1.TxResult + Validator = v1.Validator + ValidatorUpdate = v1.ValidatorUpdate + VoteInfo = v1.VoteInfo +) -// IsErr returns true if Code is something other than OK. 
-func (r ResponseQuery) IsErr() bool { - return r.Code != CodeTypeOK -} +type ( + ABCIServiceClient = v1.ABCIServiceClient + ABCIServiceServer = v1.ABCIServiceServer +) -// IsAccepted returns true if Code is ACCEPT -func (r ResponseProcessProposal) IsAccepted() bool { - return r.Status == ResponseProcessProposal_ACCEPT +func NewABCIClient(cc grpc.ClientConn) ABCIServiceClient { + return v1.NewABCIServiceClient(cc) } -// IsStatusUnknown returns true if Code is UNKNOWN -func (r ResponseProcessProposal) IsStatusUnknown() bool { - return r.Status == ResponseProcessProposal_UNKNOWN +func RegisterABCIServer(s grpc.Server, srv ABCIServiceServer) { + v1.RegisterABCIServiceServer(s, srv) } -func (r ResponseVerifyVoteExtension) IsAccepted() bool { - return r.Status == ResponseVerifyVoteExtension_ACCEPT -} +type CheckTxType = v1.CheckTxType -// IsStatusUnknown returns true if Code is Unknown -func (r ResponseVerifyVoteExtension) IsStatusUnknown() bool { - return r.Status == ResponseVerifyVoteExtension_UNKNOWN -} +const ( + CHECK_TX_TYPE_UNKNOWN CheckTxType = v1.CHECK_TX_TYPE_UNKNOWN + CHECK_TX_TYPE_CHECK CheckTxType = v1.CHECK_TX_TYPE_CHECK + CHECK_TX_TYPE_RECHECK CheckTxType = v1.CHECK_TX_TYPE_RECHECK +) -//--------------------------------------------------------------------------- -// override JSON marshaling so we emit defaults (ie. disable omitempty) +type MisbehaviorType = v1.MisbehaviorType -var ( - jsonpbMarshaller = jsonpb.Marshaler{ - EnumsAsInts: true, - EmitDefaults: true, - } - jsonpbUnmarshaller = jsonpb.Unmarshaler{} +const ( + MISBEHAVIOR_TYPE_UNKNOWN MisbehaviorType = v1.MISBEHAVIOR_TYPE_UNKNOWN + MISBEHAVIOR_TYPE_DUPLICATE_VOTE MisbehaviorType = v1.MISBEHAVIOR_TYPE_DUPLICATE_VOTE + MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK MisbehaviorType = v1.MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK ) -func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} +type ApplySnapshotChunkResult = v1.ApplySnapshotChunkResult -func (r *ExecTxResult) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} +const ( + APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN ApplySnapshotChunkResult = v1.APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN + APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT ApplySnapshotChunkResult = v1.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT + APPLY_SNAPSHOT_CHUNK_RESULT_ABORT ApplySnapshotChunkResult = v1.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT + APPLY_SNAPSHOT_CHUNK_RESULT_RETRY ApplySnapshotChunkResult = v1.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY + APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT ApplySnapshotChunkResult = v1.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT + APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT ApplySnapshotChunkResult = v1.APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT +) -func (r *ExecTxResult) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} +type OfferSnapshotResult = v1.OfferSnapshotResult -func (r *ResponseQuery) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} +const ( + OFFER_SNAPSHOT_RESULT_UNKNOWN OfferSnapshotResult = v1.OFFER_SNAPSHOT_RESULT_UNKNOWN + OFFER_SNAPSHOT_RESULT_ACCEPT OfferSnapshotResult = v1.OFFER_SNAPSHOT_RESULT_ACCEPT + OFFER_SNAPSHOT_RESULT_ABORT OfferSnapshotResult = v1.OFFER_SNAPSHOT_RESULT_ABORT + 
OFFER_SNAPSHOT_RESULT_REJECT OfferSnapshotResult = v1.OFFER_SNAPSHOT_RESULT_REJECT + OFFER_SNAPSHOT_RESULT_REJECT_FORMAT OfferSnapshotResult = v1.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT + OFFER_SNAPSHOT_RESULT_REJECT_SENDER OfferSnapshotResult = v1.OFFER_SNAPSHOT_RESULT_REJECT_SENDER +) -func (r *ResponseQuery) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} +type ProcessProposalStatus = v1.ProcessProposalStatus -func (r *ResponseCommit) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} +const ( + PROCESS_PROPOSAL_STATUS_UNKNOWN ProcessProposalStatus = v1.PROCESS_PROPOSAL_STATUS_UNKNOWN + PROCESS_PROPOSAL_STATUS_ACCEPT ProcessProposalStatus = v1.PROCESS_PROPOSAL_STATUS_ACCEPT + PROCESS_PROPOSAL_STATUS_REJECT ProcessProposalStatus = v1.PROCESS_PROPOSAL_STATUS_REJECT +) -func (r *ResponseCommit) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} +type VerifyVoteExtensionStatus = v1.VerifyVoteExtensionStatus -func (r *EventAttribute) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} +const ( + VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN VerifyVoteExtensionStatus = v1.VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN + VERIFY_VOTE_EXTENSION_STATUS_ACCEPT VerifyVoteExtensionStatus = v1.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT + VERIFY_VOTE_EXTENSION_STATUS_REJECT VerifyVoteExtensionStatus = v1.VERIFY_VOTE_EXTENSION_STATUS_REJECT +) -func (r *EventAttribute) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} +const ( + CodeTypeOK uint32 = 0 +) // Some compile time assertions to ensure we don't // have accidental runtime surprises later on. -// jsonEncodingRoundTripper ensures that asserted -// interfaces implement both MarshalJSON and UnmarshalJSON +// jsonRoundTripper ensures that asserted +// interfaces implement both MarshalJSON and UnmarshalJSON. type jsonRoundTripper interface { json.Marshaler json.Unmarshaler } -var _ jsonRoundTripper = (*ResponseCommit)(nil) -var _ jsonRoundTripper = (*ResponseQuery)(nil) -var _ jsonRoundTripper = (*ExecTxResult)(nil) -var _ jsonRoundTripper = (*ResponseCheckTx)(nil) +var ( + _ jsonRoundTripper = (*CommitResponse)(nil) + _ jsonRoundTripper = (*QueryResponse)(nil) + _ jsonRoundTripper = (*ExecTxResult)(nil) + _ jsonRoundTripper = (*CheckTxResponse)(nil) +) var _ jsonRoundTripper = (*EventAttribute)(nil) -// constructs a copy of response that omits +// DeterministicExecTxResult constructs a copy of the ExecTxResult response that omits // non-deterministic fields. The input response is not modified. func DeterministicExecTxResult(response *ExecTxResult) *ExecTxResult { return &ExecTxResult{ @@ -149,7 +207,7 @@ func DeterministicExecTxResult(response *ExecTxResult) *ExecTxResult { } } -// MarshalTxResults encodes the the TxResults as a list of byte +// MarshalTxResults encodes the TxResults as a list of byte // slices. It strips off the non-deterministic pieces of the TxResults // so that the resulting data can be used for hash comparisons and used // in Merkle proofs. 
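Taken together, the hunks above turn `abci/types` into a thin alias layer over the new `api/cometbft/abci/v1` package: message names flip from `RequestFoo`/`ResponseFoo` to `FooRequest`/`FooResponse`, enum values become flat `SCREAMING_SNAKE` constants, and the per-key-type constructors (`Ed25519ValidatorUpdate`, `UpdateValidator`) collapse into the single `NewValidatorUpdate`. A minimal sketch of how calling code might migrate; the `ed25519` key generation mirrors the imports removed from `pubkey.go`, and the `CheckTxRequest` field names are an assumption based on the aliases above rather than lines from this diff:

package main

import (
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"
	"github.com/cometbft/cometbft/crypto/ed25519"
)

func main() {
	// Before: abci.Ed25519ValidatorUpdate(pk.Bytes(), 10), or
	// abci.UpdateValidator(pk.Bytes(), 10, "ed25519"), which switched on a
	// string key type and panicked on unsupported ones.
	// After: one constructor for any crypto.PubKey; the key type and raw
	// bytes are stored directly on the ValidatorUpdate.
	pk := ed25519.GenPrivKey().PubKey()
	vu := abci.NewValidatorUpdate(pk, 10)
	fmt.Println(vu.PubKeyType, vu.Power)

	// Request/response structs are renamed, not reshaped: a literal such as
	// &abci.RequestCheckTx{...} becomes &abci.CheckTxRequest{...}.
	// (Field names below are assumed from the v1 proto, not shown in this diff.)
	req := &abci.CheckTxRequest{
		Tx:   []byte("raw-tx-bytes"),
		Type: abci.CHECK_TX_TYPE_CHECK,
	}
	_ = req
}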
diff --git a/abci/types/types_test.go b/abci/types/types_test.go index 9c4e62d4c2a..f11af133e27 100644 --- a/abci/types/types_test.go +++ b/abci/types/types_test.go @@ -41,7 +41,7 @@ func TestHashAndProveResults(t *testing.T) { require.NoError(t, err) valid := proofs[i].Verify(root, bz) - assert.NoError(t, valid, "%d", i) + require.NoError(t, valid, "%d", i) } } diff --git a/abci/types/util.go b/abci/types/util.go index 8205fef7e9d..054619a6a15 100644 --- a/abci/types/util.go +++ b/abci/types/util.go @@ -1,12 +1,14 @@ package types import ( + "bytes" "sort" + "strings" ) -//------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------ -// ValidatorUpdates is a list of validators that implements the Sort interface +// ValidatorUpdates is a list of validators that implements the Sort interface. type ValidatorUpdates []ValidatorUpdate var _ sort.Interface = (ValidatorUpdates)(nil) @@ -21,9 +23,9 @@ func (v ValidatorUpdates) Len() int { return len(v) } -// XXX: doesn't distinguish same validator with different power. func (v ValidatorUpdates) Less(i, j int) bool { - return v[i].PubKey.Compare(v[j].PubKey) <= 0 + return strings.Compare(v[i].PubKeyType, v[j].PubKeyType) < 0 || (strings.Compare(v[i].PubKeyType, v[j].PubKeyType) == 0 && bytes.Compare(v[i].PubKeyBytes, v[j].PubKeyBytes) < 0) } func (v ValidatorUpdates) Swap(i, j int) { diff --git a/api/README.md b/api/README.md new file mode 100644 index 00000000000..4a593f82cb6 --- /dev/null +++ b/api/README.md @@ -0,0 +1,3 @@ +# CometBFT APIs + +Please refer to [../proto/README.md](../proto/README.md). diff --git a/api/cometbft/abci/v1/service.pb.go b/api/cometbft/abci/v1/service.pb.go new file mode 100644 index 00000000000..29d497862ed --- /dev/null +++ b/api/cometbft/abci/v1/service.pb.go @@ -0,0 +1,717 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/abci/v1/service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("cometbft/abci/v1/service.proto", fileDescriptor_728160d6a27c523b) } + +var fileDescriptor_728160d6a27c523b = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xdd, 0x6e, 0xd3, 0x30, + 0x18, 0x86, 0x5b, 0x69, 0x0c, 0xe1, 0x09, 0x01, 0xe6, 0x6c, 0x82, 0x30, 0x7e, 0xc4, 0x3f, 0x89, + 0x0a, 0x57, 0xb0, 0x46, 0x9d, 0xa8, 0x36, 0xc1, 0x60, 0xd3, 0x90, 0x38, 0x22, 0xcd, 0xbe, 0x10, + 0xab, 0xa9, 0x6d, 0x6c, 0xa7, 0x5a, 0xb9, 0x0a, 0x2e, 0x8a, 0x03, 0x0e, 0x7b, 0xc8, 0x21, 0x6a, + 0x6f, 0x04, 0xa5, 0xb1, 0x09, 0x49, 0x5c, 0xaf, 0xa7, 0x7e, 0x9f, 0x3c, 0xaf, 0x1d, 0x7d, 0xd2, + 0x87, 0xbc, 0x98, 0x4d, 0x40, 0x8d, 0x12, 0x15, 0x44, 0xa3, 0x98, 0x04, 0xd3, 0x5e, 0x20, 0x41, + 0x4c, 0x49, 0x0c, 0x3e, 0x17, 0x4c, 0x31, 0x7c, 0xd3, 0xe4, 0x7e, 0x91, 0xfb, 0xd3, 0xde, 0xee, + 0x9d, 0xd6, 0x17, 0x6a, 0xc6, 0x41, 0x96, 0xfc, 0xeb, 0x9f, 0x3b, 0x68, 0x67, 0xbf, 0x1f, 0x0e, + 0x4f, 0x4a, 0x0b, 0x1e, 0xa0, 0xad, 0x41, 0x9c, 0x32, 0x7c, 0xd7, 0x6f, 0x8a, 0xfc, 0xe2, 0xfc, + 0x23, 0x7c, 0xcb, 0x41, 0xaa, 0x5d, 0x6f, 0x5d, 0x2c, 0x39, 0xa3, 0x12, 0xf0, 0x5b, 0x74, 0xe5, + 0x20, 0xcb, 0x65, 0x8a, 0x2d, 0xe0, 0x2a, 0x30, 0xa2, 0x7b, 0x6b, 0x73, 0x6d, 0x1a, 0xa0, 0xad, + 0x21, 0x4d, 0xac, 0x17, 0x2a, 0xce, 0x1d, 0x17, 0x2a, 0x63, 0xad, 0x79, 0x87, 0xae, 0x86, 0x29, + 0xc4, 0xe3, 0xd3, 0x0b, 0xbc, 0xd7, 0x46, 0x75, 0x64, 0x64, 0xf7, 0x1d, 0x44, 0xf5, 0xc0, 0x0f, + 0x39, 0x88, 0x99, 0xed, 0x81, 0xab, 0xc0, 0xf1, 0x40, 0x9d, 0x6b, 0xd3, 0x21, 0xda, 0x0e, 0xd9, + 0x64, 0x42, 0x14, 0xb6, 0xa0, 0x65, 0x62, 0x5c, 0x7b, 0xeb, 0x01, 0x2d, 0x3b, 0x45, 0xd7, 0x86, + 0x94, 0xa8, 0x30, 0x8d, 0x08, 0xc5, 0x0f, 0x6c, 0xff, 0x44, 0x87, 0x46, 0xf9, 0xd0, 0xc9, 0x68, + 0xeb, 0x17, 0x74, 0xfd, 0x88, 0x48, 0x75, 0x42, 0x23, 0x2e, 0x53, 0xa6, 0x24, 0x7e, 0xdc, 0xfe, + 0xaa, 0x06, 0x18, 0xfb, 0x93, 0x4b, 0xb9, 0xaa, 0xe1, 0x7d, 0x92, 0x80, 0x30, 0x89, 0xad, 0xa1, + 0x06, 0x38, 0x1a, 0x1a, 0x9c, 0x6e, 0xc8, 0xd0, 0xad, 0x23, 0x16, 0x9d, 0x9b, 0xf3, 0x30, 0xcd, + 0xe9, 0x18, 0x3f, 0xb7, 0xdc, 0xaf, 0x09, 0x99, 0xa6, 0x17, 0x1b, 0xb1, 0xba, 0x8d, 0x21, 0xbc, + 0xcf, 0x79, 0x36, 0xab, 0xd7, 0x59, 0x14, 0x6d, 0xca, 0xf4, 0xbd, 0xdc, 0x0c, 0xd6, 0x85, 0x09, + 0xba, 0x71, 0x2c, 0x80, 0x47, 0x02, 0x8e, 0x05, 0xe3, 0x4c, 0x46, 0x19, 0x7e, 0xda, 0x16, 0x34, + 0x10, 0x53, 0xf5, 0x6c, 0x03, 0xf2, 0xff, 0x1e, 0x16, 0x83, 0x94, 0xee, 0x9e, 0x1a, 0xe2, 0xec, + 0x69, 0x90, 0xba, 0xe7, 0x13, 0x42, 0x83, 0x0b, 0x05, 0xf4, 0xfc, 0x8c, 0x29, 0xc0, 0x96, 0x29, + 0xad, 0x52, 0x63, 0x7f, 0xe4, 0x86, 0xb4, 0x58, 0xa0, 0xdb, 0x67, 0x20, 0x48, 0x32, 0x2b, 0x4e, + 0x57, 0xb9, 0x24, 0x8c, 0x62, 0xcb, 0xdf, 0xb6, 0x60, 0xa6, 0xea, 0xd5, 0x86, 0x74, 0x35, 0xdd, + 0x07, 0x84, 0x46, 0x19, 0xf9, 0x0e, 0xfd, 0x8c, 0xc5, 0x63, 0xdb, 0x74, 0xd7, 0x00, 0xc7, 0x74, + 0x37, 0xb8, 0xb2, 0xa1, 0x7f, 0xf8, 0x6b, 0xe1, 0x75, 0xe7, 0x0b, 0xaf, 0xfb, 0x67, 0xe1, 0x75, + 0x7f, 0x2c, 0xbd, 0xce, 0x7c, 0xe9, 0x75, 0x7e, 0x2f, 0xbd, 0xce, 0xe7, 0xde, 0x57, 0xa2, 0xd2, + 0x7c, 0x54, 0x88, 0x82, 0x7f, 0x9b, 0xa0, 0x5a, 0x09, 0x9c, 0x04, 0xcd, 0xfd, 0x30, 0xda, 0x5e, + 0xad, 0x86, 0x37, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x53, 0x44, 0xb0, 0xec, 0x6c, 0x06, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ABCIServiceClient is the client API for ABCIService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ABCIServiceClient interface { + // Echo returns back the same message it is sent. + Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error) + // Flush flushes the write buffer. + Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error) + // Info returns information about the application state. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + // CheckTx validates a transaction. + CheckTx(ctx context.Context, in *CheckTxRequest, opts ...grpc.CallOption) (*CheckTxResponse, error) + // Query queries the application state. + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) + // Commit commits a block of transactions. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // InitChain initializes the blockchain. + InitChain(ctx context.Context, in *InitChainRequest, opts ...grpc.CallOption) (*InitChainResponse, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(ctx context.Context, in *OfferSnapshotRequest, opts ...grpc.CallOption) (*OfferSnapshotResponse, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(ctx context.Context, in *LoadSnapshotChunkRequest, opts ...grpc.CallOption) (*LoadSnapshotChunkResponse, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(ctx context.Context, in *ApplySnapshotChunkRequest, opts ...grpc.CallOption) (*ApplySnapshotChunkResponse, error) + // PrepareProposal returns a proposal for the next block. + PrepareProposal(ctx context.Context, in *PrepareProposalRequest, opts ...grpc.CallOption) (*PrepareProposalResponse, error) + // ProcessProposal validates a proposal. + ProcessProposal(ctx context.Context, in *ProcessProposalRequest, opts ...grpc.CallOption) (*ProcessProposalResponse, error) + // ExtendVote extends a vote with application-injected data (vote extensions). + ExtendVote(ctx context.Context, in *ExtendVoteRequest, opts ...grpc.CallOption) (*ExtendVoteResponse, error) + // VerifyVoteExtension verifies a vote extension. + VerifyVoteExtension(ctx context.Context, in *VerifyVoteExtensionRequest, opts ...grpc.CallOption) (*VerifyVoteExtensionResponse, error) + // FinalizeBlock finalizes a block. + FinalizeBlock(ctx context.Context, in *FinalizeBlockRequest, opts ...grpc.CallOption) (*FinalizeBlockResponse, error) +} + +type aBCIServiceClient struct { + cc grpc1.ClientConn +} + +func NewABCIServiceClient(cc grpc1.ClientConn) ABCIServiceClient { + return &aBCIServiceClient{cc} +} + +func (c *aBCIServiceClient) Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error) { + out := new(EchoResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/Echo", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error) { + out := new(FlushResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/Flush", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) CheckTx(ctx context.Context, in *CheckTxRequest, opts ...grpc.CallOption) (*CheckTxResponse, error) { + out := new(CheckTxResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/CheckTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { + out := new(QueryResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) InitChain(ctx context.Context, in *InitChainRequest, opts ...grpc.CallOption) (*InitChainResponse, error) { + out := new(InitChainResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/InitChain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) OfferSnapshot(ctx context.Context, in *OfferSnapshotRequest, opts ...grpc.CallOption) (*OfferSnapshotResponse, error) { + out := new(OfferSnapshotResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) LoadSnapshotChunk(ctx context.Context, in *LoadSnapshotChunkRequest, opts ...grpc.CallOption) (*LoadSnapshotChunkResponse, error) { + out := new(LoadSnapshotChunkResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/LoadSnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) ApplySnapshotChunk(ctx context.Context, in *ApplySnapshotChunkRequest, opts ...grpc.CallOption) (*ApplySnapshotChunkResponse, error) { + out := new(ApplySnapshotChunkResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/ApplySnapshotChunk", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) PrepareProposal(ctx context.Context, in *PrepareProposalRequest, opts ...grpc.CallOption) (*PrepareProposalResponse, error) { + out := new(PrepareProposalResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/PrepareProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) ProcessProposal(ctx context.Context, in *ProcessProposalRequest, opts ...grpc.CallOption) (*ProcessProposalResponse, error) { + out := new(ProcessProposalResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/ProcessProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) ExtendVote(ctx context.Context, in *ExtendVoteRequest, opts ...grpc.CallOption) (*ExtendVoteResponse, error) { + out := new(ExtendVoteResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/ExtendVote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) VerifyVoteExtension(ctx context.Context, in *VerifyVoteExtensionRequest, opts ...grpc.CallOption) (*VerifyVoteExtensionResponse, error) { + out := new(VerifyVoteExtensionResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/VerifyVoteExtension", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIServiceClient) FinalizeBlock(ctx context.Context, in *FinalizeBlockRequest, opts ...grpc.CallOption) (*FinalizeBlockResponse, error) { + out := new(FinalizeBlockResponse) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1.ABCIService/FinalizeBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ABCIServiceServer is the server API for ABCIService service. +type ABCIServiceServer interface { + // Echo returns back the same message it is sent. + Echo(context.Context, *EchoRequest) (*EchoResponse, error) + // Flush flushes the write buffer. + Flush(context.Context, *FlushRequest) (*FlushResponse, error) + // Info returns information about the application state. + Info(context.Context, *InfoRequest) (*InfoResponse, error) + // CheckTx validates a transaction. + CheckTx(context.Context, *CheckTxRequest) (*CheckTxResponse, error) + // Query queries the application state. + Query(context.Context, *QueryRequest) (*QueryResponse, error) + // Commit commits a block of transactions. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // InitChain initializes the blockchain. + InitChain(context.Context, *InitChainRequest) (*InitChainResponse, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(context.Context, *OfferSnapshotRequest) (*OfferSnapshotResponse, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(context.Context, *LoadSnapshotChunkRequest) (*LoadSnapshotChunkResponse, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(context.Context, *ApplySnapshotChunkRequest) (*ApplySnapshotChunkResponse, error) + // PrepareProposal returns a proposal for the next block. + PrepareProposal(context.Context, *PrepareProposalRequest) (*PrepareProposalResponse, error) + // ProcessProposal validates a proposal. 
+ ProcessProposal(context.Context, *ProcessProposalRequest) (*ProcessProposalResponse, error) + // ExtendVote extends a vote with application-injected data (vote extensions). + ExtendVote(context.Context, *ExtendVoteRequest) (*ExtendVoteResponse, error) + // VerifyVoteExtension verifies a vote extension. + VerifyVoteExtension(context.Context, *VerifyVoteExtensionRequest) (*VerifyVoteExtensionResponse, error) + // FinalizeBlock finalizes a block. + FinalizeBlock(context.Context, *FinalizeBlockRequest) (*FinalizeBlockResponse, error) +} + +// UnimplementedABCIServiceServer can be embedded to have forward compatible implementations. +type UnimplementedABCIServiceServer struct { +} + +func (*UnimplementedABCIServiceServer) Echo(ctx context.Context, req *EchoRequest) (*EchoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (*UnimplementedABCIServiceServer) Flush(ctx context.Context, req *FlushRequest) (*FlushResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +} +func (*UnimplementedABCIServiceServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedABCIServiceServer) CheckTx(ctx context.Context, req *CheckTxRequest) (*CheckTxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") +} +func (*UnimplementedABCIServiceServer) Query(ctx context.Context, req *QueryRequest) (*QueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedABCIServiceServer) Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedABCIServiceServer) InitChain(ctx context.Context, req *InitChainRequest) (*InitChainResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") +} +func (*UnimplementedABCIServiceServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIServiceServer) OfferSnapshot(ctx context.Context, req *OfferSnapshotRequest) (*OfferSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIServiceServer) LoadSnapshotChunk(ctx context.Context, req *LoadSnapshotChunkRequest) (*LoadSnapshotChunkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIServiceServer) ApplySnapshotChunk(ctx context.Context, req *ApplySnapshotChunkRequest) (*ApplySnapshotChunkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +} +func (*UnimplementedABCIServiceServer) PrepareProposal(ctx context.Context, req *PrepareProposalRequest) (*PrepareProposalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") +} +func (*UnimplementedABCIServiceServer) ProcessProposal(ctx context.Context, req *ProcessProposalRequest) (*ProcessProposalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") +} +func (*UnimplementedABCIServiceServer) 
ExtendVote(ctx context.Context, req *ExtendVoteRequest) (*ExtendVoteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExtendVote not implemented") +} +func (*UnimplementedABCIServiceServer) VerifyVoteExtension(ctx context.Context, req *VerifyVoteExtensionRequest) (*VerifyVoteExtensionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") +} +func (*UnimplementedABCIServiceServer) FinalizeBlock(ctx context.Context, req *FinalizeBlockRequest) (*FinalizeBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") +} + +func RegisterABCIServiceServer(s grpc1.Server, srv ABCIServiceServer) { + s.RegisterService(&_ABCIService_serviceDesc, srv) +} + +func _ABCIService_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EchoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).Echo(ctx, req.(*EchoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FlushRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).Flush(ctx, req.(*FlushRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckTxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).CheckTx(ctx, req.(*CheckTxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(ABCIServiceServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).Query(ctx, req.(*QueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitChainRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).InitChain(ctx, req.(*InitChainRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(OfferSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).OfferSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/OfferSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).OfferSnapshot(ctx, req.(*OfferSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LoadSnapshotChunkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).LoadSnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/LoadSnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(ABCIServiceServer).LoadSnapshotChunk(ctx, req.(*LoadSnapshotChunkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplySnapshotChunkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).ApplySnapshotChunk(ctx, req.(*ApplySnapshotChunkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProposalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).PrepareProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/PrepareProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).PrepareProposal(ctx, req.(*PrepareProposalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProcessProposalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).ProcessProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/ProcessProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).ProcessProposal(ctx, req.(*ProcessProposalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExtendVoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).ExtendVote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/ExtendVote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).ExtendVote(ctx, req.(*ExtendVoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyVoteExtensionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).VerifyVoteExtension(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/VerifyVoteExtension", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).VerifyVoteExtension(ctx, 
req.(*VerifyVoteExtensionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIService_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FinalizeBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServiceServer).FinalizeBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1.ABCIService/FinalizeBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServiceServer).FinalizeBlock(ctx, req.(*FinalizeBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var ABCIService_serviceDesc = _ABCIService_serviceDesc +var _ABCIService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.abci.v1.ABCIService", + HandlerType: (*ABCIServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCIService_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCIService_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCIService_Info_Handler, + }, + { + MethodName: "CheckTx", + Handler: _ABCIService_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCIService_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCIService_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCIService_InitChain_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _ABCIService_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + Handler: _ABCIService_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCIService_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCIService_ApplySnapshotChunk_Handler, + }, + { + MethodName: "PrepareProposal", + Handler: _ABCIService_PrepareProposal_Handler, + }, + { + MethodName: "ProcessProposal", + Handler: _ABCIService_ProcessProposal_Handler, + }, + { + MethodName: "ExtendVote", + Handler: _ABCIService_ExtendVote_Handler, + }, + { + MethodName: "VerifyVoteExtension", + Handler: _ABCIService_VerifyVoteExtension_Handler, + }, + { + MethodName: "FinalizeBlock", + Handler: _ABCIService_FinalizeBlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/abci/v1/service.proto", +} diff --git a/api/cometbft/abci/v1/types.go b/api/cometbft/abci/v1/types.go new file mode 100644 index 00000000000..fd5c873bc37 --- /dev/null +++ b/api/cometbft/abci/v1/types.go @@ -0,0 +1,121 @@ +package v1 + +import ( + "bytes" + + "github.com/cosmos/gogoproto/jsonpb" +) + +const ( + CodeTypeOK uint32 = 0 +) + +// IsOK returns true if Code is OK. +func (r CheckTxResponse) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r CheckTxResponse) IsErr() bool { + return r.Code != CodeTypeOK +} + +// IsOK returns true if Code is OK. +func (r QueryResponse) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. 
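// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of this diff. The exported
// ABCIService_serviceDesc above lets an application register its own
// ABCIServiceServer implementation on a standard google.golang.org/grpc
// server; RegisterService wires every MethodDesc (Echo, Flush, ...,
// FinalizeBlock) to the corresponding _ABCIService_*_Handler. The function
// name, listen address, and import alias abciv1 are assumptions.
//
//	import (
//		"net"
//
//		"google.golang.org/grpc"
//
//		abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
//	)
//
//	func serveABCI(app abciv1.ABCIServiceServer) error {
//		lis, err := net.Listen("tcp", "127.0.0.1:26658")
//		if err != nil {
//			return err
//		}
//		s := grpc.NewServer()
//		// Attach the generated service descriptor to this implementation.
//		s.RegisterService(&abciv1.ABCIService_serviceDesc, app)
//		return s.Serve(lis)
//	}
// ---------------------------------------------------------------------------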
+func (r QueryResponse) IsErr() bool { + return r.Code != CodeTypeOK +} + +// IsAccepted returns true if Status is ACCEPT. +func (r ProcessProposalResponse) IsAccepted() bool { + return r.Status == PROCESS_PROPOSAL_STATUS_ACCEPT +} + +// IsStatusUnknown returns true if Status is UNKNOWN. +func (r ProcessProposalResponse) IsStatusUnknown() bool { + return r.Status == PROCESS_PROPOSAL_STATUS_UNKNOWN +} + +func (r VerifyVoteExtensionResponse) IsAccepted() bool { + return r.Status == VERIFY_VOTE_EXTENSION_STATUS_ACCEPT +} + +// IsStatusUnknown returns true if Status is UNKNOWN. +func (r VerifyVoteExtensionResponse) IsStatusUnknown() bool { + return r.Status == VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN +} + +// IsOK returns true if Code is OK. +func (r ExecTxResult) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r ExecTxResult) IsErr() bool { + return r.Code != CodeTypeOK +} + +// --------------------------------------------------------------------------- +// override JSON marshaling so we emit defaults (i.e. disable omitempty) + +var ( + jsonpbMarshaller = jsonpb.Marshaler{ + EnumsAsInts: true, + EmitDefaults: true, + } + jsonpbUnmarshaller = jsonpb.Unmarshaler{} +) + +func (r *CheckTxResponse) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *CheckTxResponse) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *QueryResponse) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *QueryResponse) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *CommitResponse) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *CommitResponse) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *EventAttribute) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *EventAttribute) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ExecTxResult) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ExecTxResult) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} diff --git a/abci/types/types.pb.go b/api/cometbft/abci/v1/types.pb.go similarity index 73% rename from abci/types/types.pb.go rename to api/cometbft/abci/v1/types.pb.go index 78dcc5c2f45..8df8225be06 100644 --- a/abci/types/types.pb.go +++ b/api/cometbft/abci/v1/types.pb.go @@ -1,21 +1,17 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT.
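// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of this diff. It shows the
// effect of the jsonpb overrides in types.go above: with EmitDefaults set,
// zero-valued fields such as a successful (zero) Code are kept in the JSON
// output instead of being dropped by omitempty. Assumes imports of
// "encoding/json" and "fmt" plus the import alias abciv1; the exact JSON
// field name is an assumption based on the protobuf tag.
//
//	resp := &abciv1.CheckTxResponse{Code: abciv1.CodeTypeOK}
//	// json.Marshal dispatches to (*CheckTxResponse).MarshalJSON, which in
//	// turn uses jsonpbMarshaller.
//	bz, err := json.Marshal(resp)
//	if err != nil {
//		panic(err)
//	}
//	// With EmitDefaults, the output still contains "code":0; under the plain
//	// omitempty tags it would be omitted.
//	fmt.Println(string(bz))
// ---------------------------------------------------------------------------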
-// source: tendermint/abci/types.proto +// source: cometbft/abci/v1/types.proto -package types +package v1 import ( - context "context" fmt "fmt" - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - types1 "github.com/cometbft/cometbft/proto/tendermint/types" + v11 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" _ "github.com/cosmos/gogoproto/gogoproto" - grpc1 "github.com/cosmos/gogoproto/grpc" proto "github.com/cosmos/gogoproto/proto" _ "github.com/cosmos/gogoproto/types" github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" + _ "github.com/golang/protobuf/ptypes/duration" io "io" math "math" math_bits "math/bits" @@ -34,21 +30,32 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Type of the transaction check request. +// +// This enumeration is incompatible with the CheckTxType definition in +// cometbft.abci.v1beta1 and therefore shall not be used in encoding with the same +// field number. type CheckTxType int32 const ( - CheckTxType_New CheckTxType = 0 - CheckTxType_Recheck CheckTxType = 1 + // Unknown + CHECK_TX_TYPE_UNKNOWN CheckTxType = 0 + // Recheck (2nd, 3rd, etc.) + CHECK_TX_TYPE_RECHECK CheckTxType = 1 + // Check (1st time) + CHECK_TX_TYPE_CHECK CheckTxType = 2 ) var CheckTxType_name = map[int32]string{ - 0: "NEW", - 1: "RECHECK", + 0: "CHECK_TX_TYPE_UNKNOWN", + 1: "CHECK_TX_TYPE_RECHECK", + 2: "CHECK_TX_TYPE_CHECK", } var CheckTxType_value = map[string]int32{ - "NEW": 0, - "RECHECK": 1, + "CHECK_TX_TYPE_UNKNOWN": 0, + "CHECK_TX_TYPE_RECHECK": 1, + "CHECK_TX_TYPE_CHECK": 2, } func (x CheckTxType) String() string { @@ -56,172 +63,200 @@ func (x CheckTxType) String() string { } func (CheckTxType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{0} + return fileDescriptor_95dd8f7b670b96e3, []int{0} } -type MisbehaviorType int32 +// The result of offering a snapshot. 
+type OfferSnapshotResult int32 const ( - MisbehaviorType_UNKNOWN MisbehaviorType = 0 - MisbehaviorType_DUPLICATE_VOTE MisbehaviorType = 1 - MisbehaviorType_LIGHT_CLIENT_ATTACK MisbehaviorType = 2 + // Unknown result, abort all snapshot restoration + OFFER_SNAPSHOT_RESULT_UNKNOWN OfferSnapshotResult = 0 + // Snapshot accepted, apply chunks + OFFER_SNAPSHOT_RESULT_ACCEPT OfferSnapshotResult = 1 + // Abort all snapshot restoration + OFFER_SNAPSHOT_RESULT_ABORT OfferSnapshotResult = 2 + // Reject this specific snapshot, try others + OFFER_SNAPSHOT_RESULT_REJECT OfferSnapshotResult = 3 + // Reject all snapshots of this format, try others + OFFER_SNAPSHOT_RESULT_REJECT_FORMAT OfferSnapshotResult = 4 + // Reject all snapshots from the sender(s), try others + OFFER_SNAPSHOT_RESULT_REJECT_SENDER OfferSnapshotResult = 5 ) -var MisbehaviorType_name = map[int32]string{ - 0: "UNKNOWN", - 1: "DUPLICATE_VOTE", - 2: "LIGHT_CLIENT_ATTACK", +var OfferSnapshotResult_name = map[int32]string{ + 0: "OFFER_SNAPSHOT_RESULT_UNKNOWN", + 1: "OFFER_SNAPSHOT_RESULT_ACCEPT", + 2: "OFFER_SNAPSHOT_RESULT_ABORT", + 3: "OFFER_SNAPSHOT_RESULT_REJECT", + 4: "OFFER_SNAPSHOT_RESULT_REJECT_FORMAT", + 5: "OFFER_SNAPSHOT_RESULT_REJECT_SENDER", } -var MisbehaviorType_value = map[string]int32{ - "UNKNOWN": 0, - "DUPLICATE_VOTE": 1, - "LIGHT_CLIENT_ATTACK": 2, +var OfferSnapshotResult_value = map[string]int32{ + "OFFER_SNAPSHOT_RESULT_UNKNOWN": 0, + "OFFER_SNAPSHOT_RESULT_ACCEPT": 1, + "OFFER_SNAPSHOT_RESULT_ABORT": 2, + "OFFER_SNAPSHOT_RESULT_REJECT": 3, + "OFFER_SNAPSHOT_RESULT_REJECT_FORMAT": 4, + "OFFER_SNAPSHOT_RESULT_REJECT_SENDER": 5, } -func (x MisbehaviorType) String() string { - return proto.EnumName(MisbehaviorType_name, int32(x)) +func (x OfferSnapshotResult) String() string { + return proto.EnumName(OfferSnapshotResult_name, int32(x)) } -func (MisbehaviorType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{1} +func (OfferSnapshotResult) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{1} } -type ResponseOfferSnapshot_Result int32 +// The result of applying a snapshot chunk. 
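// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of this diff. A state-sync
// client driving snapshot restoration might dispatch on OfferSnapshotResult
// as below; the function name, error messages, and control flow are
// assumptions, and "fmt" plus the import alias abciv1 are assumed imports.
//
//	func handleOffer(res abciv1.OfferSnapshotResult) error {
//		switch res {
//		case abciv1.OFFER_SNAPSHOT_RESULT_ACCEPT:
//			return nil // proceed to fetch and apply chunks
//		case abciv1.OFFER_SNAPSHOT_RESULT_REJECT:
//			return fmt.Errorf("snapshot rejected: try the next candidate")
//		case abciv1.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT:
//			return fmt.Errorf("format rejected: skip all snapshots of this format")
//		case abciv1.OFFER_SNAPSHOT_RESULT_REJECT_SENDER:
//			return fmt.Errorf("sender rejected: skip all snapshots from these peers")
//		default: // UNKNOWN or ABORT
//			return fmt.Errorf("aborting all snapshot restoration")
//		}
//	}
// ---------------------------------------------------------------------------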
+type ApplySnapshotChunkResult int32 const ( - ResponseOfferSnapshot_UNKNOWN ResponseOfferSnapshot_Result = 0 - ResponseOfferSnapshot_ACCEPT ResponseOfferSnapshot_Result = 1 - ResponseOfferSnapshot_ABORT ResponseOfferSnapshot_Result = 2 - ResponseOfferSnapshot_REJECT ResponseOfferSnapshot_Result = 3 - ResponseOfferSnapshot_REJECT_FORMAT ResponseOfferSnapshot_Result = 4 - ResponseOfferSnapshot_REJECT_SENDER ResponseOfferSnapshot_Result = 5 + // Unknown result, abort all snapshot restoration + APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN ApplySnapshotChunkResult = 0 + // Chunk successfully accepted + APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT ApplySnapshotChunkResult = 1 + // Abort all snapshot restoration + APPLY_SNAPSHOT_CHUNK_RESULT_ABORT ApplySnapshotChunkResult = 2 + // Retry chunk (combine with refetch and reject) + APPLY_SNAPSHOT_CHUNK_RESULT_RETRY ApplySnapshotChunkResult = 3 + // Retry snapshot (combine with refetch and reject) + APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT ApplySnapshotChunkResult = 4 + // Reject this snapshot, try others + APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT ApplySnapshotChunkResult = 5 ) -var ResponseOfferSnapshot_Result_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ACCEPT", - 2: "ABORT", - 3: "REJECT", - 4: "REJECT_FORMAT", - 5: "REJECT_SENDER", +var ApplySnapshotChunkResult_name = map[int32]string{ + 0: "APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN", + 1: "APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT", + 2: "APPLY_SNAPSHOT_CHUNK_RESULT_ABORT", + 3: "APPLY_SNAPSHOT_CHUNK_RESULT_RETRY", + 4: "APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT", + 5: "APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT", } -var ResponseOfferSnapshot_Result_value = map[string]int32{ - "UNKNOWN": 0, - "ACCEPT": 1, - "ABORT": 2, - "REJECT": 3, - "REJECT_FORMAT": 4, - "REJECT_SENDER": 5, +var ApplySnapshotChunkResult_value = map[string]int32{ + "APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN": 0, + "APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT": 1, + "APPLY_SNAPSHOT_CHUNK_RESULT_ABORT": 2, + "APPLY_SNAPSHOT_CHUNK_RESULT_RETRY": 3, + "APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT": 4, + "APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT": 5, } -func (x ResponseOfferSnapshot_Result) String() string { - return proto.EnumName(ResponseOfferSnapshot_Result_name, int32(x)) +func (x ApplySnapshotChunkResult) String() string { + return proto.EnumName(ApplySnapshotChunkResult_name, int32(x)) } -func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27, 0} +func (ApplySnapshotChunkResult) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{2} } -type ResponseApplySnapshotChunk_Result int32 +// ProcessProposalStatus is the status of the proposal processing. 
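// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of this diff. Per the enum
// comments above, the RETRY results are meant to be combined with the
// refetch/reject fields of the ApplySnapshotChunk response; this hypothetical
// handler shows the intended distinction between the values. The function
// name and import alias abciv1 are assumptions.
//
//	func handleChunkResult(res abciv1.ApplySnapshotChunkResult) {
//		switch res {
//		case abciv1.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT:
//			// chunk applied; move on to the next chunk
//		case abciv1.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY:
//			// re-apply this same chunk, refetching it if requested
//		case abciv1.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT:
//			// restart restoration of the current snapshot from its first chunk
//		case abciv1.APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT:
//			// abandon this snapshot and try a different one
//		default: // UNKNOWN or ABORT
//			// abort all snapshot restoration
//		}
//	}
// ---------------------------------------------------------------------------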
+type ProcessProposalStatus int32 const ( - ResponseApplySnapshotChunk_UNKNOWN ResponseApplySnapshotChunk_Result = 0 - ResponseApplySnapshotChunk_ACCEPT ResponseApplySnapshotChunk_Result = 1 - ResponseApplySnapshotChunk_ABORT ResponseApplySnapshotChunk_Result = 2 - ResponseApplySnapshotChunk_RETRY ResponseApplySnapshotChunk_Result = 3 - ResponseApplySnapshotChunk_RETRY_SNAPSHOT ResponseApplySnapshotChunk_Result = 4 - ResponseApplySnapshotChunk_REJECT_SNAPSHOT ResponseApplySnapshotChunk_Result = 5 + // Unknown + PROCESS_PROPOSAL_STATUS_UNKNOWN ProcessProposalStatus = 0 + // Accepted + PROCESS_PROPOSAL_STATUS_ACCEPT ProcessProposalStatus = 1 + // Rejected + PROCESS_PROPOSAL_STATUS_REJECT ProcessProposalStatus = 2 ) -var ResponseApplySnapshotChunk_Result_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ACCEPT", - 2: "ABORT", - 3: "RETRY", - 4: "RETRY_SNAPSHOT", - 5: "REJECT_SNAPSHOT", +var ProcessProposalStatus_name = map[int32]string{ + 0: "PROCESS_PROPOSAL_STATUS_UNKNOWN", + 1: "PROCESS_PROPOSAL_STATUS_ACCEPT", + 2: "PROCESS_PROPOSAL_STATUS_REJECT", } -var ResponseApplySnapshotChunk_Result_value = map[string]int32{ - "UNKNOWN": 0, - "ACCEPT": 1, - "ABORT": 2, - "RETRY": 3, - "RETRY_SNAPSHOT": 4, - "REJECT_SNAPSHOT": 5, +var ProcessProposalStatus_value = map[string]int32{ + "PROCESS_PROPOSAL_STATUS_UNKNOWN": 0, + "PROCESS_PROPOSAL_STATUS_ACCEPT": 1, + "PROCESS_PROPOSAL_STATUS_REJECT": 2, } -func (x ResponseApplySnapshotChunk_Result) String() string { - return proto.EnumName(ResponseApplySnapshotChunk_Result_name, int32(x)) +func (x ProcessProposalStatus) String() string { + return proto.EnumName(ProcessProposalStatus_name, int32(x)) } -func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29, 0} +func (ProcessProposalStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{3} } -type ResponseProcessProposal_ProposalStatus int32 +// VerifyVoteExtensionStatus is the status of the vote extension verification. +type VerifyVoteExtensionStatus int32 const ( - ResponseProcessProposal_UNKNOWN ResponseProcessProposal_ProposalStatus = 0 - ResponseProcessProposal_ACCEPT ResponseProcessProposal_ProposalStatus = 1 - ResponseProcessProposal_REJECT ResponseProcessProposal_ProposalStatus = 2 + // Unknown + VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN VerifyVoteExtensionStatus = 0 + // Accepted + VERIFY_VOTE_EXTENSION_STATUS_ACCEPT VerifyVoteExtensionStatus = 1 + // Rejecting the vote extension will reject the entire precommit by the sender. + // Incorrectly implementing this thus has liveness implications as it may affect + // CometBFT's ability to receive 2/3+ valid votes to finalize the block. + // Honest nodes should never be rejected. 
+ VERIFY_VOTE_EXTENSION_STATUS_REJECT VerifyVoteExtensionStatus = 2 ) -var ResponseProcessProposal_ProposalStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ACCEPT", - 2: "REJECT", +var VerifyVoteExtensionStatus_name = map[int32]string{ + 0: "VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN", + 1: "VERIFY_VOTE_EXTENSION_STATUS_ACCEPT", + 2: "VERIFY_VOTE_EXTENSION_STATUS_REJECT", } -var ResponseProcessProposal_ProposalStatus_value = map[string]int32{ - "UNKNOWN": 0, - "ACCEPT": 1, - "REJECT": 2, +var VerifyVoteExtensionStatus_value = map[string]int32{ + "VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN": 0, + "VERIFY_VOTE_EXTENSION_STATUS_ACCEPT": 1, + "VERIFY_VOTE_EXTENSION_STATUS_REJECT": 2, } -func (x ResponseProcessProposal_ProposalStatus) String() string { - return proto.EnumName(ResponseProcessProposal_ProposalStatus_name, int32(x)) +func (x VerifyVoteExtensionStatus) String() string { + return proto.EnumName(VerifyVoteExtensionStatus_name, int32(x)) } -func (ResponseProcessProposal_ProposalStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31, 0} +func (VerifyVoteExtensionStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{4} } -type ResponseVerifyVoteExtension_VerifyStatus int32 +// The type of misbehavior committed by a validator. +type MisbehaviorType int32 const ( - ResponseVerifyVoteExtension_UNKNOWN ResponseVerifyVoteExtension_VerifyStatus = 0 - ResponseVerifyVoteExtension_ACCEPT ResponseVerifyVoteExtension_VerifyStatus = 1 - // Rejecting the vote extension will reject the entire precommit by the sender. - // Incorrectly implementing this thus has liveness implications as it may affect - // CometBFT's ability to receive 2/3+ valid votes to finalize the block. - // Honest nodes should never be rejected. - ResponseVerifyVoteExtension_REJECT ResponseVerifyVoteExtension_VerifyStatus = 2 + // Unknown + MISBEHAVIOR_TYPE_UNKNOWN MisbehaviorType = 0 + // Duplicate vote + MISBEHAVIOR_TYPE_DUPLICATE_VOTE MisbehaviorType = 1 + // Light client attack + MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK MisbehaviorType = 2 ) -var ResponseVerifyVoteExtension_VerifyStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ACCEPT", - 2: "REJECT", +var MisbehaviorType_name = map[int32]string{ + 0: "MISBEHAVIOR_TYPE_UNKNOWN", + 1: "MISBEHAVIOR_TYPE_DUPLICATE_VOTE", + 2: "MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK", } -var ResponseVerifyVoteExtension_VerifyStatus_value = map[string]int32{ - "UNKNOWN": 0, - "ACCEPT": 1, - "REJECT": 2, +var MisbehaviorType_value = map[string]int32{ + "MISBEHAVIOR_TYPE_UNKNOWN": 0, + "MISBEHAVIOR_TYPE_DUPLICATE_VOTE": 1, + "MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK": 2, } -func (x ResponseVerifyVoteExtension_VerifyStatus) String() string { - return proto.EnumName(ResponseVerifyVoteExtension_VerifyStatus_name, int32(x)) +func (x MisbehaviorType) String() string { + return proto.EnumName(MisbehaviorType_name, int32(x)) } -func (ResponseVerifyVoteExtension_VerifyStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33, 0} +func (MisbehaviorType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{5} } +// Request represents a request to the ABCI application. type Request struct { + // Sum of all possible messages. 
+ // // Types that are valid to be assigned to Value: // *Request_Echo // *Request_Flush @@ -246,7 +281,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{0} + return fileDescriptor_95dd8f7b670b96e3, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -282,52 +317,52 @@ type isRequest_Value interface { } type Request_Echo struct { - Echo *RequestEcho `protobuf:"bytes,1,opt,name=echo,proto3,oneof" json:"echo,omitempty"` + Echo *EchoRequest `protobuf:"bytes,1,opt,name=echo,proto3,oneof" json:"echo,omitempty"` } type Request_Flush struct { - Flush *RequestFlush `protobuf:"bytes,2,opt,name=flush,proto3,oneof" json:"flush,omitempty"` + Flush *FlushRequest `protobuf:"bytes,2,opt,name=flush,proto3,oneof" json:"flush,omitempty"` } type Request_Info struct { - Info *RequestInfo `protobuf:"bytes,3,opt,name=info,proto3,oneof" json:"info,omitempty"` + Info *InfoRequest `protobuf:"bytes,3,opt,name=info,proto3,oneof" json:"info,omitempty"` } type Request_InitChain struct { - InitChain *RequestInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` + InitChain *InitChainRequest `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` } type Request_Query struct { - Query *RequestQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` + Query *QueryRequest `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` } type Request_CheckTx struct { - CheckTx *RequestCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` + CheckTx *CheckTxRequest `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } type Request_Commit struct { - Commit *RequestCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` + Commit *CommitRequest `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } type Request_ListSnapshots struct { - ListSnapshots *RequestListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` + ListSnapshots *ListSnapshotsRequest `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` } type Request_OfferSnapshot struct { - OfferSnapshot *RequestOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` + OfferSnapshot *OfferSnapshotRequest `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` } type Request_LoadSnapshotChunk struct { - LoadSnapshotChunk *RequestLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` + LoadSnapshotChunk *LoadSnapshotChunkRequest `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` } type Request_ApplySnapshotChunk struct { - ApplySnapshotChunk *RequestApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` + ApplySnapshotChunk *ApplySnapshotChunkRequest 
`protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } type Request_PrepareProposal struct { - PrepareProposal *RequestPrepareProposal `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` + PrepareProposal *PrepareProposalRequest `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` } type Request_ProcessProposal struct { - ProcessProposal *RequestProcessProposal `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` + ProcessProposal *ProcessProposalRequest `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` } type Request_ExtendVote struct { - ExtendVote *RequestExtendVote `protobuf:"bytes,18,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` + ExtendVote *ExtendVoteRequest `protobuf:"bytes,18,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` } type Request_VerifyVoteExtension struct { - VerifyVoteExtension *RequestVerifyVoteExtension `protobuf:"bytes,19,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` + VerifyVoteExtension *VerifyVoteExtensionRequest `protobuf:"bytes,19,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` } type Request_FinalizeBlock struct { - FinalizeBlock *RequestFinalizeBlock `protobuf:"bytes,20,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` + FinalizeBlock *FinalizeBlockRequest `protobuf:"bytes,20,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` } func (*Request_Echo) isRequest_Value() {} @@ -354,112 +389,112 @@ func (m *Request) GetValue() isRequest_Value { return nil } -func (m *Request) GetEcho() *RequestEcho { +func (m *Request) GetEcho() *EchoRequest { if x, ok := m.GetValue().(*Request_Echo); ok { return x.Echo } return nil } -func (m *Request) GetFlush() *RequestFlush { +func (m *Request) GetFlush() *FlushRequest { if x, ok := m.GetValue().(*Request_Flush); ok { return x.Flush } return nil } -func (m *Request) GetInfo() *RequestInfo { +func (m *Request) GetInfo() *InfoRequest { if x, ok := m.GetValue().(*Request_Info); ok { return x.Info } return nil } -func (m *Request) GetInitChain() *RequestInitChain { +func (m *Request) GetInitChain() *InitChainRequest { if x, ok := m.GetValue().(*Request_InitChain); ok { return x.InitChain } return nil } -func (m *Request) GetQuery() *RequestQuery { +func (m *Request) GetQuery() *QueryRequest { if x, ok := m.GetValue().(*Request_Query); ok { return x.Query } return nil } -func (m *Request) GetCheckTx() *RequestCheckTx { +func (m *Request) GetCheckTx() *CheckTxRequest { if x, ok := m.GetValue().(*Request_CheckTx); ok { return x.CheckTx } return nil } -func (m *Request) GetCommit() *RequestCommit { +func (m *Request) GetCommit() *CommitRequest { if x, ok := m.GetValue().(*Request_Commit); ok { return x.Commit } return nil } -func (m *Request) GetListSnapshots() *RequestListSnapshots { +func (m *Request) GetListSnapshots() *ListSnapshotsRequest { if x, ok := m.GetValue().(*Request_ListSnapshots); ok { return x.ListSnapshots } return nil } -func (m *Request) GetOfferSnapshot() *RequestOfferSnapshot { +func (m *Request) GetOfferSnapshot() 
*OfferSnapshotRequest { if x, ok := m.GetValue().(*Request_OfferSnapshot); ok { return x.OfferSnapshot } return nil } -func (m *Request) GetLoadSnapshotChunk() *RequestLoadSnapshotChunk { +func (m *Request) GetLoadSnapshotChunk() *LoadSnapshotChunkRequest { if x, ok := m.GetValue().(*Request_LoadSnapshotChunk); ok { return x.LoadSnapshotChunk } return nil } -func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { +func (m *Request) GetApplySnapshotChunk() *ApplySnapshotChunkRequest { if x, ok := m.GetValue().(*Request_ApplySnapshotChunk); ok { return x.ApplySnapshotChunk } return nil } -func (m *Request) GetPrepareProposal() *RequestPrepareProposal { +func (m *Request) GetPrepareProposal() *PrepareProposalRequest { if x, ok := m.GetValue().(*Request_PrepareProposal); ok { return x.PrepareProposal } return nil } -func (m *Request) GetProcessProposal() *RequestProcessProposal { +func (m *Request) GetProcessProposal() *ProcessProposalRequest { if x, ok := m.GetValue().(*Request_ProcessProposal); ok { return x.ProcessProposal } return nil } -func (m *Request) GetExtendVote() *RequestExtendVote { +func (m *Request) GetExtendVote() *ExtendVoteRequest { if x, ok := m.GetValue().(*Request_ExtendVote); ok { return x.ExtendVote } return nil } -func (m *Request) GetVerifyVoteExtension() *RequestVerifyVoteExtension { +func (m *Request) GetVerifyVoteExtension() *VerifyVoteExtensionRequest { if x, ok := m.GetValue().(*Request_VerifyVoteExtension); ok { return x.VerifyVoteExtension } return nil } -func (m *Request) GetFinalizeBlock() *RequestFinalizeBlock { +func (m *Request) GetFinalizeBlock() *FinalizeBlockRequest { if x, ok := m.GetValue().(*Request_FinalizeBlock); ok { return x.FinalizeBlock } @@ -488,22 +523,23 @@ func (*Request) XXX_OneofWrappers() []interface{} { } } -type RequestEcho struct { +// EchoRequest is a request to "echo" the given string. 
+type EchoRequest struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } -func (m *RequestEcho) Reset() { *m = RequestEcho{} } -func (m *RequestEcho) String() string { return proto.CompactTextString(m) } -func (*RequestEcho) ProtoMessage() {} -func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{1} +func (m *EchoRequest) Reset() { *m = EchoRequest{} } +func (m *EchoRequest) String() string { return proto.CompactTextString(m) } +func (*EchoRequest) ProtoMessage() {} +func (*EchoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{1} } -func (m *RequestEcho) XXX_Unmarshal(b []byte) error { +func (m *EchoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EchoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestEcho.Marshal(b, m, deterministic) + return xxx_messageInfo_EchoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -513,40 +549,41 @@ func (m *RequestEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *RequestEcho) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEcho.Merge(m, src) +func (m *EchoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EchoRequest.Merge(m, src) } -func (m *RequestEcho) XXX_Size() int { +func (m *EchoRequest) XXX_Size() int { return m.Size() } -func (m *RequestEcho) XXX_DiscardUnknown() { - xxx_messageInfo_RequestEcho.DiscardUnknown(m) +func (m *EchoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EchoRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestEcho proto.InternalMessageInfo +var xxx_messageInfo_EchoRequest proto.InternalMessageInfo -func (m *RequestEcho) GetMessage() string { +func (m *EchoRequest) GetMessage() string { if m != nil { return m.Message } return "" } -type RequestFlush struct { +// FlushRequest is a request to flush the write buffer. 
+type FlushRequest struct { } -func (m *RequestFlush) Reset() { *m = RequestFlush{} } -func (m *RequestFlush) String() string { return proto.CompactTextString(m) } -func (*RequestFlush) ProtoMessage() {} -func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{2} +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} +func (*FlushRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{2} } -func (m *RequestFlush) XXX_Unmarshal(b []byte) error { +func (m *FlushRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestFlush.Marshal(b, m, deterministic) + return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -556,37 +593,38 @@ func (m *RequestFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *RequestFlush) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestFlush.Merge(m, src) +func (m *FlushRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushRequest.Merge(m, src) } -func (m *RequestFlush) XXX_Size() int { +func (m *FlushRequest) XXX_Size() int { return m.Size() } -func (m *RequestFlush) XXX_DiscardUnknown() { - xxx_messageInfo_RequestFlush.DiscardUnknown(m) +func (m *FlushRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FlushRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestFlush proto.InternalMessageInfo +var xxx_messageInfo_FlushRequest proto.InternalMessageInfo -type RequestInfo struct { +// InfoRequest is a request for the ABCI application version. 
+type InfoRequest struct { Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` AbciVersion string `protobuf:"bytes,4,opt,name=abci_version,json=abciVersion,proto3" json:"abci_version,omitempty"` } -func (m *RequestInfo) Reset() { *m = RequestInfo{} } -func (m *RequestInfo) String() string { return proto.CompactTextString(m) } -func (*RequestInfo) ProtoMessage() {} -func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{3} +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{3} } -func (m *RequestInfo) XXX_Unmarshal(b []byte) error { +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -596,67 +634,68 @@ func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *RequestInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestInfo.Merge(m, src) +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) } -func (m *RequestInfo) XXX_Size() int { +func (m *InfoRequest) XXX_Size() int { return m.Size() } -func (m *RequestInfo) XXX_DiscardUnknown() { - xxx_messageInfo_RequestInfo.DiscardUnknown(m) +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestInfo proto.InternalMessageInfo +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo -func (m *RequestInfo) GetVersion() string { +func (m *InfoRequest) GetVersion() string { if m != nil { return m.Version } return "" } -func (m *RequestInfo) GetBlockVersion() uint64 { +func (m *InfoRequest) GetBlockVersion() uint64 { if m != nil { return m.BlockVersion } return 0 } -func (m *RequestInfo) GetP2PVersion() uint64 { +func (m *InfoRequest) GetP2PVersion() uint64 { if m != nil { return m.P2PVersion } return 0 } -func (m *RequestInfo) GetAbciVersion() string { +func (m *InfoRequest) GetAbciVersion() string { if m != nil { return m.AbciVersion } return "" } -type RequestInitChain struct { - Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - ConsensusParams *types1.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` - AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` - InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` +// 
InitChainRequest is a request to initialize the blockchain. +type InitChainRequest struct { + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *v1.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` } -func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } -func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } -func (*RequestInitChain) ProtoMessage() {} -func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{4} +func (m *InitChainRequest) Reset() { *m = InitChainRequest{} } +func (m *InitChainRequest) String() string { return proto.CompactTextString(m) } +func (*InitChainRequest) ProtoMessage() {} +func (*InitChainRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{4} } -func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { +func (m *InitChainRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *InitChainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestInitChain.Marshal(b, m, deterministic) + return xxx_messageInfo_InitChainRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -666,79 +705,80 @@ func (m *RequestInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (m *RequestInitChain) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestInitChain.Merge(m, src) +func (m *InitChainRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitChainRequest.Merge(m, src) } -func (m *RequestInitChain) XXX_Size() int { +func (m *InitChainRequest) XXX_Size() int { return m.Size() } -func (m *RequestInitChain) XXX_DiscardUnknown() { - xxx_messageInfo_RequestInitChain.DiscardUnknown(m) +func (m *InitChainRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitChainRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestInitChain proto.InternalMessageInfo +var xxx_messageInfo_InitChainRequest proto.InternalMessageInfo -func (m *RequestInitChain) GetTime() time.Time { +func (m *InitChainRequest) GetTime() time.Time { if m != nil { return m.Time } return time.Time{} } -func (m *RequestInitChain) GetChainId() string { +func (m *InitChainRequest) GetChainId() string { if m != nil { return m.ChainId } return "" } -func (m *RequestInitChain) GetConsensusParams() *types1.ConsensusParams { +func (m *InitChainRequest) GetConsensusParams() *v1.ConsensusParams { if m != nil { return m.ConsensusParams } return nil } -func (m *RequestInitChain) GetValidators() []ValidatorUpdate { +func (m *InitChainRequest) GetValidators() []ValidatorUpdate { if m != nil { return m.Validators } return nil } -func (m *RequestInitChain) GetAppStateBytes() []byte { +func (m *InitChainRequest) GetAppStateBytes() []byte { if m != nil { return m.AppStateBytes } return nil } -func (m 
*RequestInitChain) GetInitialHeight() int64 { +func (m *InitChainRequest) GetInitialHeight() int64 { if m != nil { return m.InitialHeight } return 0 } -type RequestQuery struct { +// QueryRequest is a request to query the application state. +type QueryRequest struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` Prove bool `protobuf:"varint,4,opt,name=prove,proto3" json:"prove,omitempty"` } -func (m *RequestQuery) Reset() { *m = RequestQuery{} } -func (m *RequestQuery) String() string { return proto.CompactTextString(m) } -func (*RequestQuery) ProtoMessage() {} -func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{5} +func (m *QueryRequest) Reset() { *m = QueryRequest{} } +func (m *QueryRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRequest) ProtoMessage() {} +func (*QueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{5} } -func (m *RequestQuery) XXX_Unmarshal(b []byte) error { +func (m *QueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestQuery.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -748,63 +788,64 @@ func (m *RequestQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *RequestQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestQuery.Merge(m, src) +func (m *QueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRequest.Merge(m, src) } -func (m *RequestQuery) XXX_Size() int { +func (m *QueryRequest) XXX_Size() int { return m.Size() } -func (m *RequestQuery) XXX_DiscardUnknown() { - xxx_messageInfo_RequestQuery.DiscardUnknown(m) +func (m *QueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestQuery proto.InternalMessageInfo +var xxx_messageInfo_QueryRequest proto.InternalMessageInfo -func (m *RequestQuery) GetData() []byte { +func (m *QueryRequest) GetData() []byte { if m != nil { return m.Data } return nil } -func (m *RequestQuery) GetPath() string { +func (m *QueryRequest) GetPath() string { if m != nil { return m.Path } return "" } -func (m *RequestQuery) GetHeight() int64 { +func (m *QueryRequest) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *RequestQuery) GetProve() bool { +func (m *QueryRequest) GetProve() bool { if m != nil { return m.Prove } return false } -type RequestCheckTx struct { +// CheckTxRequest is a request to check that the transaction is valid. 
+type CheckTxRequest struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` - Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.CheckTxType" json:"type,omitempty"` + Type CheckTxType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.abci.v1.CheckTxType" json:"type,omitempty"` } -func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } -func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } -func (*RequestCheckTx) ProtoMessage() {} -func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{6} +func (m *CheckTxRequest) Reset() { *m = CheckTxRequest{} } +func (m *CheckTxRequest) String() string { return proto.CompactTextString(m) } +func (*CheckTxRequest) ProtoMessage() {} +func (*CheckTxRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{6} } -func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { +func (m *CheckTxRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *CheckTxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestCheckTx.Marshal(b, m, deterministic) + return xxx_messageInfo_CheckTxRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -814,47 +855,48 @@ func (m *RequestCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *RequestCheckTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestCheckTx.Merge(m, src) +func (m *CheckTxRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckTxRequest.Merge(m, src) } -func (m *RequestCheckTx) XXX_Size() int { +func (m *CheckTxRequest) XXX_Size() int { return m.Size() } -func (m *RequestCheckTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestCheckTx.DiscardUnknown(m) +func (m *CheckTxRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckTxRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestCheckTx proto.InternalMessageInfo +var xxx_messageInfo_CheckTxRequest proto.InternalMessageInfo -func (m *RequestCheckTx) GetTx() []byte { +func (m *CheckTxRequest) GetTx() []byte { if m != nil { return m.Tx } return nil } -func (m *RequestCheckTx) GetType() CheckTxType { +func (m *CheckTxRequest) GetType() CheckTxType { if m != nil { return m.Type } - return CheckTxType_New + return CHECK_TX_TYPE_UNKNOWN } -type RequestCommit struct { +// CommitRequest is a request to commit the pending application state. 
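// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of this diff. Note that the
// Type field above moved from protobuf field number 2 to 3 precisely because
// the new CheckTxType enum is wire-incompatible with the v1beta1 definition.
// txBytes is a placeholder and abciv1 an assumed import alias.
//
//	var txBytes []byte // placeholder transaction bytes
//	req := &abciv1.CheckTxRequest{
//		Tx: txBytes,
//		// First-time check; a mempool would use CHECK_TX_TYPE_RECHECK when
//		// re-validating transactions that remain in the pool after a commit.
//		Type: abciv1.CHECK_TX_TYPE_CHECK,
//	}
// ---------------------------------------------------------------------------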
+type CommitRequest struct { } -func (m *RequestCommit) Reset() { *m = RequestCommit{} } -func (m *RequestCommit) String() string { return proto.CompactTextString(m) } -func (*RequestCommit) ProtoMessage() {} -func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{7} +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{7} } -func (m *RequestCommit) XXX_Unmarshal(b []byte) error { +func (m *CommitRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestCommit.Marshal(b, m, deterministic) + return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -864,34 +906,34 @@ func (m *RequestCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (m *RequestCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestCommit.Merge(m, src) +func (m *CommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitRequest.Merge(m, src) } -func (m *RequestCommit) XXX_Size() int { +func (m *CommitRequest) XXX_Size() int { return m.Size() } -func (m *RequestCommit) XXX_DiscardUnknown() { - xxx_messageInfo_RequestCommit.DiscardUnknown(m) +func (m *CommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CommitRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestCommit proto.InternalMessageInfo +var xxx_messageInfo_CommitRequest proto.InternalMessageInfo -// lists available snapshots -type RequestListSnapshots struct { +// Request to list available snapshots. 
+type ListSnapshotsRequest struct { } -func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } -func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } -func (*RequestListSnapshots) ProtoMessage() {} -func (*RequestListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{8} +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{8} } -func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { +func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestListSnapshots.Marshal(b, m, deterministic) + return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -901,36 +943,36 @@ func (m *RequestListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *RequestListSnapshots) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestListSnapshots.Merge(m, src) +func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(m, src) } -func (m *RequestListSnapshots) XXX_Size() int { +func (m *ListSnapshotsRequest) XXX_Size() int { return m.Size() } -func (m *RequestListSnapshots) XXX_DiscardUnknown() { - xxx_messageInfo_RequestListSnapshots.DiscardUnknown(m) +func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestListSnapshots proto.InternalMessageInfo +var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo -// offers a snapshot to the application -type RequestOfferSnapshot struct { +// Request offering a snapshot to the application. 
+type OfferSnapshotRequest struct { Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` } -func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } -func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } -func (*RequestOfferSnapshot) ProtoMessage() {} -func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{9} +func (m *OfferSnapshotRequest) Reset() { *m = OfferSnapshotRequest{} } +func (m *OfferSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*OfferSnapshotRequest) ProtoMessage() {} +func (*OfferSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{9} } -func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { +func (m *OfferSnapshotRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *OfferSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestOfferSnapshot.Marshal(b, m, deterministic) + return xxx_messageInfo_OfferSnapshotRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -940,51 +982,51 @@ func (m *RequestOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *RequestOfferSnapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestOfferSnapshot.Merge(m, src) +func (m *OfferSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_OfferSnapshotRequest.Merge(m, src) } -func (m *RequestOfferSnapshot) XXX_Size() int { +func (m *OfferSnapshotRequest) XXX_Size() int { return m.Size() } -func (m *RequestOfferSnapshot) XXX_DiscardUnknown() { - xxx_messageInfo_RequestOfferSnapshot.DiscardUnknown(m) +func (m *OfferSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_OfferSnapshotRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestOfferSnapshot proto.InternalMessageInfo +var xxx_messageInfo_OfferSnapshotRequest proto.InternalMessageInfo -func (m *RequestOfferSnapshot) GetSnapshot() *Snapshot { +func (m *OfferSnapshotRequest) GetSnapshot() *Snapshot { if m != nil { return m.Snapshot } return nil } -func (m *RequestOfferSnapshot) GetAppHash() []byte { +func (m *OfferSnapshotRequest) GetAppHash() []byte { if m != nil { return m.AppHash } return nil } -// loads a snapshot chunk -type RequestLoadSnapshotChunk struct { +// Request to load a snapshot chunk. 
+type LoadSnapshotChunkRequest struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` Chunk uint32 `protobuf:"varint,3,opt,name=chunk,proto3" json:"chunk,omitempty"` } -func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChunk{} } -func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } -func (*RequestLoadSnapshotChunk) ProtoMessage() {} -func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{10} +func (m *LoadSnapshotChunkRequest) Reset() { *m = LoadSnapshotChunkRequest{} } +func (m *LoadSnapshotChunkRequest) String() string { return proto.CompactTextString(m) } +func (*LoadSnapshotChunkRequest) ProtoMessage() {} +func (*LoadSnapshotChunkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{10} } -func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { +func (m *LoadSnapshotChunkRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *LoadSnapshotChunkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestLoadSnapshotChunk.Marshal(b, m, deterministic) + return xxx_messageInfo_LoadSnapshotChunkRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -994,58 +1036,58 @@ func (m *RequestLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *RequestLoadSnapshotChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestLoadSnapshotChunk.Merge(m, src) +func (m *LoadSnapshotChunkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadSnapshotChunkRequest.Merge(m, src) } -func (m *RequestLoadSnapshotChunk) XXX_Size() int { +func (m *LoadSnapshotChunkRequest) XXX_Size() int { return m.Size() } -func (m *RequestLoadSnapshotChunk) XXX_DiscardUnknown() { - xxx_messageInfo_RequestLoadSnapshotChunk.DiscardUnknown(m) +func (m *LoadSnapshotChunkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoadSnapshotChunkRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestLoadSnapshotChunk proto.InternalMessageInfo +var xxx_messageInfo_LoadSnapshotChunkRequest proto.InternalMessageInfo -func (m *RequestLoadSnapshotChunk) GetHeight() uint64 { +func (m *LoadSnapshotChunkRequest) GetHeight() uint64 { if m != nil { return m.Height } return 0 } -func (m *RequestLoadSnapshotChunk) GetFormat() uint32 { +func (m *LoadSnapshotChunkRequest) GetFormat() uint32 { if m != nil { return m.Format } return 0 } -func (m *RequestLoadSnapshotChunk) GetChunk() uint32 { +func (m *LoadSnapshotChunkRequest) GetChunk() uint32 { if m != nil { return m.Chunk } return 0 } -// Applies a snapshot chunk -type RequestApplySnapshotChunk struct { +// Request to apply a snapshot chunk. 
+type ApplySnapshotChunkRequest struct { Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` Chunk []byte `protobuf:"bytes,2,opt,name=chunk,proto3" json:"chunk,omitempty"` Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` } -func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotChunk{} } -func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } -func (*RequestApplySnapshotChunk) ProtoMessage() {} -func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{11} +func (m *ApplySnapshotChunkRequest) Reset() { *m = ApplySnapshotChunkRequest{} } +func (m *ApplySnapshotChunkRequest) String() string { return proto.CompactTextString(m) } +func (*ApplySnapshotChunkRequest) ProtoMessage() {} +func (*ApplySnapshotChunkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{11} } -func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { +func (m *ApplySnapshotChunkRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ApplySnapshotChunkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestApplySnapshotChunk.Marshal(b, m, deterministic) + return xxx_messageInfo_ApplySnapshotChunkRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1055,40 +1097,42 @@ func (m *RequestApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *RequestApplySnapshotChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestApplySnapshotChunk.Merge(m, src) +func (m *ApplySnapshotChunkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplySnapshotChunkRequest.Merge(m, src) } -func (m *RequestApplySnapshotChunk) XXX_Size() int { +func (m *ApplySnapshotChunkRequest) XXX_Size() int { return m.Size() } -func (m *RequestApplySnapshotChunk) XXX_DiscardUnknown() { - xxx_messageInfo_RequestApplySnapshotChunk.DiscardUnknown(m) +func (m *ApplySnapshotChunkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ApplySnapshotChunkRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestApplySnapshotChunk proto.InternalMessageInfo +var xxx_messageInfo_ApplySnapshotChunkRequest proto.InternalMessageInfo -func (m *RequestApplySnapshotChunk) GetIndex() uint32 { +func (m *ApplySnapshotChunkRequest) GetIndex() uint32 { if m != nil { return m.Index } return 0 } -func (m *RequestApplySnapshotChunk) GetChunk() []byte { +func (m *ApplySnapshotChunkRequest) GetChunk() []byte { if m != nil { return m.Chunk } return nil } -func (m *RequestApplySnapshotChunk) GetSender() string { +func (m *ApplySnapshotChunkRequest) GetSender() string { if m != nil { return m.Sender } return "" } -type RequestPrepareProposal struct { +// PrepareProposalRequest is a request for the ABCI application to prepare a new +// block proposal. +type PrepareProposalRequest struct { // the modified transactions cannot exceed this size. 
MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` // txs is an array of transactions that will be included in a block, @@ -1103,18 +1147,18 @@ type RequestPrepareProposal struct { ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` } -func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } -func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } -func (*RequestPrepareProposal) ProtoMessage() {} -func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{12} +func (m *PrepareProposalRequest) Reset() { *m = PrepareProposalRequest{} } +func (m *PrepareProposalRequest) String() string { return proto.CompactTextString(m) } +func (*PrepareProposalRequest) ProtoMessage() {} +func (*PrepareProposalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{12} } -func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { +func (m *PrepareProposalRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PrepareProposalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_PrepareProposalRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1124,79 +1168,81 @@ func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]by return b[:n], nil } } -func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPrepareProposal.Merge(m, src) +func (m *PrepareProposalRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProposalRequest.Merge(m, src) } -func (m *RequestPrepareProposal) XXX_Size() int { +func (m *PrepareProposalRequest) XXX_Size() int { return m.Size() } -func (m *RequestPrepareProposal) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) +func (m *PrepareProposalRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProposalRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo +var xxx_messageInfo_PrepareProposalRequest proto.InternalMessageInfo -func (m *RequestPrepareProposal) GetMaxTxBytes() int64 { +func (m *PrepareProposalRequest) GetMaxTxBytes() int64 { if m != nil { return m.MaxTxBytes } return 0 } -func (m *RequestPrepareProposal) GetTxs() [][]byte { +func (m *PrepareProposalRequest) GetTxs() [][]byte { if m != nil { return m.Txs } return nil } -func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { +func (m *PrepareProposalRequest) GetLocalLastCommit() ExtendedCommitInfo { if m != nil { return m.LocalLastCommit } return ExtendedCommitInfo{} } -func (m *RequestPrepareProposal) GetMisbehavior() []Misbehavior { +func (m *PrepareProposalRequest) GetMisbehavior() []Misbehavior { if m != nil { return m.Misbehavior } return nil } -func (m *RequestPrepareProposal) GetHeight() int64 { +func (m *PrepareProposalRequest) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *RequestPrepareProposal) GetTime() time.Time { +func (m *PrepareProposalRequest) GetTime() time.Time { if m != nil { return m.Time } return time.Time{} } -func (m *RequestPrepareProposal) 
GetNextValidatorsHash() []byte { +func (m *PrepareProposalRequest) GetNextValidatorsHash() []byte { if m != nil { return m.NextValidatorsHash } return nil } -func (m *RequestPrepareProposal) GetProposerAddress() []byte { +func (m *PrepareProposalRequest) GetProposerAddress() []byte { if m != nil { return m.ProposerAddress } return nil } -type RequestProcessProposal struct { +// ProcessProposalRequest is a request for the ABCI application to process a proposal +// received from another validator. +type ProcessProposalRequest struct { Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` - // hash is the merkle root hash of the fields of the proposed block. + // Merkle root hash of the fields of the proposed block. Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` @@ -1205,18 +1251,18 @@ type RequestProcessProposal struct { ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` } -func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } -func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } -func (*RequestProcessProposal) ProtoMessage() {} -func (*RequestProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{13} +func (m *ProcessProposalRequest) Reset() { *m = ProcessProposalRequest{} } +func (m *ProcessProposalRequest) String() string { return proto.CompactTextString(m) } +func (*ProcessProposalRequest) ProtoMessage() {} +func (*ProcessProposalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{13} } -func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { +func (m *ProcessProposalRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ProcessProposalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestProcessProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_ProcessProposalRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1226,76 +1272,76 @@ func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]by return b[:n], nil } } -func (m *RequestProcessProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestProcessProposal.Merge(m, src) +func (m *ProcessProposalRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessProposalRequest.Merge(m, src) } -func (m *RequestProcessProposal) XXX_Size() int { +func (m *ProcessProposalRequest) XXX_Size() int { return m.Size() } -func (m *RequestProcessProposal) XXX_DiscardUnknown() { - xxx_messageInfo_RequestProcessProposal.DiscardUnknown(m) +func (m *ProcessProposalRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessProposalRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestProcessProposal proto.InternalMessageInfo +var xxx_messageInfo_ProcessProposalRequest proto.InternalMessageInfo -func (m *RequestProcessProposal) GetTxs() 
[][]byte { +func (m *ProcessProposalRequest) GetTxs() [][]byte { if m != nil { return m.Txs } return nil } -func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { +func (m *ProcessProposalRequest) GetProposedLastCommit() CommitInfo { if m != nil { return m.ProposedLastCommit } return CommitInfo{} } -func (m *RequestProcessProposal) GetMisbehavior() []Misbehavior { +func (m *ProcessProposalRequest) GetMisbehavior() []Misbehavior { if m != nil { return m.Misbehavior } return nil } -func (m *RequestProcessProposal) GetHash() []byte { +func (m *ProcessProposalRequest) GetHash() []byte { if m != nil { return m.Hash } return nil } -func (m *RequestProcessProposal) GetHeight() int64 { +func (m *ProcessProposalRequest) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *RequestProcessProposal) GetTime() time.Time { +func (m *ProcessProposalRequest) GetTime() time.Time { if m != nil { return m.Time } return time.Time{} } -func (m *RequestProcessProposal) GetNextValidatorsHash() []byte { +func (m *ProcessProposalRequest) GetNextValidatorsHash() []byte { if m != nil { return m.NextValidatorsHash } return nil } -func (m *RequestProcessProposal) GetProposerAddress() []byte { +func (m *ProcessProposalRequest) GetProposerAddress() []byte { if m != nil { return m.ProposerAddress } return nil } -// Extends a vote with application-injected data -type RequestExtendVote struct { +// ExtendVoteRequest extends a precommit vote with application-injected data. +type ExtendVoteRequest struct { // the hash of the block that this vote may be referring to Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` // the height of the extended vote @@ -1310,18 +1356,18 @@ type RequestExtendVote struct { ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` } -func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } -func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } -func (*RequestExtendVote) ProtoMessage() {} -func (*RequestExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{14} +func (m *ExtendVoteRequest) Reset() { *m = ExtendVoteRequest{} } +func (m *ExtendVoteRequest) String() string { return proto.CompactTextString(m) } +func (*ExtendVoteRequest) ProtoMessage() {} +func (*ExtendVoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{14} } -func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { +func (m *ExtendVoteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ExtendVoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestExtendVote.Marshal(b, m, deterministic) + return xxx_messageInfo_ExtendVoteRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1331,76 +1377,77 @@ func (m *RequestExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *RequestExtendVote) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestExtendVote.Merge(m, src) +func (m *ExtendVoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendVoteRequest.Merge(m, src) } -func (m *RequestExtendVote) XXX_Size() int { +func (m *ExtendVoteRequest) XXX_Size() int { return m.Size() } -func (m *RequestExtendVote) 
XXX_DiscardUnknown() { - xxx_messageInfo_RequestExtendVote.DiscardUnknown(m) +func (m *ExtendVoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendVoteRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestExtendVote proto.InternalMessageInfo +var xxx_messageInfo_ExtendVoteRequest proto.InternalMessageInfo -func (m *RequestExtendVote) GetHash() []byte { +func (m *ExtendVoteRequest) GetHash() []byte { if m != nil { return m.Hash } return nil } -func (m *RequestExtendVote) GetHeight() int64 { +func (m *ExtendVoteRequest) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *RequestExtendVote) GetTime() time.Time { +func (m *ExtendVoteRequest) GetTime() time.Time { if m != nil { return m.Time } return time.Time{} } -func (m *RequestExtendVote) GetTxs() [][]byte { +func (m *ExtendVoteRequest) GetTxs() [][]byte { if m != nil { return m.Txs } return nil } -func (m *RequestExtendVote) GetProposedLastCommit() CommitInfo { +func (m *ExtendVoteRequest) GetProposedLastCommit() CommitInfo { if m != nil { return m.ProposedLastCommit } return CommitInfo{} } -func (m *RequestExtendVote) GetMisbehavior() []Misbehavior { +func (m *ExtendVoteRequest) GetMisbehavior() []Misbehavior { if m != nil { return m.Misbehavior } return nil } -func (m *RequestExtendVote) GetNextValidatorsHash() []byte { +func (m *ExtendVoteRequest) GetNextValidatorsHash() []byte { if m != nil { return m.NextValidatorsHash } return nil } -func (m *RequestExtendVote) GetProposerAddress() []byte { +func (m *ExtendVoteRequest) GetProposerAddress() []byte { if m != nil { return m.ProposerAddress } return nil } -// Verify the vote extension -type RequestVerifyVoteExtension struct { +// VerifyVoteExtensionRequest is a request for the application to verify a vote extension +// produced by a different validator. 
+type VerifyVoteExtensionRequest struct { // the hash of the block that this received vote corresponds to Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` // the validator that signed the vote extension @@ -1409,18 +1456,18 @@ type RequestVerifyVoteExtension struct { VoteExtension []byte `protobuf:"bytes,4,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } -func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExtension{} } -func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } -func (*RequestVerifyVoteExtension) ProtoMessage() {} -func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} +func (m *VerifyVoteExtensionRequest) Reset() { *m = VerifyVoteExtensionRequest{} } +func (m *VerifyVoteExtensionRequest) String() string { return proto.CompactTextString(m) } +func (*VerifyVoteExtensionRequest) ProtoMessage() {} +func (*VerifyVoteExtensionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{15} } -func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { +func (m *VerifyVoteExtensionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *VerifyVoteExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestVerifyVoteExtension.Marshal(b, m, deterministic) + return xxx_messageInfo_VerifyVoteExtensionRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1430,71 +1477,74 @@ func (m *RequestVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ( return b[:n], nil } } -func (m *RequestVerifyVoteExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestVerifyVoteExtension.Merge(m, src) +func (m *VerifyVoteExtensionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyVoteExtensionRequest.Merge(m, src) } -func (m *RequestVerifyVoteExtension) XXX_Size() int { +func (m *VerifyVoteExtensionRequest) XXX_Size() int { return m.Size() } -func (m *RequestVerifyVoteExtension) XXX_DiscardUnknown() { - xxx_messageInfo_RequestVerifyVoteExtension.DiscardUnknown(m) +func (m *VerifyVoteExtensionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyVoteExtensionRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestVerifyVoteExtension proto.InternalMessageInfo +var xxx_messageInfo_VerifyVoteExtensionRequest proto.InternalMessageInfo -func (m *RequestVerifyVoteExtension) GetHash() []byte { +func (m *VerifyVoteExtensionRequest) GetHash() []byte { if m != nil { return m.Hash } return nil } -func (m *RequestVerifyVoteExtension) GetValidatorAddress() []byte { +func (m *VerifyVoteExtensionRequest) GetValidatorAddress() []byte { if m != nil { return m.ValidatorAddress } return nil } -func (m *RequestVerifyVoteExtension) GetHeight() int64 { +func (m *VerifyVoteExtensionRequest) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *RequestVerifyVoteExtension) GetVoteExtension() []byte { +func (m *VerifyVoteExtensionRequest) GetVoteExtension() []byte { if m != nil { return m.VoteExtension } return nil } -type RequestFinalizeBlock struct { +// FinalizeBlockRequest is a request to finalize the block. 
+type FinalizeBlockRequest struct { Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` - // hash is the merkle root hash of the fields of the decided block. + // Merkle root hash of the fields of the decided block. Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` - // proposer_address is the address of the public key of the original proposer of the block. + // address of the public key of the original proposer of the block. ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` + // If the node is syncing/replaying blocks, this is the target height. If not, syncing_to_height == height. + SyncingToHeight int64 `protobuf:"varint,9,opt,name=syncing_to_height,json=syncingToHeight,proto3" json:"syncing_to_height,omitempty"` } -func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } -func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } -func (*RequestFinalizeBlock) ProtoMessage() {} -func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} +func (m *FinalizeBlockRequest) Reset() { *m = FinalizeBlockRequest{} } +func (m *FinalizeBlockRequest) String() string { return proto.CompactTextString(m) } +func (*FinalizeBlockRequest) ProtoMessage() {} +func (*FinalizeBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{16} } -func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { +func (m *FinalizeBlockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *FinalizeBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestFinalizeBlock.Marshal(b, m, deterministic) + return xxx_messageInfo_FinalizeBlockRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1504,75 +1554,85 @@ func (m *RequestFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *RequestFinalizeBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestFinalizeBlock.Merge(m, src) +func (m *FinalizeBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FinalizeBlockRequest.Merge(m, src) } -func (m *RequestFinalizeBlock) XXX_Size() int { +func (m *FinalizeBlockRequest) XXX_Size() int { return m.Size() } -func (m *RequestFinalizeBlock) XXX_DiscardUnknown() { - xxx_messageInfo_RequestFinalizeBlock.DiscardUnknown(m) +func (m *FinalizeBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FinalizeBlockRequest.DiscardUnknown(m) } -var xxx_messageInfo_RequestFinalizeBlock proto.InternalMessageInfo +var xxx_messageInfo_FinalizeBlockRequest proto.InternalMessageInfo -func (m *RequestFinalizeBlock) GetTxs() [][]byte { +func (m *FinalizeBlockRequest) GetTxs() [][]byte { if m != nil { return m.Txs }
return nil } -func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { +func (m *FinalizeBlockRequest) GetDecidedLastCommit() CommitInfo { if m != nil { return m.DecidedLastCommit } return CommitInfo{} } -func (m *RequestFinalizeBlock) GetMisbehavior() []Misbehavior { +func (m *FinalizeBlockRequest) GetMisbehavior() []Misbehavior { if m != nil { return m.Misbehavior } return nil } -func (m *RequestFinalizeBlock) GetHash() []byte { +func (m *FinalizeBlockRequest) GetHash() []byte { if m != nil { return m.Hash } return nil } -func (m *RequestFinalizeBlock) GetHeight() int64 { +func (m *FinalizeBlockRequest) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *RequestFinalizeBlock) GetTime() time.Time { +func (m *FinalizeBlockRequest) GetTime() time.Time { if m != nil { return m.Time } return time.Time{} } -func (m *RequestFinalizeBlock) GetNextValidatorsHash() []byte { +func (m *FinalizeBlockRequest) GetNextValidatorsHash() []byte { if m != nil { return m.NextValidatorsHash } return nil } -func (m *RequestFinalizeBlock) GetProposerAddress() []byte { +func (m *FinalizeBlockRequest) GetProposerAddress() []byte { if m != nil { return m.ProposerAddress } return nil } +func (m *FinalizeBlockRequest) GetSyncingToHeight() int64 { + if m != nil { + return m.SyncingToHeight + } + return 0 +} + +// Response represents a response from the ABCI application. type Response struct { + // Sum of all possible messages. + // // Types that are valid to be assigned to Value: // // *Response_Exception @@ -1599,7 +1659,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_95dd8f7b670b96e3, []int{17} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1635,55 +1695,55 @@ type isResponse_Value interface { } type Response_Exception struct { - Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` + Exception *ExceptionResponse `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` } type Response_Echo struct { - Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` + Echo *EchoResponse `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` } type Response_Flush struct { - Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` + Flush *FlushResponse `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` } type Response_Info struct { - Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` + Info *InfoResponse `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` } type Response_InitChain struct { - InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` + InitChain *InitChainResponse `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` } type Response_Query struct { - Query *ResponseQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` + Query *QueryResponse `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` } type Response_CheckTx struct { - CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" 
json:"check_tx,omitempty"` + CheckTx *CheckTxResponse `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } type Response_Commit struct { - Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` + Commit *CommitResponse `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } type Response_ListSnapshots struct { - ListSnapshots *ResponseListSnapshots `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` + ListSnapshots *ListSnapshotsResponse `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` } type Response_OfferSnapshot struct { - OfferSnapshot *ResponseOfferSnapshot `protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` + OfferSnapshot *OfferSnapshotResponse `protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` } type Response_LoadSnapshotChunk struct { - LoadSnapshotChunk *ResponseLoadSnapshotChunk `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` + LoadSnapshotChunk *LoadSnapshotChunkResponse `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` } type Response_ApplySnapshotChunk struct { - ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` + ApplySnapshotChunk *ApplySnapshotChunkResponse `protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } type Response_PrepareProposal struct { - PrepareProposal *ResponsePrepareProposal `protobuf:"bytes,17,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` + PrepareProposal *PrepareProposalResponse `protobuf:"bytes,17,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` } type Response_ProcessProposal struct { - ProcessProposal *ResponseProcessProposal `protobuf:"bytes,18,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` + ProcessProposal *ProcessProposalResponse `protobuf:"bytes,18,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` } type Response_ExtendVote struct { - ExtendVote *ResponseExtendVote `protobuf:"bytes,19,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` + ExtendVote *ExtendVoteResponse `protobuf:"bytes,19,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` } type Response_VerifyVoteExtension struct { - VerifyVoteExtension *ResponseVerifyVoteExtension `protobuf:"bytes,20,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` + VerifyVoteExtension *VerifyVoteExtensionResponse `protobuf:"bytes,20,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` } type Response_FinalizeBlock struct { - FinalizeBlock *ResponseFinalizeBlock `protobuf:"bytes,21,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` + FinalizeBlock *FinalizeBlockResponse 
`protobuf:"bytes,21,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` } func (*Response_Exception) isResponse_Value() {} @@ -1711,119 +1771,119 @@ func (m *Response) GetValue() isResponse_Value { return nil } -func (m *Response) GetException() *ResponseException { +func (m *Response) GetException() *ExceptionResponse { if x, ok := m.GetValue().(*Response_Exception); ok { return x.Exception } return nil } -func (m *Response) GetEcho() *ResponseEcho { +func (m *Response) GetEcho() *EchoResponse { if x, ok := m.GetValue().(*Response_Echo); ok { return x.Echo } return nil } -func (m *Response) GetFlush() *ResponseFlush { +func (m *Response) GetFlush() *FlushResponse { if x, ok := m.GetValue().(*Response_Flush); ok { return x.Flush } return nil } -func (m *Response) GetInfo() *ResponseInfo { +func (m *Response) GetInfo() *InfoResponse { if x, ok := m.GetValue().(*Response_Info); ok { return x.Info } return nil } -func (m *Response) GetInitChain() *ResponseInitChain { +func (m *Response) GetInitChain() *InitChainResponse { if x, ok := m.GetValue().(*Response_InitChain); ok { return x.InitChain } return nil } -func (m *Response) GetQuery() *ResponseQuery { +func (m *Response) GetQuery() *QueryResponse { if x, ok := m.GetValue().(*Response_Query); ok { return x.Query } return nil } -func (m *Response) GetCheckTx() *ResponseCheckTx { +func (m *Response) GetCheckTx() *CheckTxResponse { if x, ok := m.GetValue().(*Response_CheckTx); ok { return x.CheckTx } return nil } -func (m *Response) GetCommit() *ResponseCommit { +func (m *Response) GetCommit() *CommitResponse { if x, ok := m.GetValue().(*Response_Commit); ok { return x.Commit } return nil } -func (m *Response) GetListSnapshots() *ResponseListSnapshots { +func (m *Response) GetListSnapshots() *ListSnapshotsResponse { if x, ok := m.GetValue().(*Response_ListSnapshots); ok { return x.ListSnapshots } return nil } -func (m *Response) GetOfferSnapshot() *ResponseOfferSnapshot { +func (m *Response) GetOfferSnapshot() *OfferSnapshotResponse { if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { return x.OfferSnapshot } return nil } -func (m *Response) GetLoadSnapshotChunk() *ResponseLoadSnapshotChunk { +func (m *Response) GetLoadSnapshotChunk() *LoadSnapshotChunkResponse { if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { return x.LoadSnapshotChunk } return nil } -func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { +func (m *Response) GetApplySnapshotChunk() *ApplySnapshotChunkResponse { if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { return x.ApplySnapshotChunk } return nil } -func (m *Response) GetPrepareProposal() *ResponsePrepareProposal { +func (m *Response) GetPrepareProposal() *PrepareProposalResponse { if x, ok := m.GetValue().(*Response_PrepareProposal); ok { return x.PrepareProposal } return nil } -func (m *Response) GetProcessProposal() *ResponseProcessProposal { +func (m *Response) GetProcessProposal() *ProcessProposalResponse { if x, ok := m.GetValue().(*Response_ProcessProposal); ok { return x.ProcessProposal } return nil } -func (m *Response) GetExtendVote() *ResponseExtendVote { +func (m *Response) GetExtendVote() *ExtendVoteResponse { if x, ok := m.GetValue().(*Response_ExtendVote); ok { return x.ExtendVote } return nil } -func (m *Response) GetVerifyVoteExtension() *ResponseVerifyVoteExtension { +func (m *Response) GetVerifyVoteExtension() *VerifyVoteExtensionResponse { if x, ok := m.GetValue().(*Response_VerifyVoteExtension); ok { return 
x.VerifyVoteExtension } return nil } -func (m *Response) GetFinalizeBlock() *ResponseFinalizeBlock { +func (m *Response) GetFinalizeBlock() *FinalizeBlockResponse { if x, ok := m.GetValue().(*Response_FinalizeBlock); ok { return x.FinalizeBlock } @@ -1854,22 +1914,22 @@ func (*Response) XXX_OneofWrappers() []interface{} { } // nondeterministic -type ResponseException struct { +type ExceptionResponse struct { Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } -func (m *ResponseException) Reset() { *m = ResponseException{} } -func (m *ResponseException) String() string { return proto.CompactTextString(m) } -func (*ResponseException) ProtoMessage() {} -func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} +func (m *ExceptionResponse) Reset() { *m = ExceptionResponse{} } +func (m *ExceptionResponse) String() string { return proto.CompactTextString(m) } +func (*ExceptionResponse) ProtoMessage() {} +func (*ExceptionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{18} } -func (m *ResponseException) XXX_Unmarshal(b []byte) error { +func (m *ExceptionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseException) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ExceptionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseException.Marshal(b, m, deterministic) + return xxx_messageInfo_ExceptionResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1879,41 +1939,42 @@ func (m *ResponseException) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *ResponseException) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseException.Merge(m, src) +func (m *ExceptionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExceptionResponse.Merge(m, src) } -func (m *ResponseException) XXX_Size() int { +func (m *ExceptionResponse) XXX_Size() int { return m.Size() } -func (m *ResponseException) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseException.DiscardUnknown(m) +func (m *ExceptionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExceptionResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseException proto.InternalMessageInfo +var xxx_messageInfo_ExceptionResponse proto.InternalMessageInfo -func (m *ResponseException) GetError() string { +func (m *ExceptionResponse) GetError() string { if m != nil { return m.Error } return "" } -type ResponseEcho struct { +// EchoResponse indicates that the connection is still alive. 
+type EchoResponse struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } -func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } -func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } -func (*ResponseEcho) ProtoMessage() {} -func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} +func (m *EchoResponse) Reset() { *m = EchoResponse{} } +func (m *EchoResponse) String() string { return proto.CompactTextString(m) } +func (*EchoResponse) ProtoMessage() {} +func (*EchoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{19} } -func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { +func (m *EchoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EchoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseEcho.Marshal(b, m, deterministic) + return xxx_messageInfo_EchoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1923,40 +1984,41 @@ func (m *ResponseEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *ResponseEcho) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEcho.Merge(m, src) +func (m *EchoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EchoResponse.Merge(m, src) } -func (m *ResponseEcho) XXX_Size() int { +func (m *EchoResponse) XXX_Size() int { return m.Size() } -func (m *ResponseEcho) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEcho.DiscardUnknown(m) +func (m *EchoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EchoResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseEcho proto.InternalMessageInfo +var xxx_messageInfo_EchoResponse proto.InternalMessageInfo -func (m *ResponseEcho) GetMessage() string { +func (m *EchoResponse) GetMessage() string { if m != nil { return m.Message } return "" } -type ResponseFlush struct { +// FlushResponse indicates that the write buffer was flushed. 
+type FlushResponse struct { } -func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } -func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } -func (*ResponseFlush) ProtoMessage() {} -func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} +func (m *FlushResponse) Reset() { *m = FlushResponse{} } +func (m *FlushResponse) String() string { return proto.CompactTextString(m) } +func (*FlushResponse) ProtoMessage() {} +func (*FlushResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{20} } -func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { +func (m *FlushResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *FlushResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseFlush.Marshal(b, m, deterministic) + return xxx_messageInfo_FlushResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1966,38 +2028,41 @@ func (m *ResponseFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (m *ResponseFlush) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseFlush.Merge(m, src) +func (m *FlushResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushResponse.Merge(m, src) } -func (m *ResponseFlush) XXX_Size() int { +func (m *FlushResponse) XXX_Size() int { return m.Size() } -func (m *ResponseFlush) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseFlush.DiscardUnknown(m) +func (m *FlushResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FlushResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo +var xxx_messageInfo_FlushResponse proto.InternalMessageInfo -type ResponseInfo struct { - Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` - LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` +// InfoResponse contains the ABCI application version information. 
+type InfoResponse struct { + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` + LanePriorities map[string]uint32 `protobuf:"bytes,6,rep,name=lane_priorities,json=lanePriorities,proto3" json:"lane_priorities,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + DefaultLane string `protobuf:"bytes,7,opt,name=default_lane,json=defaultLane,proto3" json:"default_lane,omitempty"` } -func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } -func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } -func (*ResponseInfo) ProtoMessage() {} -func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{21} } -func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseInfo.Marshal(b, m, deterministic) + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2007,71 +2072,87 @@ func (m *ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *ResponseInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseInfo.Merge(m, src) +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) } -func (m *ResponseInfo) XXX_Size() int { +func (m *InfoResponse) XXX_Size() int { return m.Size() } -func (m *ResponseInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseInfo.DiscardUnknown(m) +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseInfo proto.InternalMessageInfo +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo -func (m *ResponseInfo) GetData() string { +func (m *InfoResponse) GetData() string { if m != nil { return m.Data } return "" } -func (m *ResponseInfo) GetVersion() string { +func (m *InfoResponse) GetVersion() string { if m != nil { return m.Version } return "" } -func (m *ResponseInfo) GetAppVersion() uint64 { +func (m *InfoResponse) GetAppVersion() uint64 { if m != nil { return m.AppVersion } return 0 } -func (m *ResponseInfo) GetLastBlockHeight() int64 { +func (m *InfoResponse) GetLastBlockHeight() int64 { if m != nil { return m.LastBlockHeight } return 0 } -func (m *ResponseInfo) GetLastBlockAppHash() []byte { +func (m *InfoResponse) GetLastBlockAppHash() []byte { if m != nil { return m.LastBlockAppHash } return nil } -type ResponseInitChain struct { - ConsensusParams 
*types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` - AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +func (m *InfoResponse) GetLanePriorities() map[string]uint32 { + if m != nil { + return m.LanePriorities + } + return nil } -func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } -func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } -func (*ResponseInitChain) ProtoMessage() {} -func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} +func (m *InfoResponse) GetDefaultLane() string { + if m != nil { + return m.DefaultLane + } + return "" +} + +// InitChainResponse contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. +type InitChainResponse struct { + ConsensusParams *v1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` } -func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { + +func (m *InitChainResponse) Reset() { *m = InitChainResponse{} } +func (m *InitChainResponse) String() string { return proto.CompactTextString(m) } +func (*InitChainResponse) ProtoMessage() {} +func (*InitChainResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{22} +} +func (m *InitChainResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *InitChainResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) + return xxx_messageInfo_InitChainResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2081,64 +2162,65 @@ func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *ResponseInitChain) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseInitChain.Merge(m, src) +func (m *InitChainResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitChainResponse.Merge(m, src) } -func (m *ResponseInitChain) XXX_Size() int { +func (m *InitChainResponse) XXX_Size() int { return m.Size() } -func (m *ResponseInitChain) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseInitChain.DiscardUnknown(m) +func (m *InitChainResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitChainResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo +var xxx_messageInfo_InitChainResponse proto.InternalMessageInfo -func (m *ResponseInitChain) GetConsensusParams() *types1.ConsensusParams { +func (m *InitChainResponse) GetConsensusParams() *v1.ConsensusParams { if m != nil { return m.ConsensusParams } return nil } -func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { +func (m *InitChainResponse) GetValidators() []ValidatorUpdate { if m != nil { return m.Validators } return nil } -func (m *ResponseInitChain) GetAppHash() []byte { +func (m *InitChainResponse) GetAppHash() []byte { if m != nil { return 
m.AppHash } return nil } -type ResponseQuery struct { +// QueryResponse contains the ABCI application data along with a proof. +type QueryResponse struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // bytes data = 2; // use "value" instead. - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` - Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - ProofOps *crypto.ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proof_ops,omitempty"` - Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` -} - -func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } -func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } -func (*ResponseQuery) ProtoMessage() {} -func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} -} -func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + ProofOps *v11.ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proof_ops,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +func (m *QueryResponse) Reset() { *m = QueryResponse{} } +func (m *QueryResponse) String() string { return proto.CompactTextString(m) } +func (*QueryResponse) ProtoMessage() {} +func (*QueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{23} +} +func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseQuery.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2148,82 +2230,84 @@ func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (m *ResponseQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseQuery.Merge(m, src) +func (m *QueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResponse.Merge(m, src) } -func (m *ResponseQuery) XXX_Size() int { +func (m *QueryResponse) XXX_Size() int { return m.Size() } -func (m *ResponseQuery) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseQuery.DiscardUnknown(m) +func (m *QueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseQuery proto.InternalMessageInfo +var xxx_messageInfo_QueryResponse proto.InternalMessageInfo -func (m *ResponseQuery) GetCode() uint32 { +func 
(m *QueryResponse) GetCode() uint32 { if m != nil { return m.Code } return 0 } -func (m *ResponseQuery) GetLog() string { +func (m *QueryResponse) GetLog() string { if m != nil { return m.Log } return "" } -func (m *ResponseQuery) GetInfo() string { +func (m *QueryResponse) GetInfo() string { if m != nil { return m.Info } return "" } -func (m *ResponseQuery) GetIndex() int64 { +func (m *QueryResponse) GetIndex() int64 { if m != nil { return m.Index } return 0 } -func (m *ResponseQuery) GetKey() []byte { +func (m *QueryResponse) GetKey() []byte { if m != nil { return m.Key } return nil } -func (m *ResponseQuery) GetValue() []byte { +func (m *QueryResponse) GetValue() []byte { if m != nil { return m.Value } return nil } -func (m *ResponseQuery) GetProofOps() *crypto.ProofOps { +func (m *QueryResponse) GetProofOps() *v11.ProofOps { if m != nil { return m.ProofOps } return nil } -func (m *ResponseQuery) GetHeight() int64 { +func (m *QueryResponse) GetHeight() int64 { if m != nil { return m.Height } return 0 } -func (m *ResponseQuery) GetCodespace() string { +func (m *QueryResponse) GetCodespace() string { if m != nil { return m.Codespace } return "" } -type ResponseCheckTx struct { +// CheckTxResponse shows if the transaction was deemed valid by the ABCI +// application. +type CheckTxResponse struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` @@ -2232,20 +2316,21 @@ type ResponseCheckTx struct { GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + LaneId string `protobuf:"bytes,12,opt,name=lane_id,json=laneId,proto3" json:"lane_id,omitempty"` } -func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } -func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } -func (*ResponseCheckTx) ProtoMessage() {} -func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} +func (m *CheckTxResponse) Reset() { *m = CheckTxResponse{} } +func (m *CheckTxResponse) String() string { return proto.CompactTextString(m) } +func (*CheckTxResponse) ProtoMessage() {} +func (*CheckTxResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{24} } -func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { +func (m *CheckTxResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *CheckTxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic) + return xxx_messageInfo_CheckTxResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2255,90 +2340,98 @@ func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return b[:n], nil } } -func (m *ResponseCheckTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCheckTx.Merge(m, src) +func (m *CheckTxResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckTxResponse.Merge(m, src) } -func (m *ResponseCheckTx) XXX_Size() int { +func (m *CheckTxResponse) XXX_Size() int { return m.Size() } -func (m 
*ResponseCheckTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCheckTx.DiscardUnknown(m) +func (m *CheckTxResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CheckTxResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseCheckTx proto.InternalMessageInfo +var xxx_messageInfo_CheckTxResponse proto.InternalMessageInfo -func (m *ResponseCheckTx) GetCode() uint32 { +func (m *CheckTxResponse) GetCode() uint32 { if m != nil { return m.Code } return 0 } -func (m *ResponseCheckTx) GetData() []byte { +func (m *CheckTxResponse) GetData() []byte { if m != nil { return m.Data } return nil } -func (m *ResponseCheckTx) GetLog() string { +func (m *CheckTxResponse) GetLog() string { if m != nil { return m.Log } return "" } -func (m *ResponseCheckTx) GetInfo() string { +func (m *CheckTxResponse) GetInfo() string { if m != nil { return m.Info } return "" } -func (m *ResponseCheckTx) GetGasWanted() int64 { +func (m *CheckTxResponse) GetGasWanted() int64 { if m != nil { return m.GasWanted } return 0 } -func (m *ResponseCheckTx) GetGasUsed() int64 { +func (m *CheckTxResponse) GetGasUsed() int64 { if m != nil { return m.GasUsed } return 0 } -func (m *ResponseCheckTx) GetEvents() []Event { +func (m *CheckTxResponse) GetEvents() []Event { if m != nil { return m.Events } return nil } -func (m *ResponseCheckTx) GetCodespace() string { +func (m *CheckTxResponse) GetCodespace() string { if m != nil { return m.Codespace } return "" } -type ResponseCommit struct { +func (m *CheckTxResponse) GetLaneId() string { + if m != nil { + return m.LaneId + } + return "" +} + +// CommitResponse indicates how many blocks CometBFT should retain. +type CommitResponse struct { RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } -func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } -func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } -func (*ResponseCommit) ProtoMessage() {} -func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{25} } -func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2348,41 +2441,42 @@ func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *ResponseCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCommit.Merge(m, src) +func (m *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(m, src) } -func (m *ResponseCommit) XXX_Size() int { +func (m *CommitResponse) XXX_Size() int { return m.Size() } -func (m *ResponseCommit) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCommit.DiscardUnknown(m) +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) } -var
xxx_messageInfo_ResponseCommit proto.InternalMessageInfo +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo -func (m *ResponseCommit) GetRetainHeight() int64 { +func (m *CommitResponse) GetRetainHeight() int64 { if m != nil { return m.RetainHeight } return 0 } -type ResponseListSnapshots struct { +// ListSnapshotsResponse contains the list of snapshots. +type ListSnapshotsResponse struct { Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` } -func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } -func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } -func (*ResponseListSnapshots) ProtoMessage() {} -func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{26} } -func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseListSnapshots.Marshal(b, m, deterministic) + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2392,41 +2486,43 @@ func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } -func (m *ResponseListSnapshots) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseListSnapshots.Merge(m, src) +func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) } -func (m *ResponseListSnapshots) XXX_Size() int { +func (m *ListSnapshotsResponse) XXX_Size() int { return m.Size() } -func (m *ResponseListSnapshots) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseListSnapshots.DiscardUnknown(m) +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseListSnapshots proto.InternalMessageInfo +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo -func (m *ResponseListSnapshots) GetSnapshots() []*Snapshot { +func (m *ListSnapshotsResponse) GetSnapshots() []*Snapshot { if m != nil { return m.Snapshots } return nil } -type ResponseOfferSnapshot struct { - Result ResponseOfferSnapshot_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseOfferSnapshot_Result" json:"result,omitempty"` +// OfferSnapshotResponse indicates the ABCI application's decision on whether +// to provide a snapshot to the requester.
+type OfferSnapshotResponse struct { + Result OfferSnapshotResult `protobuf:"varint,1,opt,name=result,proto3,enum=cometbft.abci.v1.OfferSnapshotResult" json:"result,omitempty"` } -func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } -func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } -func (*ResponseOfferSnapshot) ProtoMessage() {} -func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} +func (m *OfferSnapshotResponse) Reset() { *m = OfferSnapshotResponse{} } +func (m *OfferSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*OfferSnapshotResponse) ProtoMessage() {} +func (*OfferSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{27} } -func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { +func (m *OfferSnapshotResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *OfferSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseOfferSnapshot.Marshal(b, m, deterministic) + return xxx_messageInfo_OfferSnapshotResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2436,41 +2532,42 @@ func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } -func (m *ResponseOfferSnapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseOfferSnapshot.Merge(m, src) +func (m *OfferSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_OfferSnapshotResponse.Merge(m, src) } -func (m *ResponseOfferSnapshot) XXX_Size() int { +func (m *OfferSnapshotResponse) XXX_Size() int { return m.Size() } -func (m *ResponseOfferSnapshot) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseOfferSnapshot.DiscardUnknown(m) +func (m *OfferSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_OfferSnapshotResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseOfferSnapshot proto.InternalMessageInfo +var xxx_messageInfo_OfferSnapshotResponse proto.InternalMessageInfo -func (m *ResponseOfferSnapshot) GetResult() ResponseOfferSnapshot_Result { +func (m *OfferSnapshotResponse) GetResult() OfferSnapshotResult { if m != nil { return m.Result } - return ResponseOfferSnapshot_UNKNOWN + return OFFER_SNAPSHOT_RESULT_UNKNOWN } -type ResponseLoadSnapshotChunk struct { +// LoadSnapshotChunkResponse returns a snapshot's chunk. 
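Before the chunk-level messages, a sketch of how a state-syncing application might answer OfferSnapshot. Only the OFFER_SNAPSHOT_RESULT_UNKNOWN value appears in this hunk; the ACCEPT and REJECT companions are assumed to follow the same naming scheme:

// offerSnapshot accepts a snapshot offer when the application supports the
// snapshot's format, and rejects it otherwise. OFFER_SNAPSHOT_RESULT_ACCEPT
// and OFFER_SNAPSHOT_RESULT_REJECT are assumed companions of the
// OFFER_SNAPSHOT_RESULT_UNKNOWN default shown above.
func offerSnapshot(formatSupported bool) *abci.OfferSnapshotResponse {
	result := abci.OFFER_SNAPSHOT_RESULT_REJECT
	if formatSupported {
		result = abci.OFFER_SNAPSHOT_RESULT_ACCEPT
	}
	return &abci.OfferSnapshotResponse{Result: result}
}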
+type LoadSnapshotChunkResponse struct { Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` } -func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotChunk{} } -func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } -func (*ResponseLoadSnapshotChunk) ProtoMessage() {} -func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} +func (m *LoadSnapshotChunkResponse) Reset() { *m = LoadSnapshotChunkResponse{} } +func (m *LoadSnapshotChunkResponse) String() string { return proto.CompactTextString(m) } +func (*LoadSnapshotChunkResponse) ProtoMessage() {} +func (*LoadSnapshotChunkResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{28} } -func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { +func (m *LoadSnapshotChunkResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *LoadSnapshotChunkResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseLoadSnapshotChunk.Marshal(b, m, deterministic) + return xxx_messageInfo_LoadSnapshotChunkResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2480,43 +2577,44 @@ func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *ResponseLoadSnapshotChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseLoadSnapshotChunk.Merge(m, src) +func (m *LoadSnapshotChunkResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadSnapshotChunkResponse.Merge(m, src) } -func (m *ResponseLoadSnapshotChunk) XXX_Size() int { +func (m *LoadSnapshotChunkResponse) XXX_Size() int { return m.Size() } -func (m *ResponseLoadSnapshotChunk) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseLoadSnapshotChunk.DiscardUnknown(m) +func (m *LoadSnapshotChunkResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LoadSnapshotChunkResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseLoadSnapshotChunk proto.InternalMessageInfo +var xxx_messageInfo_LoadSnapshotChunkResponse proto.InternalMessageInfo -func (m *ResponseLoadSnapshotChunk) GetChunk() []byte { +func (m *LoadSnapshotChunkResponse) GetChunk() []byte { if m != nil { return m.Chunk } return nil } -type ResponseApplySnapshotChunk struct { - Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseApplySnapshotChunk_Result" json:"result,omitempty"` - RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` - RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` +// ApplySnapshotChunkResponse returns a result of applying the specified chunk. 
+type ApplySnapshotChunkResponse struct { + Result ApplySnapshotChunkResult `protobuf:"varint,1,opt,name=result,proto3,enum=cometbft.abci.v1.ApplySnapshotChunkResult" json:"result,omitempty"` + RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` + RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` } -func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} } -func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } -func (*ResponseApplySnapshotChunk) ProtoMessage() {} -func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} +func (m *ApplySnapshotChunkResponse) Reset() { *m = ApplySnapshotChunkResponse{} } +func (m *ApplySnapshotChunkResponse) String() string { return proto.CompactTextString(m) } +func (*ApplySnapshotChunkResponse) ProtoMessage() {} +func (*ApplySnapshotChunkResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{29} } -func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { +func (m *ApplySnapshotChunkResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ApplySnapshotChunkResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseApplySnapshotChunk.Marshal(b, m, deterministic) + return xxx_messageInfo_ApplySnapshotChunkResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2526,55 +2624,56 @@ func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ( return b[:n], nil } } -func (m *ResponseApplySnapshotChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseApplySnapshotChunk.Merge(m, src) +func (m *ApplySnapshotChunkResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplySnapshotChunkResponse.Merge(m, src) } -func (m *ResponseApplySnapshotChunk) XXX_Size() int { +func (m *ApplySnapshotChunkResponse) XXX_Size() int { return m.Size() } -func (m *ResponseApplySnapshotChunk) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseApplySnapshotChunk.DiscardUnknown(m) +func (m *ApplySnapshotChunkResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ApplySnapshotChunkResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseApplySnapshotChunk proto.InternalMessageInfo +var xxx_messageInfo_ApplySnapshotChunkResponse proto.InternalMessageInfo -func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Result { +func (m *ApplySnapshotChunkResponse) GetResult() ApplySnapshotChunkResult { if m != nil { return m.Result } - return ResponseApplySnapshotChunk_UNKNOWN + return APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN } -func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 { +func (m *ApplySnapshotChunkResponse) GetRefetchChunks() []uint32 { if m != nil { return m.RefetchChunks } return nil } -func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { +func (m *ApplySnapshotChunkResponse) GetRejectSenders() []string { if m != nil { return m.RejectSenders } return nil } -type ResponsePrepareProposal struct { +// PrepareProposalResponse contains a list of transactions, which will form a block. 
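The RefetchChunks and RejectSenders fields above give the application fine-grained control over chunk recovery. A sketch of both the accepting and the failure path, continuing the assumptions of the earlier snippets; the ACCEPT and RETRY constants are assumed companions of the UNKNOWN default shown above:

// applyChunk applies one snapshot chunk. On failure it asks CometBFT to
// refetch the bad chunk and to stop talking to the peer that sent it.
func applyChunk(ok bool, index uint32, sender string) *abci.ApplySnapshotChunkResponse {
	if ok {
		return &abci.ApplySnapshotChunkResponse{
			Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT,
		}
	}
	return &abci.ApplySnapshotChunkResponse{
		Result:        abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY,
		RefetchChunks: []uint32{index},  // chunks to fetch again
		RejectSenders: []string{sender}, // peers to ban for this snapshot
	}
}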
+type PrepareProposalResponse struct { Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` } -func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } -func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } -func (*ResponsePrepareProposal) ProtoMessage() {} -func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} +func (m *PrepareProposalResponse) Reset() { *m = PrepareProposalResponse{} } +func (m *PrepareProposalResponse) String() string { return proto.CompactTextString(m) } +func (*PrepareProposalResponse) ProtoMessage() {} +func (*PrepareProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{30} } -func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { +func (m *PrepareProposalResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PrepareProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_PrepareProposalResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2584,41 +2683,43 @@ func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) +func (m *PrepareProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProposalResponse.Merge(m, src) } -func (m *ResponsePrepareProposal) XXX_Size() int { +func (m *PrepareProposalResponse) XXX_Size() int { return m.Size() } -func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) +func (m *PrepareProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProposalResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo +var xxx_messageInfo_PrepareProposalResponse proto.InternalMessageInfo -func (m *ResponsePrepareProposal) GetTxs() [][]byte { +func (m *PrepareProposalResponse) GetTxs() [][]byte { if m != nil { return m.Txs } return nil } -type ResponseProcessProposal struct { - Status ResponseProcessProposal_ProposalStatus `protobuf:"varint,1,opt,name=status,proto3,enum=tendermint.abci.ResponseProcessProposal_ProposalStatus" json:"status,omitempty"` +// ProcessProposalResponse indicates the ABCI application's decision whether +// the given proposal should be accepted or not.
+type ProcessProposalResponse struct { + Status ProcessProposalStatus `protobuf:"varint,1,opt,name=status,proto3,enum=cometbft.abci.v1.ProcessProposalStatus" json:"status,omitempty"` } -func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } -func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } -func (*ResponseProcessProposal) ProtoMessage() {} -func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} +func (m *ProcessProposalResponse) Reset() { *m = ProcessProposalResponse{} } +func (m *ProcessProposalResponse) String() string { return proto.CompactTextString(m) } +func (*ProcessProposalResponse) ProtoMessage() {} +func (*ProcessProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{31} } -func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { +func (m *ProcessProposalResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ProcessProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_ProcessProposalResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2628,41 +2729,43 @@ func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseProcessProposal.Merge(m, src) +func (m *ProcessProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessProposalResponse.Merge(m, src) } -func (m *ResponseProcessProposal) XXX_Size() int { +func (m *ProcessProposalResponse) XXX_Size() int { return m.Size() } -func (m *ResponseProcessProposal) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m) +func (m *ProcessProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessProposalResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo +var xxx_messageInfo_ProcessProposalResponse proto.InternalMessageInfo -func (m *ResponseProcessProposal) GetStatus() ResponseProcessProposal_ProposalStatus { +func (m *ProcessProposalResponse) GetStatus() ProcessProposalStatus { if m != nil { return m.Status } - return ResponseProcessProposal_UNKNOWN + return PROCESS_PROPOSAL_STATUS_UNKNOWN } -type ResponseExtendVote struct { +// ExtendVoteResponse contains the vote extension that the application would like to +// attach to its next precommit vote. 
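The two proposal-phase responses above pair naturally: PrepareProposal returns the transactions that will form the block, and ProcessProposal votes on a proposal received from a peer. A sketch under the same assumed import; PROCESS_PROPOSAL_STATUS_ACCEPT is an assumed companion of the PROCESS_PROPOSAL_STATUS_UNKNOWN default shown above:

// prepareProposal echoes CometBFT's suggested txs back unchanged; a real
// application may reorder, add, or drop txs subject to block size limits.
func prepareProposal(txs [][]byte) *abci.PrepareProposalResponse {
	return &abci.PrepareProposalResponse{Txs: txs}
}

// processProposal accepts every proposal; a real application would validate
// the proposed block against its own rules first.
func processProposal() *abci.ProcessProposalResponse {
	return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}
}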
+type ExtendVoteResponse struct { VoteExtension []byte `protobuf:"bytes,1,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } -func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } -func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } -func (*ResponseExtendVote) ProtoMessage() {} -func (*ResponseExtendVote) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} +func (m *ExtendVoteResponse) Reset() { *m = ExtendVoteResponse{} } +func (m *ExtendVoteResponse) String() string { return proto.CompactTextString(m) } +func (*ExtendVoteResponse) ProtoMessage() {} +func (*ExtendVoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{32} } -func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { +func (m *ExtendVoteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ExtendVoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseExtendVote.Marshal(b, m, deterministic) + return xxx_messageInfo_ExtendVoteResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2672,41 +2775,43 @@ func (m *ResponseExtendVote) XXX_Marshal(b []byte, return b[:n], nil } } -func (m *ResponseExtendVote) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseExtendVote.Merge(m, src) +func (m *ExtendVoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendVoteResponse.Merge(m, src) } -func (m *ResponseExtendVote) XXX_Size() int { +func (m *ExtendVoteResponse) XXX_Size() int { return m.Size() } -func (m *ResponseExtendVote) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseExtendVote.DiscardUnknown(m) +func (m *ExtendVoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendVoteResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseExtendVote proto.InternalMessageInfo +var xxx_messageInfo_ExtendVoteResponse proto.InternalMessageInfo -func (m *ResponseExtendVote) GetVoteExtension() []byte { +func (m *ExtendVoteResponse) GetVoteExtension() []byte { if m != nil { return m.VoteExtension } return nil }
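ExtendVote and VerifyVoteExtension form the other request/response pair renamed here. A sketch of both sides, continuing the assumptions above; VERIFY_VOTE_EXTENSION_STATUS_ACCEPT is an assumed companion of the VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN default shown below:

// extendVote attaches opaque application data to the validator's next
// precommit vote via the vote_extension field above.
func extendVote(payload []byte) *abci.ExtendVoteResponse {
	return &abci.ExtendVoteResponse{VoteExtension: payload}
}

// verifyExtension accepts any extension; a real application would check the
// payload against its own validity rules before accepting.
func verifyExtension() *abci.VerifyVoteExtensionResponse {
	return &abci.VerifyVoteExtensionResponse{
		Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT,
	}
}

-type ResponseVerifyVoteExtension struct { - Status ResponseVerifyVoteExtension_VerifyStatus `protobuf:"varint,1,opt,name=status,proto3,enum=tendermint.abci.ResponseVerifyVoteExtension_VerifyStatus" json:"status,omitempty"` +// VerifyVoteExtensionResponse indicates the ABCI application's decision +// whether the vote extension should be accepted or not.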
+type VerifyVoteExtensionResponse struct { + Status VerifyVoteExtensionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=cometbft.abci.v1.VerifyVoteExtensionStatus" json:"status,omitempty"` } -func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteExtension{} } -func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } -func (*ResponseVerifyVoteExtension) ProtoMessage() {} -func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} +func (m *VerifyVoteExtensionResponse) Reset() { *m = VerifyVoteExtensionResponse{} } +func (m *VerifyVoteExtensionResponse) String() string { return proto.CompactTextString(m) } +func (*VerifyVoteExtensionResponse) ProtoMessage() {} +func (*VerifyVoteExtensionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{33} } -func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { +func (m *VerifyVoteExtensionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *VerifyVoteExtensionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseVerifyVoteExtension.Marshal(b, m, deterministic) + return xxx_messageInfo_VerifyVoteExtensionResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2716,27 +2821,28 @@ func (m *ResponseVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) return b[:n], nil } } -func (m *ResponseVerifyVoteExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseVerifyVoteExtension.Merge(m, src) +func (m *VerifyVoteExtensionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyVoteExtensionResponse.Merge(m, src) } -func (m *ResponseVerifyVoteExtension) XXX_Size() int { +func (m *VerifyVoteExtensionResponse) XXX_Size() int { return m.Size() } -func (m *ResponseVerifyVoteExtension) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseVerifyVoteExtension.DiscardUnknown(m) +func (m *VerifyVoteExtensionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyVoteExtensionResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseVerifyVoteExtension proto.InternalMessageInfo +var xxx_messageInfo_VerifyVoteExtensionResponse proto.InternalMessageInfo -func (m *ResponseVerifyVoteExtension) GetStatus() ResponseVerifyVoteExtension_VerifyStatus { +func (m *VerifyVoteExtensionResponse) GetStatus() VerifyVoteExtensionStatus { if m != nil { return m.Status } - return ResponseVerifyVoteExtension_UNKNOWN + return VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN } -type ResponseFinalizeBlock struct { - // set of block events emmitted as part of executing the block +// FinalizeBlockResponse contains the result of executing the block. +type FinalizeBlockResponse struct { + // set of block events emitted as part of executing the block Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` // the result of executing each transaction including the events // the particular transaction emitted. This should match the order @@ -2745,23 +2851,28 @@ type ResponseFinalizeBlock struct { // a list of updates to the validator set. These will reflect the validator set at current height + 2. 
ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,3,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` // updates to the consensus params, if any. - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - // app_hash is the hash of the applications' state which is used to confirm that execution of the transactions was deterministic. It is up to the application to decide which algorithm to use. + ConsensusParamUpdates *v1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + // app_hash is the hash of the applications' state which is used to confirm + // that execution of the transactions was deterministic. + // It is up to the application to decide which algorithm to use. AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + // delay between the time when this block is committed and the next height is started. + // previously `timeout_commit` in config.toml + NextBlockDelay time.Duration `protobuf:"bytes,6,opt,name=next_block_delay,json=nextBlockDelay,proto3,stdduration" json:"next_block_delay"` } -func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } -func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseFinalizeBlock) ProtoMessage() {} -func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} +func (m *FinalizeBlockResponse) Reset() { *m = FinalizeBlockResponse{} } +func (m *FinalizeBlockResponse) String() string { return proto.CompactTextString(m) } +func (*FinalizeBlockResponse) ProtoMessage() {} +func (*FinalizeBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_95dd8f7b670b96e3, []int{34} } -func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { +func (m *FinalizeBlockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *FinalizeBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseFinalizeBlock.Marshal(b, m, deterministic) + return xxx_messageInfo_FinalizeBlockResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2771,53 +2882,61 @@ func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } -func (m *ResponseFinalizeBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseFinalizeBlock.Merge(m, src) +func (m *FinalizeBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FinalizeBlockResponse.Merge(m, src) } -func (m *ResponseFinalizeBlock) XXX_Size() int { +func (m *FinalizeBlockResponse) XXX_Size() int { return m.Size() } -func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseFinalizeBlock.DiscardUnknown(m) +func (m *FinalizeBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FinalizeBlockResponse.DiscardUnknown(m) } -var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo +var xxx_messageInfo_FinalizeBlockResponse proto.InternalMessageInfo -func (m *ResponseFinalizeBlock) GetEvents() []Event { +func (m *FinalizeBlockResponse) GetEvents() []Event { if m != nil { return m.Events } return nil } -func (m 
*ResponseFinalizeBlock) GetTxResults() []*ExecTxResult { +func (m *FinalizeBlockResponse) GetTxResults() []*ExecTxResult { if m != nil { return m.TxResults } return nil } -func (m *ResponseFinalizeBlock) GetValidatorUpdates() []ValidatorUpdate { +func (m *FinalizeBlockResponse) GetValidatorUpdates() []ValidatorUpdate { if m != nil { return m.ValidatorUpdates } return nil } -func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types1.ConsensusParams { +func (m *FinalizeBlockResponse) GetConsensusParamUpdates() *v1.ConsensusParams { if m != nil { return m.ConsensusParamUpdates } return nil } -func (m *ResponseFinalizeBlock) GetAppHash() []byte { +func (m *FinalizeBlockResponse) GetAppHash() []byte { if m != nil { return m.AppHash } return nil } +func (m *FinalizeBlockResponse) GetNextBlockDelay() time.Duration { + if m != nil { + return m.NextBlockDelay + } + return 0 +} + +// CommitInfo contains votes for the particular round. type CommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` @@ -2827,7 +2946,7 @@ func (m *CommitInfo) Reset() { *m = CommitInfo{} } func (m *CommitInfo) String() string { return proto.CompactTextString(m) } func (*CommitInfo) ProtoMessage() {} func (*CommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} + return fileDescriptor_95dd8f7b670b96e3, []int{35} } func (m *CommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2871,7 +2990,7 @@ func (m *CommitInfo) GetVotes() []VoteInfo { } // ExtendedCommitInfo is similar to CommitInfo except that it is only used in // the PrepareProposal request such that CometBFT can provide vote extensions // to the application. type ExtendedCommitInfo struct { // The round at which the block proposer decided in the previous height. @@ -2885,7 +3004,7 @@ func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedCommitInfo) ProtoMessage() {} func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_95dd8f7b670b96e3, []int{36} } func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2930,6 +3049,8 @@ func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo {
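FinalizeBlockResponse is the one renamed message that also grows a field: NextBlockDelay, a time.Duration that replaces the old `timeout_commit` consensus setting, as the field comment above notes. A sketch of populating it, continuing the assumptions above (the one-second delay is illustrative):

import "time" // for the NextBlockDelay duration

// finalizeBlock sketches the new response shape returned after executing a block.
func finalizeBlock(appHash []byte, results []*abci.ExecTxResult) *abci.FinalizeBlockResponse {
	return &abci.FinalizeBlockResponse{
		TxResults:      results,     // one ExecTxResult per tx, in block order
		AppHash:        appHash,     // deterministic hash of application state
		NextBlockDelay: time.Second, // was `timeout_commit` in config.toml
	}
}

// Event allows application developers to attach additional information to // ResponseFinalizeBlock and ResponseCheckTx. +// Up to 0.37, this could also be used in ResponseBeginBlock, ResponseEndBlock, +// and ResponseDeliverTx. // Later, transactions may be queried using these events.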
type Event struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` @@ -2940,7 +3061,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_95dd8f7b670b96e3, []int{37} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2994,7 +3115,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_95dd8f7b670b96e3, []int{38} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3062,7 +3183,7 @@ func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } func (*ExecTxResult) ProtoMessage() {} func (*ExecTxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_95dd8f7b670b96e3, []int{39} } func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3161,7 +3282,7 @@ func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} + return fileDescriptor_95dd8f7b670b96e3, []int{40} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3218,6 +3339,7 @@ func (m *TxResult) GetResult() ExecTxResult { return ExecTxResult{} } +// Validator in the validator set. type Validator struct { Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // PubKey pub_key = 2 [(gogoproto.nullable)=false]; @@ -3228,7 +3350,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} + return fileDescriptor_95dd8f7b670b96e3, []int{41} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3271,16 +3393,18 @@ func (m *Validator) GetPower() int64 { return 0 } +// ValidatorUpdate is a singular update to a validator set. 
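A sketch of building one such Event, continuing the assumptions above; the Attributes field and the Key/Value string attributes of EventAttribute are unchanged by this patch and assumed from context, and the "transfer" type and attribute keys are illustrative:

// transferEvent shows the Event/EventAttribute shape that CheckTxResponse,
// ExecTxResult, and FinalizeBlockResponse all embed; indexers can later
// query transactions by these attributes.
func transferEvent(sender, recipient string) abci.Event {
	return abci.Event{
		Type: "transfer",
		Attributes: []abci.EventAttribute{
			{Key: "sender", Value: sender},
			{Key: "recipient", Value: recipient},
		},
	}
}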
type ValidatorUpdate struct { - PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` - Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` + PubKeyBytes []byte `protobuf:"bytes,3,opt,name=pub_key_bytes,json=pubKeyBytes,proto3" json:"pub_key_bytes,omitempty"` + PubKeyType string `protobuf:"bytes,4,opt,name=pub_key_type,json=pubKeyType,proto3" json:"pub_key_type,omitempty"` } func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{42} + return fileDescriptor_95dd8f7b670b96e3, []int{42} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3309,30 +3433,38 @@ func (m *ValidatorUpdate) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo -func (m *ValidatorUpdate) GetPubKey() crypto.PublicKey { +func (m *ValidatorUpdate) GetPower() int64 { if m != nil { - return m.PubKey + return m.Power } - return crypto.PublicKey{} + return 0 } -func (m *ValidatorUpdate) GetPower() int64 { +func (m *ValidatorUpdate) GetPubKeyBytes() []byte { if m != nil { - return m.Power + return m.PubKeyBytes } - return 0 + return nil +} + +func (m *ValidatorUpdate) GetPubKeyType() string { + if m != nil { + return m.PubKeyType + } + return "" } +// VoteInfo contains the information about the vote. type VoteInfo struct { - Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` - BlockIdFlag types1.BlockIDFlag `protobuf:"varint,3,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + BlockIdFlag v1.BlockIDFlag `protobuf:"varint,3,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1.BlockIDFlag" json:"block_id_flag,omitempty"` } func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{43} + return fileDescriptor_95dd8f7b670b96e3, []int{43} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3368,13 +3500,14 @@ func (m *VoteInfo) GetValidator() Validator { return Validator{} } -func (m *VoteInfo) GetBlockIdFlag() types1.BlockIDFlag { +func (m *VoteInfo) GetBlockIdFlag() v1.BlockIDFlag { if m != nil { return m.BlockIdFlag } - return types1.BlockIDFlagUnknown + return v1.BlockIDFlagUnknown } +// ExtendedVoteInfo extends VoteInfo with the vote extensions (non-deterministic). type ExtendedVoteInfo struct { // The validator that sent the vote. 
Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` @@ -3383,14 +3516,14 @@ type ExtendedVoteInfo struct { // Vote extension signature created by CometBFT ExtensionSignature []byte `protobuf:"bytes,4,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"` // block_id_flag indicates whether the validator voted for a block, nil, or did not vote at all - BlockIdFlag types1.BlockIDFlag `protobuf:"varint,5,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + BlockIdFlag v1.BlockIDFlag `protobuf:"varint,5,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1.BlockIDFlag" json:"block_id_flag,omitempty"` } func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } func (*ExtendedVoteInfo) ProtoMessage() {} func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{44} + return fileDescriptor_95dd8f7b670b96e3, []int{44} } func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,15 +3573,16 @@ func (m *ExtendedVoteInfo) GetExtensionSignature() []byte { return nil } -func (m *ExtendedVoteInfo) GetBlockIdFlag() types1.BlockIDFlag { +func (m *ExtendedVoteInfo) GetBlockIdFlag() v1.BlockIDFlag { if m != nil { return m.BlockIdFlag } - return types1.BlockIDFlagUnknown + return v1.BlockIDFlagUnknown } +// Misbehavior is a type of misbehavior committed by a validator. type Misbehavior struct { - Type MisbehaviorType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.abci.MisbehaviorType" json:"type,omitempty"` + Type MisbehaviorType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.abci.v1.MisbehaviorType" json:"type,omitempty"` // The offending validator Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` // The height when the offense occurred @@ -3465,7 +3599,7 @@ func (m *Misbehavior) Reset() { *m = Misbehavior{} } func (m *Misbehavior) String() string { return proto.CompactTextString(m) } func (*Misbehavior) ProtoMessage() {} func (*Misbehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{45} + return fileDescriptor_95dd8f7b670b96e3, []int{45} } func (m *Misbehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3498,7 +3632,7 @@ func (m *Misbehavior) GetType() MisbehaviorType { if m != nil { return m.Type } - return MisbehaviorType_UNKNOWN + return MISBEHAVIOR_TYPE_UNKNOWN } func (m *Misbehavior) GetValidator() Validator { @@ -3529,6 +3663,7 @@ func (m *Misbehavior) GetTotalVotingPower() int64 { return 0 } +// Snapshot of the ABCI application state. 
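The ValidatorUpdate hunk above is a genuine API change, not just a rename: the embedded crypto.PublicKey message is gone, replaced by raw key bytes plus a key-type string. A sketch of constructing an update under the new shape, continuing the assumptions above; "ed25519" is an assumed key-type identifier used for illustration:

// promoteValidator builds a ValidatorUpdate with the new pub_key_bytes and
// pub_key_type fields instead of the removed crypto.PublicKey message.
func promoteValidator(pubKey []byte, power int64) abci.ValidatorUpdate {
	return abci.ValidatorUpdate{
		Power:       power,
		PubKeyBytes: pubKey,
		PubKeyType:  "ed25519", // assumed; must match a key type the chain supports
	}
}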
type Snapshot struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` @@ -3541,7 +3676,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{46} + return fileDescriptor_95dd8f7b670b96e3, []int{46} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3606,883 +3741,273 @@ func (m *Snapshot) GetMetadata() []byte { } func init() { - proto.RegisterEnum("tendermint.abci.CheckTxType", CheckTxType_name, CheckTxType_value) - proto.RegisterEnum("tendermint.abci.MisbehaviorType", MisbehaviorType_name, MisbehaviorType_value) - proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) - proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) - proto.RegisterEnum("tendermint.abci.ResponseProcessProposal_ProposalStatus", ResponseProcessProposal_ProposalStatus_name, ResponseProcessProposal_ProposalStatus_value) - proto.RegisterEnum("tendermint.abci.ResponseVerifyVoteExtension_VerifyStatus", ResponseVerifyVoteExtension_VerifyStatus_name, ResponseVerifyVoteExtension_VerifyStatus_value) - proto.RegisterType((*Request)(nil), "tendermint.abci.Request") - proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") - proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.RequestFlush") - proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") - proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") - proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") - proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") - proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") - proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") - proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") - proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") - proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") - proto.RegisterType((*RequestPrepareProposal)(nil), "tendermint.abci.RequestPrepareProposal") - proto.RegisterType((*RequestProcessProposal)(nil), "tendermint.abci.RequestProcessProposal") - proto.RegisterType((*RequestExtendVote)(nil), "tendermint.abci.RequestExtendVote") - proto.RegisterType((*RequestVerifyVoteExtension)(nil), "tendermint.abci.RequestVerifyVoteExtension") - proto.RegisterType((*RequestFinalizeBlock)(nil), "tendermint.abci.RequestFinalizeBlock") - proto.RegisterType((*Response)(nil), "tendermint.abci.Response") - proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") - proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") - proto.RegisterType((*ResponseFlush)(nil), "tendermint.abci.ResponseFlush") - proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") - proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") - proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") - proto.RegisterType((*ResponseCheckTx)(nil), 
"tendermint.abci.ResponseCheckTx") - proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") - proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") - proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") - proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") - proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") - proto.RegisterType((*ResponsePrepareProposal)(nil), "tendermint.abci.ResponsePrepareProposal") - proto.RegisterType((*ResponseProcessProposal)(nil), "tendermint.abci.ResponseProcessProposal") - proto.RegisterType((*ResponseExtendVote)(nil), "tendermint.abci.ResponseExtendVote") - proto.RegisterType((*ResponseVerifyVoteExtension)(nil), "tendermint.abci.ResponseVerifyVoteExtension") - proto.RegisterType((*ResponseFinalizeBlock)(nil), "tendermint.abci.ResponseFinalizeBlock") - proto.RegisterType((*CommitInfo)(nil), "tendermint.abci.CommitInfo") - proto.RegisterType((*ExtendedCommitInfo)(nil), "tendermint.abci.ExtendedCommitInfo") - proto.RegisterType((*Event)(nil), "tendermint.abci.Event") - proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") - proto.RegisterType((*ExecTxResult)(nil), "tendermint.abci.ExecTxResult") - proto.RegisterType((*TxResult)(nil), "tendermint.abci.TxResult") - proto.RegisterType((*Validator)(nil), "tendermint.abci.Validator") - proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.ValidatorUpdate") - proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.VoteInfo") - proto.RegisterType((*ExtendedVoteInfo)(nil), "tendermint.abci.ExtendedVoteInfo") - proto.RegisterType((*Misbehavior)(nil), "tendermint.abci.Misbehavior") - proto.RegisterType((*Snapshot)(nil), "tendermint.abci.Snapshot") -} - -func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } - -var fileDescriptor_252557cfdd89a31a = []byte{ - // 3167 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x73, 0x23, 0xd5, - 0xd5, 0x57, 0xeb, 0xad, 0xa3, 0x87, 0xdb, 0xd7, 0x9e, 0x41, 0x23, 0x06, 0xdb, 0x34, 0x05, 0x0c, - 0x03, 0xd8, 0x7c, 0x9e, 0x6f, 0x78, 0xd4, 0xc0, 0x57, 0x25, 0x6b, 0x34, 0x9f, 0xec, 0x31, 0xb6, - 0x69, 0xcb, 0x43, 0x91, 0x07, 0x4d, 0x5b, 0xba, 0xb2, 0x9a, 0x91, 0xd4, 0x4d, 0xf7, 0x95, 0x91, - 0x59, 0xa5, 0x42, 0x52, 0x95, 0x62, 0x45, 0x55, 0xb2, 0x60, 0x11, 0x16, 0x59, 0x64, 0x93, 0xbf, - 0x20, 0xab, 0x64, 0x93, 0x05, 0x8b, 0x2c, 0x58, 0x66, 0x45, 0x52, 0xb0, 0x63, 0x9b, 0x45, 0xb6, - 0xa9, 0xfb, 0xe8, 0x97, 0xa4, 0xb6, 0xa4, 0x81, 0x2c, 0x52, 0xc9, 0xae, 0xef, 0xe9, 0x73, 0xce, - 0xed, 0x7b, 0xee, 0xb9, 0xe7, 0xf1, 0xeb, 0x0b, 0x8f, 0x13, 0x3c, 0x68, 0x63, 0xbb, 0x6f, 0x0c, - 0xc8, 0x96, 0x7e, 0xda, 0x32, 0xb6, 0xc8, 0x85, 0x85, 0x9d, 0x4d, 0xcb, 0x36, 0x89, 0x89, 0x96, - 0xfc, 0x97, 0x9b, 0xf4, 0x65, 0xe5, 0x89, 0x00, 0x77, 0xcb, 0xbe, 0xb0, 0x88, 0xb9, 0x65, 0xd9, - 0xa6, 0xd9, 0xe1, 0xfc, 0x95, 0xeb, 0x93, 0xaf, 0x1f, 0xe2, 0x0b, 0xa1, 0x2d, 0x24, 0xcc, 0x66, - 0xd9, 0xb2, 0x74, 0x5b, 0xef, 0xbb, 0xaf, 0x37, 0x26, 0x5e, 0x9f, 0xeb, 0x3d, 0xa3, 0xad, 0x13, - 0xd3, 0x16, 0x1c, 0xeb, 0x67, 0xa6, 0x79, 0xd6, 0xc3, 0x5b, 0x6c, 0x74, 0x3a, 0xec, 0x6c, 0x11, - 0xa3, 0x8f, 0x1d, 0xa2, 0xf7, 0x2d, 0xc1, 0xb0, 0x7a, 0x66, 0x9e, 0x99, 0xec, 0x71, 0x8b, 0x3e, - 0x71, 0xaa, 0xf2, 0xc7, 0x1c, 0x64, 0x54, 0xfc, 0xc1, 0x10, 0x3b, 0x04, 0x6d, 0x43, 0x12, 0xb7, - 0xba, 0x66, 
0x59, 0xda, 0x90, 0x6e, 0xe4, 0xb7, 0xaf, 0x6f, 0x8e, 0x2d, 0x70, 0x53, 0xf0, 0xd5, - 0x5b, 0x5d, 0xb3, 0x11, 0x53, 0x19, 0x2f, 0xba, 0x0d, 0xa9, 0x4e, 0x6f, 0xe8, 0x74, 0xcb, 0x71, - 0x26, 0xf4, 0x44, 0x94, 0xd0, 0x3d, 0xca, 0xd4, 0x88, 0xa9, 0x9c, 0x9b, 0x4e, 0x65, 0x0c, 0x3a, - 0x66, 0x39, 0x71, 0xf9, 0x54, 0xbb, 0x83, 0x0e, 0x9b, 0x8a, 0xf2, 0xa2, 0x1d, 0x00, 0x63, 0x60, - 0x10, 0xad, 0xd5, 0xd5, 0x8d, 0x41, 0x39, 0xc5, 0x24, 0x9f, 0x8c, 0x96, 0x34, 0x48, 0x8d, 0x32, - 0x36, 0x62, 0x6a, 0xce, 0x70, 0x07, 0xf4, 0x73, 0x3f, 0x18, 0x62, 0xfb, 0xa2, 0x9c, 0xbe, 0xfc, - 0x73, 0xdf, 0xa2, 0x4c, 0xf4, 0x73, 0x19, 0x37, 0x7a, 0x1d, 0xb2, 0xad, 0x2e, 0x6e, 0x3d, 0xd4, - 0xc8, 0xa8, 0x9c, 0x65, 0x92, 0xeb, 0x51, 0x92, 0x35, 0xca, 0xd7, 0x1c, 0x35, 0x62, 0x6a, 0xa6, - 0xc5, 0x1f, 0xd1, 0xab, 0x90, 0x6e, 0x99, 0xfd, 0xbe, 0x41, 0xca, 0x79, 0x26, 0xbb, 0x16, 0x29, - 0xcb, 0xb8, 0x1a, 0x31, 0x55, 0xf0, 0xa3, 0x03, 0x28, 0xf5, 0x0c, 0x87, 0x68, 0xce, 0x40, 0xb7, - 0x9c, 0xae, 0x49, 0x9c, 0x72, 0x81, 0x69, 0x78, 0x3a, 0x4a, 0xc3, 0xbe, 0xe1, 0x90, 0x63, 0x97, - 0xb9, 0x11, 0x53, 0x8b, 0xbd, 0x20, 0x81, 0xea, 0x33, 0x3b, 0x1d, 0x6c, 0x7b, 0x0a, 0xcb, 0xc5, - 0xcb, 0xf5, 0x1d, 0x52, 0x6e, 0x57, 0x9e, 0xea, 0x33, 0x83, 0x04, 0xf4, 0x43, 0x58, 0xe9, 0x99, - 0x7a, 0xdb, 0x53, 0xa7, 0xb5, 0xba, 0xc3, 0xc1, 0xc3, 0x72, 0x89, 0x29, 0x7d, 0x2e, 0xf2, 0x23, - 0x4d, 0xbd, 0xed, 0xaa, 0xa8, 0x51, 0x81, 0x46, 0x4c, 0x5d, 0xee, 0x8d, 0x13, 0xd1, 0xbb, 0xb0, - 0xaa, 0x5b, 0x56, 0xef, 0x62, 0x5c, 0xfb, 0x12, 0xd3, 0x7e, 0x33, 0x4a, 0x7b, 0x95, 0xca, 0x8c, - 0xab, 0x47, 0xfa, 0x04, 0x15, 0x35, 0x41, 0xb6, 0x6c, 0x6c, 0xe9, 0x36, 0xd6, 0x2c, 0xdb, 0xb4, - 0x4c, 0x47, 0xef, 0x95, 0x65, 0xa6, 0xfb, 0xd9, 0x28, 0xdd, 0x47, 0x9c, 0xff, 0x48, 0xb0, 0x37, - 0x62, 0xea, 0x92, 0x15, 0x26, 0x71, 0xad, 0x66, 0x0b, 0x3b, 0x8e, 0xaf, 0x75, 0x79, 0x96, 0x56, - 0xc6, 0x1f, 0xd6, 0x1a, 0x22, 0xa1, 0x3a, 0xe4, 0xf1, 0x88, 0x8a, 0x6b, 0xe7, 0x26, 0xc1, 0x65, - 0xc4, 0x14, 0x2a, 0x91, 0x27, 0x94, 0xb1, 0x3e, 0x30, 0x09, 0x6e, 0xc4, 0x54, 0xc0, 0xde, 0x08, - 0xe9, 0x70, 0xe5, 0x1c, 0xdb, 0x46, 0xe7, 0x82, 0xa9, 0xd1, 0xd8, 0x1b, 0xc7, 0x30, 0x07, 0xe5, - 0x15, 0xa6, 0xf0, 0xf9, 0x28, 0x85, 0x0f, 0x98, 0x10, 0x55, 0x51, 0x77, 0x45, 0x1a, 0x31, 0x75, - 0xe5, 0x7c, 0x92, 0x4c, 0x5d, 0xac, 0x63, 0x0c, 0xf4, 0x9e, 0xf1, 0x11, 0xd6, 0x4e, 0x7b, 0x66, - 0xeb, 0x61, 0x79, 0xf5, 0x72, 0x17, 0xbb, 0x27, 0xb8, 0x77, 0x28, 0x33, 0x75, 0xb1, 0x4e, 0x90, - 0xb0, 0x93, 0x81, 0xd4, 0xb9, 0xde, 0x1b, 0xe2, 0xbd, 0x64, 0x36, 0x29, 0xa7, 0xf6, 0x92, 0xd9, - 0x8c, 0x9c, 0xdd, 0x4b, 0x66, 0x73, 0x32, 0xec, 0x25, 0xb3, 0x20, 0xe7, 0x95, 0x67, 0x21, 0x1f, - 0x08, 0x4c, 0xa8, 0x0c, 0x99, 0x3e, 0x76, 0x1c, 0xfd, 0x0c, 0xb3, 0x38, 0x96, 0x53, 0xdd, 0xa1, - 0x52, 0x82, 0x42, 0x30, 0x18, 0x29, 0x9f, 0x4a, 0x9e, 0x24, 0x8d, 0x33, 0x54, 0xf2, 0x1c, 0xdb, - 0xcc, 0x1c, 0x42, 0x52, 0x0c, 0xd1, 0x53, 0x50, 0x64, 0x4b, 0xd1, 0xdc, 0xf7, 0x34, 0xd8, 0x25, - 0xd5, 0x02, 0x23, 0x3e, 0x10, 0x4c, 0xeb, 0x90, 0xb7, 0xb6, 0x2d, 0x8f, 0x25, 0xc1, 0x58, 0xc0, - 0xda, 0xb6, 0x5c, 0x86, 0x27, 0xa1, 0x40, 0xd7, 0xed, 0x71, 0x24, 0xd9, 0x24, 0x79, 0x4a, 0x13, - 0x2c, 0xca, 0x9f, 0xe3, 0x20, 0x8f, 0x07, 0x30, 0xf4, 0x2a, 0x24, 0x69, 0x2c, 0x17, 0x61, 0xb9, - 0xb2, 0xc9, 0x03, 0xfd, 0xa6, 0x1b, 0xe8, 0x37, 0x9b, 0x6e, 0xa0, 0xdf, 0xc9, 0x7e, 0xf1, 0xd5, - 0x7a, 0xec, 0xd3, 0xbf, 0xae, 0x4b, 0x2a, 0x93, 0x40, 0xd7, 0x68, 0xd8, 0xd2, 0x8d, 0x81, 0x66, - 0xb4, 0xd9, 0x27, 0xe7, 0x68, 0x4c, 0xd2, 0x8d, 0xc1, 0x6e, 0x1b, 0xed, 0x83, 0xdc, 0x32, 0x07, - 0x0e, 0x1e, 0x38, 0x43, 0x47, 0xe3, 
0xa9, 0x46, 0x04, 0xe3, 0x50, 0x48, 0xe5, 0x09, 0xaf, 0xe6, - 0x72, 0x1e, 0x31, 0x46, 0x75, 0xa9, 0x15, 0x26, 0xa0, 0x7b, 0x00, 0x5e, 0x3e, 0x72, 0xca, 0xc9, - 0x8d, 0xc4, 0x8d, 0xfc, 0xf6, 0xc6, 0xc4, 0x86, 0x3f, 0x70, 0x59, 0x4e, 0xac, 0xb6, 0x4e, 0xf0, - 0x4e, 0x92, 0x7e, 0xae, 0x1a, 0x90, 0x44, 0xcf, 0xc0, 0x92, 0x6e, 0x59, 0x9a, 0x43, 0x74, 0x82, - 0xb5, 0xd3, 0x0b, 0x82, 0x1d, 0x16, 0xe7, 0x0b, 0x6a, 0x51, 0xb7, 0xac, 0x63, 0x4a, 0xdd, 0xa1, - 0x44, 0xf4, 0x34, 0x94, 0x68, 0x4c, 0x37, 0xf4, 0x9e, 0xd6, 0xc5, 0xc6, 0x59, 0x97, 0xb0, 0x78, - 0x9e, 0x50, 0x8b, 0x82, 0xda, 0x60, 0x44, 0xa5, 0xed, 0xed, 0x38, 0x8b, 0xe7, 0x08, 0x41, 0xb2, - 0xad, 0x13, 0x9d, 0x59, 0xb2, 0xa0, 0xb2, 0x67, 0x4a, 0xb3, 0x74, 0xd2, 0x15, 0xf6, 0x61, 0xcf, - 0xe8, 0x2a, 0xa4, 0x85, 0xda, 0x04, 0x53, 0x2b, 0x46, 0x68, 0x15, 0x52, 0x96, 0x6d, 0x9e, 0x63, - 0xb6, 0x75, 0x59, 0x95, 0x0f, 0x14, 0x15, 0x4a, 0xe1, 0xd8, 0x8f, 0x4a, 0x10, 0x27, 0x23, 0x31, - 0x4b, 0x9c, 0x8c, 0xd0, 0x4b, 0x90, 0xa4, 0x86, 0x64, 0x73, 0x94, 0xa6, 0x64, 0x3b, 0x21, 0xd7, - 0xbc, 0xb0, 0xb0, 0xca, 0x38, 0x95, 0x25, 0x28, 0x86, 0x72, 0x82, 0x72, 0x15, 0x56, 0xa7, 0x85, - 0x78, 0xa5, 0xeb, 0xd1, 0x43, 0xa1, 0x1a, 0xdd, 0x86, 0xac, 0x17, 0xe3, 0xb9, 0xe3, 0x5c, 0x9b, - 0x98, 0xd6, 0x65, 0x56, 0x3d, 0x56, 0xea, 0x31, 0x74, 0x03, 0xba, 0xba, 0xc8, 0xe8, 0x05, 0x35, - 0xa3, 0x5b, 0x56, 0x43, 0x77, 0xba, 0xca, 0x7b, 0x50, 0x8e, 0x8a, 0xdf, 0x01, 0x83, 0x49, 0xcc, - 0xed, 0x5d, 0x83, 0x5d, 0x85, 0x74, 0xc7, 0xb4, 0xfb, 0x3a, 0x61, 0xca, 0x8a, 0xaa, 0x18, 0x51, - 0x43, 0xf2, 0x58, 0x9e, 0x60, 0x64, 0x3e, 0x50, 0x34, 0xb8, 0x16, 0x19, 0xc3, 0xa9, 0x88, 0x31, - 0x68, 0x63, 0x6e, 0xd6, 0xa2, 0xca, 0x07, 0xbe, 0x22, 0xfe, 0xb1, 0x7c, 0x40, 0xa7, 0x75, 0xd8, - 0x5a, 0x99, 0xfe, 0x9c, 0x2a, 0x46, 0xca, 0x67, 0x09, 0xb8, 0x3a, 0x3d, 0x92, 0xa3, 0x0d, 0x28, - 0xf4, 0xf5, 0x91, 0x46, 0x46, 0xc2, 0xed, 0x24, 0xb6, 0xf1, 0xd0, 0xd7, 0x47, 0xcd, 0x11, 0xf7, - 0x39, 0x19, 0x12, 0x64, 0xe4, 0x94, 0xe3, 0x1b, 0x89, 0x1b, 0x05, 0x95, 0x3e, 0xa2, 0x13, 0x58, - 0xee, 0x99, 0x2d, 0xbd, 0xa7, 0xf5, 0x74, 0x87, 0x68, 0x22, 0xc5, 0xf3, 0x43, 0xf4, 0xd4, 0x84, - 0xb1, 0x79, 0x4c, 0xc6, 0x6d, 0xbe, 0x9f, 0x34, 0xe0, 0x08, 0xff, 0x5f, 0x62, 0x3a, 0xf6, 0x75, - 0x77, 0xab, 0xd1, 0x5d, 0xc8, 0xf7, 0x0d, 0xe7, 0x14, 0x77, 0xf5, 0x73, 0xc3, 0xb4, 0xc5, 0x69, - 0x9a, 0x74, 0x9a, 0x37, 0x7d, 0x1e, 0xa1, 0x29, 0x28, 0x16, 0xd8, 0x92, 0x54, 0xc8, 0x87, 0xdd, - 0x68, 0x92, 0x5e, 0x38, 0x9a, 0xbc, 0x04, 0xab, 0x03, 0x3c, 0x22, 0x9a, 0x7f, 0x5e, 0xb9, 0x9f, - 0x64, 0x98, 0xe9, 0x11, 0x7d, 0xe7, 0x9d, 0x70, 0x87, 0xba, 0x0c, 0x7a, 0x8e, 0xe5, 0x42, 0xcb, - 0x74, 0xb0, 0xad, 0xe9, 0xed, 0xb6, 0x8d, 0x1d, 0x87, 0x95, 0x4f, 0x05, 0x96, 0xe0, 0x18, 0xbd, - 0xca, 0xc9, 0xca, 0x2f, 0x82, 0x5b, 0x13, 0xce, 0x7d, 0xc2, 0xf0, 0x92, 0x6f, 0xf8, 0x63, 0x58, - 0x15, 0xf2, 0xed, 0x90, 0xed, 0x79, 0x0d, 0xfa, 0xf8, 0xe4, 0xf9, 0x1a, 0xb7, 0x39, 0x72, 0xc5, - 0xa3, 0xcd, 0x9e, 0x78, 0x34, 0xb3, 0x23, 0x48, 0x32, 0xa3, 0x24, 0x79, 0x88, 0xa1, 0xcf, 0xff, - 0x6e, 0x5b, 0xf1, 0x71, 0x02, 0x96, 0x27, 0x0a, 0x09, 0x6f, 0x61, 0xd2, 0xd4, 0x85, 0xc5, 0xa7, - 0x2e, 0x2c, 0xb1, 0xf0, 0xc2, 0xc4, 0x5e, 0x27, 0x67, 0xef, 0x75, 0xea, 0x7b, 0xdc, 0xeb, 0xf4, - 0xa3, 0xed, 0xf5, 0xbf, 0x74, 0x17, 0x7e, 0x2d, 0x41, 0x25, 0xba, 0xfa, 0x9a, 0xba, 0x1d, 0xcf, - 0xc3, 0xb2, 0xf7, 0x29, 0x9e, 0x7a, 0x1e, 0x18, 0x65, 0xef, 0x85, 0xd0, 0x1f, 0x99, 0xe3, 0x9e, - 0x86, 0xd2, 0x58, 0x6d, 0xc8, 0x5d, 0xb9, 0x78, 0x1e, 0x9c, 0x5f, 0xf9, 0x59, 0xc2, 0x4b, 0x3c, - 0xa1, 0x02, 0x6e, 0xca, 0x69, 0x7d, 0x0b, 0x56, 0xda, 0xb8, 
0x65, 0xb4, 0x1f, 0xf5, 0xb0, 0x2e, - 0x0b, 0xe9, 0xff, 0x9e, 0xd5, 0x49, 0x2f, 0xf9, 0x15, 0x40, 0x56, 0xc5, 0x8e, 0x45, 0xeb, 0x31, - 0xb4, 0x03, 0x39, 0x3c, 0x6a, 0x61, 0x8b, 0xb8, 0x25, 0xec, 0xf4, 0x16, 0x81, 0x73, 0xd7, 0x5d, - 0x4e, 0xda, 0x20, 0x7b, 0x62, 0xe8, 0x96, 0xc0, 0x00, 0xa2, 0xdb, 0x79, 0x21, 0x1e, 0x04, 0x01, - 0x5e, 0x76, 0x41, 0x80, 0x44, 0x64, 0x7f, 0xcb, 0xa5, 0xc6, 0x50, 0x80, 0x5b, 0x02, 0x05, 0x48, - 0xce, 0x98, 0x2c, 0x04, 0x03, 0xd4, 0x42, 0x30, 0x40, 0x7a, 0xc6, 0x32, 0x23, 0x70, 0x80, 0x97, - 0x5d, 0x1c, 0x20, 0x33, 0xe3, 0x8b, 0xc7, 0x80, 0x80, 0x37, 0x02, 0x40, 0x40, 0x8e, 0x89, 0x6e, - 0x44, 0x8a, 0x4e, 0x41, 0x02, 0x5e, 0xf3, 0x90, 0x80, 0x42, 0x24, 0x8a, 0x20, 0x84, 0xc7, 0xa1, - 0x80, 0xc3, 0x09, 0x28, 0x80, 0xb7, 0xee, 0xcf, 0x44, 0xaa, 0x98, 0x81, 0x05, 0x1c, 0x4e, 0x60, - 0x01, 0xa5, 0x19, 0x0a, 0x67, 0x80, 0x01, 0x3f, 0x9a, 0x0e, 0x06, 0x44, 0xb7, 0xeb, 0xe2, 0x33, - 0xe7, 0x43, 0x03, 0xb4, 0x08, 0x34, 0x40, 0x8e, 0xec, 0x5c, 0xb9, 0xfa, 0xb9, 0xe1, 0x80, 0x93, - 0x29, 0x70, 0x00, 0x6f, 0xdc, 0x6f, 0x44, 0x2a, 0x9f, 0x03, 0x0f, 0x38, 0x99, 0x82, 0x07, 0xa0, - 0x99, 0x6a, 0x67, 0x02, 0x02, 0xf7, 0xc2, 0x80, 0xc0, 0x4a, 0x44, 0xd5, 0xe9, 0x9f, 0xf6, 0x08, - 0x44, 0xe0, 0x34, 0x0a, 0x11, 0xe0, 0x5d, 0xfb, 0x0b, 0x91, 0x1a, 0x17, 0x80, 0x04, 0x0e, 0x27, - 0x20, 0x81, 0x2b, 0x33, 0x3c, 0x6d, 0x7e, 0x4c, 0x20, 0x25, 0xa7, 0xf7, 0x92, 0xd9, 0xac, 0x9c, - 0xe3, 0x68, 0xc0, 0x5e, 0x32, 0x9b, 0x97, 0x0b, 0xca, 0x73, 0xb4, 0x82, 0x19, 0x8b, 0x73, 0xb4, - 0x57, 0xc0, 0xb6, 0x6d, 0xda, 0xa2, 0xbb, 0xe7, 0x03, 0xe5, 0x06, 0xed, 0x11, 0xfd, 0x98, 0x76, - 0x09, 0x7e, 0xc0, 0x7a, 0xb2, 0x40, 0x1c, 0x53, 0x7e, 0x2f, 0xf9, 0xb2, 0x0c, 0x41, 0x08, 0xf6, - 0x97, 0x39, 0xd1, 0x5f, 0x06, 0x50, 0x85, 0x78, 0x18, 0x55, 0x58, 0x87, 0x3c, 0xed, 0xb5, 0xc6, - 0x00, 0x03, 0xdd, 0xf2, 0x00, 0x83, 0x9b, 0xb0, 0xcc, 0x12, 0x26, 0xc7, 0x1e, 0x44, 0x5a, 0x4a, - 0xb2, 0xb4, 0xb4, 0x44, 0x5f, 0x70, 0xeb, 0xf0, 0xfc, 0xf4, 0x22, 0xac, 0x04, 0x78, 0xbd, 0x1e, - 0x8e, 0x77, 0xcf, 0xb2, 0xc7, 0x5d, 0x15, 0xcd, 0xdc, 0x9f, 0x24, 0xdf, 0x42, 0x3e, 0xd2, 0x30, - 0x0d, 0x14, 0x90, 0xbe, 0x27, 0x50, 0x20, 0xfe, 0xc8, 0xa0, 0x40, 0xb0, 0x27, 0x4d, 0x84, 0x7b, - 0xd2, 0x7f, 0x48, 0xfe, 0x9e, 0x78, 0x2d, 0x7e, 0xcb, 0x6c, 0x63, 0xd1, 0x25, 0xb2, 0x67, 0x5a, - 0x92, 0xf4, 0xcc, 0x33, 0xd1, 0x0b, 0xd2, 0x47, 0xca, 0xe5, 0x25, 0x9e, 0x9c, 0xc8, 0x2b, 0x5e, - 0x83, 0xc9, 0x13, 0xbf, 0x68, 0x30, 0x65, 0x48, 0x3c, 0xc4, 0x1c, 0x2e, 0x2e, 0xa8, 0xf4, 0x91, - 0xf2, 0x31, 0xe7, 0x13, 0x09, 0x9c, 0x0f, 0xd0, 0xab, 0x90, 0x63, 0x60, 0xbf, 0x66, 0x5a, 0x8e, - 0x80, 0x88, 0x43, 0xa5, 0x0d, 0x47, 0xfc, 0x37, 0x8f, 0x28, 0xcf, 0xa1, 0xe5, 0xa8, 0x59, 0x4b, - 0x3c, 0x05, 0x2a, 0x8e, 0x5c, 0xa8, 0xe2, 0xb8, 0x0e, 0x39, 0xfa, 0xf5, 0x8e, 0xa5, 0xb7, 0x70, - 0x19, 0xd8, 0x87, 0xfa, 0x04, 0xe5, 0x77, 0x71, 0x58, 0x1a, 0x4b, 0x34, 0x53, 0xd7, 0xee, 0xba, - 0x64, 0x3c, 0x00, 0x79, 0xcc, 0x67, 0x8f, 0x35, 0x80, 0x33, 0xdd, 0xd1, 0x3e, 0xd4, 0x07, 0x04, - 0xb7, 0x85, 0x51, 0x02, 0x14, 0x54, 0x81, 0x2c, 0x1d, 0x0d, 0x1d, 0xdc, 0x16, 0xe8, 0x8b, 0x37, - 0x46, 0x0d, 0x48, 0xe3, 0x73, 0x3c, 0x20, 0x4e, 0x39, 0xc3, 0xb6, 0xfd, 0xea, 0x64, 0x3b, 0x4c, - 0x5f, 0xef, 0x94, 0xe9, 0x66, 0x7f, 0xfb, 0xd5, 0xba, 0xcc, 0xb9, 0x5f, 0x30, 0xfb, 0x06, 0xc1, - 0x7d, 0x8b, 0x5c, 0xa8, 0x42, 0x3e, 0x6c, 0x85, 0xec, 0x98, 0x15, 0x18, 0x0e, 0x58, 0x70, 0xdb, - 0x7b, 0x6a, 0x53, 0xc3, 0xb4, 0x0d, 0x72, 0xa1, 0x16, 0xfb, 0xb8, 0x6f, 0x99, 0x66, 0x4f, 0xe3, - 0x67, 0xbc, 0x0a, 0xa5, 0x70, 0x5e, 0x45, 0x4f, 0x41, 0xd1, 0xc6, 0x44, 0x37, 0x06, 
0x5a, 0xa8, - 0x08, 0x2e, 0x70, 0x22, 0x3f, 0x53, 0x7b, 0xc9, 0xac, 0x24, 0xc7, 0xf7, 0x92, 0xd9, 0xb8, 0x9c, - 0x50, 0x8e, 0xe0, 0xca, 0xd4, 0xbc, 0x8a, 0x5e, 0x81, 0x9c, 0x9f, 0x92, 0x25, 0xb6, 0xda, 0x4b, - 0x90, 0x16, 0x9f, 0x57, 0xf9, 0x83, 0xe4, 0xab, 0x0c, 0x63, 0x37, 0x75, 0x48, 0xdb, 0xd8, 0x19, - 0xf6, 0x38, 0x9a, 0x52, 0xda, 0x7e, 0x71, 0xbe, 0x8c, 0x4c, 0xa9, 0xc3, 0x1e, 0x51, 0x85, 0xb0, - 0xf2, 0x2e, 0xa4, 0x39, 0x05, 0xe5, 0x21, 0x73, 0x72, 0x70, 0xff, 0xe0, 0xf0, 0xed, 0x03, 0x39, - 0x86, 0x00, 0xd2, 0xd5, 0x5a, 0xad, 0x7e, 0xd4, 0x94, 0x25, 0x94, 0x83, 0x54, 0x75, 0xe7, 0x50, - 0x6d, 0xca, 0x71, 0x4a, 0x56, 0xeb, 0x7b, 0xf5, 0x5a, 0x53, 0x4e, 0xa0, 0x65, 0x28, 0xf2, 0x67, - 0xed, 0xde, 0xa1, 0xfa, 0x66, 0xb5, 0x29, 0x27, 0x03, 0xa4, 0xe3, 0xfa, 0xc1, 0xdd, 0xba, 0x2a, - 0xa7, 0x94, 0xff, 0x81, 0x6b, 0x91, 0x39, 0xdc, 0x07, 0x66, 0xa4, 0x00, 0x30, 0xa3, 0x7c, 0x16, - 0xa7, 0x4d, 0x4d, 0x54, 0x62, 0x46, 0x7b, 0x63, 0x0b, 0xdf, 0x5e, 0x20, 0xab, 0x8f, 0xad, 0x9e, - 0xf6, 0x31, 0x36, 0xee, 0x60, 0xd2, 0xea, 0xf2, 0x42, 0x81, 0x47, 0xa0, 0xa2, 0x5a, 0x14, 0x54, - 0x26, 0xe4, 0x70, 0xb6, 0xf7, 0x71, 0x8b, 0x68, 0xdc, 0x89, 0x1c, 0xd6, 0x4c, 0xe4, 0x28, 0x1b, - 0xa5, 0x1e, 0x73, 0xa2, 0xf2, 0xde, 0x42, 0xb6, 0xcc, 0x41, 0x4a, 0xad, 0x37, 0xd5, 0x77, 0xe4, - 0x04, 0x42, 0x50, 0x62, 0x8f, 0xda, 0xf1, 0x41, 0xf5, 0xe8, 0xb8, 0x71, 0x48, 0x6d, 0xb9, 0x02, - 0x4b, 0xae, 0x2d, 0x5d, 0x62, 0x4a, 0x79, 0x1e, 0x1e, 0x8b, 0xa8, 0x2a, 0x26, 0x5b, 0x2a, 0xe5, - 0x37, 0x52, 0x90, 0x3b, 0x5c, 0x19, 0x1c, 0x42, 0xda, 0x21, 0x3a, 0x19, 0x3a, 0xc2, 0x88, 0xaf, - 0xcc, 0x5b, 0x66, 0x6c, 0xba, 0x0f, 0xc7, 0x4c, 0x5c, 0x15, 0x6a, 0x94, 0xdb, 0x50, 0x0a, 0xbf, - 0x89, 0xb6, 0x81, 0xef, 0x44, 0x71, 0xe5, 0x0e, 0xa0, 0xc9, 0xea, 0x63, 0x4a, 0x7b, 0x29, 0x4d, - 0x6b, 0x2f, 0x7f, 0x2b, 0xc1, 0xe3, 0x97, 0x54, 0x1a, 0xe8, 0xad, 0xb1, 0x45, 0xbe, 0xb6, 0x48, - 0x9d, 0xb2, 0xc9, 0x69, 0x63, 0xcb, 0xbc, 0x05, 0x85, 0x20, 0x7d, 0xbe, 0x45, 0x7e, 0x1b, 0xf7, - 0x0f, 0x71, 0xb8, 0x0f, 0xf6, 0x43, 0xa0, 0xf4, 0x1d, 0x43, 0xe0, 0xeb, 0x00, 0x64, 0xa4, 0x71, - 0xb7, 0x76, 0xf3, 0xe8, 0x13, 0x53, 0xf0, 0x45, 0xdc, 0x6a, 0x8e, 0xc4, 0x21, 0xc8, 0x11, 0xf1, - 0xe4, 0xa0, 0xe3, 0x20, 0x28, 0x30, 0x64, 0x39, 0xd6, 0x11, 0x0d, 0xf3, 0xbc, 0xc9, 0xd8, 0x07, - 0x0f, 0x38, 0xd9, 0x41, 0xef, 0xc0, 0x63, 0x63, 0x85, 0x82, 0xa7, 0x3a, 0x39, 0x6f, 0xbd, 0x70, - 0x25, 0x5c, 0x2f, 0xb8, 0xaa, 0x83, 0xd9, 0x3e, 0x15, 0xce, 0xf6, 0xef, 0x00, 0xf8, 0xe0, 0x00, - 0x8d, 0x30, 0xb6, 0x39, 0x1c, 0xb4, 0x99, 0x07, 0xa4, 0x54, 0x3e, 0x40, 0xb7, 0x21, 0x45, 0x3d, - 0xc9, 0xb5, 0xd3, 0x64, 0x28, 0xa6, 0x9e, 0x10, 0x00, 0x17, 0x38, 0xb7, 0x62, 0x00, 0x9a, 0x04, - 0x68, 0x23, 0xa6, 0x78, 0x23, 0x3c, 0xc5, 0x93, 0x91, 0x50, 0xef, 0xf4, 0xa9, 0x3e, 0x82, 0x14, - 0xdb, 0x79, 0x9a, 0x74, 0xd9, 0x5f, 0x01, 0x51, 0x2d, 0xd2, 0x67, 0xf4, 0x63, 0x00, 0x9d, 0x10, - 0xdb, 0x38, 0x1d, 0xfa, 0x13, 0xac, 0x4f, 0xf7, 0x9c, 0xaa, 0xcb, 0xb7, 0x73, 0x5d, 0xb8, 0xd0, - 0xaa, 0x2f, 0x1a, 0x70, 0xa3, 0x80, 0x42, 0xe5, 0x00, 0x4a, 0x61, 0x59, 0xb7, 0xbe, 0xe1, 0xdf, - 0x10, 0xae, 0x6f, 0x78, 0xb9, 0x2a, 0xea, 0x1b, 0xaf, 0x3a, 0x4a, 0xf0, 0x5f, 0x1f, 0x6c, 0xa0, - 0xfc, 0x24, 0x0e, 0x85, 0xa0, 0xe3, 0xfd, 0xe7, 0x95, 0x20, 0xca, 0xcf, 0x25, 0xc8, 0x7a, 0xcb, - 0x0f, 0xff, 0x07, 0x09, 0xfd, 0x38, 0xe2, 0xd6, 0x8b, 0x07, 0x7f, 0x5e, 0xf0, 0xdf, 0x44, 0x09, - 0xef, 0x37, 0xd1, 0x1d, 0x2f, 0xfd, 0x45, 0x01, 0x22, 0x41, 0x5b, 0x0b, 0xaf, 0x72, 0xb3, 0xfd, - 0x1d, 0xc8, 0x79, 0xa7, 0x97, 0x36, 0x1d, 0x2e, 0x70, 0x24, 0x89, 0x33, 0x24, 0x60, 0xbf, 0x55, - 0x48, 0x59, 
0xe6, 0x87, 0xe2, 0xcf, 0x48, 0x42, 0xe5, 0x03, 0xa5, 0x0d, 0x4b, 0x63, 0x47, 0x1f, - 0xdd, 0x81, 0x8c, 0x35, 0x3c, 0xd5, 0x5c, 0xe7, 0x18, 0x83, 0xd7, 0xdc, 0x72, 0x76, 0x78, 0xda, - 0x33, 0x5a, 0xf7, 0xf1, 0x85, 0xfb, 0x31, 0xd6, 0xf0, 0xf4, 0x3e, 0xf7, 0x21, 0x3e, 0x4b, 0x3c, - 0x38, 0xcb, 0x2f, 0x25, 0xc8, 0xba, 0x67, 0x02, 0xfd, 0x1f, 0xe4, 0xbc, 0xb0, 0xe2, 0xfd, 0xda, - 0x8c, 0x8c, 0x47, 0x42, 0xbf, 0x2f, 0x82, 0xaa, 0xee, 0x3f, 0x59, 0xa3, 0xad, 0x75, 0x7a, 0x3a, - 0xf7, 0xa5, 0x52, 0xd8, 0x66, 0x3c, 0xf0, 0xb0, 0x78, 0xbc, 0x7b, 0xf7, 0x5e, 0x4f, 0x3f, 0x53, - 0xf3, 0x4c, 0x66, 0xb7, 0x4d, 0x07, 0xa2, 0xb2, 0xfb, 0xbb, 0x04, 0xf2, 0xf8, 0x89, 0xfd, 0xce, - 0x5f, 0x37, 0x99, 0xe6, 0x12, 0x53, 0xd2, 0x1c, 0xda, 0x82, 0x15, 0x8f, 0x43, 0x73, 0x8c, 0xb3, - 0x81, 0x4e, 0x86, 0x36, 0x16, 0x80, 0x24, 0xf2, 0x5e, 0x1d, 0xbb, 0x6f, 0x26, 0x57, 0x9d, 0x7a, - 0xc4, 0x55, 0x7f, 0x1c, 0x87, 0x7c, 0x00, 0x1e, 0x45, 0xff, 0x1b, 0x08, 0x46, 0xa5, 0x29, 0x99, - 0x21, 0xc0, 0xeb, 0xff, 0xa6, 0x0c, 0x9b, 0x29, 0xbe, 0xb8, 0x99, 0xa2, 0x40, 0x68, 0x17, 0x6d, - 0x4d, 0x2e, 0x8c, 0xb6, 0xbe, 0x00, 0x88, 0x98, 0x44, 0xef, 0x69, 0xe7, 0x26, 0x31, 0x06, 0x67, - 0x1a, 0x77, 0x43, 0x1e, 0x3a, 0x64, 0xf6, 0xe6, 0x01, 0x7b, 0x71, 0xc4, 0x3c, 0xf2, 0xa7, 0x12, - 0x64, 0xbd, 0xb2, 0x7b, 0xd1, 0x9f, 0x98, 0x57, 0x21, 0x2d, 0x2a, 0x4b, 0xfe, 0x17, 0x53, 0x8c, - 0xa6, 0xc2, 0xca, 0x15, 0xc8, 0xf6, 0x31, 0xd1, 0x59, 0x1c, 0xe4, 0x59, 0xcd, 0x1b, 0xdf, 0x7c, - 0x0d, 0xf2, 0x81, 0x1f, 0xc0, 0x34, 0x34, 0x1e, 0xd4, 0xdf, 0x96, 0x63, 0x95, 0xcc, 0x27, 0x9f, - 0x6f, 0x24, 0x0e, 0xf0, 0x87, 0xf4, 0x34, 0xab, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x2f, 0x4b, 0x95, - 0xfc, 0x27, 0x9f, 0x6f, 0x64, 0x54, 0xcc, 0x10, 0xc5, 0x9b, 0xf7, 0x61, 0x69, 0x6c, 0x63, 0xc2, - 0x65, 0x0b, 0x82, 0xd2, 0xdd, 0x93, 0xa3, 0xfd, 0xdd, 0x5a, 0xb5, 0x59, 0xd7, 0x1e, 0x1c, 0x36, - 0xeb, 0xb2, 0x84, 0x1e, 0x83, 0x95, 0xfd, 0xdd, 0xff, 0x6f, 0x34, 0xb5, 0xda, 0xfe, 0x6e, 0xfd, - 0xa0, 0xa9, 0x55, 0x9b, 0xcd, 0x6a, 0xed, 0xbe, 0x1c, 0xdf, 0xfe, 0x3c, 0x0f, 0xc9, 0xea, 0x4e, - 0x6d, 0x17, 0xd5, 0x20, 0xc9, 0xa0, 0x90, 0x4b, 0x6f, 0x80, 0x55, 0x2e, 0xc7, 0x86, 0xd1, 0x3d, - 0x48, 0x31, 0x94, 0x04, 0x5d, 0x7e, 0x25, 0xac, 0x32, 0x03, 0x2c, 0xa6, 0x1f, 0xc3, 0x4e, 0xe4, - 0xa5, 0x77, 0xc4, 0x2a, 0x97, 0x63, 0xc7, 0x68, 0x1f, 0x32, 0x6e, 0x93, 0x3c, 0xeb, 0xe2, 0x56, - 0x65, 0x26, 0xa0, 0x4b, 0x97, 0xc6, 0xc1, 0x86, 0xcb, 0xaf, 0x8f, 0x55, 0x66, 0xa0, 0xca, 0x68, - 0x17, 0xd2, 0xa2, 0x1d, 0x9d, 0x71, 0x23, 0xac, 0x32, 0x0b, 0x27, 0x46, 0x2a, 0xe4, 0x7c, 0x18, - 0x67, 0xf6, 0xa5, 0xb8, 0xca, 0x1c, 0x80, 0x39, 0x7a, 0x17, 0x8a, 0xe1, 0x56, 0x77, 0xbe, 0x5b, - 0x67, 0x95, 0x39, 0x11, 0x69, 0xaa, 0x3f, 0xdc, 0xf7, 0xce, 0x77, 0x0b, 0xad, 0x32, 0x27, 0x40, - 0x8d, 0xde, 0x87, 0xe5, 0xc9, 0xbe, 0x74, 0xfe, 0x4b, 0x69, 0x95, 0x05, 0x20, 0x6b, 0xd4, 0x07, - 0x34, 0xa5, 0x9f, 0x5d, 0xe0, 0x8e, 0x5a, 0x65, 0x11, 0x04, 0x1b, 0xb5, 0x61, 0x69, 0xbc, 0x49, - 0x9c, 0xf7, 0xce, 0x5a, 0x65, 0x6e, 0x34, 0x9b, 0xcf, 0x12, 0x6e, 0x2e, 0xe7, 0xbd, 0xc3, 0x56, - 0x99, 0x1b, 0xdc, 0x46, 0x27, 0x00, 0x81, 0xfe, 0x70, 0x8e, 0x3b, 0x6d, 0x95, 0x79, 0x60, 0x6e, - 0x64, 0xc1, 0xca, 0xb4, 0xc6, 0x71, 0x91, 0x2b, 0x6e, 0x95, 0x85, 0xd0, 0x6f, 0xea, 0xcf, 0xe1, - 0x16, 0x70, 0xbe, 0x2b, 0x6f, 0x95, 0x39, 0x61, 0xf0, 0x9d, 0xea, 0x17, 0x5f, 0xaf, 0x49, 0x5f, - 0x7e, 0xbd, 0x26, 0xfd, 0xed, 0xeb, 0x35, 0xe9, 0xd3, 0x6f, 0xd6, 0x62, 0x5f, 0x7e, 0xb3, 0x16, - 0xfb, 0xcb, 0x37, 0x6b, 0xb1, 0x1f, 0x3c, 0x7b, 0x66, 0x90, 0xee, 0xf0, 0x74, 0xb3, 0x65, 0xf6, - 0xb7, 0x5a, 0x66, 0x1f, 0x93, 0xd3, 
0x0e, 0xf1, 0x1f, 0xfc, 0x9b, 0xcb, 0xa7, 0x69, 0x96, 0x41, - 0x6f, 0xfd, 0x33, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd0, 0x90, 0x6e, 0xd9, 0x2c, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ABCIClient is the client API for ABCI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ABCIClient interface { - Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) - Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) - Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) - CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) - Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) - Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) - InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) - ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) - OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) - LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) - ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) - PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) - ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) - ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) - VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) - FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) -} - -type aBCIClient struct { - cc grpc1.ClientConn -} - -func NewABCIClient(cc grpc1.ClientConn) ABCIClient { - return &aBCIClient{cc} -} - -func (c *aBCIClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { - out := new(ResponseEcho) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/Echo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { - out := new(ResponseFlush) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/Flush", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { - out := new(ResponseInfo) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/Info", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { - out := new(ResponseCheckTx) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/CheckTx", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { - out := new(ResponseQuery) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/Query", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { - out := new(ResponseCommit) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/Commit", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { - out := new(ResponseInitChain) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/InitChain", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { - out := new(ResponseListSnapshots) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/ListSnapshots", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) { - out := new(ResponseOfferSnapshot) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/OfferSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) { - out := new(ResponseLoadSnapshotChunk) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/LoadSnapshotChunk", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) { - out := new(ResponseApplySnapshotChunk) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/ApplySnapshotChunk", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) { - out := new(ResponsePrepareProposal) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/PrepareProposal", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) { - out := new(ResponseProcessProposal) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/ProcessProposal", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) { - out := new(ResponseExtendVote) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/ExtendVote", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) { - out := new(ResponseVerifyVoteExtension) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/VerifyVoteExtension", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIClient) FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) { - out := new(ResponseFinalizeBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCI/FinalizeBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ABCIServer is the server API for ABCI service. -type ABCIServer interface { - Echo(context.Context, *RequestEcho) (*ResponseEcho, error) - Flush(context.Context, *RequestFlush) (*ResponseFlush, error) - Info(context.Context, *RequestInfo) (*ResponseInfo, error) - CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) - Query(context.Context, *RequestQuery) (*ResponseQuery, error) - Commit(context.Context, *RequestCommit) (*ResponseCommit, error) - InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) - ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) - OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) - LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) - ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) - PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) - ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) - ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) - VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) - FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) -} - -// UnimplementedABCIServer can be embedded to have forward compatible implementations. 
-type UnimplementedABCIServer struct { -} - -func (*UnimplementedABCIServer) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { - return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") -} -func (*UnimplementedABCIServer) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { - return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") -} -func (*UnimplementedABCIServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { - return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") -} -func (*UnimplementedABCIServer) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") -} -func (*UnimplementedABCIServer) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { - return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") -} -func (*UnimplementedABCIServer) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { - return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") -} -func (*UnimplementedABCIServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { - return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") -} -func (*UnimplementedABCIServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") -} -func (*UnimplementedABCIServer) OfferSnapshot(ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") -} -func (*UnimplementedABCIServer) LoadSnapshotChunk(ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { - return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") -} -func (*UnimplementedABCIServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { - return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") -} -func (*UnimplementedABCIServer) PrepareProposal(ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { - return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") -} -func (*UnimplementedABCIServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") -} -func (*UnimplementedABCIServer) ExtendVote(ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { - return nil, status.Errorf(codes.Unimplemented, "method ExtendVote not implemented") -} -func (*UnimplementedABCIServer) VerifyVoteExtension(ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") -} -func (*UnimplementedABCIServer) FinalizeBlock(ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") -} - -func RegisterABCIServer(s grpc1.Server, srv ABCIServer) { - s.RegisterService(&_ABCI_serviceDesc, srv) -} - 
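For context on the stub removed above: `UnimplementedABCIServer` supports the standard gRPC forward-compatibility pattern, in which an application embeds the stub, satisfies the full `ABCIServer` interface for free, and overrides only the methods it actually implements; every other call falls through to the generated `codes.Unimplemented` response. A hypothetical sketch against the removed names (`myApp` and its `Info` body are illustrative, not part of this diff):

// myApp embeds UnimplementedABCIServer, so *myApp satisfies ABCIServer
// even though it overrides only Info; all other ABCI methods fall
// through to the embedded stubs and return codes.Unimplemented.
type myApp struct {
	UnimplementedABCIServer
}

// Info is the one method myApp chooses to implement.
func (a *myApp) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
	return &ResponseInfo{Data: "my-app"}, nil
}

// Registration then wires *myApp into the generated service descriptor:
//
//	RegisterABCIServer(grpcServer, &myApp{})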
-func _ABCI_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestEcho) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).Echo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/Echo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).Echo(ctx, req.(*RequestEcho)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestFlush) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).Flush(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/Flush", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).Flush(ctx, req.(*RequestFlush)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestInfo) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).Info(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/Info", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).Info(ctx, req.(*RequestInfo)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestCheckTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).CheckTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/CheckTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).CheckTx(ctx, req.(*RequestCheckTx)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestQuery) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).Query(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/Query", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).Query(ctx, req.(*RequestQuery)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestCommit) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).Commit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/Commit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).Commit(ctx, req.(*RequestCommit)) - } - return 
interceptor(ctx, in, info, handler) -} - -func _ABCI_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestInitChain) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).InitChain(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/InitChain", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).InitChain(ctx, req.(*RequestInitChain)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestListSnapshots) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).ListSnapshots(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/ListSnapshots", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).ListSnapshots(ctx, req.(*RequestListSnapshots)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestOfferSnapshot) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).OfferSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/OfferSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).OfferSnapshot(ctx, req.(*RequestOfferSnapshot)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestLoadSnapshotChunk) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).LoadSnapshotChunk(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/LoadSnapshotChunk", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).LoadSnapshotChunk(ctx, req.(*RequestLoadSnapshotChunk)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestApplySnapshotChunk) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).ApplySnapshotChunk(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/ApplySnapshotChunk", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).ApplySnapshotChunk(ctx, req.(*RequestApplySnapshotChunk)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPrepareProposal) - if err := dec(in); err != nil { - return nil, 
err - } - if interceptor == nil { - return srv.(ABCIServer).PrepareProposal(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/PrepareProposal", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).PrepareProposal(ctx, req.(*RequestPrepareProposal)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestProcessProposal) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).ProcessProposal(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/ProcessProposal", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).ProcessProposal(ctx, req.(*RequestProcessProposal)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestExtendVote) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).ExtendVote(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/ExtendVote", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).ExtendVote(ctx, req.(*RequestExtendVote)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestVerifyVoteExtension) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).VerifyVoteExtension(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/VerifyVoteExtension", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).VerifyVoteExtension(ctx, req.(*RequestVerifyVoteExtension)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCI_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestFinalizeBlock) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIServer).FinalizeBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCI/FinalizeBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIServer).FinalizeBlock(ctx, req.(*RequestFinalizeBlock)) - } - return interceptor(ctx, in, info, handler) -} - -var _ABCI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.abci.ABCI", - HandlerType: (*ABCIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Echo", - Handler: _ABCI_Echo_Handler, - }, - { - MethodName: "Flush", - Handler: _ABCI_Flush_Handler, - }, - { - MethodName: "Info", - Handler: _ABCI_Info_Handler, - }, - { - MethodName: "CheckTx", - Handler: _ABCI_CheckTx_Handler, - }, - { - MethodName: "Query", - Handler: _ABCI_Query_Handler, - }, - { - MethodName: "Commit", - Handler: _ABCI_Commit_Handler, - }, 
- { - MethodName: "InitChain", - Handler: _ABCI_InitChain_Handler, - }, - { - MethodName: "ListSnapshots", - Handler: _ABCI_ListSnapshots_Handler, - }, - { - MethodName: "OfferSnapshot", - Handler: _ABCI_OfferSnapshot_Handler, - }, - { - MethodName: "LoadSnapshotChunk", - Handler: _ABCI_LoadSnapshotChunk_Handler, - }, - { - MethodName: "ApplySnapshotChunk", - Handler: _ABCI_ApplySnapshotChunk_Handler, - }, - { - MethodName: "PrepareProposal", - Handler: _ABCI_PrepareProposal_Handler, - }, - { - MethodName: "ProcessProposal", - Handler: _ABCI_ProcessProposal_Handler, - }, - { - MethodName: "ExtendVote", - Handler: _ABCI_ExtendVote_Handler, - }, - { - MethodName: "VerifyVoteExtension", - Handler: _ABCI_VerifyVoteExtension_Handler, - }, - { - MethodName: "FinalizeBlock", - Handler: _ABCI_FinalizeBlock_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/abci/types.proto", + proto.RegisterEnum("cometbft.abci.v1.CheckTxType", CheckTxType_name, CheckTxType_value) + proto.RegisterEnum("cometbft.abci.v1.OfferSnapshotResult", OfferSnapshotResult_name, OfferSnapshotResult_value) + proto.RegisterEnum("cometbft.abci.v1.ApplySnapshotChunkResult", ApplySnapshotChunkResult_name, ApplySnapshotChunkResult_value) + proto.RegisterEnum("cometbft.abci.v1.ProcessProposalStatus", ProcessProposalStatus_name, ProcessProposalStatus_value) + proto.RegisterEnum("cometbft.abci.v1.VerifyVoteExtensionStatus", VerifyVoteExtensionStatus_name, VerifyVoteExtensionStatus_value) + proto.RegisterEnum("cometbft.abci.v1.MisbehaviorType", MisbehaviorType_name, MisbehaviorType_value) + proto.RegisterType((*Request)(nil), "cometbft.abci.v1.Request") + proto.RegisterType((*EchoRequest)(nil), "cometbft.abci.v1.EchoRequest") + proto.RegisterType((*FlushRequest)(nil), "cometbft.abci.v1.FlushRequest") + proto.RegisterType((*InfoRequest)(nil), "cometbft.abci.v1.InfoRequest") + proto.RegisterType((*InitChainRequest)(nil), "cometbft.abci.v1.InitChainRequest") + proto.RegisterType((*QueryRequest)(nil), "cometbft.abci.v1.QueryRequest") + proto.RegisterType((*CheckTxRequest)(nil), "cometbft.abci.v1.CheckTxRequest") + proto.RegisterType((*CommitRequest)(nil), "cometbft.abci.v1.CommitRequest") + proto.RegisterType((*ListSnapshotsRequest)(nil), "cometbft.abci.v1.ListSnapshotsRequest") + proto.RegisterType((*OfferSnapshotRequest)(nil), "cometbft.abci.v1.OfferSnapshotRequest") + proto.RegisterType((*LoadSnapshotChunkRequest)(nil), "cometbft.abci.v1.LoadSnapshotChunkRequest") + proto.RegisterType((*ApplySnapshotChunkRequest)(nil), "cometbft.abci.v1.ApplySnapshotChunkRequest") + proto.RegisterType((*PrepareProposalRequest)(nil), "cometbft.abci.v1.PrepareProposalRequest") + proto.RegisterType((*ProcessProposalRequest)(nil), "cometbft.abci.v1.ProcessProposalRequest") + proto.RegisterType((*ExtendVoteRequest)(nil), "cometbft.abci.v1.ExtendVoteRequest") + proto.RegisterType((*VerifyVoteExtensionRequest)(nil), "cometbft.abci.v1.VerifyVoteExtensionRequest") + proto.RegisterType((*FinalizeBlockRequest)(nil), "cometbft.abci.v1.FinalizeBlockRequest") + proto.RegisterType((*Response)(nil), "cometbft.abci.v1.Response") + proto.RegisterType((*ExceptionResponse)(nil), "cometbft.abci.v1.ExceptionResponse") + proto.RegisterType((*EchoResponse)(nil), "cometbft.abci.v1.EchoResponse") + proto.RegisterType((*FlushResponse)(nil), "cometbft.abci.v1.FlushResponse") + proto.RegisterType((*InfoResponse)(nil), "cometbft.abci.v1.InfoResponse") + proto.RegisterMapType((map[string]uint32)(nil), "cometbft.abci.v1.InfoResponse.LanePrioritiesEntry") + 
proto.RegisterType((*InitChainResponse)(nil), "cometbft.abci.v1.InitChainResponse") + proto.RegisterType((*QueryResponse)(nil), "cometbft.abci.v1.QueryResponse") + proto.RegisterType((*CheckTxResponse)(nil), "cometbft.abci.v1.CheckTxResponse") + proto.RegisterType((*CommitResponse)(nil), "cometbft.abci.v1.CommitResponse") + proto.RegisterType((*ListSnapshotsResponse)(nil), "cometbft.abci.v1.ListSnapshotsResponse") + proto.RegisterType((*OfferSnapshotResponse)(nil), "cometbft.abci.v1.OfferSnapshotResponse") + proto.RegisterType((*LoadSnapshotChunkResponse)(nil), "cometbft.abci.v1.LoadSnapshotChunkResponse") + proto.RegisterType((*ApplySnapshotChunkResponse)(nil), "cometbft.abci.v1.ApplySnapshotChunkResponse") + proto.RegisterType((*PrepareProposalResponse)(nil), "cometbft.abci.v1.PrepareProposalResponse") + proto.RegisterType((*ProcessProposalResponse)(nil), "cometbft.abci.v1.ProcessProposalResponse") + proto.RegisterType((*ExtendVoteResponse)(nil), "cometbft.abci.v1.ExtendVoteResponse") + proto.RegisterType((*VerifyVoteExtensionResponse)(nil), "cometbft.abci.v1.VerifyVoteExtensionResponse") + proto.RegisterType((*FinalizeBlockResponse)(nil), "cometbft.abci.v1.FinalizeBlockResponse") + proto.RegisterType((*CommitInfo)(nil), "cometbft.abci.v1.CommitInfo") + proto.RegisterType((*ExtendedCommitInfo)(nil), "cometbft.abci.v1.ExtendedCommitInfo") + proto.RegisterType((*Event)(nil), "cometbft.abci.v1.Event") + proto.RegisterType((*EventAttribute)(nil), "cometbft.abci.v1.EventAttribute") + proto.RegisterType((*ExecTxResult)(nil), "cometbft.abci.v1.ExecTxResult") + proto.RegisterType((*TxResult)(nil), "cometbft.abci.v1.TxResult") + proto.RegisterType((*Validator)(nil), "cometbft.abci.v1.Validator") + proto.RegisterType((*ValidatorUpdate)(nil), "cometbft.abci.v1.ValidatorUpdate") + proto.RegisterType((*VoteInfo)(nil), "cometbft.abci.v1.VoteInfo") + proto.RegisterType((*ExtendedVoteInfo)(nil), "cometbft.abci.v1.ExtendedVoteInfo") + proto.RegisterType((*Misbehavior)(nil), "cometbft.abci.v1.Misbehavior") + proto.RegisterType((*Snapshot)(nil), "cometbft.abci.v1.Snapshot") +} + +func init() { proto.RegisterFile("cometbft/abci/v1/types.proto", fileDescriptor_95dd8f7b670b96e3) } + +var fileDescriptor_95dd8f7b670b96e3 = []byte{ + // 3301 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0xf5, 0xf7, 0xf2, 0x4b, 0xe4, 0xe3, 0x87, 0x56, 0x23, 0xc9, 0xa6, 0x15, 0x47, 0x92, 0xd7, 0x71, + 0xec, 0xd8, 0x89, 0xf4, 0xb7, 0x93, 0x7f, 0x3e, 0x9b, 0x04, 0x14, 0x4d, 0x45, 0x92, 0x65, 0x89, + 0x59, 0xd2, 0x6a, 0xec, 0x7e, 0x6c, 0x56, 0xe4, 0x50, 0xdc, 0x98, 0xdc, 0xdd, 0xec, 0x0e, 0x15, + 0xa9, 0x3d, 0xb5, 0x68, 0x8a, 0x22, 0xa7, 0x5c, 0x0a, 0x14, 0x45, 0x0b, 0x14, 0x28, 0x7a, 0xed, + 0xa1, 0xf7, 0xde, 0x8a, 0x22, 0xa7, 0x36, 0xc7, 0x9e, 0xd2, 0x22, 0xb9, 0xf5, 0x1e, 0xa0, 0x40, + 0x2f, 0xc5, 0x7c, 0xec, 0x17, 0xb9, 0x2b, 0xd9, 0x4e, 0x7a, 0x28, 0xda, 0x1b, 0x67, 0xe6, 0xf7, + 0xde, 0xcc, 0xbe, 0x79, 0xf3, 0xde, 0x9b, 0xdf, 0x10, 0x2e, 0x74, 0xac, 0x21, 0x26, 0xfb, 0x3d, + 0xb2, 0xaa, 0xef, 0x77, 0x8c, 0xd5, 0xc3, 0x1b, 0xab, 0xe4, 0xd8, 0xc6, 0xee, 0x8a, 0xed, 0x58, + 0xc4, 0x42, 0xb2, 0x37, 0xba, 0x42, 0x47, 0x57, 0x0e, 0x6f, 0x2c, 0x2c, 0xfa, 0xf8, 0x8e, 0x73, + 0x6c, 0x13, 0x8b, 0x4a, 0xd8, 0x8e, 0x65, 0xf5, 0xb8, 0x44, 0x68, 0x9c, 0xe9, 0x61, 0xc3, 0xba, + 0xa3, 0x0f, 0x85, 0xc6, 0x85, 0x8b, 0x93, 0xe3, 0x87, 0xfa, 0xc0, 0xe8, 0xea, 0xc4, 0x72, 0x04, + 0x64, 0xee, 0xc0, 0x3a, 0xb0, 0xd8, 0xcf, 0x55, 0xfa, 0x4b, 0xf4, 0x2e, 0x1d, 
0x58, 0xd6, 0xc1, + 0x00, 0xaf, 0xb2, 0xd6, 0xfe, 0xa8, 0xb7, 0x4a, 0x8c, 0x21, 0x76, 0x89, 0x3e, 0xb4, 0xbd, 0x99, + 0xc7, 0x01, 0xdd, 0x91, 0xa3, 0x13, 0xc3, 0x32, 0xf9, 0xb8, 0xf2, 0xe7, 0x02, 0x4c, 0xa9, 0xf8, + 0xfd, 0x11, 0x76, 0x09, 0x7a, 0x1e, 0x32, 0xb8, 0xd3, 0xb7, 0xaa, 0xd2, 0xb2, 0x74, 0xb5, 0x78, + 0xf3, 0xc9, 0x95, 0xf1, 0xcf, 0x5c, 0x69, 0x74, 0xfa, 0x96, 0x00, 0x6f, 0x9c, 0x51, 0x19, 0x18, + 0xbd, 0x08, 0xd9, 0xde, 0x60, 0xe4, 0xf6, 0xab, 0x29, 0x26, 0xb5, 0x38, 0x29, 0xb5, 0x4e, 0x87, + 0x03, 0x31, 0x0e, 0xa7, 0x93, 0x19, 0x66, 0xcf, 0xaa, 0xa6, 0x93, 0x26, 0xdb, 0x34, 0x7b, 0xe1, + 0xc9, 0x28, 0x18, 0xd5, 0x01, 0x0c, 0xd3, 0x20, 0x5a, 0xa7, 0xaf, 0x1b, 0x66, 0x35, 0xcb, 0x44, + 0x95, 0x38, 0x51, 0x83, 0xd4, 0x29, 0x24, 0x90, 0x2f, 0x18, 0x5e, 0x1f, 0x5d, 0xf1, 0xfb, 0x23, + 0xec, 0x1c, 0x57, 0x73, 0x49, 0x2b, 0x7e, 0x9b, 0x0e, 0x87, 0x56, 0xcc, 0xe0, 0xe8, 0x75, 0xc8, + 0x77, 0xfa, 0xb8, 0xf3, 0x40, 0x23, 0x47, 0xd5, 0x3c, 0x13, 0x5d, 0x9e, 0x14, 0xad, 0x53, 0x44, + 0xfb, 0x28, 0x10, 0x9e, 0xea, 0xf0, 0x1e, 0xf4, 0x0a, 0xe4, 0x3a, 0xd6, 0x70, 0x68, 0x90, 0x6a, + 0x91, 0x09, 0x2f, 0xc5, 0x08, 0xb3, 0xf1, 0x40, 0x56, 0x08, 0xa0, 0x5d, 0xa8, 0x0c, 0x0c, 0x97, + 0x68, 0xae, 0xa9, 0xdb, 0x6e, 0xdf, 0x22, 0x6e, 0xb5, 0xc4, 0x54, 0x3c, 0x3d, 0xa9, 0x62, 0xdb, + 0x70, 0x49, 0xcb, 0x83, 0x05, 0x9a, 0xca, 0x83, 0x70, 0x3f, 0x55, 0x68, 0xf5, 0x7a, 0xd8, 0xf1, + 0x35, 0x56, 0xcb, 0x49, 0x0a, 0x77, 0x29, 0xce, 0x93, 0x0c, 0x29, 0xb4, 0xc2, 0xfd, 0xe8, 0xdb, + 0x30, 0x3b, 0xb0, 0xf4, 0xae, 0xaf, 0x4f, 0xeb, 0xf4, 0x47, 0xe6, 0x83, 0x6a, 0x85, 0x69, 0xbd, + 0x16, 0xb3, 0x4c, 0x4b, 0xef, 0x7a, 0xc2, 0x75, 0x0a, 0x0d, 0x34, 0xcf, 0x0c, 0xc6, 0xc7, 0x90, + 0x06, 0x73, 0xba, 0x6d, 0x0f, 0x8e, 0xc7, 0xd5, 0x4f, 0x33, 0xf5, 0xd7, 0x27, 0xd5, 0xd7, 0x28, + 0x3a, 0x41, 0x3f, 0xd2, 0x27, 0x06, 0xd1, 0x5d, 0x90, 0x6d, 0x07, 0xdb, 0xba, 0x83, 0x35, 0xdb, + 0xb1, 0x6c, 0xcb, 0xd5, 0x07, 0x55, 0x99, 0x29, 0xbf, 0x3a, 0xa9, 0xbc, 0xc9, 0x91, 0x4d, 0x01, + 0x0c, 0x34, 0x4f, 0xdb, 0xd1, 0x11, 0xae, 0xd6, 0xea, 0x60, 0xd7, 0x0d, 0xd4, 0xce, 0x24, 0xab, + 0x65, 0xc8, 0x58, 0xb5, 0x91, 0x11, 0xb4, 0x0e, 0x45, 0x7c, 0x44, 0xb0, 0xd9, 0xd5, 0x0e, 0x2d, + 0x82, 0xab, 0x88, 0x69, 0xbc, 0x14, 0x73, 0x5c, 0x19, 0x68, 0xcf, 0x22, 0x38, 0x50, 0x06, 0xd8, + 0xef, 0x44, 0xfb, 0x30, 0x7f, 0x88, 0x1d, 0xa3, 0x77, 0xcc, 0xf4, 0x68, 0x6c, 0xc4, 0x35, 0x2c, + 0xb3, 0x3a, 0xcb, 0x34, 0x3e, 0x3b, 0xa9, 0x71, 0x8f, 0xc1, 0xa9, 0x70, 0xc3, 0x03, 0x07, 0xaa, + 0x67, 0x0f, 0x27, 0x47, 0xa9, 0xa7, 0xf5, 0x0c, 0x53, 0x1f, 0x18, 0xdf, 0xc3, 0xda, 0xfe, 0xc0, + 0xea, 0x3c, 0xa8, 0xce, 0x25, 0x79, 0xda, 0xba, 0xc0, 0xad, 0x51, 0x58, 0xc8, 0xd3, 0x7a, 0xe1, + 0xfe, 0xb5, 0x29, 0xc8, 0x1e, 0xea, 0x83, 0x11, 0xde, 0xca, 0xe4, 0x33, 0x72, 0x76, 0x2b, 0x93, + 0x9f, 0x92, 0xf3, 0x5b, 0x99, 0x7c, 0x41, 0x86, 0xad, 0x4c, 0x1e, 0xe4, 0xa2, 0x72, 0x05, 0x8a, + 0xa1, 0x38, 0x85, 0xaa, 0x30, 0x35, 0xc4, 0xae, 0xab, 0x1f, 0x60, 0x16, 0xd7, 0x0a, 0xaa, 0xd7, + 0x54, 0x2a, 0x50, 0x0a, 0x87, 0x26, 0xe5, 0x63, 0x09, 0x8a, 0xa1, 0xa0, 0x43, 0x25, 0x0f, 0xb1, + 0xc3, 0x0c, 0x22, 0x24, 0x45, 0x13, 0x5d, 0x82, 0x32, 0xfb, 0x16, 0xcd, 0x1b, 0xa7, 0xb1, 0x2f, + 0xa3, 0x96, 0x58, 0xe7, 0x9e, 0x00, 0x2d, 0x41, 0xd1, 0xbe, 0x69, 0xfb, 0x90, 0x34, 0x83, 0x80, + 0x7d, 0xd3, 0xf6, 0x00, 0x17, 0xa1, 0x44, 0x3f, 0xdd, 0x47, 0x64, 0xd8, 0x24, 0x45, 0xda, 0x27, + 0x20, 0xca, 0x9f, 0x52, 0x20, 0x8f, 0x07, 0x33, 0xf4, 0x32, 0x64, 0x68, 0x94, 0x17, 0x61, 0x7a, + 0x61, 0x85, 0x47, 0xf8, 0x15, 0x2f, 0xc2, 0xaf, 0xb4, 0xbd, 0x14, 0xb0, 0x96, 0xff, 0xe4, 0xb3, + 0xa5, 
0x33, 0x1f, 0xff, 0x75, 0x49, 0x52, 0x99, 0x04, 0x3a, 0x4f, 0x23, 0x98, 0x6e, 0x98, 0x9a, + 0xd1, 0x65, 0x4b, 0x2e, 0xd0, 0xe8, 0xa4, 0x1b, 0xe6, 0x66, 0x17, 0xdd, 0x01, 0xb9, 0x63, 0x99, + 0x2e, 0x36, 0xdd, 0x91, 0xab, 0xf1, 0xdc, 0x24, 0x42, 0x73, 0x28, 0xbe, 0xf2, 0x24, 0xc8, 0x02, + 0x95, 0x80, 0x36, 0x19, 0x52, 0x9d, 0xee, 0x44, 0x3b, 0xd0, 0x5b, 0x00, 0x7e, 0x02, 0x73, 0xab, + 0x99, 0xe5, 0xf4, 0xd5, 0xe2, 0xcd, 0x8b, 0x31, 0xfe, 0xe4, 0x61, 0xee, 0xda, 0x5d, 0x9d, 0xe0, + 0xb5, 0x0c, 0x5d, 0xb0, 0x1a, 0x12, 0x45, 0x4f, 0xc3, 0xb4, 0x6e, 0xdb, 0x9a, 0x4b, 0x74, 0x82, + 0xb5, 0xfd, 0x63, 0x82, 0x5d, 0x16, 0xf6, 0x4b, 0x6a, 0x59, 0xb7, 0xed, 0x16, 0xed, 0x5d, 0xa3, + 0x9d, 0xe8, 0x32, 0x54, 0x68, 0x84, 0x37, 0xf4, 0x81, 0xd6, 0xc7, 0xc6, 0x41, 0x9f, 0xb0, 0xe8, + 0x9e, 0x56, 0xcb, 0xa2, 0x77, 0x83, 0x75, 0x2a, 0x5d, 0x28, 0x85, 0x83, 0x3b, 0x42, 0x90, 0xe9, + 0xea, 0x44, 0x67, 0xb6, 0x2c, 0xa9, 0xec, 0x37, 0xed, 0xb3, 0x75, 0xd2, 0x17, 0x16, 0x62, 0xbf, + 0xd1, 0x59, 0xc8, 0x09, 0xb5, 0x69, 0xa6, 0x56, 0xb4, 0xd0, 0x1c, 0x64, 0x6d, 0xc7, 0x3a, 0xc4, + 0x6c, 0xf3, 0xf2, 0x2a, 0x6f, 0x28, 0xf7, 0xa0, 0x12, 0xcd, 0x03, 0xa8, 0x02, 0x29, 0x72, 0x24, + 0x66, 0x49, 0x91, 0x23, 0x74, 0x03, 0x32, 0xd4, 0x98, 0x4c, 0x5b, 0x25, 0x2e, 0xfb, 0x09, 0xf9, + 0xf6, 0xb1, 0x8d, 0x55, 0x06, 0xdd, 0xca, 0xe4, 0x53, 0x72, 0x5a, 0x99, 0x86, 0x72, 0x24, 0x4b, + 0x28, 0x67, 0x61, 0x2e, 0x2e, 0xe6, 0x2b, 0x06, 0xcc, 0xc5, 0x85, 0x6e, 0xf4, 0x22, 0xe4, 0xfd, + 0xa0, 0xef, 0x79, 0xd0, 0xc4, 0xec, 0xbe, 0x90, 0x8f, 0xa5, 0xbe, 0x43, 0x37, 0xa2, 0xaf, 0x8b, + 0x54, 0x5f, 0x52, 0xa7, 0x74, 0xdb, 0xde, 0xd0, 0xdd, 0xbe, 0xf2, 0x2e, 0x54, 0x93, 0xe2, 0x79, + 0xc8, 0x70, 0x12, 0x3b, 0x00, 0x9e, 0xe1, 0xce, 0x42, 0xae, 0x67, 0x39, 0x43, 0x9d, 0x30, 0x65, + 0x65, 0x55, 0xb4, 0xa8, 0x41, 0x79, 0x6c, 0x4f, 0xb3, 0x6e, 0xde, 0x50, 0x34, 0x38, 0x9f, 0x18, + 0xd2, 0xa9, 0x88, 0x61, 0x76, 0x31, 0x37, 0x6f, 0x59, 0xe5, 0x8d, 0x40, 0x11, 0x5f, 0x2c, 0x6f, + 0xd0, 0x69, 0x5d, 0x6c, 0x76, 0xb1, 0xc3, 0xf4, 0x17, 0x54, 0xd1, 0x52, 0x7e, 0x9e, 0x86, 0xb3, + 0xf1, 0x71, 0x1d, 0x2d, 0x43, 0x69, 0xa8, 0x1f, 0x69, 0xe4, 0x48, 0xb8, 0x9f, 0xc4, 0x1c, 0x00, + 0x86, 0xfa, 0x51, 0xfb, 0x88, 0xfb, 0x9e, 0x0c, 0x69, 0x72, 0xe4, 0x56, 0x53, 0xcb, 0xe9, 0xab, + 0x25, 0x95, 0xfe, 0x44, 0x7b, 0x30, 0x33, 0xb0, 0x3a, 0xfa, 0x40, 0x1b, 0xe8, 0x2e, 0xd1, 0x44, + 0xda, 0xe7, 0xc7, 0xe9, 0xa9, 0xa4, 0x38, 0x8d, 0xbb, 0x7c, 0x63, 0x69, 0x08, 0x12, 0x07, 0x61, + 0x9a, 0x29, 0xd9, 0xd6, 0x5d, 0xc2, 0x87, 0x50, 0x03, 0x8a, 0x43, 0xc3, 0xdd, 0xc7, 0x7d, 0xfd, + 0xd0, 0xb0, 0x1c, 0x71, 0xae, 0x62, 0xbc, 0xe7, 0x4e, 0x00, 0x12, 0xaa, 0xc2, 0x72, 0xa1, 0x4d, + 0xc9, 0x46, 0xbc, 0xd9, 0x8b, 0x2c, 0xb9, 0x47, 0x8e, 0x2c, 0xff, 0x07, 0x73, 0x26, 0x3e, 0x22, + 0x5a, 0x70, 0x72, 0xb9, 0xa7, 0x4c, 0x31, 0xe3, 0x23, 0x3a, 0xe6, 0x9f, 0x75, 0x97, 0x3a, 0x0d, + 0x7a, 0x86, 0xe5, 0x46, 0xdb, 0x72, 0xb1, 0xa3, 0xe9, 0xdd, 0xae, 0x83, 0x5d, 0x97, 0x55, 0x55, + 0x25, 0x96, 0xef, 0x58, 0x7f, 0x8d, 0x77, 0x2b, 0x1f, 0xb1, 0xcd, 0x89, 0xcb, 0x8e, 0x9e, 0xe9, + 0xa5, 0xc0, 0xf4, 0x6d, 0x98, 0x13, 0xf2, 0xdd, 0x88, 0xf5, 0x79, 0x79, 0x7a, 0x21, 0xa9, 0xe8, + 0x0a, 0x59, 0x1d, 0x79, 0xf2, 0xc9, 0x86, 0x4f, 0x3f, 0xa6, 0xe1, 0x11, 0x64, 0x98, 0x59, 0x32, + 0x3c, 0xdc, 0xd0, 0xdf, 0xff, 0x69, 0x9b, 0xf1, 0x61, 0x1a, 0x66, 0x26, 0x0a, 0x0b, 0xff, 0xc3, + 0xa4, 0xd8, 0x0f, 0x4b, 0xc5, 0x7e, 0x58, 0xfa, 0x91, 0x3f, 0x4c, 0xec, 0x76, 0xe6, 0xf4, 0xdd, + 0xce, 0x7e, 0x9d, 0xbb, 0x9d, 0x7b, 0xcc, 0xdd, 0xfe, 0xb7, 0xee, 0xc3, 0x2f, 0x24, 0x58, 0x48, + 0x2e, 0xc7, 0x62, 0x37, 0xe4, 
0x3a, 0xcc, 0xf8, 0x4b, 0xf1, 0xd5, 0xf3, 0xf0, 0x28, 0xfb, 0x03, + 0x42, 0x7f, 0x62, 0xc6, 0xbb, 0x0c, 0x95, 0xb1, 0x6a, 0x91, 0x3b, 0x73, 0xf9, 0x30, 0xbc, 0x0c, + 0xe5, 0x77, 0x69, 0x98, 0x8b, 0x2b, 0xe8, 0x62, 0x4e, 0xac, 0x0a, 0xb3, 0x5d, 0xdc, 0x31, 0xba, + 0x8f, 0x7d, 0x60, 0x67, 0x84, 0xf8, 0xff, 0xce, 0xeb, 0xa4, 0x9f, 0xa0, 0x6b, 0x30, 0xe3, 0x1e, + 0x9b, 0x1d, 0xc3, 0x3c, 0xd0, 0x88, 0xe5, 0xd5, 0x46, 0x05, 0xb6, 0xf2, 0x69, 0x31, 0xd0, 0xb6, + 0x44, 0x75, 0xf4, 0x1b, 0x80, 0xbc, 0x8a, 0x5d, 0x9b, 0x16, 0x73, 0xa8, 0x0e, 0x05, 0x7c, 0xd4, + 0xc1, 0x36, 0xf1, 0x0a, 0xe0, 0x84, 0x3b, 0x86, 0x80, 0x78, 0x72, 0xf4, 0xae, 0xed, 0xcb, 0xa1, + 0x17, 0x04, 0xa5, 0x90, 0x48, 0x0e, 0xf0, 0x52, 0xdd, 0x17, 0xe5, 0x9c, 0xc2, 0x4b, 0x1e, 0xa7, + 0x90, 0x4e, 0xba, 0x29, 0x8b, 0xc2, 0xdd, 0x97, 0x13, 0xa4, 0xc2, 0x0b, 0x82, 0x54, 0xc8, 0x24, + 0x4d, 0xc7, 0xeb, 0xfb, 0x60, 0x3a, 0xc6, 0x2a, 0xdc, 0x8a, 0xb0, 0x0a, 0xb9, 0xa4, 0x4f, 0x0d, + 0x15, 0xe2, 0xc1, 0xa7, 0x06, 0xb4, 0xc2, 0x4b, 0x1e, 0xad, 0x30, 0x95, 0xb4, 0x68, 0x51, 0x79, + 0x06, 0x8b, 0xe6, 0xbc, 0xc2, 0x1b, 0x21, 0x5e, 0xa1, 0xc0, 0x64, 0x2f, 0x9e, 0xc0, 0x2b, 0xf8, + 0xd2, 0x3e, 0xb1, 0xf0, 0xaa, 0x4f, 0x2c, 0x94, 0x12, 0x59, 0x09, 0x51, 0x32, 0xfa, 0xc2, 0x1e, + 0xb3, 0xd0, 0x9c, 0x60, 0x16, 0x38, 0x11, 0x70, 0xe5, 0x54, 0x66, 0xc1, 0x57, 0x35, 0x46, 0x2d, + 0x34, 0x27, 0xa8, 0x85, 0x4a, 0x92, 0xc6, 0xb1, 0xfa, 0x34, 0xd0, 0x18, 0xe5, 0x16, 0xbe, 0x13, + 0xcf, 0x2d, 0x24, 0x5e, 0xfe, 0x63, 0x6a, 0x51, 0x5f, 0x75, 0x0c, 0xb9, 0xf0, 0x6e, 0x02, 0xb9, + 0x20, 0x27, 0x5d, 0x82, 0xe3, 0x2a, 0x51, 0x7f, 0x82, 0x38, 0x76, 0x61, 0x2f, 0x86, 0x5d, 0xe0, + 0x34, 0xc0, 0x33, 0x0f, 0xc1, 0x2e, 0xf8, 0xaa, 0x27, 0xe8, 0x85, 0xbd, 0x18, 0x7a, 0x01, 0x25, + 0xeb, 0x1d, 0x2b, 0xa0, 0xc2, 0x7a, 0xa3, 0xfc, 0xc2, 0x5b, 0x51, 0x7e, 0x61, 0xf6, 0xe4, 0xba, + 0x95, 0x97, 0x01, 0xbe, 0xb6, 0x30, 0xc1, 0xd0, 0x49, 0x22, 0x18, 0x38, 0x07, 0xf0, 0xdc, 0x43, + 0x12, 0x0c, 0xbe, 0xee, 0x58, 0x86, 0xa1, 0x39, 0xc1, 0x30, 0xcc, 0x27, 0x39, 0xdc, 0x58, 0x42, + 0x0a, 0x1c, 0x2e, 0x91, 0x62, 0xc8, 0xca, 0xb9, 0xad, 0x4c, 0x3e, 0x2f, 0x17, 0x38, 0xb9, 0xb0, + 0x95, 0xc9, 0x17, 0xe5, 0x92, 0xf2, 0x0c, 0x2d, 0x81, 0xc6, 0xe2, 0x1e, 0xbd, 0x70, 0x60, 0xc7, + 0xb1, 0x1c, 0x41, 0x16, 0xf0, 0x86, 0x72, 0x15, 0x4a, 0xe1, 0x10, 0x77, 0x02, 0x1d, 0x31, 0x0d, + 0xe5, 0x48, 0x54, 0x53, 0xfe, 0x99, 0x82, 0x52, 0x38, 0x5e, 0x45, 0x2e, 0xab, 0x05, 0x71, 0x59, + 0x0d, 0x91, 0x14, 0xa9, 0x28, 0x49, 0xb1, 0x04, 0x45, 0x7a, 0x61, 0x1b, 0xe3, 0x1f, 0x74, 0xdb, + 0xe7, 0x1f, 0xae, 0xc1, 0x0c, 0xcb, 0xb7, 0x9c, 0xca, 0x10, 0x99, 0x21, 0xc3, 0x33, 0x03, 0x1d, + 0x60, 0xc6, 0xe0, 0x99, 0x01, 0x3d, 0x07, 0xb3, 0x21, 0xac, 0x7f, 0x11, 0xe4, 0x57, 0x71, 0xd9, + 0x47, 0xd7, 0xf8, 0x8d, 0x10, 0x7d, 0x0b, 0xa6, 0x07, 0xba, 0x49, 0xdd, 0xdd, 0xb0, 0x1c, 0x83, + 0x18, 0xd8, 0x15, 0x45, 0xd4, 0xcd, 0x93, 0x43, 0xf2, 0xca, 0xb6, 0x6e, 0xe2, 0xa6, 0x2f, 0xd4, + 0x30, 0x89, 0x73, 0xac, 0x56, 0x06, 0x91, 0x4e, 0x74, 0x11, 0x4a, 0x5d, 0xdc, 0xd3, 0x47, 0x03, + 0xa2, 0xd1, 0x11, 0x16, 0x6f, 0x0b, 0x6a, 0x51, 0xf4, 0x51, 0x0d, 0x0b, 0x35, 0x98, 0x8d, 0xd1, + 0x44, 0x6b, 0x8f, 0x07, 0xf8, 0x58, 0xd8, 0x8f, 0xfe, 0xa4, 0x9b, 0xc6, 0xb6, 0x5a, 0xdc, 0x42, + 0x79, 0xe3, 0xd5, 0xd4, 0xcb, 0x92, 0xf2, 0x47, 0x09, 0x66, 0x26, 0x22, 0x7e, 0x2c, 0x4d, 0x22, + 0x7d, 0x5d, 0x34, 0x49, 0xea, 0xf1, 0x69, 0x92, 0xf0, 0xed, 0x3c, 0x1d, 0xbd, 0x9d, 0xff, 0x43, + 0x82, 0x72, 0x24, 0xf3, 0x50, 0x3f, 0xea, 0x58, 0x5d, 0x2c, 0xee, 0xcb, 0xec, 0x37, 0x35, 0xcd, + 0xc0, 0x3a, 0x10, 0xb7, 0x62, 0xfa, 0x93, 0xa2, 0xfc, 
0x5c, 0x5a, 0x10, 0x99, 0xd2, 0xbf, 0x6a, + 0xf3, 0xd2, 0x47, 0x5c, 0xb5, 0x85, 0x59, 0x73, 0x6c, 0xde, 0xa8, 0x59, 0x79, 0x09, 0xc3, 0x1b, + 0xe8, 0x15, 0x28, 0xb0, 0x47, 0x11, 0xcd, 0xb2, 0x5d, 0xc1, 0xa0, 0x87, 0xca, 0x3b, 0xfe, 0x72, + 0x22, 0x42, 0x95, 0xd5, 0xdb, 0xb5, 0x5d, 0x35, 0x6f, 0x8b, 0x5f, 0xa1, 0xa2, 0xab, 0x10, 0x29, + 0xba, 0x2e, 0x40, 0x81, 0x2e, 0xdf, 0xb5, 0xf5, 0x0e, 0xae, 0x02, 0x5b, 0x69, 0xd0, 0xa1, 0xfc, + 0x21, 0x05, 0xd3, 0x63, 0x89, 0x33, 0xf6, 0xe3, 0xbd, 0x83, 0x95, 0x0a, 0xb1, 0x40, 0x0f, 0x67, + 0x90, 0x45, 0x80, 0x03, 0xdd, 0xd5, 0x3e, 0xd0, 0x4d, 0x82, 0xbb, 0xc2, 0x2a, 0xa1, 0x1e, 0xb4, + 0x00, 0x79, 0xda, 0x1a, 0xb9, 0xb8, 0x2b, 0x08, 0x29, 0xbf, 0x8d, 0x36, 0x21, 0x87, 0x0f, 0xb1, + 0x49, 0xdc, 0xea, 0x14, 0xdb, 0xf8, 0x73, 0x31, 0x11, 0x96, 0x8e, 0xaf, 0x55, 0xe9, 0x76, 0xff, + 0xfd, 0xb3, 0x25, 0x99, 0xc3, 0x9f, 0xb5, 0x86, 0x06, 0xc1, 0x43, 0x9b, 0x1c, 0xab, 0x42, 0x41, + 0xd4, 0x0c, 0xf9, 0x31, 0x33, 0xa0, 0x73, 0x30, 0xc5, 0x4e, 0xa3, 0xd1, 0x65, 0x15, 0x42, 0x41, + 0xcd, 0xd1, 0xe6, 0x66, 0x97, 0xd1, 0xa6, 0x25, 0x8f, 0x03, 0xa1, 0xd6, 0x66, 0xc7, 0xe5, 0x58, + 0x2d, 0x0f, 0xf1, 0xd0, 0xb6, 0xac, 0x81, 0xc6, 0x63, 0x58, 0x0d, 0x2a, 0xd1, 0x02, 0x02, 0x5d, + 0x82, 0xb2, 0x83, 0x89, 0x6e, 0x98, 0x5a, 0xe4, 0x8e, 0x50, 0xe2, 0x9d, 0x3c, 0x66, 0x6c, 0x65, + 0xf2, 0x92, 0x9c, 0x12, 0xb4, 0xd5, 0xdb, 0x30, 0x1f, 0x5b, 0x3f, 0xa0, 0x97, 0xa1, 0x10, 0xd4, + 0x1e, 0x12, 0xb3, 0xc3, 0x49, 0x7c, 0x54, 0x00, 0x56, 0xf6, 0x60, 0x3e, 0xb6, 0x80, 0x40, 0xaf, + 0x43, 0xce, 0xc1, 0xee, 0x68, 0xc0, 0x29, 0xa7, 0xca, 0xcd, 0xcb, 0xa7, 0x57, 0x1e, 0xa3, 0x01, + 0x51, 0x85, 0x90, 0x72, 0x03, 0xce, 0x27, 0x56, 0x10, 0x01, 0xab, 0x24, 0x85, 0x58, 0x25, 0xe5, + 0xb7, 0x12, 0x2c, 0x24, 0x57, 0x05, 0x68, 0x6d, 0x6c, 0x41, 0xd7, 0x1e, 0xb2, 0xa6, 0x08, 0xad, + 0x8a, 0x5e, 0xbb, 0x1c, 0xdc, 0xc3, 0xa4, 0xd3, 0xe7, 0xe5, 0x09, 0x8f, 0x16, 0x65, 0xb5, 0x2c, + 0x7a, 0x99, 0x8c, 0xcb, 0x61, 0xef, 0xe1, 0x0e, 0xd1, 0xf8, 0xa6, 0xba, 0xec, 0xea, 0x53, 0xa0, + 0x30, 0xda, 0xdb, 0xe2, 0x9d, 0xca, 0x75, 0x38, 0x97, 0x50, 0x67, 0x4c, 0xde, 0xcf, 0x94, 0xfb, + 0x14, 0x1c, 0x5b, 0x3c, 0xa0, 0x37, 0x21, 0xe7, 0x12, 0x9d, 0x8c, 0x5c, 0xf1, 0x65, 0x57, 0x4e, + 0xad, 0x3b, 0x5a, 0x0c, 0xae, 0x0a, 0x31, 0xe5, 0x35, 0x40, 0x93, 0x55, 0x44, 0xcc, 0x1d, 0x53, + 0x8a, 0xbb, 0x63, 0xee, 0xc3, 0x13, 0x27, 0xd4, 0x0b, 0xa8, 0x3e, 0xb6, 0xb8, 0xeb, 0x0f, 0x55, + 0x6e, 0x8c, 0x2d, 0xf0, 0xf7, 0x69, 0x98, 0x8f, 0x2d, 0x1b, 0x42, 0xc7, 0x57, 0xfa, 0xaa, 0xc7, + 0xf7, 0x75, 0x00, 0x72, 0xa4, 0xf1, 0x9d, 0xf6, 0xd2, 0x40, 0xdc, 0x5d, 0xe9, 0x08, 0x77, 0x58, + 0x24, 0xa3, 0x8e, 0x51, 0x20, 0xe2, 0x97, 0x8b, 0xda, 0xe1, 0x7b, 0xfd, 0x88, 0xa5, 0x08, 0x57, + 0x5c, 0x79, 0x1f, 0x3a, 0x99, 0x04, 0x04, 0x00, 0xef, 0x76, 0xd1, 0x7d, 0x38, 0x37, 0x96, 0xea, + 0x7c, 0xdd, 0x99, 0x87, 0xce, 0x78, 0xf3, 0xd1, 0x8c, 0xe7, 0xe9, 0x0e, 0xa7, 0xab, 0x6c, 0x24, + 0x5d, 0xd1, 0x0c, 0xcb, 0x2e, 0xc3, 0xbc, 0xd2, 0xe8, 0xe2, 0x81, 0xee, 0x3d, 0xd4, 0x9e, 0x9f, + 0xb8, 0x52, 0xdf, 0x12, 0x6f, 0xd9, 0xfc, 0x46, 0xfd, 0x33, 0x7a, 0xa3, 0xae, 0x50, 0x61, 0xb6, + 0x51, 0xb7, 0xa8, 0xa8, 0x72, 0x1f, 0x20, 0xe0, 0x0b, 0xe8, 0xf1, 0x75, 0xac, 0x91, 0xd9, 0x65, + 0x1e, 0x91, 0x55, 0x79, 0x03, 0xbd, 0x08, 0x59, 0xea, 0x58, 0x9e, 0xe5, 0x63, 0xe2, 0x0f, 0xf5, + 0x90, 0x10, 0xe1, 0xc0, 0xe1, 0xca, 0x7b, 0x9e, 0xf3, 0x86, 0xa9, 0xdb, 0x84, 0x39, 0xde, 0x88, + 0xce, 0xa1, 0x24, 0xb3, 0xc0, 0xf1, 0x73, 0x7d, 0x1f, 0xb2, 0xcc, 0x9b, 0x68, 0x16, 0x62, 0x2f, + 0x07, 0xa2, 0x08, 0xa4, 0xbf, 0xd1, 0x77, 0x01, 0x74, 0x42, 0x1c, 0x63, 0x7f, 
0x14, 0xcc, 0xb0, + 0x9c, 0xe0, 0x8e, 0x35, 0x0f, 0xb8, 0x76, 0x41, 0xf8, 0xe5, 0x5c, 0x20, 0x1b, 0xf2, 0xcd, 0x90, + 0x46, 0x65, 0x07, 0x2a, 0x51, 0xd9, 0xd3, 0x2a, 0xa9, 0x82, 0x97, 0xf2, 0xfd, 0x82, 0x21, 0xcd, + 0xdf, 0x47, 0x58, 0x43, 0xf9, 0x41, 0x0a, 0x4a, 0x61, 0x67, 0xfe, 0x2f, 0x4c, 0xca, 0xca, 0x8f, + 0x25, 0xc8, 0xfb, 0xdf, 0x1f, 0x7d, 0x25, 0x89, 0x3c, 0x2f, 0x71, 0xf3, 0xa5, 0xc2, 0x4f, 0x1b, + 0xfc, 0x31, 0x29, 0xed, 0x3f, 0x26, 0x7d, 0xc3, 0xcf, 0x2f, 0x89, 0xbc, 0x47, 0xd8, 0xda, 0xc2, + 0xb1, 0xbc, 0x7c, 0xf7, 0x1a, 0x14, 0xfc, 0x90, 0x40, 0xaf, 0x13, 0x1e, 0x9f, 0x24, 0x89, 0x73, + 0x29, 0x78, 0xa4, 0x39, 0xc8, 0xda, 0xd6, 0x07, 0xe2, 0xe1, 0x24, 0xad, 0xf2, 0x86, 0xe2, 0xc2, + 0xf4, 0x58, 0x3c, 0x09, 0x80, 0xa9, 0x10, 0x10, 0x29, 0x50, 0xb6, 0x47, 0xfb, 0xda, 0x03, 0x7c, + 0x2c, 0x9e, 0x51, 0xf8, 0xf2, 0x8b, 0xf6, 0x68, 0xff, 0x36, 0x3e, 0xe6, 0xef, 0x28, 0xcb, 0x50, + 0xf2, 0x30, 0xcc, 0xc5, 0xf9, 0x9e, 0x02, 0x87, 0xb4, 0xf9, 0x1b, 0x98, 0x24, 0xa7, 0x94, 0x9f, + 0x4a, 0x90, 0xf7, 0x4e, 0x09, 0x7a, 0x13, 0x0a, 0x7e, 0xe8, 0x12, 0xa5, 0xf8, 0x13, 0x27, 0x04, + 0x3d, 0xf1, 0xf1, 0x81, 0x0c, 0x5a, 0xf3, 0x1e, 0x73, 0x8d, 0xae, 0xd6, 0x1b, 0xe8, 0x07, 0xe2, + 0x4d, 0x6e, 0x31, 0x26, 0xba, 0xb1, 0xb8, 0xb2, 0x79, 0x6b, 0x7d, 0xa0, 0x1f, 0xa8, 0x45, 0x26, + 0xb4, 0xd9, 0xa5, 0x0d, 0x51, 0xe4, 0x7c, 0x29, 0x81, 0x3c, 0x7e, 0x8a, 0xbf, 0xfa, 0xfa, 0x26, + 0x93, 0x61, 0x3a, 0x26, 0x19, 0xa2, 0x55, 0x98, 0xf5, 0x11, 0x9a, 0x6b, 0x1c, 0x98, 0x3a, 0x19, + 0x39, 0x58, 0x30, 0x97, 0xc8, 0x1f, 0x6a, 0x79, 0x23, 0x93, 0xdf, 0x9d, 0x7d, 0xdc, 0xef, 0xfe, + 0x30, 0x05, 0xc5, 0x10, 0x91, 0x8a, 0xfe, 0x3f, 0x14, 0xa2, 0x2a, 0x71, 0x29, 0x28, 0x04, 0x0e, + 0x1e, 0x38, 0xa3, 0x96, 0x4a, 0x3d, 0x86, 0xa5, 0x92, 0x28, 0x6b, 0x8f, 0x99, 0xcd, 0x3c, 0x32, + 0x33, 0xfb, 0x2c, 0x20, 0x62, 0x11, 0x7d, 0xa0, 0x1d, 0x5a, 0xc4, 0x30, 0x0f, 0x34, 0xee, 0xd8, + 0x3c, 0xa2, 0xc8, 0x6c, 0x64, 0x8f, 0x0d, 0x34, 0xd9, 0x61, 0xf8, 0xa1, 0x04, 0x79, 0x9f, 0xb5, + 0x7a, 0xd4, 0x87, 0xcf, 0xb3, 0x90, 0x13, 0x85, 0x1d, 0x7f, 0xf9, 0x14, 0xad, 0x58, 0x0a, 0x7a, + 0x01, 0xf2, 0x43, 0x4c, 0x74, 0x16, 0x1e, 0x79, 0xfa, 0xf4, 0xdb, 0xd7, 0xf6, 0xa1, 0x18, 0x7a, + 0x3b, 0x46, 0xe7, 0x61, 0xbe, 0xbe, 0xd1, 0xa8, 0xdf, 0xd6, 0xda, 0xef, 0x68, 0xed, 0x7b, 0xcd, + 0x86, 0x76, 0x77, 0xe7, 0xf6, 0xce, 0xee, 0x37, 0x77, 0xe4, 0x33, 0x93, 0x43, 0x6a, 0x83, 0xb5, + 0x65, 0x09, 0x9d, 0x83, 0xd9, 0xe8, 0x10, 0x1f, 0x48, 0x2d, 0x64, 0x7e, 0xf2, 0xeb, 0xc5, 0x33, + 0xd7, 0xbe, 0x94, 0x60, 0x36, 0xa6, 0x84, 0x46, 0x17, 0xe1, 0xc9, 0xdd, 0xf5, 0xf5, 0x86, 0xaa, + 0xb5, 0x76, 0x6a, 0xcd, 0xd6, 0xc6, 0x6e, 0x5b, 0x53, 0x1b, 0xad, 0xbb, 0xdb, 0xed, 0xd0, 0xa4, + 0xcb, 0x70, 0x21, 0x1e, 0x52, 0xab, 0xd7, 0x1b, 0xcd, 0xb6, 0x2c, 0xa1, 0x25, 0x78, 0x22, 0x01, + 0xb1, 0xb6, 0xab, 0xb6, 0xe5, 0x54, 0xb2, 0x0a, 0xb5, 0xb1, 0xd5, 0xa8, 0xb7, 0xe5, 0x34, 0xba, + 0x02, 0x97, 0x4e, 0x42, 0x68, 0xeb, 0xbb, 0xea, 0x9d, 0x5a, 0x5b, 0xce, 0x9c, 0x0a, 0x6c, 0x35, + 0x76, 0x6e, 0x35, 0x54, 0x39, 0x2b, 0xbe, 0xfb, 0x57, 0x29, 0xa8, 0x26, 0x55, 0xea, 0x54, 0x57, + 0xad, 0xd9, 0xdc, 0xbe, 0x17, 0xe8, 0xaa, 0x6f, 0xdc, 0xdd, 0xb9, 0x3d, 0x69, 0x82, 0xa7, 0x41, + 0x39, 0x09, 0xe8, 0x1b, 0xe2, 0x32, 0x5c, 0x3c, 0x11, 0x27, 0xcc, 0x71, 0x0a, 0x4c, 0x6d, 0xb4, + 0xd5, 0x7b, 0x72, 0x1a, 0xad, 0xc0, 0xb5, 0x53, 0x61, 0xfe, 0x98, 0x9c, 0x41, 0xab, 0x70, 0xfd, + 0x64, 0x3c, 0x37, 0x90, 0x27, 0xe0, 0x99, 0xe8, 0x23, 0x09, 0xe6, 0x63, 0x4b, 0x7e, 0x74, 0x09, + 0x96, 0x9a, 0xea, 0x6e, 0xbd, 0xd1, 0x6a, 0x69, 0x4d, 0x75, 0xb7, 0xb9, 0xdb, 0xaa, 0x6d, 0x6b, + 0xad, 
0x76, 0xad, 0x7d, 0xb7, 0x15, 0xb2, 0x8d, 0x02, 0x8b, 0x49, 0x20, 0xdf, 0x2e, 0x27, 0x60, + 0x84, 0x07, 0x78, 0x7e, 0xfa, 0x4b, 0x09, 0xce, 0x27, 0x96, 0xf8, 0xe8, 0x2a, 0x3c, 0xb5, 0xd7, + 0x50, 0x37, 0xd7, 0xef, 0x69, 0x7b, 0xbb, 0xed, 0x86, 0xd6, 0x78, 0xa7, 0xdd, 0xd8, 0x69, 0x6d, + 0xee, 0xee, 0x4c, 0xae, 0xea, 0x0a, 0x5c, 0x3a, 0x11, 0xe9, 0x2f, 0xed, 0x34, 0xe0, 0xd8, 0xfa, + 0x7e, 0x24, 0xc1, 0xf4, 0x58, 0x2c, 0x44, 0x17, 0xa0, 0x7a, 0x67, 0xb3, 0xb5, 0xd6, 0xd8, 0xa8, + 0xed, 0x6d, 0xee, 0xaa, 0xe3, 0x67, 0xf6, 0x12, 0x2c, 0x4d, 0x8c, 0xde, 0xba, 0xdb, 0xdc, 0xde, + 0xac, 0xd7, 0xda, 0x0d, 0x36, 0xa9, 0x2c, 0xd1, 0x0f, 0x9b, 0x00, 0x6d, 0x6f, 0xbe, 0xb5, 0xd1, + 0xd6, 0xea, 0xdb, 0x9b, 0x8d, 0x9d, 0xb6, 0x56, 0x6b, 0xb7, 0x6b, 0xc1, 0x71, 0x5e, 0xbb, 0xfd, + 0xc9, 0xe7, 0x8b, 0xd2, 0xa7, 0x9f, 0x2f, 0x4a, 0x7f, 0xfb, 0x7c, 0x51, 0xfa, 0xf8, 0x8b, 0xc5, + 0x33, 0x9f, 0x7e, 0xb1, 0x78, 0xe6, 0x2f, 0x5f, 0x2c, 0x9e, 0xb9, 0x7f, 0xe3, 0xc0, 0x20, 0xfd, + 0xd1, 0x3e, 0x8d, 0xc2, 0xab, 0xc1, 0x5f, 0x5c, 0xfd, 0xff, 0xc6, 0xda, 0xc6, 0xea, 0xf8, 0x1f, + 0x65, 0xf7, 0x73, 0x2c, 0xac, 0x3e, 0xff, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x2b, 0x8f, + 0x43, 0x43, 0x2b, 0x00, 0x00, } func (m *Request) Marshal() (dAtA []byte, err error) { @@ -4863,7 +4388,7 @@ func (m *Request_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *RequestEcho) Marshal() (dAtA []byte, err error) { +func (m *EchoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4873,12 +4398,12 @@ func (m *RequestEcho) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { +func (m *EchoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EchoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4893,7 +4418,7 @@ func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestFlush) Marshal() (dAtA []byte, err error) { +func (m *FlushRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4903,12 +4428,12 @@ func (m *RequestFlush) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestFlush) MarshalTo(dAtA []byte) (int, error) { +func (m *FlushRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *FlushRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4916,7 +4441,7 @@ func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestInfo) Marshal() (dAtA []byte, err error) { +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4926,12 +4451,12 @@ func (m *RequestInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4963,7 +4488,7 @@ func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { +func (m *InitChainRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4973,12 +4498,12 @@ func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { +func (m *InitChainRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InitChainRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5039,7 +4564,7 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestQuery) Marshal() (dAtA []byte, err error) { +func (m *QueryRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5049,12 +4574,12 @@ func (m *RequestQuery) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestQuery) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5091,7 +4616,7 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { +func (m *CheckTxRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5101,12 +4626,12 @@ func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { +func (m *CheckTxRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CheckTxRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5114,7 +4639,7 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.Type != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Type)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x18 } if len(m.Tx) > 0 { i -= len(m.Tx) @@ -5126,7 +4651,7 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestCommit) Marshal() (dAtA []byte, err error) { +func (m *CommitRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5136,12 +4661,12 @@ func (m *RequestCommit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { +func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CommitRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5149,7 +4674,7 @@ func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { +func (m *ListSnapshotsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5159,12 +4684,12 @@ func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { +func (m *ListSnapshotsRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ListSnapshotsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5172,7 +4697,7 @@ func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { +func (m *OfferSnapshotRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5182,12 +4707,12 @@ func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { +func (m *OfferSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *OfferSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5214,7 +4739,7 @@ func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { +func (m *LoadSnapshotChunkRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5224,12 +4749,12 @@ func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { +func (m *LoadSnapshotChunkRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LoadSnapshotChunkRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5252,7 +4777,7 @@ func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { +func (m *ApplySnapshotChunkRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5262,12 +4787,12 @@ func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { +func (m *ApplySnapshotChunkRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ApplySnapshotChunkRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5294,7 +4819,7 @@ func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { +func (m *PrepareProposalRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5304,12 +4829,12 @@ func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *PrepareProposalRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PrepareProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5382,7 +4907,7 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { +func (m *ProcessProposalRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5392,12 +4917,12 @@ func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *ProcessProposalRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ProcessProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5472,7 +4997,7 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RequestExtendVote) Marshal() (dAtA []byte, err error) { +func (m *ExtendVoteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5482,12 +5007,12 @@ func (m *RequestExtendVote) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestExtendVote) MarshalTo(dAtA []byte) (int, error) { +func (m *ExtendVoteRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExtendVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5562,7 +5087,7 @@ func (m *RequestExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestVerifyVoteExtension) Marshal() (dAtA []byte, err error) { +func (m *VerifyVoteExtensionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5572,12 +5097,12 @@ func (m *RequestVerifyVoteExtension) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { +func (m *VerifyVoteExtensionRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *VerifyVoteExtensionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ 
= i var l int @@ -5611,7 +5136,7 @@ func (m *RequestVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { +func (m *FinalizeBlockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5621,16 +5146,21 @@ func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *FinalizeBlockRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *FinalizeBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.SyncingToHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SyncingToHeight)) + i-- + dAtA[i] = 0x48 + } if len(m.ProposerAddress) > 0 { i -= len(m.ProposerAddress) copy(dAtA[i:], m.ProposerAddress) @@ -6102,7 +5632,7 @@ func (m *Response_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) } return len(dAtA) - i, nil } -func (m *ResponseException) Marshal() (dAtA []byte, err error) { +func (m *ExceptionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6112,12 +5642,12 @@ func (m *ResponseException) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseException) MarshalTo(dAtA []byte) (int, error) { +func (m *ExceptionResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExceptionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6132,7 +5662,7 @@ func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { +func (m *EchoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6142,12 +5672,12 @@ func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseEcho) MarshalTo(dAtA []byte) (int, error) { +func (m *EchoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EchoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6162,7 +5692,7 @@ func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { +func (m *FlushResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6172,12 +5702,12 @@ func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseFlush) MarshalTo(dAtA []byte) (int, error) { +func (m *FlushResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *FlushResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i 
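// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the generated diff): the tag
// bytes that change in the hunks above follow directly from the protobuf wire
// format, where a field's key byte is (field_number << 3) | wire_type.
// CheckTxRequest.Type moved from field 2 to field 3, so its varint key byte
// changes from 0x10 to 0x18; the new FinalizeBlockRequest.SyncingToHeight is
// varint field 9, hence 0x48. A minimal, runnable sanity check of that
// arithmetic (stand-alone sketch, not code from this diff):

package main

import "fmt"

// key returns the protobuf key byte for a field number and wire type
// (0 = varint, 2 = length-delimited); valid for field numbers below 16.
func key(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }

func main() {
	fmt.Printf("CheckTxRequest.Type:    0x%x\n", key(3, 0))  // 0x18
	fmt.Printf("SyncingToHeight:        0x%x\n", key(9, 0))  // 0x48
	fmt.Printf("CheckTxResponse.LaneId: 0x%x\n", key(12, 2)) // 0x62
}

// ---------------------------------------------------------------------------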
var l int @@ -6185,7 +5715,7 @@ func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6195,16 +5725,40 @@ func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.DefaultLane) > 0 { + i -= len(m.DefaultLane) + copy(dAtA[i:], m.DefaultLane) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DefaultLane))) + i-- + dAtA[i] = 0x3a + } + if len(m.LanePriorities) > 0 { + for k := range m.LanePriorities { + v := m.LanePriorities[k] + baseI := i + i = encodeVarintTypes(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTypes(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } if len(m.LastBlockAppHash) > 0 { i -= len(m.LastBlockAppHash) copy(dAtA[i:], m.LastBlockAppHash) @@ -6239,7 +5793,7 @@ func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { +func (m *InitChainResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6249,12 +5803,12 @@ func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { +func (m *InitChainResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InitChainResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6295,7 +5849,7 @@ func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { +func (m *QueryResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6305,12 +5859,12 @@ func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6380,7 +5934,7 @@ func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { +func (m *CheckTxResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6390,16 +5944,23 @@ func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { return dAtA[:n], 
nil } -func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { +func (m *CheckTxResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CheckTxResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.LaneId) > 0 { + i -= len(m.LaneId) + copy(dAtA[i:], m.LaneId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LaneId))) + i-- + dAtA[i] = 0x62 + } if len(m.Codespace) > 0 { i -= len(m.Codespace) copy(dAtA[i:], m.Codespace) @@ -6460,7 +6021,7 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { +func (m *CommitResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6470,12 +6031,12 @@ func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { +func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6488,7 +6049,7 @@ func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { +func (m *ListSnapshotsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6498,12 +6059,12 @@ func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { +func (m *ListSnapshotsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ListSnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6525,7 +6086,7 @@ func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { +func (m *OfferSnapshotResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6535,12 +6096,12 @@ func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { +func (m *OfferSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *OfferSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6553,7 +6114,7 @@ func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { +func (m *LoadSnapshotChunkResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ 
-6563,12 +6124,12 @@ func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { +func (m *LoadSnapshotChunkResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LoadSnapshotChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6583,7 +6144,7 @@ func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { +func (m *ApplySnapshotChunkResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6593,12 +6154,12 @@ func (m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { +func (m *ApplySnapshotChunkResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ApplySnapshotChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6638,7 +6199,7 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { +func (m *PrepareProposalResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6648,12 +6209,12 @@ func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *PrepareProposalResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PrepareProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6670,7 +6231,7 @@ func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *ResponseProcessProposal) Marshal() (dAtA []byte, err error) { +func (m *ProcessProposalResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6680,12 +6241,12 @@ func (m *ResponseProcessProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseProcessProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *ProcessProposalResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ProcessProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6698,7 +6259,7 @@ func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { +func (m *ExtendVoteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) @@ -6708,12 +6269,12 @@ func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseExtendVote) MarshalTo(dAtA []byte) (int, error) { +func (m *ExtendVoteResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExtendVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6728,7 +6289,7 @@ func (m *ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { +func (m *VerifyVoteExtensionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6738,12 +6299,12 @@ func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { +func (m *VerifyVoteExtensionResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *VerifyVoteExtensionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6756,7 +6317,7 @@ func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *ResponseFinalizeBlock) Marshal() (dAtA []byte, err error) { +func (m *FinalizeBlockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6766,16 +6327,24 @@ func (m *ResponseFinalizeBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *FinalizeBlockResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *FinalizeBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + n49, err49 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.NextBlockDelay, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.NextBlockDelay):]) + if err49 != nil { + return 0, err49 + } + i -= n49 + i = encodeVarintTypes(dAtA, i, uint64(n49)) + i-- + dAtA[i] = 0x32 if len(m.AppHash) > 0 { i -= len(m.AppHash) copy(dAtA[i:], m.AppHash) @@ -7200,21 +6769,25 @@ func (m *ValidatorUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.PubKeyType) > 0 { + i -= len(m.PubKeyType) + copy(dAtA[i:], m.PubKeyType) + i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKeyType))) + i-- + dAtA[i] = 0x22 + } + if len(m.PubKeyBytes) > 0 { + i -= len(m.PubKeyBytes) + copy(dAtA[i:], m.PubKeyBytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKeyBytes))) + i-- + dAtA[i] = 0x1a + } if m.Power != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Power)) i-- dAtA[i] = 0x10 } - { - size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -7631,7 +7204,7 @@ func (m *Request_FinalizeBlock) Size() (n int) { } return n } -func (m 
*RequestEcho) Size() (n int) { +func (m *EchoRequest) Size() (n int) { if m == nil { return 0 } @@ -7644,7 +7217,7 @@ func (m *RequestEcho) Size() (n int) { return n } -func (m *RequestFlush) Size() (n int) { +func (m *FlushRequest) Size() (n int) { if m == nil { return 0 } @@ -7653,7 +7226,7 @@ func (m *RequestFlush) Size() (n int) { return n } -func (m *RequestInfo) Size() (n int) { +func (m *InfoRequest) Size() (n int) { if m == nil { return 0 } @@ -7676,7 +7249,7 @@ func (m *RequestInfo) Size() (n int) { return n } -func (m *RequestInitChain) Size() (n int) { +func (m *InitChainRequest) Size() (n int) { if m == nil { return 0 } @@ -7708,7 +7281,7 @@ func (m *RequestInitChain) Size() (n int) { return n } -func (m *RequestQuery) Size() (n int) { +func (m *QueryRequest) Size() (n int) { if m == nil { return 0 } @@ -7731,7 +7304,7 @@ func (m *RequestQuery) Size() (n int) { return n } -func (m *RequestCheckTx) Size() (n int) { +func (m *CheckTxRequest) Size() (n int) { if m == nil { return 0 } @@ -7747,7 +7320,7 @@ func (m *RequestCheckTx) Size() (n int) { return n } -func (m *RequestCommit) Size() (n int) { +func (m *CommitRequest) Size() (n int) { if m == nil { return 0 } @@ -7756,7 +7329,7 @@ func (m *RequestCommit) Size() (n int) { return n } -func (m *RequestListSnapshots) Size() (n int) { +func (m *ListSnapshotsRequest) Size() (n int) { if m == nil { return 0 } @@ -7765,7 +7338,7 @@ func (m *RequestListSnapshots) Size() (n int) { return n } -func (m *RequestOfferSnapshot) Size() (n int) { +func (m *OfferSnapshotRequest) Size() (n int) { if m == nil { return 0 } @@ -7782,7 +7355,7 @@ func (m *RequestOfferSnapshot) Size() (n int) { return n } -func (m *RequestLoadSnapshotChunk) Size() (n int) { +func (m *LoadSnapshotChunkRequest) Size() (n int) { if m == nil { return 0 } @@ -7800,7 +7373,7 @@ func (m *RequestLoadSnapshotChunk) Size() (n int) { return n } -func (m *RequestApplySnapshotChunk) Size() (n int) { +func (m *ApplySnapshotChunkRequest) Size() (n int) { if m == nil { return 0 } @@ -7820,7 +7393,7 @@ func (m *RequestApplySnapshotChunk) Size() (n int) { return n } -func (m *RequestPrepareProposal) Size() (n int) { +func (m *PrepareProposalRequest) Size() (n int) { if m == nil { return 0 } @@ -7859,7 +7432,7 @@ func (m *RequestPrepareProposal) Size() (n int) { return n } -func (m *RequestProcessProposal) Size() (n int) { +func (m *ProcessProposalRequest) Size() (n int) { if m == nil { return 0 } @@ -7899,7 +7472,7 @@ func (m *RequestProcessProposal) Size() (n int) { return n } -func (m *RequestExtendVote) Size() (n int) { +func (m *ExtendVoteRequest) Size() (n int) { if m == nil { return 0 } @@ -7939,7 +7512,7 @@ func (m *RequestExtendVote) Size() (n int) { return n } -func (m *RequestVerifyVoteExtension) Size() (n int) { +func (m *VerifyVoteExtensionRequest) Size() (n int) { if m == nil { return 0 } @@ -7963,7 +7536,7 @@ func (m *RequestVerifyVoteExtension) Size() (n int) { return n } -func (m *RequestFinalizeBlock) Size() (n int) { +func (m *FinalizeBlockRequest) Size() (n int) { if m == nil { return 0 } @@ -8000,6 +7573,9 @@ func (m *RequestFinalizeBlock) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.SyncingToHeight != 0 { + n += 1 + sovTypes(uint64(m.SyncingToHeight)) + } return n } @@ -8219,7 +7795,7 @@ func (m *Response_FinalizeBlock) Size() (n int) { } return n } -func (m *ResponseException) Size() (n int) { +func (m *ExceptionResponse) Size() (n int) { if m == nil { return 0 } @@ -8232,7 +7808,7 @@ func (m *ResponseException) Size() (n int) { return n 
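// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the generated diff): the Size()
// methods above charge `1 + sovTypes(uint64(v))` per varint field — one key
// byte plus the varint payload. A stand-alone equivalent of gogoproto's size
// helper, assuming the usual 7-bits-per-byte base-128 encoding:

package main

import (
	"fmt"
	"math/bits"
)

// sovVarint returns how many bytes the varint encoding of x occupies.
// The x|1 guards the x == 0 case, which still takes one byte on the wire.
func sovVarint(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	fmt.Println(sovVarint(0))     // 1
	fmt.Println(sovVarint(300))   // 2
	fmt.Println(sovVarint(1<<63)) // 10
}

// ---------------------------------------------------------------------------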
} -func (m *ResponseEcho) Size() (n int) { +func (m *EchoResponse) Size() (n int) { if m == nil { return 0 } @@ -8245,7 +7821,7 @@ func (m *ResponseEcho) Size() (n int) { return n } -func (m *ResponseFlush) Size() (n int) { +func (m *FlushResponse) Size() (n int) { if m == nil { return 0 } @@ -8254,7 +7830,7 @@ func (m *ResponseFlush) Size() (n int) { return n } -func (m *ResponseInfo) Size() (n int) { +func (m *InfoResponse) Size() (n int) { if m == nil { return 0 } @@ -8278,10 +7854,22 @@ func (m *ResponseInfo) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if len(m.LanePriorities) > 0 { + for k, v := range m.LanePriorities { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + sovTypes(uint64(v)) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + l = len(m.DefaultLane) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } -func (m *ResponseInitChain) Size() (n int) { +func (m *InitChainResponse) Size() (n int) { if m == nil { return 0 } @@ -8304,7 +7892,7 @@ func (m *ResponseInitChain) Size() (n int) { return n } -func (m *ResponseQuery) Size() (n int) { +func (m *QueryResponse) Size() (n int) { if m == nil { return 0 } @@ -8346,7 +7934,7 @@ func (m *ResponseQuery) Size() (n int) { return n } -func (m *ResponseCheckTx) Size() (n int) { +func (m *CheckTxResponse) Size() (n int) { if m == nil { return 0 } @@ -8383,10 +7971,14 @@ func (m *ResponseCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = len(m.LaneId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } -func (m *ResponseCommit) Size() (n int) { +func (m *CommitResponse) Size() (n int) { if m == nil { return 0 } @@ -8398,7 +7990,7 @@ func (m *ResponseCommit) Size() (n int) { return n } -func (m *ResponseListSnapshots) Size() (n int) { +func (m *ListSnapshotsResponse) Size() (n int) { if m == nil { return 0 } @@ -8413,7 +8005,7 @@ func (m *ResponseListSnapshots) Size() (n int) { return n } -func (m *ResponseOfferSnapshot) Size() (n int) { +func (m *OfferSnapshotResponse) Size() (n int) { if m == nil { return 0 } @@ -8425,7 +8017,7 @@ func (m *ResponseOfferSnapshot) Size() (n int) { return n } -func (m *ResponseLoadSnapshotChunk) Size() (n int) { +func (m *LoadSnapshotChunkResponse) Size() (n int) { if m == nil { return 0 } @@ -8438,7 +8030,7 @@ func (m *ResponseLoadSnapshotChunk) Size() (n int) { return n } -func (m *ResponseApplySnapshotChunk) Size() (n int) { +func (m *ApplySnapshotChunkResponse) Size() (n int) { if m == nil { return 0 } @@ -8463,7 +8055,7 @@ func (m *ResponseApplySnapshotChunk) Size() (n int) { return n } -func (m *ResponsePrepareProposal) Size() (n int) { +func (m *PrepareProposalResponse) Size() (n int) { if m == nil { return 0 } @@ -8478,7 +8070,7 @@ func (m *ResponsePrepareProposal) Size() (n int) { return n } -func (m *ResponseProcessProposal) Size() (n int) { +func (m *ProcessProposalResponse) Size() (n int) { if m == nil { return 0 } @@ -8490,7 +8082,7 @@ func (m *ResponseProcessProposal) Size() (n int) { return n } -func (m *ResponseExtendVote) Size() (n int) { +func (m *ExtendVoteResponse) Size() (n int) { if m == nil { return 0 } @@ -8503,7 +8095,7 @@ func (m *ResponseExtendVote) Size() (n int) { return n } -func (m *ResponseVerifyVoteExtension) Size() (n int) { +func (m *VerifyVoteExtensionResponse) Size() (n int) { if m == nil { return 0 } @@ -8515,7 +8107,7 @@ func (m *ResponseVerifyVoteExtension) Size() (n int) { return n } -func (m *ResponseFinalizeBlock) Size() (n int) { +func (m 
*FinalizeBlockResponse) Size() (n int) { if m == nil { return 0 } @@ -8547,6 +8139,8 @@ func (m *ResponseFinalizeBlock) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.NextBlockDelay) + n += 1 + l + sovTypes(uint64(l)) return n } @@ -8708,11 +8302,17 @@ func (m *ValidatorUpdate) Size() (n int) { } var l int _ = l - l = m.PubKey.Size() - n += 1 + l + sovTypes(uint64(l)) if m.Power != 0 { n += 1 + sovTypes(uint64(m.Power)) } + l = len(m.PubKeyBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.PubKeyType) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -8864,7 +8464,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestEcho{} + v := &EchoRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -8899,7 +8499,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestFlush{} + v := &FlushRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -8934,7 +8534,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestInfo{} + v := &InfoRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -8969,7 +8569,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestInitChain{} + v := &InitChainRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9004,7 +8604,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestQuery{} + v := &QueryRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9039,7 +8639,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCheckTx{} + v := &CheckTxRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9074,7 +8674,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCommit{} + v := &CommitRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9109,7 +8709,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestListSnapshots{} + v := &ListSnapshotsRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9144,7 +8744,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestOfferSnapshot{} + v := &OfferSnapshotRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9179,7 +8779,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestLoadSnapshotChunk{} + v := &LoadSnapshotChunkRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9214,7 +8814,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestApplySnapshotChunk{} + v := &ApplySnapshotChunkRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9249,7 +8849,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestPrepareProposal{} + v := &PrepareProposalRequest{} if 
err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9284,7 +8884,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestProcessProposal{} + v := &ProcessProposalRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9319,7 +8919,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestExtendVote{} + v := &ExtendVoteRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9354,7 +8954,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestVerifyVoteExtension{} + v := &VerifyVoteExtensionRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9389,7 +8989,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestFinalizeBlock{} + v := &FinalizeBlockRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9416,7 +9016,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestEcho) Unmarshal(dAtA []byte) error { +func (m *EchoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9439,10 +9039,10 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestEcho: wiretype end group for non-group") + return fmt.Errorf("proto: EchoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEcho: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EchoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9498,7 +9098,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestFlush) Unmarshal(dAtA []byte) error { +func (m *FlushRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9521,10 +9121,10 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestFlush: wiretype end group for non-group") + return fmt.Errorf("proto: FlushRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestFlush: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FlushRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9548,7 +9148,7 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestInfo) Unmarshal(dAtA []byte) error { +func (m *InfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9571,10 +9171,10 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9700,7 +9300,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestInitChain) Unmarshal(dAtA []byte) 
error { +func (m *InitChainRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9723,10 +9323,10 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + return fmt.Errorf("proto: InitChainRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitChainRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9824,7 +9424,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParams == nil { - m.ConsensusParams = &types1.ConsensusParams{} + m.ConsensusParams = &v1.ConsensusParams{} } if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9938,7 +9538,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestQuery) Unmarshal(dAtA []byte) error { +func (m *QueryRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9961,10 +9561,10 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10093,7 +9693,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *CheckTxRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10116,10 +9716,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: CheckTxRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CheckTxRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10156,7 +9756,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } @@ -10196,7 +9796,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCommit) Unmarshal(dAtA []byte) error { +func (m *CommitRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10219,10 +9819,10 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCommit: wiretype end group for non-group") + return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCommit: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, 
wire) } switch fieldNum { default: @@ -10246,7 +9846,7 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { +func (m *ListSnapshotsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10269,10 +9869,10 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestListSnapshots: wiretype end group for non-group") + return fmt.Errorf("proto: ListSnapshotsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListSnapshotsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -10296,7 +9896,7 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { +func (m *OfferSnapshotRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10319,10 +9919,10 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestOfferSnapshot: wiretype end group for non-group") + return fmt.Errorf("proto: OfferSnapshotRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OfferSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10416,7 +10016,7 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *LoadSnapshotChunkRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10439,10 +10039,10 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestLoadSnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: LoadSnapshotChunkRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LoadSnapshotChunkRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10523,7 +10123,7 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *ApplySnapshotChunkRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10546,10 +10146,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestApplySnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySnapshotChunkRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySnapshotChunkRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10658,7 +10258,7 @@ func (m *RequestApplySnapshotChunk) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { +func (m *PrepareProposalRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10681,10 +10281,10 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") + return fmt.Errorf("proto: PrepareProposalRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrepareProposalRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10946,7 +10546,7 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { +func (m *ProcessProposalRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10969,10 +10569,10 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestProcessProposal: wiretype end group for non-group") + return fmt.Errorf("proto: ProcessProposalRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ProcessProposalRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11249,7 +10849,7 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { +func (m *ExtendVoteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11272,10 +10872,10 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestExtendVote: wiretype end group for non-group") + return fmt.Errorf("proto: ExtendVoteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExtendVoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11552,7 +11152,7 @@ func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { +func (m *VerifyVoteExtensionRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11575,10 +11175,10 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestVerifyVoteExtension: wiretype end group for non-group") + return fmt.Errorf("proto: VerifyVoteExtensionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VerifyVoteExtensionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11723,7 +11323,7 @@ func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error 
{ +func (m *FinalizeBlockRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11746,10 +11346,10 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestFinalizeBlock: wiretype end group for non-group") + return fmt.Errorf("proto: FinalizeBlockRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FinalizeBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12005,6 +11605,25 @@ func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { m.ProposerAddress = []byte{} } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SyncingToHeight", wireType) + } + m.SyncingToHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SyncingToHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12084,7 +11703,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseException{} + v := &ExceptionResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12119,7 +11738,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEcho{} + v := &EchoResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12154,7 +11773,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseFlush{} + v := &FlushResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12189,7 +11808,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInfo{} + v := &InfoResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12224,7 +11843,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInitChain{} + v := &InitChainResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12259,7 +11878,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseQuery{} + v := &QueryResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12294,7 +11913,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &CheckTxResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12329,7 +11948,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCommit{} + v := &CommitResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12364,7 +11983,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseListSnapshots{} + v := &ListSnapshotsResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12399,7 +12018,7 @@ func (m *Response) Unmarshal(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseOfferSnapshot{} + v := &OfferSnapshotResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12434,7 +12053,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseLoadSnapshotChunk{} + v := &LoadSnapshotChunkResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12469,7 +12088,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseApplySnapshotChunk{} + v := &ApplySnapshotChunkResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12504,7 +12123,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponsePrepareProposal{} + v := &PrepareProposalResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12539,7 +12158,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseProcessProposal{} + v := &ProcessProposalResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12574,7 +12193,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseExtendVote{} + v := &ExtendVoteResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12609,7 +12228,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseVerifyVoteExtension{} + v := &VerifyVoteExtensionResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12644,7 +12263,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseFinalizeBlock{} + v := &FinalizeBlockResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12671,7 +12290,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseException) Unmarshal(dAtA []byte) error { +func (m *ExceptionResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12694,10 +12313,10 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseException: wiretype end group for non-group") + return fmt.Errorf("proto: ExceptionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseException: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExceptionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12753,7 +12372,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEcho) Unmarshal(dAtA []byte) error { +func (m *EchoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12776,10 +12395,10 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseEcho: wiretype end group for non-group") + return fmt.Errorf("proto: EchoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEcho: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
EchoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12835,7 +12454,7 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseFlush) Unmarshal(dAtA []byte) error { +func (m *FlushResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12858,10 +12477,10 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseFlush: wiretype end group for non-group") + return fmt.Errorf("proto: FlushResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseFlush: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FlushResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -12885,7 +12504,7 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseInfo) Unmarshal(dAtA []byte) error { +func (m *InfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12908,10 +12527,10 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseInfo: wiretype end group for non-group") + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13050,6 +12669,151 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { m.LastBlockAppHash = []byte{} } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanePriorities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LanePriorities == nil { + m.LanePriorities = make(map[string]uint32) + } + var mapkey string + var mapvalue uint32 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTypes + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LanePriorities[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultLane", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultLane = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -13071,7 +12835,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { +func (m *InitChainResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13094,10 +12858,10 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + return fmt.Errorf("proto: InitChainResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitChainResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13130,7 +12894,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParams == nil { - m.ConsensusParams = &types1.ConsensusParams{} + m.ConsensusParams = &v1.ConsensusParams{} } if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13225,7 +12989,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseQuery) Unmarshal(dAtA []byte) error { +func (m *QueryResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13248,10 +13012,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseQuery: wiretype end group for non-group") + return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseQuery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13454,7 +13218,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProofOps == nil { - m.ProofOps = &crypto.ProofOps{} + m.ProofOps = &v11.ProofOps{} } if err := m.ProofOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13532,7 +13296,7 @@ func 
(m *ResponseQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { +func (m *CheckTxResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13555,10 +13319,10 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: CheckTxResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CheckTxResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13782,6 +13546,38 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LaneId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LaneId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -13803,7 +13599,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseCommit) Unmarshal(dAtA []byte) error { +func (m *CommitResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13826,10 +13622,10 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") + return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 3: @@ -13872,7 +13668,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { +func (m *ListSnapshotsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13895,10 +13691,10 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseListSnapshots: wiretype end group for non-group") + return fmt.Errorf("proto: ListSnapshotsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListSnapshotsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13956,7 +13752,7 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { +func (m *OfferSnapshotResponse) Unmarshal(dAtA []byte) error { l := 
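The case 12 hunk above adds a string LaneId to CheckTxResponse, complementing the LanePriorities map and DefaultLane string decoded earlier in the info response. A hedged sketch of how an application might route transactions to lanes, assuming the v1 Go application interface (context plus request/response pointers) and an abci import of "github.com/cometbft/cometbft/abci/types"; App, isPriority, and the "fast" lane name are hypothetical:

type App struct{}

// isPriority is a stand-in for whatever classification the application does.
func isPriority(tx []byte) bool { return len(tx) > 0 && tx[0] == 'p' }

func (app *App) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*abci.CheckTxResponse, error) {
	// Lane names must match keys the application advertised in its
	// info response's LanePriorities map.
	lane := "default"
	if isPriority(req.Tx) {
		lane = "fast"
	}
	return &abci.CheckTxResponse{Code: 0, LaneId: lane}, nil
}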
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13979,10 +13775,10 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseOfferSnapshot: wiretype end group for non-group") + return fmt.Errorf("proto: OfferSnapshotResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OfferSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13999,7 +13795,7 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift + m.Result |= OfferSnapshotResult(b&0x7F) << shift if b < 0x80 { break } @@ -14025,7 +13821,7 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *LoadSnapshotChunkResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14048,10 +13844,10 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseLoadSnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: LoadSnapshotChunkResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LoadSnapshotChunkResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14109,7 +13905,7 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *ApplySnapshotChunkResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14132,10 +13928,10 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseApplySnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySnapshotChunkResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySnapshotChunkResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14152,7 +13948,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= ResponseApplySnapshotChunk_Result(b&0x7F) << shift + m.Result |= ApplySnapshotChunkResult(b&0x7F) << shift if b < 0x80 { break } @@ -14286,7 +14082,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { +func (m *PrepareProposalResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14309,10 +14105,10 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") + return fmt.Errorf("proto: PrepareProposalResponse: wiretype end group for non-group") } if fieldNum <= 0 { 
- return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrepareProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14368,7 +14164,7 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { +func (m *ProcessProposalResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14391,10 +14187,10 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseProcessProposal: wiretype end group for non-group") + return fmt.Errorf("proto: ProcessProposalResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ProcessProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14411,7 +14207,7 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= ResponseProcessProposal_ProposalStatus(b&0x7F) << shift + m.Status |= ProcessProposalStatus(b&0x7F) << shift if b < 0x80 { break } @@ -14437,7 +14233,7 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { +func (m *ExtendVoteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14460,10 +14256,10 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseExtendVote: wiretype end group for non-group") + return fmt.Errorf("proto: ExtendVoteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExtendVoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14521,7 +14317,7 @@ func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { +func (m *VerifyVoteExtensionResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14544,10 +14340,10 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseVerifyVoteExtension: wiretype end group for non-group") + return fmt.Errorf("proto: VerifyVoteExtensionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VerifyVoteExtensionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14564,7 +14360,7 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= ResponseVerifyVoteExtension_VerifyStatus(b&0x7F) << shift + m.Status |= VerifyVoteExtensionStatus(b&0x7F) << shift if b < 0x80 { break } @@ -14590,7 +14386,7 @@ func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { +func (m 
*FinalizeBlockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14613,10 +14409,10 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseFinalizeBlock: wiretype end group for non-group") + return fmt.Errorf("proto: FinalizeBlockResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FinalizeBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14751,7 +14547,7 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} + m.ConsensusParamUpdates = &v1.ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -14791,6 +14587,39 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextBlockDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.NextBlockDelay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15826,11 +15655,30 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ValidatorUpdate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyBytes", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15840,30 +15688,31 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.PubKeyBytes = append(m.PubKeyBytes[:0], dAtA[iNdEx:postIndex]...) 
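The case 6 hunk above decodes a new NextBlockDelay field into FinalizeBlockResponse using gogoproto's StdDurationUnmarshal, i.e. the wire value is a protobuf Duration mapped onto Go's time.Duration. A sketch of an application populating it, assuming abci = "github.com/cometbft/cometbft/abci/types" and that the field tells the node how long to wait before starting the next height:

// finalizeResponse is illustrative only; appHash comes from the application.
func finalizeResponse(appHash []byte) *abci.FinalizeBlockResponse {
	return &abci.FinalizeBlockResponse{
		AppHash:        appHash,
		NextBlockDelay: 500 * time.Millisecond, // round-trips through the case 6 branch above
	}
}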
+ if m.PubKeyBytes == nil { + m.PubKeyBytes = []byte{} } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyType", wireType) } - m.Power = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15873,11 +15722,24 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Power |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15975,7 +15837,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockIdFlag |= types1.BlockIDFlag(b&0x7F) << shift + m.BlockIdFlag |= v1.BlockIDFlag(b&0x7F) << shift if b < 0x80 { break } @@ -16145,7 +16007,7 @@ func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockIdFlag |= types1.BlockIDFlag(b&0x7F) << shift + m.BlockIdFlag |= v1.BlockIDFlag(b&0x7F) << shift if b < 0x80 { break } diff --git a/api/cometbft/abci/v1beta1/types.go b/api/cometbft/abci/v1beta1/types.go new file mode 100644 index 00000000000..d069a705447 --- /dev/null +++ b/api/cometbft/abci/v1beta1/types.go @@ -0,0 +1,42 @@ +package v1beta1 + +import ( + "bytes" + + "github.com/cosmos/gogoproto/jsonpb" +) + +const ( + CodeTypeOK uint32 = 0 +) + +// IsOK returns true if Code is OK. +func (r ResponseQuery) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r ResponseQuery) IsErr() bool { + return r.Code != CodeTypeOK +} + +// --------------------------------------------------------------------------- +// override JSON marshaling so we emit defaults (ie. disable omitempty) + +var ( + jsonpbMarshaller = jsonpb.Marshaler{ + EnumsAsInts: true, + EmitDefaults: true, + } + jsonpbUnmarshaller = jsonpb.Unmarshaler{} +) + +func (r *ResponseQuery) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseQuery) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} diff --git a/api/cometbft/abci/v1beta1/types.pb.go b/api/cometbft/abci/v1beta1/types.pb.go new file mode 100644 index 00000000000..f42019acdef --- /dev/null +++ b/api/cometbft/abci/v1beta1/types.pb.go @@ -0,0 +1,14663 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
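The hand-written api/cometbft/abci/v1beta1/types.go added above swaps ResponseQuery's JSON handling over to jsonpb with EnumsAsInts and EmitDefaults, so zero-valued fields stay visible in the output instead of being dropped by omitempty. A small usage sketch, assuming v1beta1 = "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1":

rq := &v1beta1.ResponseQuery{Log: "ok"} // Code left at 0, i.e. CodeTypeOK
b, err := rq.MarshalJSON()
// b still contains "code":0; encoding/json with omitempty would drop it.
fmt.Println(rq.IsOK(), err, string(b))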
+// source: cometbft/abci/v1beta1/types.proto + +package v1beta1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Type of the transaction check request. +type CheckTxType int32 + +const ( + // New + CheckTxType_New CheckTxType = 0 + // Recheck (2nd, 3rd, etc.) + CheckTxType_Recheck CheckTxType = 1 +) + +var CheckTxType_name = map[int32]string{ + 0: "NEW", + 1: "RECHECK", +} + +var CheckTxType_value = map[string]int32{ + "NEW": 0, + "RECHECK": 1, +} + +func (x CheckTxType) String() string { + return proto.EnumName(CheckTxType_name, int32(x)) +} + +func (CheckTxType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{0} +} + +// The type of evidence. +type EvidenceType int32 + +const ( + // Unknown + EvidenceType_UNKNOWN EvidenceType = 0 + // Duplicate vote + EvidenceType_DUPLICATE_VOTE EvidenceType = 1 + // Light client attack + EvidenceType_LIGHT_CLIENT_ATTACK EvidenceType = 2 +) + +var EvidenceType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DUPLICATE_VOTE", + 2: "LIGHT_CLIENT_ATTACK", +} + +var EvidenceType_value = map[string]int32{ + "UNKNOWN": 0, + "DUPLICATE_VOTE": 1, + "LIGHT_CLIENT_ATTACK": 2, +} + +func (x EvidenceType) String() string { + return proto.EnumName(EvidenceType_name, int32(x)) +} + +func (EvidenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{1} +} + +// The status code. 
+type ResponseOfferSnapshot_Result int32 + +const ( + // Unknown result, abort all snapshot restoration + ResponseOfferSnapshot_UNKNOWN ResponseOfferSnapshot_Result = 0 + // Snapshot accepted, apply chunks + ResponseOfferSnapshot_ACCEPT ResponseOfferSnapshot_Result = 1 + // Abort all snapshot restoration + ResponseOfferSnapshot_ABORT ResponseOfferSnapshot_Result = 2 + // Reject this specific snapshot, try others + ResponseOfferSnapshot_REJECT ResponseOfferSnapshot_Result = 3 + // Reject all snapshots of this format, try others + ResponseOfferSnapshot_REJECT_FORMAT ResponseOfferSnapshot_Result = 4 + // Reject all snapshots from the sender(s), try others + ResponseOfferSnapshot_REJECT_SENDER ResponseOfferSnapshot_Result = 5 +) + +var ResponseOfferSnapshot_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "ABORT", + 3: "REJECT", + 4: "REJECT_FORMAT", + 5: "REJECT_SENDER", +} + +var ResponseOfferSnapshot_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "ABORT": 2, + "REJECT": 3, + "REJECT_FORMAT": 4, + "REJECT_SENDER": 5, +} + +func (x ResponseOfferSnapshot_Result) String() string { + return proto.EnumName(ResponseOfferSnapshot_Result_name, int32(x)) +} + +func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{30, 0} +} + +// The status code. +type ResponseApplySnapshotChunk_Result int32 + +const ( + // Unknown result, abort all snapshot restoration + ResponseApplySnapshotChunk_UNKNOWN ResponseApplySnapshotChunk_Result = 0 + // Chunk successfully accepted + ResponseApplySnapshotChunk_ACCEPT ResponseApplySnapshotChunk_Result = 1 + // Abort all snapshot restoration + ResponseApplySnapshotChunk_ABORT ResponseApplySnapshotChunk_Result = 2 + // Retry chunk (combine with refetch and reject) + ResponseApplySnapshotChunk_RETRY ResponseApplySnapshotChunk_Result = 3 + // Retry snapshot (combine with refetch and reject) + ResponseApplySnapshotChunk_RETRY_SNAPSHOT ResponseApplySnapshotChunk_Result = 4 + // Reject this snapshot, try others + ResponseApplySnapshotChunk_REJECT_SNAPSHOT ResponseApplySnapshotChunk_Result = 5 +) + +var ResponseApplySnapshotChunk_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "ABORT", + 3: "RETRY", + 4: "RETRY_SNAPSHOT", + 5: "REJECT_SNAPSHOT", +} + +var ResponseApplySnapshotChunk_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "ABORT": 2, + "RETRY": 3, + "RETRY_SNAPSHOT": 4, + "REJECT_SNAPSHOT": 5, +} + +func (x ResponseApplySnapshotChunk_Result) String() string { + return proto.EnumName(ResponseApplySnapshotChunk_Result_name, int32(x)) +} + +func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{32, 0} +} + +// Request represents a request to the ABCI application. +type Request struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Value: + // *Request_Echo + // *Request_Flush + // *Request_Info + // *Request_SetOption + // *Request_InitChain + // *Request_Query + // *Request_BeginBlock + // *Request_CheckTx + // *Request_DeliverTx + // *Request_EndBlock + // *Request_Commit + // *Request_ListSnapshots + // *Request_OfferSnapshot + // *Request_LoadSnapshotChunk + // *Request_ApplySnapshotChunk + Value isRequest_Value `protobuf_oneof:"value"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +type isRequest_Value interface { + isRequest_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Request_Echo struct { + Echo *RequestEcho `protobuf:"bytes,1,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Request_Flush struct { + Flush *RequestFlush `protobuf:"bytes,2,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Request_Info struct { + Info *RequestInfo `protobuf:"bytes,3,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Request_SetOption struct { + SetOption *RequestSetOption `protobuf:"bytes,4,opt,name=set_option,json=setOption,proto3,oneof" json:"set_option,omitempty"` +} +type Request_InitChain struct { + InitChain *RequestInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Request_Query struct { + Query *RequestQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Request_BeginBlock struct { + BeginBlock *RequestBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` +} +type Request_CheckTx struct { + CheckTx *RequestCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type Request_DeliverTx struct { + DeliverTx *RequestDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` +} +type Request_EndBlock struct { + EndBlock *RequestEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` +} +type Request_Commit struct { + Commit *RequestCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Request_ListSnapshots struct { + ListSnapshots *RequestListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Request_OfferSnapshot struct { + OfferSnapshot *RequestOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Request_LoadSnapshotChunk struct { + LoadSnapshotChunk 
*RequestLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Request_ApplySnapshotChunk struct { + ApplySnapshotChunk *RequestApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} + +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_SetOption) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} + +func (m *Request) GetValue() isRequest_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Request) GetEcho() *RequestEcho { + if x, ok := m.GetValue().(*Request_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Request) GetFlush() *RequestFlush { + if x, ok := m.GetValue().(*Request_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Request) GetInfo() *RequestInfo { + if x, ok := m.GetValue().(*Request_Info); ok { + return x.Info + } + return nil +} + +func (m *Request) GetSetOption() *RequestSetOption { + if x, ok := m.GetValue().(*Request_SetOption); ok { + return x.SetOption + } + return nil +} + +func (m *Request) GetInitChain() *RequestInitChain { + if x, ok := m.GetValue().(*Request_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Request) GetQuery() *RequestQuery { + if x, ok := m.GetValue().(*Request_Query); ok { + return x.Query + } + return nil +} + +func (m *Request) GetBeginBlock() *RequestBeginBlock { + if x, ok := m.GetValue().(*Request_BeginBlock); ok { + return x.BeginBlock + } + return nil +} + +func (m *Request) GetCheckTx() *RequestCheckTx { + if x, ok := m.GetValue().(*Request_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Request) GetDeliverTx() *RequestDeliverTx { + if x, ok := m.GetValue().(*Request_DeliverTx); ok { + return x.DeliverTx + } + return nil +} + +func (m *Request) GetEndBlock() *RequestEndBlock { + if x, ok := m.GetValue().(*Request_EndBlock); ok { + return x.EndBlock + } + return nil +} + +func (m *Request) GetCommit() *RequestCommit { + if x, ok := m.GetValue().(*Request_Commit); ok { + return x.Commit + } + return nil +} + +func (m *Request) GetListSnapshots() *RequestListSnapshots { + if x, ok := m.GetValue().(*Request_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Request) GetOfferSnapshot() *RequestOfferSnapshot { + if x, ok := m.GetValue().(*Request_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Request) GetLoadSnapshotChunk() *RequestLoadSnapshotChunk { + if x, ok := m.GetValue().(*Request_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { + if x, ok := m.GetValue().(*Request_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
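The Request type above is the standard gogoproto oneof layout: one wrapper struct per variant, each implementing isRequest_Value, plus Get accessors that type-assert on GetValue(). Consumers can equally dispatch with a type switch on the exported Value field, as in this sketch (handle is my own name):

func handle(req *v1beta1.Request) {
	switch v := req.Value.(type) {
	case *v1beta1.Request_Echo:
		_ = v.Echo.Message
	case *v1beta1.Request_CheckTx:
		_ = v.CheckTx.Tx
	case *v1beta1.Request_Query:
		_ = v.Query.Path
	default:
		// remaining variants elided
	}
}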
+func (*Request) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Request_Echo)(nil), + (*Request_Flush)(nil), + (*Request_Info)(nil), + (*Request_SetOption)(nil), + (*Request_InitChain)(nil), + (*Request_Query)(nil), + (*Request_BeginBlock)(nil), + (*Request_CheckTx)(nil), + (*Request_DeliverTx)(nil), + (*Request_EndBlock)(nil), + (*Request_Commit)(nil), + (*Request_ListSnapshots)(nil), + (*Request_OfferSnapshot)(nil), + (*Request_LoadSnapshotChunk)(nil), + (*Request_ApplySnapshotChunk)(nil), + } +} + +// RequestEcho is a request to "echo" the given string. +type RequestEcho struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *RequestEcho) Reset() { *m = RequestEcho{} } +func (m *RequestEcho) String() string { return proto.CompactTextString(m) } +func (*RequestEcho) ProtoMessage() {} +func (*RequestEcho) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{1} +} +func (m *RequestEcho) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestEcho.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestEcho) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestEcho.Merge(m, src) +} +func (m *RequestEcho) XXX_Size() int { + return m.Size() +} +func (m *RequestEcho) XXX_DiscardUnknown() { + xxx_messageInfo_RequestEcho.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestEcho proto.InternalMessageInfo + +func (m *RequestEcho) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// RequestFlush is a request to flush the write buffer. +type RequestFlush struct { +} + +func (m *RequestFlush) Reset() { *m = RequestFlush{} } +func (m *RequestFlush) String() string { return proto.CompactTextString(m) } +func (*RequestFlush) ProtoMessage() {} +func (*RequestFlush) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{2} +} +func (m *RequestFlush) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestFlush.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestFlush) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestFlush.Merge(m, src) +} +func (m *RequestFlush) XXX_Size() int { + return m.Size() +} +func (m *RequestFlush) XXX_DiscardUnknown() { + xxx_messageInfo_RequestFlush.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestFlush proto.InternalMessageInfo + +// RequestInfo is a request for the ABCI application version. 
+type RequestInfo struct { + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` + P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` +} + +func (m *RequestInfo) Reset() { *m = RequestInfo{} } +func (m *RequestInfo) String() string { return proto.CompactTextString(m) } +func (*RequestInfo) ProtoMessage() {} +func (*RequestInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{3} +} +func (m *RequestInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInfo.Merge(m, src) +} +func (m *RequestInfo) XXX_Size() int { + return m.Size() +} +func (m *RequestInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInfo proto.InternalMessageInfo + +func (m *RequestInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *RequestInfo) GetBlockVersion() uint64 { + if m != nil { + return m.BlockVersion + } + return 0 +} + +func (m *RequestInfo) GetP2PVersion() uint64 { + if m != nil { + return m.P2PVersion + } + return 0 +} + +// nondeterministic +type RequestSetOption struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } +func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } +func (*RequestSetOption) ProtoMessage() {} +func (*RequestSetOption) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{4} +} +func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestSetOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestSetOption.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestSetOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestSetOption.Merge(m, src) +} +func (m *RequestSetOption) XXX_Size() int { + return m.Size() +} +func (m *RequestSetOption) XXX_DiscardUnknown() { + xxx_messageInfo_RequestSetOption.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestSetOption proto.InternalMessageInfo + +func (m *RequestSetOption) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *RequestSetOption) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// RequestInitChain is a request to initialize the blockchain. 
+type RequestInitChain struct { + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` +} + +func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } +func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } +func (*RequestInitChain) ProtoMessage() {} +func (*RequestInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{5} +} +func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestInitChain.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInitChain.Merge(m, src) +} +func (m *RequestInitChain) XXX_Size() int { + return m.Size() +} +func (m *RequestInitChain) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInitChain.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInitChain proto.InternalMessageInfo + +func (m *RequestInitChain) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestInitChain) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *RequestInitChain) GetConsensusParams() *ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *RequestInitChain) GetValidators() []ValidatorUpdate { + if m != nil { + return m.Validators + } + return nil +} + +func (m *RequestInitChain) GetAppStateBytes() []byte { + if m != nil { + return m.AppStateBytes + } + return nil +} + +func (m *RequestInitChain) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +// RequestQuery is a request to query the application state. 
+type RequestQuery struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Prove bool `protobuf:"varint,4,opt,name=prove,proto3" json:"prove,omitempty"` +} + +func (m *RequestQuery) Reset() { *m = RequestQuery{} } +func (m *RequestQuery) String() string { return proto.CompactTextString(m) } +func (*RequestQuery) ProtoMessage() {} +func (*RequestQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{6} +} +func (m *RequestQuery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestQuery.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestQuery.Merge(m, src) +} +func (m *RequestQuery) XXX_Size() int { + return m.Size() +} +func (m *RequestQuery) XXX_DiscardUnknown() { + xxx_messageInfo_RequestQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestQuery proto.InternalMessageInfo + +func (m *RequestQuery) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *RequestQuery) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *RequestQuery) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestQuery) GetProve() bool { + if m != nil { + return m.Prove + } + return false +} + +// RequestBeginBlock indicates the beginning of committing the block. 
+type RequestBeginBlock struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header v1beta1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` + ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` +} + +func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } +func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } +func (*RequestBeginBlock) ProtoMessage() {} +func (*RequestBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{7} +} +func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBeginBlock.Merge(m, src) +} +func (m *RequestBeginBlock) XXX_Size() int { + return m.Size() +} +func (m *RequestBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo + +func (m *RequestBeginBlock) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestBeginBlock) GetHeader() v1beta1.Header { + if m != nil { + return m.Header + } + return v1beta1.Header{} +} + +func (m *RequestBeginBlock) GetLastCommitInfo() LastCommitInfo { + if m != nil { + return m.LastCommitInfo + } + return LastCommitInfo{} +} + +func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +// RequestCheckTx is a request to check the transaction. 
+type RequestCheckTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=cometbft.abci.v1beta1.CheckTxType" json:"type,omitempty"` +} + +func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } +func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } +func (*RequestCheckTx) ProtoMessage() {} +func (*RequestCheckTx) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{8} +} +func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestCheckTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestCheckTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestCheckTx.Merge(m, src) +} +func (m *RequestCheckTx) XXX_Size() int { + return m.Size() +} +func (m *RequestCheckTx) XXX_DiscardUnknown() { + xxx_messageInfo_RequestCheckTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestCheckTx proto.InternalMessageInfo + +func (m *RequestCheckTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +func (m *RequestCheckTx) GetType() CheckTxType { + if m != nil { + return m.Type + } + return CheckTxType_New +} + +// RequestDeliverTx is a request to apply the transaction. +type RequestDeliverTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } +func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } +func (*RequestDeliverTx) ProtoMessage() {} +func (*RequestDeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{9} +} +func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestDeliverTx.Merge(m, src) +} +func (m *RequestDeliverTx) XXX_Size() int { + return m.Size() +} +func (m *RequestDeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_RequestDeliverTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestDeliverTx proto.InternalMessageInfo + +func (m *RequestDeliverTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +// RequestEndBlock indicates the end of committing the block. 
+type RequestEndBlock struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } +func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } +func (*RequestEndBlock) ProtoMessage() {} +func (*RequestEndBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{10} +} +func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestEndBlock.Merge(m, src) +} +func (m *RequestEndBlock) XXX_Size() int { + return m.Size() +} +func (m *RequestEndBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestEndBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestEndBlock proto.InternalMessageInfo + +func (m *RequestEndBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// RequestCommit is a request to commit the pending application state. +type RequestCommit struct { +} + +func (m *RequestCommit) Reset() { *m = RequestCommit{} } +func (m *RequestCommit) String() string { return proto.CompactTextString(m) } +func (*RequestCommit) ProtoMessage() {} +func (*RequestCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{11} +} +func (m *RequestCommit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestCommit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestCommit.Merge(m, src) +} +func (m *RequestCommit) XXX_Size() int { + return m.Size() +} +func (m *RequestCommit) XXX_DiscardUnknown() { + xxx_messageInfo_RequestCommit.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestCommit proto.InternalMessageInfo + +// lists available snapshots +type RequestListSnapshots struct { +} + +func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } +func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } +func (*RequestListSnapshots) ProtoMessage() {} +func (*RequestListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{12} +} +func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestListSnapshots.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestListSnapshots.Merge(m, src) +} +func (m *RequestListSnapshots) XXX_Size() int { + return m.Size() +} +func (m *RequestListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_RequestListSnapshots.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestListSnapshots 
proto.InternalMessageInfo + +// offers a snapshot to the application +type RequestOfferSnapshot struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } +func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*RequestOfferSnapshot) ProtoMessage() {} +func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{13} +} +func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestOfferSnapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestOfferSnapshot.Merge(m, src) +} +func (m *RequestOfferSnapshot) XXX_Size() int { + return m.Size() +} +func (m *RequestOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_RequestOfferSnapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestOfferSnapshot proto.InternalMessageInfo + +func (m *RequestOfferSnapshot) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *RequestOfferSnapshot) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// loads a snapshot chunk +type RequestLoadSnapshotChunk struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunk uint32 `protobuf:"varint,3,opt,name=chunk,proto3" json:"chunk,omitempty"` +} + +func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChunk{} } +func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*RequestLoadSnapshotChunk) ProtoMessage() {} +func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{14} +} +func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestLoadSnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestLoadSnapshotChunk.Merge(m, src) +} +func (m *RequestLoadSnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *RequestLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_RequestLoadSnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestLoadSnapshotChunk proto.InternalMessageInfo + +func (m *RequestLoadSnapshotChunk) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestLoadSnapshotChunk) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *RequestLoadSnapshotChunk) GetChunk() uint32 { + if m != nil { + return m.Chunk + } + return 0 +} + +// Applies a snapshot chunk +type RequestApplySnapshotChunk struct { + Index uint32 
`protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Chunk []byte `protobuf:"bytes,2,opt,name=chunk,proto3" json:"chunk,omitempty"` + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` +} + +func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotChunk{} } +func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*RequestApplySnapshotChunk) ProtoMessage() {} +func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{15} +} +func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestApplySnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestApplySnapshotChunk.Merge(m, src) +} +func (m *RequestApplySnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *RequestApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_RequestApplySnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestApplySnapshotChunk proto.InternalMessageInfo + +func (m *RequestApplySnapshotChunk) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *RequestApplySnapshotChunk) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *RequestApplySnapshotChunk) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +// Response represents a response from the ABCI application. +type Response struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Value: + // + // *Response_Exception + // *Response_Echo + // *Response_Flush + // *Response_Info + // *Response_SetOption + // *Response_InitChain + // *Response_Query + // *Response_BeginBlock + // *Response_CheckTx + // *Response_DeliverTx + // *Response_EndBlock + // *Response_Commit + // *Response_ListSnapshots + // *Response_OfferSnapshot + // *Response_LoadSnapshotChunk + // *Response_ApplySnapshotChunk + Value isResponse_Value `protobuf_oneof:"value"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{16} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +type isResponse_Value interface { + isResponse_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Response_Exception struct { + Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` +} +type Response_Echo struct { + Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Response_Flush struct { + Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Response_Info struct { + Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Response_SetOption struct { + SetOption *ResponseSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,proto3,oneof" json:"set_option,omitempty"` +} +type Response_InitChain struct { + InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Response_Query struct { + Query *ResponseQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Response_BeginBlock struct { + BeginBlock *ResponseBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` +} +type Response_CheckTx struct { + CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type Response_DeliverTx struct { + DeliverTx *ResponseDeliverTx `protobuf:"bytes,10,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` +} +type Response_EndBlock struct { + EndBlock *ResponseEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` +} +type Response_Commit struct { + Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Response_ListSnapshots struct { + ListSnapshots *ResponseListSnapshots `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Response_OfferSnapshot 
struct { + OfferSnapshot *ResponseOfferSnapshot `protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Response_LoadSnapshotChunk struct { + LoadSnapshotChunk *ResponseLoadSnapshotChunk `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Response_ApplySnapshotChunk struct { + ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_SetOption) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} + +func (m *Response) GetValue() isResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Response) GetException() *ResponseException { + if x, ok := m.GetValue().(*Response_Exception); ok { + return x.Exception + } + return nil +} + +func (m *Response) GetEcho() *ResponseEcho { + if x, ok := m.GetValue().(*Response_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Response) GetFlush() *ResponseFlush { + if x, ok := m.GetValue().(*Response_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Response) GetInfo() *ResponseInfo { + if x, ok := m.GetValue().(*Response_Info); ok { + return x.Info + } + return nil +} + +func (m *Response) GetSetOption() *ResponseSetOption { + if x, ok := m.GetValue().(*Response_SetOption); ok { + return x.SetOption + } + return nil +} + +func (m *Response) GetInitChain() *ResponseInitChain { + if x, ok := m.GetValue().(*Response_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Response) GetQuery() *ResponseQuery { + if x, ok := m.GetValue().(*Response_Query); ok { + return x.Query + } + return nil +} + +func (m *Response) GetBeginBlock() *ResponseBeginBlock { + if x, ok := m.GetValue().(*Response_BeginBlock); ok { + return x.BeginBlock + } + return nil +} + +func (m *Response) GetCheckTx() *ResponseCheckTx { + if x, ok := m.GetValue().(*Response_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Response) GetDeliverTx() *ResponseDeliverTx { + if x, ok := m.GetValue().(*Response_DeliverTx); ok { + return x.DeliverTx + } + return nil +} + +func (m *Response) GetEndBlock() *ResponseEndBlock { + if x, ok := m.GetValue().(*Response_EndBlock); ok { + return x.EndBlock + } + return nil +} + +func (m *Response) GetCommit() *ResponseCommit { + if x, ok := m.GetValue().(*Response_Commit); ok { + return x.Commit + } + return nil +} + +func (m *Response) GetListSnapshots() *ResponseListSnapshots { + if x, ok := m.GetValue().(*Response_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Response) GetOfferSnapshot() *ResponseOfferSnapshot { + if x, ok := m.GetValue().(*Response_OfferSnapshot); 
ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Response) GetLoadSnapshotChunk() *ResponseLoadSnapshotChunk { + if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { + if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Response) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Response_Exception)(nil), + (*Response_Echo)(nil), + (*Response_Flush)(nil), + (*Response_Info)(nil), + (*Response_SetOption)(nil), + (*Response_InitChain)(nil), + (*Response_Query)(nil), + (*Response_BeginBlock)(nil), + (*Response_CheckTx)(nil), + (*Response_DeliverTx)(nil), + (*Response_EndBlock)(nil), + (*Response_Commit)(nil), + (*Response_ListSnapshots)(nil), + (*Response_OfferSnapshot)(nil), + (*Response_LoadSnapshotChunk)(nil), + (*Response_ApplySnapshotChunk)(nil), + } +} + +// nondeterministic +type ResponseException struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *ResponseException) Reset() { *m = ResponseException{} } +func (m *ResponseException) String() string { return proto.CompactTextString(m) } +func (*ResponseException) ProtoMessage() {} +func (*ResponseException) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{17} +} +func (m *ResponseException) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseException) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseException.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseException) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseException.Merge(m, src) +} +func (m *ResponseException) XXX_Size() int { + return m.Size() +} +func (m *ResponseException) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseException.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseException proto.InternalMessageInfo + +func (m *ResponseException) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// ResponseEcho indicates that the connection is still alive. 
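// Illustrative sketch, not part of the generated file: one way a caller
// might dispatch on the Response oneof defined above. Both the type switch
// on GetValue and the nil-guarded accessors are safe on a nil *Response.
func describeResponse(res *Response) string {
	switch v := res.GetValue().(type) {
	case *Response_Exception:
		// ResponseException carries a nondeterministic error string.
		return "exception: " + v.Exception.GetError()
	case *Response_Echo:
		return "echo: " + v.Echo.GetMessage()
	case nil:
		// res was nil, or no oneof variant was set.
		return "empty response"
	default:
		return "another response variant"
	}
}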
+type ResponseEcho struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } +func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } +func (*ResponseEcho) ProtoMessage() {} +func (*ResponseEcho) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{18} +} +func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseEcho.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseEcho) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEcho.Merge(m, src) +} +func (m *ResponseEcho) XXX_Size() int { + return m.Size() +} +func (m *ResponseEcho) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEcho.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseEcho proto.InternalMessageInfo + +func (m *ResponseEcho) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// ResponseFlush indicates that the ABCI application state was flushed. +type ResponseFlush struct { +} + +func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } +func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } +func (*ResponseFlush) ProtoMessage() {} +func (*ResponseFlush) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{19} +} +func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseFlush.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseFlush) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFlush.Merge(m, src) +} +func (m *ResponseFlush) XXX_Size() int { + return m.Size() +} +func (m *ResponseFlush) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFlush.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo + +// ResponseInfo contains the ABCI application version information.
+type ResponseInfo struct { + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` +} + +func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } +func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } +func (*ResponseInfo) ProtoMessage() {} +func (*ResponseInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{20} +} +func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInfo.Merge(m, src) +} +func (m *ResponseInfo) XXX_Size() int { + return m.Size() +} +func (m *ResponseInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseInfo proto.InternalMessageInfo + +func (m *ResponseInfo) GetData() string { + if m != nil { + return m.Data + } + return "" +} + +func (m *ResponseInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *ResponseInfo) GetAppVersion() uint64 { + if m != nil { + return m.AppVersion + } + return 0 +} + +func (m *ResponseInfo) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight + } + return 0 +} + +func (m *ResponseInfo) GetLastBlockAppHash() []byte { + if m != nil { + return m.LastBlockAppHash + } + return nil +} + +// nondeterministic +type ResponseSetOption struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // bytes data = 2; + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` +} + +func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } +func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } +func (*ResponseSetOption) ProtoMessage() {} +func (*ResponseSetOption) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{21} +} +func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseSetOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseSetOption.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseSetOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseSetOption.Merge(m, src) +} +func (m *ResponseSetOption) XXX_Size() int { + return m.Size() +} +func (m *ResponseSetOption) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseSetOption.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseSetOption proto.InternalMessageInfo + +func (m *ResponseSetOption) GetCode() 
uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseSetOption) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseSetOption) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +// ResponseInitChain contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. +type ResponseInitChain struct { + ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } +func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } +func (*ResponseInitChain) ProtoMessage() {} +func (*ResponseInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{22} +} +func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInitChain.Merge(m, src) +} +func (m *ResponseInitChain) XXX_Size() int { + return m.Size() +} +func (m *ResponseInitChain) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseInitChain.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo + +func (m *ResponseInitChain) GetConsensusParams() *ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { + if m != nil { + return m.Validators + } + return nil +} + +func (m *ResponseInitChain) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// ResponseQuery contains the ABCI application data along with a proof. +type ResponseQuery struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // bytes data = 2; // use "value" instead. 
+ Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + ProofOps *v1.ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proof_ops,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } +func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } +func (*ResponseQuery) ProtoMessage() {} +func (*ResponseQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{23} +} +func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseQuery.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseQuery.Merge(m, src) +} +func (m *ResponseQuery) XXX_Size() int { + return m.Size() +} +func (m *ResponseQuery) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseQuery proto.InternalMessageInfo + +func (m *ResponseQuery) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseQuery) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseQuery) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseQuery) GetIndex() int64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ResponseQuery) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ResponseQuery) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *ResponseQuery) GetProofOps() *v1.ProofOps { + if m != nil { + return m.ProofOps + } + return nil +} + +func (m *ResponseQuery) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ResponseQuery) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + +// ResponseBeginBlock contains a list of block-level events. 
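// Illustrative sketch, not part of the generated file: every getter above
// guards against a nil receiver, so fields of a ResponseQuery that may be
// absent can be read without explicit nil checks; missing values come back
// as zero values.
func summarizeQuery(res *ResponseQuery) (code uint32, height int64) {
	// Both calls are safe even when res is nil.
	return res.GetCode(), res.GetHeight()
}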
+type ResponseBeginBlock struct { + Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } +func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseBeginBlock) ProtoMessage() {} +func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{24} +} +func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBeginBlock.Merge(m, src) +} +func (m *ResponseBeginBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo + +func (m *ResponseBeginBlock) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +// ResponseCheckTx shows whether the transaction was deemed valid by the ABCI +// application. +type ResponseCheckTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` + // mempool_error is set by CometBFT. + // ABCI applications creating a ResponseCheckTx should not set mempool_error.
+ MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` +} + +func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } +func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } +func (*ResponseCheckTx) ProtoMessage() {} +func (*ResponseCheckTx) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{25} +} +func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseCheckTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCheckTx.Merge(m, src) +} +func (m *ResponseCheckTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseCheckTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCheckTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseCheckTx proto.InternalMessageInfo + +func (m *ResponseCheckTx) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseCheckTx) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseCheckTx) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseCheckTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseCheckTx) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseCheckTx) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseCheckTx) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *ResponseCheckTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + +func (m *ResponseCheckTx) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *ResponseCheckTx) GetPriority() int64 { + if m != nil { + return m.Priority + } + return 0 +} + +func (m *ResponseCheckTx) GetMempoolError() string { + if m != nil { + return m.MempoolError + } + return "" +} + +// ResponseDeliverTx contains a result of committing the given transaction and a +// list of events. 
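// Illustrative sketch, not part of the generated file: how an application
// might populate ResponseCheckTx for a valid transaction. MempoolError is
// deliberately left unset because, per the field comment above, it is set
// by CometBFT rather than by the application.
func acceptTx(gasWanted int64) *ResponseCheckTx {
	return &ResponseCheckTx{
		Code:      0, // code 0 is the conventional "OK" result
		GasWanted: gasWanted,
		Log:       "transaction accepted",
	}
}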
+type ResponseDeliverTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } +func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } +func (*ResponseDeliverTx) ProtoMessage() {} +func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{26} +} +func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseDeliverTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDeliverTx.Merge(m, src) +} +func (m *ResponseDeliverTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseDeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDeliverTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseDeliverTx proto.InternalMessageInfo + +func (m *ResponseDeliverTx) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseDeliverTx) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseDeliverTx) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseDeliverTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseDeliverTx) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseDeliverTx) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseDeliverTx) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *ResponseDeliverTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + +// ResponseEndBlock contains updates to consensus params and/or validator set changes, if any. 
+type ResponseEndBlock struct { + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } +func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseEndBlock) ProtoMessage() {} +func (*ResponseEndBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{27} +} +func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEndBlock.Merge(m, src) +} +func (m *ResponseEndBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseEndBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo + +func (m *ResponseEndBlock) GetValidatorUpdates() []ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseEndBlock) GetConsensusParamUpdates() *ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseEndBlock) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +// ResponseCommit indicates how many blocks CometBFT should retain. +type ResponseCommit struct { + // reserve 1 + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` +} + +func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } +func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } +func (*ResponseCommit) ProtoMessage() {} +func (*ResponseCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{28} +} +func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCommit.Merge(m, src) +} +func (m *ResponseCommit) XXX_Size() int { + return m.Size() +} +func (m *ResponseCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCommit.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo + +func (m *ResponseCommit) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseCommit) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 +} + +// ResponseListSnapshots contains the list of snapshots.
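// Illustrative sketch, not part of the generated file: an application that
// wants CometBFT to prune blocks more than `keep` heights below the latest
// committed height could build its Commit response like this. A zero
// RetainHeight is conventionally treated as "retain everything".
func commitResponse(appHash []byte, height, keep int64) *ResponseCommit {
	retain := height - keep
	if retain < 0 {
		retain = 0
	}
	return &ResponseCommit{Data: appHash, RetainHeight: retain}
}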
+type ResponseListSnapshots struct { + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` +} + +func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } +func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } +func (*ResponseListSnapshots) ProtoMessage() {} +func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{29} +} +func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseListSnapshots.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseListSnapshots.Merge(m, src) +} +func (m *ResponseListSnapshots) XXX_Size() int { + return m.Size() +} +func (m *ResponseListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseListSnapshots.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseListSnapshots proto.InternalMessageInfo + +func (m *ResponseListSnapshots) GetSnapshots() []*Snapshot { + if m != nil { + return m.Snapshots + } + return nil +} + +// ResponseOfferSnapshot indicates the ABCI application's decision on whether +// or not to provide a snapshot to the requester. +type ResponseOfferSnapshot struct { + Result ResponseOfferSnapshot_Result `protobuf:"varint,1,opt,name=result,proto3,enum=cometbft.abci.v1beta1.ResponseOfferSnapshot_Result" json:"result,omitempty"` +} + +func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } +func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*ResponseOfferSnapshot) ProtoMessage() {} +func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{30} +} +func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseOfferSnapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseOfferSnapshot.Merge(m, src) +} +func (m *ResponseOfferSnapshot) XXX_Size() int { + return m.Size() +} +func (m *ResponseOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseOfferSnapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseOfferSnapshot proto.InternalMessageInfo + +func (m *ResponseOfferSnapshot) GetResult() ResponseOfferSnapshot_Result { + if m != nil { + return m.Result + } + return ResponseOfferSnapshot_UNKNOWN +} + +// ResponseLoadSnapshotChunk returns a snapshot's chunk.
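// Illustrative sketch, not part of the generated file: a state-sync caller
// can branch on the Result enum. ResponseOfferSnapshot_ACCEPT is assumed
// here to be another value of the same generated enum (only the UNKNOWN
// default is visible in this excerpt).
func snapshotAccepted(res *ResponseOfferSnapshot) bool {
	return res.GetResult() == ResponseOfferSnapshot_ACCEPT
}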
+type ResponseLoadSnapshotChunk struct { + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` +} + +func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotChunk{} } +func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseLoadSnapshotChunk) ProtoMessage() {} +func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{31} +} +func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseLoadSnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseLoadSnapshotChunk.Merge(m, src) +} +func (m *ResponseLoadSnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *ResponseLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseLoadSnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseLoadSnapshotChunk proto.InternalMessageInfo + +func (m *ResponseLoadSnapshotChunk) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +// ResponseApplySnapshotChunk returns a result of applying the specified chunk. +type ResponseApplySnapshotChunk struct { + Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=cometbft.abci.v1beta1.ResponseApplySnapshotChunk_Result" json:"result,omitempty"` + RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` + RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` +} + +func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} } +func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseApplySnapshotChunk) ProtoMessage() {} +func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{32} +} +func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseApplySnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseApplySnapshotChunk.Merge(m, src) +} +func (m *ResponseApplySnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *ResponseApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseApplySnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseApplySnapshotChunk proto.InternalMessageInfo + +func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Result { + if m != nil { + return m.Result + } + return ResponseApplySnapshotChunk_UNKNOWN +} + +func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 { + if m != nil { + return m.RefetchChunks + } + return nil +} + +func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { + if 
m != nil { + return m.RejectSenders + } + return nil +} + +// ConsensusParams contains all consensus-relevant parameters +// that can be adjusted by the ABCI application. +type ConsensusParams struct { + Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Evidence *v1beta1.EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` + Validator *v1beta1.ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` + Version *v1beta1.VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } +func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } +func (*ConsensusParams) ProtoMessage() {} +func (*ConsensusParams) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{33} +} +func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParams.Merge(m, src) +} +func (m *ConsensusParams) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParams) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo + +func (m *ConsensusParams) GetBlock() *BlockParams { + if m != nil { + return m.Block + } + return nil +} + +func (m *ConsensusParams) GetEvidence() *v1beta1.EvidenceParams { + if m != nil { + return m.Evidence + } + return nil +} + +func (m *ConsensusParams) GetValidator() *v1beta1.ValidatorParams { + if m != nil { + return m.Validator + } + return nil +} + +func (m *ConsensusParams) GetVersion() *v1beta1.VersionParams { + if m != nil { + return m.Version + } + return nil +} + +// BlockParams contains limits on the block size.
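// Illustrative sketch, not part of the generated file: because the getters
// above tolerate nil receivers, nested reads through ConsensusParams can be
// chained without intermediate nil checks; an absent BlockParams simply
// yields its zero values.
func maxBlockBytes(cp *ConsensusParams) int64 {
	// Returns 0 when cp or cp.Block is nil.
	return cp.GetBlock().GetMaxBytes()
}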
+type BlockParams struct { + // Note: must be greater than 0 + MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Note: must be greater than or equal to -1 + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` +} + +func (m *BlockParams) Reset() { *m = BlockParams{} } +func (m *BlockParams) String() string { return proto.CompactTextString(m) } +func (*BlockParams) ProtoMessage() {} +func (*BlockParams) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{34} +} +func (m *BlockParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockParams.Merge(m, src) +} +func (m *BlockParams) XXX_Size() int { + return m.Size() +} +func (m *BlockParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockParams.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockParams proto.InternalMessageInfo + +func (m *BlockParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} + +func (m *BlockParams) GetMaxGas() int64 { + if m != nil { + return m.MaxGas + } + return 0 +} + +// LastCommitInfo contains votes for the particular round. +type LastCommitInfo struct { + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +} + +func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } +func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } +func (*LastCommitInfo) ProtoMessage() {} +func (*LastCommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{35} +} +func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LastCommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LastCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastCommitInfo.Merge(m, src) +} +func (m *LastCommitInfo) XXX_Size() int { + return m.Size() +} +func (m *LastCommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LastCommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo + +func (m *LastCommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *LastCommitInfo) GetVotes() []VoteInfo { + if m != nil { + return m.Votes + } + return nil +} + +// Event allows application developers to attach additional information to +// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events.
+type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{36} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Event) GetAttributes() []EventAttribute { + if m != nil { + return m.Attributes + } + return nil +} + +// EventAttribute is a single key-value pair, associated with an event. +type EventAttribute struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *EventAttribute) Reset() { *m = EventAttribute{} } +func (m *EventAttribute) String() string { return proto.CompactTextString(m) } +func (*EventAttribute) ProtoMessage() {} +func (*EventAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{37} +} +func (m *EventAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventAttribute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventAttribute.Merge(m, src) +} +func (m *EventAttribute) XXX_Size() int { + return m.Size() +} +func (m *EventAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_EventAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_EventAttribute proto.InternalMessageInfo + +func (m *EventAttribute) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *EventAttribute) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *EventAttribute) GetIndex() bool { + if m != nil { + return m.Index + } + return false +} + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. 
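// Illustrative sketch, not part of the generated file: constructing an Event
// with indexed attributes. Setting Index to true marks an attribute for the
// node's transaction indexer, which is what makes events queryable as the
// comment above describes.
func transferEvent(sender, recipient []byte) Event {
	return Event{
		Type: "transfer",
		Attributes: []EventAttribute{
			{Key: []byte("sender"), Value: sender, Index: true},
			{Key: []byte("recipient"), Value: recipient, Index: true},
		},
	}
}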
+type TxResult struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` + Result ResponseDeliverTx `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` +} + +func (m *TxResult) Reset() { *m = TxResult{} } +func (m *TxResult) String() string { return proto.CompactTextString(m) } +func (*TxResult) ProtoMessage() {} +func (*TxResult) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{38} +} +func (m *TxResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxResult.Merge(m, src) +} +func (m *TxResult) XXX_Size() int { + return m.Size() +} +func (m *TxResult) XXX_DiscardUnknown() { + xxx_messageInfo_TxResult.DiscardUnknown(m) +} + +var xxx_messageInfo_TxResult proto.InternalMessageInfo + +func (m *TxResult) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *TxResult) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *TxResult) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +func (m *TxResult) GetResult() ResponseDeliverTx { + if m != nil { + return m.Result + } + return ResponseDeliverTx{} +} + +// Validator in the validator set. +type Validator struct { + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; + Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` +} + +func (m *Validator) Reset() { *m = Validator{} } +func (m *Validator) String() string { return proto.CompactTextString(m) } +func (*Validator) ProtoMessage() {} +func (*Validator) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{39} +} +func (m *Validator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Validator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Validator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validator.Merge(m, src) +} +func (m *Validator) XXX_Size() int { + return m.Size() +} +func (m *Validator) XXX_DiscardUnknown() { + xxx_messageInfo_Validator.DiscardUnknown(m) +} + +var xxx_messageInfo_Validator proto.InternalMessageInfo + +func (m *Validator) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + +func (m *Validator) GetPower() int64 { + if m != nil { + return m.Power + } + return 0 +} + +// ValidatorUpdate is a singular update to a validator set. 
+type ValidatorUpdate struct { + PubKey v1.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` +} + +func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } +func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } +func (*ValidatorUpdate) ProtoMessage() {} +func (*ValidatorUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{40} +} +func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorUpdate.Merge(m, src) +} +func (m *ValidatorUpdate) XXX_Size() int { + return m.Size() +} +func (m *ValidatorUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo + +func (m *ValidatorUpdate) GetPubKey() v1.PublicKey { + if m != nil { + return m.PubKey + } + return v1.PublicKey{} +} + +func (m *ValidatorUpdate) GetPower() int64 { + if m != nil { + return m.Power + } + return 0 +} + +// VoteInfo contains the information about the vote. +type VoteInfo struct { + Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` +} + +func (m *VoteInfo) Reset() { *m = VoteInfo{} } +func (m *VoteInfo) String() string { return proto.CompactTextString(m) } +func (*VoteInfo) ProtoMessage() {} +func (*VoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{41} +} +func (m *VoteInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(m, src) +} +func (m *VoteInfo) XXX_Size() int { + return m.Size() +} +func (m *VoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_VoteInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteInfo proto.InternalMessageInfo + +func (m *VoteInfo) GetValidator() Validator { + if m != nil { + return m.Validator + } + return Validator{} +} + +func (m *VoteInfo) GetSignedLastBlock() bool { + if m != nil { + return m.SignedLastBlock + } + return false +} + +// Evidence of a misbehavior committed by a validator. 
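// Illustrative sketch, not part of the generated file: VoteInfo can be used
// to count validators that did not sign the last block, e.g. to feed a
// liveness metric or an application-side penalty rule.
func absentValidators(info *LastCommitInfo) int {
	absent := 0
	for _, vote := range info.GetVotes() {
		if !vote.GetSignedLastBlock() {
			absent++
		}
	}
	return absent
}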
+type Evidence struct { + Type EvidenceType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.abci.v1beta1.EvidenceType" json:"type,omitempty"` + // The offending validator + Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` + // The height when the offense occurred + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + // The corresponding time when the offense occurred + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +} + +func (m *Evidence) Reset() { *m = Evidence{} } +func (m *Evidence) String() string { return proto.CompactTextString(m) } +func (*Evidence) ProtoMessage() {} +func (*Evidence) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{42} +} +func (m *Evidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Evidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Evidence.Merge(m, src) +} +func (m *Evidence) XXX_Size() int { + return m.Size() +} +func (m *Evidence) XXX_DiscardUnknown() { + xxx_messageInfo_Evidence.DiscardUnknown(m) +} + +var xxx_messageInfo_Evidence proto.InternalMessageInfo + +func (m *Evidence) GetType() EvidenceType { + if m != nil { + return m.Type + } + return EvidenceType_UNKNOWN +} + +func (m *Evidence) GetValidator() Validator { + if m != nil { + return m.Validator + } + return Validator{} +} + +func (m *Evidence) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Evidence) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *Evidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +// Snapshot of the ABCI application state.
+type Snapshot struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_916eac2194a40aed, []int{43} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) +} +func (m *Snapshot) XXX_Size() int { + return m.Size() +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Snapshot) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *Snapshot) GetChunks() uint32 { + if m != nil { + return m.Chunks + } + return 0 +} + +func (m *Snapshot) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *Snapshot) GetMetadata() []byte { + if m != nil { + return m.Metadata + } + return nil +} + +func init() { + proto.RegisterEnum("cometbft.abci.v1beta1.CheckTxType", CheckTxType_name, CheckTxType_value) + proto.RegisterEnum("cometbft.abci.v1beta1.EvidenceType", EvidenceType_name, EvidenceType_value) + proto.RegisterEnum("cometbft.abci.v1beta1.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) + proto.RegisterEnum("cometbft.abci.v1beta1.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) + proto.RegisterType((*Request)(nil), "cometbft.abci.v1beta1.Request") + proto.RegisterType((*RequestEcho)(nil), "cometbft.abci.v1beta1.RequestEcho") + proto.RegisterType((*RequestFlush)(nil), "cometbft.abci.v1beta1.RequestFlush") + proto.RegisterType((*RequestInfo)(nil), "cometbft.abci.v1beta1.RequestInfo") + proto.RegisterType((*RequestSetOption)(nil), "cometbft.abci.v1beta1.RequestSetOption") + proto.RegisterType((*RequestInitChain)(nil), "cometbft.abci.v1beta1.RequestInitChain") + proto.RegisterType((*RequestQuery)(nil), "cometbft.abci.v1beta1.RequestQuery") + proto.RegisterType((*RequestBeginBlock)(nil), "cometbft.abci.v1beta1.RequestBeginBlock") + proto.RegisterType((*RequestCheckTx)(nil), "cometbft.abci.v1beta1.RequestCheckTx") + proto.RegisterType((*RequestDeliverTx)(nil), "cometbft.abci.v1beta1.RequestDeliverTx") + proto.RegisterType((*RequestEndBlock)(nil), "cometbft.abci.v1beta1.RequestEndBlock") + proto.RegisterType((*RequestCommit)(nil), "cometbft.abci.v1beta1.RequestCommit") + proto.RegisterType((*RequestListSnapshots)(nil), "cometbft.abci.v1beta1.RequestListSnapshots") + proto.RegisterType((*RequestOfferSnapshot)(nil), 
"cometbft.abci.v1beta1.RequestOfferSnapshot") + proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "cometbft.abci.v1beta1.RequestLoadSnapshotChunk") + proto.RegisterType((*RequestApplySnapshotChunk)(nil), "cometbft.abci.v1beta1.RequestApplySnapshotChunk") + proto.RegisterType((*Response)(nil), "cometbft.abci.v1beta1.Response") + proto.RegisterType((*ResponseException)(nil), "cometbft.abci.v1beta1.ResponseException") + proto.RegisterType((*ResponseEcho)(nil), "cometbft.abci.v1beta1.ResponseEcho") + proto.RegisterType((*ResponseFlush)(nil), "cometbft.abci.v1beta1.ResponseFlush") + proto.RegisterType((*ResponseInfo)(nil), "cometbft.abci.v1beta1.ResponseInfo") + proto.RegisterType((*ResponseSetOption)(nil), "cometbft.abci.v1beta1.ResponseSetOption") + proto.RegisterType((*ResponseInitChain)(nil), "cometbft.abci.v1beta1.ResponseInitChain") + proto.RegisterType((*ResponseQuery)(nil), "cometbft.abci.v1beta1.ResponseQuery") + proto.RegisterType((*ResponseBeginBlock)(nil), "cometbft.abci.v1beta1.ResponseBeginBlock") + proto.RegisterType((*ResponseCheckTx)(nil), "cometbft.abci.v1beta1.ResponseCheckTx") + proto.RegisterType((*ResponseDeliverTx)(nil), "cometbft.abci.v1beta1.ResponseDeliverTx") + proto.RegisterType((*ResponseEndBlock)(nil), "cometbft.abci.v1beta1.ResponseEndBlock") + proto.RegisterType((*ResponseCommit)(nil), "cometbft.abci.v1beta1.ResponseCommit") + proto.RegisterType((*ResponseListSnapshots)(nil), "cometbft.abci.v1beta1.ResponseListSnapshots") + proto.RegisterType((*ResponseOfferSnapshot)(nil), "cometbft.abci.v1beta1.ResponseOfferSnapshot") + proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "cometbft.abci.v1beta1.ResponseLoadSnapshotChunk") + proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "cometbft.abci.v1beta1.ResponseApplySnapshotChunk") + proto.RegisterType((*ConsensusParams)(nil), "cometbft.abci.v1beta1.ConsensusParams") + proto.RegisterType((*BlockParams)(nil), "cometbft.abci.v1beta1.BlockParams") + proto.RegisterType((*LastCommitInfo)(nil), "cometbft.abci.v1beta1.LastCommitInfo") + proto.RegisterType((*Event)(nil), "cometbft.abci.v1beta1.Event") + proto.RegisterType((*EventAttribute)(nil), "cometbft.abci.v1beta1.EventAttribute") + proto.RegisterType((*TxResult)(nil), "cometbft.abci.v1beta1.TxResult") + proto.RegisterType((*Validator)(nil), "cometbft.abci.v1beta1.Validator") + proto.RegisterType((*ValidatorUpdate)(nil), "cometbft.abci.v1beta1.ValidatorUpdate") + proto.RegisterType((*VoteInfo)(nil), "cometbft.abci.v1beta1.VoteInfo") + proto.RegisterType((*Evidence)(nil), "cometbft.abci.v1beta1.Evidence") + proto.RegisterType((*Snapshot)(nil), "cometbft.abci.v1beta1.Snapshot") +} + +func init() { proto.RegisterFile("cometbft/abci/v1beta1/types.proto", fileDescriptor_916eac2194a40aed) } + +var fileDescriptor_916eac2194a40aed = []byte{ + // 2819 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0xe3, 0xc6, + 0xf1, 0xe7, 0xfb, 0xd1, 0x14, 0x29, 0x6a, 0x76, 0xbd, 0xa6, 0xf9, 0xb7, 0xa5, 0xfd, 0x63, 0xb3, + 0x7e, 0x6c, 0x1c, 0x29, 0xbb, 0xae, 0x72, 0xbc, 0xd9, 0x4d, 0x5c, 0x22, 0x97, 0x6b, 0x2a, 0x92, + 0x25, 0x19, 0xa2, 0x64, 0x3b, 0xa9, 0x32, 0x3c, 0x04, 0x46, 0x24, 0xbc, 0x24, 0x00, 0x13, 0x43, + 0x5a, 0x4c, 0xe5, 0x90, 0xca, 0x2d, 0xce, 0xc5, 0xa7, 0xe4, 0xe4, 0x7c, 0x8d, 0xe4, 0x03, 0x24, + 0x55, 0xbe, 0xc5, 0xb7, 0xe4, 0xe4, 0xa4, 0xd6, 0xb7, 0xe4, 0x0b, 0xe4, 0x98, 0x9a, 0x07, 0x5e, + 0x14, 0x29, 0x82, 0x8e, 0x6f, 0xb9, 0x61, 0x1a, 0xdd, 0x3d, 0x98, 0xc6, 0xcc, 0xaf, 0x7f, 0xdd, + 0x00, 
0xfc, 0xbf, 0x6e, 0x0f, 0x09, 0xed, 0x9e, 0xd3, 0x1d, 0xdc, 0xd5, 0xcd, 0x9d, 0xc9, 0xdd, + 0x2e, 0xa1, 0xf8, 0xee, 0x0e, 0x9d, 0x3a, 0xc4, 0xdd, 0x76, 0x46, 0x36, 0xb5, 0xd1, 0x33, 0x9e, + 0xca, 0x36, 0x53, 0xd9, 0x96, 0x2a, 0xf5, 0x17, 0x7c, 0x4b, 0x7d, 0x34, 0x75, 0xa8, 0xbd, 0x33, + 0xb9, 0xbb, 0xf3, 0x84, 0x4c, 0xa5, 0x55, 0x7d, 0x73, 0xce, 0x6d, 0x67, 0x64, 0xdb, 0xe7, 0xf2, + 0xfe, 0x2d, 0xff, 0x3e, 0x9f, 0xcb, 0x9f, 0xd9, 0xc1, 0x23, 0x3c, 0xf4, 0x9c, 0x28, 0x0b, 0x94, + 0x42, 0x8f, 0x57, 0xbf, 0xde, 0xb3, 0x7b, 0x36, 0xbf, 0xdc, 0x61, 0x57, 0x52, 0xba, 0xd5, 0xb3, + 0xed, 0xde, 0x80, 0xec, 0xf0, 0x51, 0x77, 0x7c, 0xbe, 0x43, 0xcd, 0x21, 0x71, 0x29, 0x1e, 0x3a, + 0x42, 0x41, 0xf9, 0x57, 0x01, 0xf2, 0x2a, 0xf9, 0x78, 0x4c, 0x5c, 0x8a, 0xde, 0x80, 0x0c, 0xd1, + 0xfb, 0x76, 0x2d, 0x79, 0x33, 0xf9, 0x72, 0xe9, 0x9e, 0xb2, 0x3d, 0x77, 0xc1, 0xdb, 0x52, 0xbb, + 0xa5, 0xf7, 0xed, 0x76, 0x42, 0xe5, 0x16, 0xe8, 0x01, 0x64, 0xcf, 0x07, 0x63, 0xb7, 0x5f, 0x4b, + 0x71, 0xd3, 0x5b, 0x57, 0x9b, 0x3e, 0x66, 0xaa, 0xed, 0x84, 0x2a, 0x6c, 0xd8, 0xb4, 0xa6, 0x75, + 0x6e, 0xd7, 0xd2, 0x71, 0xa6, 0xdd, 0xb3, 0xce, 0xf9, 0xb4, 0xcc, 0x02, 0xb5, 0x01, 0x5c, 0x42, + 0x35, 0xdb, 0xa1, 0xa6, 0x6d, 0xd5, 0x32, 0xdc, 0xfe, 0xa5, 0xab, 0xed, 0x4f, 0x08, 0x3d, 0xe2, + 0xea, 0xed, 0x84, 0x5a, 0x74, 0xbd, 0x01, 0xf3, 0x64, 0x5a, 0x26, 0xd5, 0xf4, 0x3e, 0x36, 0xad, + 0x5a, 0x36, 0x8e, 0xa7, 0x3d, 0xcb, 0xa4, 0x4d, 0xa6, 0xce, 0x3c, 0x99, 0xde, 0x80, 0x85, 0xe2, + 0xe3, 0x31, 0x19, 0x4d, 0x6b, 0xb9, 0x38, 0xa1, 0x78, 0x87, 0xa9, 0xb2, 0x50, 0x70, 0x1b, 0xb4, + 0x0f, 0xa5, 0x2e, 0xe9, 0x99, 0x96, 0xd6, 0x1d, 0xd8, 0xfa, 0x93, 0x5a, 0x9e, 0xbb, 0x78, 0xf9, + 0x6a, 0x17, 0x0d, 0x66, 0xd0, 0x60, 0xfa, 0xed, 0x84, 0x0a, 0x5d, 0x7f, 0x84, 0x1a, 0x50, 0xd0, + 0xfb, 0x44, 0x7f, 0xa2, 0xd1, 0x8b, 0x5a, 0x81, 0x7b, 0xba, 0x7d, 0xb5, 0xa7, 0x26, 0xd3, 0xee, + 0x5c, 0xb4, 0x13, 0x6a, 0x5e, 0x17, 0x97, 0x2c, 0x2e, 0x06, 0x19, 0x98, 0x13, 0x32, 0x62, 0x5e, + 0x8a, 0x71, 0xe2, 0xf2, 0x48, 0xe8, 0x73, 0x3f, 0x45, 0xc3, 0x1b, 0xa0, 0x16, 0x14, 0x89, 0x65, + 0xc8, 0x85, 0x01, 0x77, 0xf4, 0xe2, 0x92, 0x1d, 0x66, 0x19, 0xde, 0xb2, 0x0a, 0x44, 0x5e, 0xa3, + 0x1f, 0x43, 0x4e, 0xb7, 0x87, 0x43, 0x93, 0xd6, 0x4a, 0xdc, 0xc7, 0x77, 0x96, 0x2c, 0x89, 0xeb, + 0xb6, 0x13, 0xaa, 0xb4, 0x42, 0x1d, 0xa8, 0x0c, 0x4c, 0x97, 0x6a, 0xae, 0x85, 0x1d, 0xb7, 0x6f, + 0x53, 0xb7, 0xb6, 0xc6, 0xfd, 0x7c, 0xf7, 0x6a, 0x3f, 0x07, 0xa6, 0x4b, 0x4f, 0x3c, 0x93, 0x76, + 0x42, 0x2d, 0x0f, 0xc2, 0x02, 0xe6, 0xd5, 0x3e, 0x3f, 0x27, 0x23, 0xdf, 0x6d, 0xad, 0x1c, 0xc7, + 0xeb, 0x11, 0xb3, 0xf1, 0xbc, 0x30, 0xaf, 0x76, 0x58, 0x80, 0x30, 0x5c, 0x1b, 0xd8, 0xd8, 0xf0, + 0x9d, 0x6a, 0x7a, 0x7f, 0x6c, 0x3d, 0xa9, 0x55, 0xb8, 0xeb, 0x9d, 0x25, 0x0f, 0x6c, 0x63, 0xc3, + 0x73, 0xd4, 0x64, 0x66, 0xed, 0x84, 0xba, 0x31, 0x98, 0x15, 0x22, 0x03, 0xae, 0x63, 0xc7, 0x19, + 0x4c, 0x67, 0xe7, 0x58, 0xe7, 0x73, 0x7c, 0xff, 0xea, 0x39, 0x76, 0x99, 0xe5, 0xec, 0x24, 0x08, + 0x5f, 0x92, 0x36, 0xf2, 0x90, 0x9d, 0xe0, 0xc1, 0x98, 0x28, 0x2f, 0x41, 0x29, 0x04, 0x1f, 0xa8, + 0x06, 0xf9, 0x21, 0x71, 0x5d, 0xdc, 0x23, 0x1c, 0x73, 0x8a, 0xaa, 0x37, 0x54, 0x2a, 0xb0, 0x16, + 0x06, 0x0b, 0x65, 0xe8, 0x1b, 0x32, 0x00, 0x60, 0x86, 0x13, 0x32, 0x72, 0xd9, 0xa9, 0x97, 0x86, + 0x72, 0x88, 0x6e, 0x41, 0x99, 0x6f, 0x31, 0xcd, 0xbb, 0xcf, 0x10, 0x29, 0xa3, 0xae, 0x71, 0xe1, + 0x99, 0x54, 0xda, 0x82, 0x92, 0x73, 0xcf, 0xf1, 0x55, 0xd2, 0x5c, 0x05, 0x9c, 0x7b, 0x8e, 0x54, + 0x50, 0x7e, 0x08, 0xd5, 0x59, 0xbc, 0x40, 0x55, 0x48, 0x3f, 0x21, 0x53, 0x39, 0x1f, 0xbb, 0x44, + 0xd7, 0xe5, 0xb2, 0xf8, 0x1c, 
0x45, 0x55, 0xae, 0xf1, 0xaf, 0x29, 0xdf, 0xd8, 0x87, 0x08, 0x86, + 0x71, 0x0c, 0x79, 0x25, 0xb4, 0xd6, 0xb7, 0x05, 0x2c, 0x6f, 0x7b, 0xb0, 0xbc, 0xdd, 0xf1, 0x60, + 0xb9, 0x51, 0xf8, 0xe2, 0xab, 0xad, 0xc4, 0x67, 0x7f, 0xdf, 0x4a, 0xaa, 0xdc, 0x02, 0x3d, 0xc7, + 0x4e, 0x31, 0x36, 0x2d, 0xcd, 0x34, 0xe4, 0x3c, 0x79, 0x3e, 0xde, 0x33, 0xd0, 0x3b, 0x50, 0xd5, + 0x6d, 0xcb, 0x25, 0x96, 0x3b, 0x76, 0x35, 0x91, 0x30, 0x24, 0x88, 0x2e, 0x3a, 0x59, 0x4d, 0x4f, + 0xfd, 0x98, 0x6b, 0xab, 0xeb, 0x7a, 0x54, 0x80, 0x0e, 0x00, 0x26, 0x78, 0x60, 0x1a, 0x98, 0xda, + 0x23, 0xb7, 0x96, 0xb9, 0x99, 0xbe, 0xc2, 0xd9, 0x99, 0xa7, 0x78, 0xea, 0x18, 0x98, 0x92, 0x46, + 0x86, 0x3d, 0xb9, 0x1a, 0xb2, 0x47, 0x2f, 0xc2, 0x3a, 0x76, 0x1c, 0xcd, 0xa5, 0x98, 0x12, 0xad, + 0x3b, 0xa5, 0xc4, 0xe5, 0xd0, 0xba, 0xa6, 0x96, 0xb1, 0xe3, 0x9c, 0x30, 0x69, 0x83, 0x09, 0xd1, + 0x6d, 0xa8, 0x30, 0x00, 0x35, 0xf1, 0x40, 0xeb, 0x13, 0xb3, 0xd7, 0xa7, 0x1c, 0x3c, 0xd3, 0x6a, + 0x59, 0x4a, 0xdb, 0x5c, 0xa8, 0x18, 0xfe, 0xa6, 0xe0, 0xb0, 0x89, 0x10, 0x64, 0x0c, 0x4c, 0x31, + 0x0f, 0xea, 0x9a, 0xca, 0xaf, 0x99, 0xcc, 0xc1, 0xb4, 0x2f, 0x43, 0xc5, 0xaf, 0xd1, 0x0d, 0xc8, + 0x49, 0xb7, 0x69, 0xee, 0x56, 0x8e, 0xd8, 0xfb, 0x73, 0x46, 0xf6, 0x84, 0xf0, 0xcc, 0x51, 0x50, + 0xc5, 0x40, 0xf9, 0x6d, 0x0a, 0x36, 0x2e, 0x41, 0x2b, 0xf3, 0xdb, 0xc7, 0x6e, 0xdf, 0x9b, 0x8b, + 0x5d, 0xa3, 0x87, 0xcc, 0x2f, 0x36, 0xc8, 0x48, 0xa6, 0xbd, 0xcd, 0x20, 0x50, 0x22, 0x33, 0x7b, + 0x91, 0x6a, 0x73, 0x2d, 0x19, 0x20, 0x69, 0x83, 0x4e, 0xa1, 0x3a, 0xc0, 0x2e, 0xd5, 0x04, 0x30, + 0x69, 0xa1, 0x14, 0xb8, 0x08, 0xa6, 0x0f, 0xb0, 0x07, 0x68, 0xec, 0x10, 0x48, 0x77, 0x95, 0x41, + 0x44, 0x8a, 0xde, 0x83, 0xeb, 0xdd, 0xe9, 0xcf, 0xb1, 0x45, 0x4d, 0x8b, 0x68, 0x97, 0xde, 0xe5, + 0xd6, 0x02, 0xd7, 0xad, 0x89, 0x69, 0x10, 0x4b, 0xf7, 0x5e, 0xe2, 0x35, 0xdf, 0x85, 0xff, 0x92, + 0x5d, 0xe5, 0x3d, 0xa8, 0x44, 0x13, 0x05, 0xaa, 0x40, 0x8a, 0x5e, 0xc8, 0x90, 0xa4, 0xe8, 0x05, + 0x7a, 0x1d, 0x32, 0x6c, 0xe1, 0x3c, 0x1c, 0x95, 0x85, 0x99, 0x5c, 0x5a, 0x77, 0xa6, 0x0e, 0x51, + 0xb9, 0xbe, 0xa2, 0xf8, 0x27, 0xc6, 0x4f, 0x1e, 0xb3, 0xbe, 0x95, 0x57, 0x60, 0x7d, 0x26, 0x2f, + 0x84, 0xde, 0x6b, 0x32, 0xfc, 0x5e, 0x95, 0x75, 0x28, 0x47, 0xe0, 0x5f, 0xb9, 0x01, 0xd7, 0xe7, + 0xe1, 0xb8, 0x62, 0xf9, 0xf2, 0x08, 0x12, 0xa3, 0x07, 0x50, 0xf0, 0x81, 0x5c, 0x9c, 0xd8, 0x45, + 0x71, 0xf3, 0x4c, 0x54, 0xdf, 0x80, 0x1d, 0x58, 0xb6, 0xe9, 0xf9, 0x6e, 0x49, 0xf1, 0xc7, 0xcf, + 0x63, 0xc7, 0x69, 0x63, 0xb7, 0xaf, 0x7c, 0x08, 0xb5, 0x45, 0xf0, 0x3c, 0xb3, 0x98, 0x8c, 0xbf, + 0x49, 0x6f, 0x40, 0xee, 0xdc, 0x1e, 0x0d, 0x31, 0xe5, 0xce, 0xca, 0xaa, 0x1c, 0xb1, 0xcd, 0x2b, + 0xa0, 0x3a, 0xcd, 0xc5, 0x62, 0xa0, 0x68, 0xf0, 0xdc, 0x42, 0x70, 0x66, 0x26, 0xa6, 0x65, 0x10, + 0x11, 0xd5, 0xb2, 0x2a, 0x06, 0x81, 0x23, 0xf1, 0xb0, 0x62, 0xc0, 0xa6, 0x75, 0x89, 0xc5, 0xf6, + 0x76, 0x9a, 0x9f, 0x24, 0x39, 0x52, 0xfe, 0x5c, 0x84, 0x82, 0x4a, 0x5c, 0x87, 0xe1, 0x06, 0x6a, + 0x43, 0x91, 0x5c, 0xe8, 0x44, 0xd0, 0xaf, 0xe4, 0x12, 0xb2, 0x22, 0x6c, 0x5a, 0x9e, 0x3e, 0x63, + 0x07, 0xbe, 0x31, 0xba, 0x2f, 0xa9, 0xe7, 0x32, 0xfe, 0x28, 0x9d, 0x84, 0xb9, 0xe7, 0x43, 0x8f, + 0x7b, 0xa6, 0x97, 0x10, 0x02, 0x61, 0x3b, 0x43, 0x3e, 0xef, 0x4b, 0xf2, 0x99, 0x89, 0x35, 0x71, + 0x84, 0x7d, 0xee, 0x45, 0xd8, 0x67, 0x36, 0xd6, 0xf2, 0x17, 0xd0, 0xcf, 0xbd, 0x08, 0xfd, 0xcc, + 0xc5, 0x72, 0xb5, 0x80, 0x7f, 0x3e, 0xf4, 0xf8, 0x67, 0x3e, 0x56, 0x38, 0x66, 0x08, 0xe8, 0x41, + 0x94, 0x80, 0x0a, 0xda, 0xf8, 0xca, 0x12, 0x1f, 0x0b, 0x19, 0x68, 0x33, 0xc4, 0x40, 0x8b, 0x4b, + 0x28, 0x9f, 0x70, 0x35, 0x87, 0x82, 0xee, 0x45, 0x28, 
0x28, 0xc4, 0x8a, 0xcd, 0x02, 0x0e, 0xfa, + 0x38, 0xcc, 0x41, 0x4b, 0x4b, 0xc8, 0xac, 0xdc, 0x6a, 0xf3, 0x48, 0xe8, 0x9b, 0x3e, 0x09, 0x5d, + 0x5b, 0xc2, 0xab, 0xe5, 0xaa, 0x66, 0x59, 0xe8, 0xe9, 0x25, 0x16, 0x2a, 0xf8, 0xe2, 0xab, 0x4b, + 0x1c, 0x2d, 0xa1, 0xa1, 0xa7, 0x97, 0x68, 0x68, 0x25, 0x96, 0xdb, 0x25, 0x3c, 0xb4, 0x3b, 0x9f, + 0x87, 0x2e, 0xe3, 0x88, 0xf2, 0x91, 0xe3, 0x11, 0x51, 0xb2, 0x80, 0x88, 0x56, 0xf9, 0x24, 0x77, + 0x97, 0x4c, 0xb2, 0x3a, 0x13, 0x7d, 0x85, 0x25, 0xf9, 0x19, 0x48, 0x62, 0x50, 0x48, 0x46, 0x23, + 0x7b, 0x24, 0x49, 0x9e, 0x18, 0x28, 0x2f, 0x33, 0xda, 0x11, 0x00, 0xcf, 0x15, 0xac, 0x95, 0x27, + 0x9e, 0x10, 0xcc, 0x28, 0x7f, 0x4c, 0x06, 0xb6, 0x3c, 0x3b, 0x87, 0x29, 0x4b, 0x51, 0x52, 0x96, + 0x10, 0x99, 0x4d, 0x45, 0xc9, 0xec, 0x16, 0x94, 0x58, 0x2a, 0x99, 0xe1, 0xa9, 0xd8, 0xf1, 0x78, + 0x2a, 0xba, 0x03, 0x1b, 0x9c, 0x43, 0x08, 0xca, 0x2b, 0xf3, 0x47, 0x86, 0x27, 0xc3, 0x75, 0x76, + 0x43, 0x6c, 0x5d, 0x91, 0x48, 0xbe, 0x07, 0xd7, 0x42, 0xba, 0x7e, 0x8a, 0x12, 0x84, 0xac, 0xea, + 0x6b, 0xef, 0xca, 0x5c, 0xf5, 0x76, 0x10, 0xa0, 0x80, 0x03, 0x23, 0xc8, 0xe8, 0xb6, 0x41, 0x64, + 0x02, 0xe1, 0xd7, 0x8c, 0x17, 0x0f, 0xec, 0x9e, 0x4c, 0x13, 0xec, 0x92, 0x69, 0xf9, 0x98, 0x5a, + 0x14, 0x60, 0xa9, 0xfc, 0x25, 0x19, 0xf8, 0x0b, 0x68, 0xf1, 0x3c, 0x06, 0x9b, 0xfc, 0x36, 0x19, + 0x6c, 0xea, 0xbf, 0x64, 0xb0, 0xe1, 0x64, 0x9e, 0x8e, 0x26, 0xf3, 0x7f, 0x27, 0x83, 0xb7, 0xed, + 0xf3, 0xd1, 0x6f, 0x16, 0x9d, 0x20, 0x33, 0x67, 0xf9, 0xbb, 0x93, 0x99, 0x59, 0x56, 0x1c, 0x39, + 0x3e, 0x6f, 0xb4, 0xe2, 0xc8, 0x8b, 0x5c, 0xcd, 0x07, 0xe8, 0x3e, 0x14, 0x79, 0x4b, 0x49, 0xb3, + 0x1d, 0x57, 0x42, 0xf6, 0xf3, 0xc1, 0x8a, 0x45, 0xdf, 0x69, 0x7b, 0x72, 0x77, 0xfb, 0x98, 0x29, + 0x1d, 0x39, 0xae, 0x5a, 0x70, 0xe4, 0x55, 0x88, 0x75, 0x14, 0x23, 0xd4, 0xf8, 0x79, 0x28, 0xb2, + 0xc7, 0x77, 0x1d, 0xac, 0x13, 0x8e, 0xb9, 0x45, 0x35, 0x10, 0x28, 0x06, 0xa0, 0xcb, 0xd8, 0x8f, + 0x0e, 0x21, 0x47, 0x26, 0xc4, 0xa2, 0xec, 0x15, 0xa6, 0xa3, 0xcf, 0x30, 0xc3, 0x35, 0x89, 0x45, + 0x1b, 0x35, 0x16, 0xeb, 0x7f, 0x7e, 0xb5, 0x55, 0x15, 0x36, 0xaf, 0xda, 0x43, 0x93, 0x92, 0xa1, + 0x43, 0xa7, 0xaa, 0xf4, 0xa2, 0x3c, 0x4d, 0x31, 0xca, 0x17, 0xc9, 0x0b, 0x73, 0x43, 0xec, 0x9d, + 0xa9, 0x54, 0xa8, 0x0c, 0x88, 0x17, 0xf6, 0x4d, 0x80, 0x1e, 0x76, 0xb5, 0x4f, 0xb0, 0x45, 0x89, + 0x21, 0x63, 0x1f, 0x92, 0xa0, 0x3a, 0x14, 0xd8, 0x68, 0xec, 0x12, 0x43, 0x56, 0x24, 0xfe, 0x38, + 0xb4, 0xda, 0xfc, 0xb7, 0xb1, 0xda, 0x68, 0xc4, 0x0b, 0x33, 0x11, 0x0f, 0xd1, 0xb1, 0x62, 0x98, + 0x8e, 0xb1, 0x27, 0x74, 0x46, 0xa6, 0x3d, 0x32, 0xe9, 0x94, 0xbf, 0xa6, 0xb4, 0xea, 0x8f, 0x59, + 0x29, 0x3c, 0x24, 0x43, 0xc7, 0xb6, 0x07, 0x9a, 0x40, 0xb5, 0x12, 0x37, 0x5d, 0x93, 0xc2, 0x16, + 0x07, 0xb7, 0xdf, 0xa4, 0x82, 0x73, 0x19, 0x90, 0xef, 0xff, 0xd1, 0x30, 0x2b, 0x9f, 0xf3, 0xda, + 0x3d, 0x9a, 0xf9, 0xd1, 0xfb, 0xb0, 0xe1, 0x23, 0x82, 0x36, 0xe6, 0x48, 0xe1, 0x6d, 0xf1, 0xd5, + 0x80, 0xa5, 0x3a, 0x89, 0x8a, 0x5d, 0xf4, 0x01, 0x3c, 0x3b, 0x83, 0x7f, 0xfe, 0x04, 0xa9, 0x95, + 0x60, 0xf0, 0x99, 0x28, 0x0c, 0x7a, 0xfe, 0x83, 0xe8, 0xa5, 0xbf, 0x95, 0x23, 0xb9, 0xc7, 0x4a, + 0xc0, 0x30, 0xa7, 0x99, 0xbb, 0x2b, 0x6e, 0x41, 0x79, 0x44, 0x28, 0x36, 0x2d, 0x2d, 0x52, 0x76, + 0xaf, 0x09, 0xa1, 0x2c, 0xe6, 0xcf, 0xe0, 0x99, 0xb9, 0xac, 0x06, 0xfd, 0x08, 0x8a, 0x01, 0x2d, + 0x4a, 0x5e, 0x59, 0xb5, 0xfa, 0xd5, 0x57, 0x60, 0xa1, 0xfc, 0x29, 0x19, 0x38, 0x8e, 0x56, 0x75, + 0xfb, 0x90, 0x1b, 0x11, 0x77, 0x3c, 0x10, 0x15, 0x56, 0xe5, 0xde, 0x6b, 0xab, 0xb0, 0x22, 0x26, + 0x1d, 0x0f, 0xa8, 0x2a, 0x5d, 0x28, 0x1f, 0x40, 0x4e, 0x48, 0x50, 0x09, 0xf2, 
0xa7, 0x87, 0xfb, + 0x87, 0x47, 0xef, 0x1e, 0x56, 0x13, 0x08, 0x20, 0xb7, 0xdb, 0x6c, 0xb6, 0x8e, 0x3b, 0xd5, 0x24, + 0x2a, 0x42, 0x76, 0xb7, 0x71, 0xa4, 0x76, 0xaa, 0x29, 0x26, 0x56, 0x5b, 0x3f, 0x69, 0x35, 0x3b, + 0xd5, 0x34, 0xda, 0x80, 0xb2, 0xb8, 0xd6, 0x1e, 0x1f, 0xa9, 0x6f, 0xef, 0x76, 0xaa, 0x99, 0x90, + 0xe8, 0xa4, 0x75, 0xf8, 0xa8, 0xa5, 0x56, 0xb3, 0xca, 0x5d, 0x56, 0xc8, 0x2d, 0x60, 0x50, 0x41, + 0xc9, 0x96, 0x0c, 0x95, 0x6c, 0xca, 0xef, 0x53, 0x50, 0x5f, 0x4c, 0x88, 0xd0, 0xf1, 0xcc, 0xf2, + 0xdf, 0x58, 0x99, 0x53, 0xcd, 0xc4, 0x00, 0xdd, 0x86, 0xca, 0x88, 0x9c, 0x13, 0xaa, 0xf7, 0x05, + 0x59, 0x13, 0xe9, 0xb6, 0xac, 0x96, 0xa5, 0x94, 0x1b, 0xb9, 0x42, 0xed, 0x23, 0xa2, 0x53, 0x4d, + 0x80, 0x96, 0xd8, 0x8c, 0x45, 0xa6, 0xc6, 0xa4, 0x27, 0x42, 0xa8, 0x7c, 0xb8, 0x52, 0x44, 0x8b, + 0x90, 0x55, 0x5b, 0x1d, 0xf5, 0xfd, 0x6a, 0x1a, 0x21, 0xa8, 0xf0, 0x4b, 0xed, 0xe4, 0x70, 0xf7, + 0xf8, 0xa4, 0x7d, 0xc4, 0x22, 0x7a, 0x0d, 0xd6, 0xbd, 0x88, 0x7a, 0xc2, 0xac, 0xf2, 0xbb, 0x14, + 0xac, 0xcf, 0x1c, 0x1c, 0xf4, 0x06, 0x64, 0x45, 0x39, 0x70, 0xf5, 0x47, 0x0f, 0x8e, 0x04, 0xf2, + 0xac, 0x09, 0x03, 0xd4, 0x80, 0x02, 0x91, 0x5d, 0x93, 0xcb, 0x87, 0x35, 0xda, 0xff, 0xf1, 0xba, + 0x2b, 0xd2, 0x81, 0x6f, 0x87, 0x5a, 0x50, 0xf4, 0x31, 0x41, 0xd6, 0xaf, 0x2f, 0x2d, 0x72, 0xe2, + 0x63, 0x8a, 0xf4, 0x12, 0x58, 0xa2, 0x37, 0x03, 0x06, 0x99, 0x99, 0x2d, 0x48, 0x66, 0x9c, 0x08, + 0x35, 0xe9, 0xc2, 0xb3, 0x52, 0x9a, 0x50, 0x0a, 0xad, 0x10, 0xfd, 0x1f, 0x14, 0x87, 0xf8, 0x42, + 0x76, 0xec, 0x44, 0x6f, 0xa5, 0x30, 0xc4, 0x17, 0xa2, 0x59, 0xf7, 0x2c, 0xe4, 0xd9, 0xcd, 0x1e, + 0x16, 0x18, 0x95, 0x56, 0x73, 0x43, 0x7c, 0xf1, 0x16, 0x76, 0x15, 0x1d, 0x2a, 0xd1, 0x0e, 0x15, + 0xdb, 0xa7, 0x23, 0x7b, 0x6c, 0x19, 0xdc, 0x47, 0x56, 0x15, 0x03, 0xf4, 0x00, 0xb2, 0x13, 0x5b, + 0x40, 0xdc, 0x55, 0x87, 0xfb, 0xcc, 0xa6, 0x24, 0xd4, 0xe7, 0x12, 0x36, 0xca, 0x2f, 0x93, 0x90, + 0xe5, 0x68, 0xc5, 0x90, 0x87, 0x37, 0x9b, 0x24, 0x95, 0x66, 0xd7, 0x48, 0x07, 0xc0, 0x94, 0x8e, + 0xcc, 0xee, 0x38, 0xf0, 0x7f, 0xfb, 0x2a, 0xcc, 0xdb, 0xf5, 0xb4, 0x1b, 0xcf, 0x4b, 0xf0, 0xbb, + 0x1e, 0x38, 0x08, 0x01, 0x60, 0xc8, 0xad, 0x72, 0x08, 0x95, 0xa8, 0x6d, 0xb8, 0x35, 0xbc, 0x36, + 0xa7, 0x35, 0xec, 0x13, 0x35, 0x9f, 0xe6, 0xa5, 0x45, 0xc3, 0x91, 0x0f, 0x94, 0xcf, 0x92, 0x50, + 0xe8, 0x5c, 0xc8, 0xbd, 0xbf, 0xa0, 0xa7, 0x15, 0x98, 0xa6, 0xc2, 0xbd, 0x1b, 0xd1, 0x24, 0x4b, + 0xfb, 0x0d, 0xb8, 0xc7, 0xfe, 0x19, 0xcf, 0xac, 0x56, 0x27, 0x7b, 0xbd, 0x49, 0x89, 0x6e, 0x0f, + 0xa0, 0xe8, 0x6f, 0x37, 0x56, 0x9f, 0x60, 0xc3, 0x18, 0x11, 0xd7, 0x95, 0x2b, 0xf4, 0x86, 0xbc, + 0x81, 0x6a, 0x7f, 0x22, 0x7b, 0x44, 0x69, 0x55, 0x0c, 0x14, 0x02, 0xeb, 0x33, 0xf9, 0x0f, 0x3d, + 0x84, 0xbc, 0x33, 0xee, 0x6a, 0x5e, 0x90, 0x4a, 0xf7, 0x5e, 0x98, 0xcb, 0x4f, 0xc7, 0xdd, 0x81, + 0xa9, 0xef, 0x93, 0xa9, 0xf7, 0x34, 0xce, 0xb8, 0xbb, 0x2f, 0x82, 0x29, 0xa6, 0x49, 0x85, 0xa7, + 0xf9, 0x05, 0x14, 0xbc, 0x2d, 0x82, 0x1e, 0x85, 0xcf, 0x91, 0x98, 0xe1, 0xe6, 0xb2, 0xd4, 0x2c, + 0x27, 0x09, 0x1d, 0xa3, 0x3b, 0xb0, 0xe1, 0x9a, 0x3d, 0x8b, 0x18, 0x5a, 0x50, 0x28, 0xf1, 0x39, + 0x0b, 0xea, 0xba, 0xb8, 0x71, 0xe0, 0x55, 0x49, 0xca, 0xaf, 0x53, 0x50, 0xf0, 0x8e, 0x35, 0xfa, + 0x41, 0x68, 0x2b, 0x56, 0x16, 0x36, 0x91, 0x3c, 0xf5, 0xa0, 0xf1, 0x19, 0x7d, 0xee, 0xd4, 0x37, + 0x7d, 0xee, 0x45, 0xfd, 0x6d, 0xef, 0xa3, 0x43, 0x66, 0xe5, 0x8f, 0x0e, 0xaf, 0x02, 0xa2, 0x36, + 0xc5, 0x03, 0x6d, 0x62, 0x53, 0xd3, 0xea, 0x69, 0x22, 0xfc, 0x82, 0xb9, 0x55, 0xf9, 0x9d, 0x33, + 0x7e, 0xe3, 0x98, 0xbf, 0x89, 0x5f, 0x25, 0xa1, 0xe0, 0x67, 0xd9, 0x55, 0xfb, 0x98, 0x37, 0x20, + 0x27, 
0x93, 0x87, 0x68, 0x64, 0xca, 0x91, 0xdf, 0x70, 0xcf, 0x84, 0x1a, 0xee, 0x75, 0x28, 0x0c, + 0x09, 0xc5, 0x9c, 0x70, 0x88, 0xba, 0xd5, 0x1f, 0xdf, 0xb9, 0x0f, 0xa5, 0x50, 0x63, 0x99, 0x1d, + 0xc9, 0xc3, 0xd6, 0xbb, 0xd5, 0x44, 0x3d, 0xff, 0xe9, 0xe7, 0x37, 0xd3, 0x87, 0xe4, 0x13, 0xb6, + 0x8d, 0xd5, 0x56, 0xb3, 0xdd, 0x6a, 0xee, 0x57, 0x93, 0xf5, 0xd2, 0xa7, 0x9f, 0xdf, 0xcc, 0xab, + 0x84, 0xf7, 0x98, 0xee, 0xb4, 0x61, 0x2d, 0xfc, 0x6e, 0xa2, 0xf9, 0x07, 0x41, 0xe5, 0xd1, 0xe9, + 0xf1, 0xc1, 0x5e, 0x73, 0xb7, 0xd3, 0xd2, 0xce, 0x8e, 0x3a, 0xad, 0x6a, 0x12, 0x3d, 0x0b, 0xd7, + 0x0e, 0xf6, 0xde, 0x6a, 0x77, 0xb4, 0xe6, 0xc1, 0x5e, 0xeb, 0xb0, 0xa3, 0xed, 0x76, 0x3a, 0xbb, + 0xcd, 0xfd, 0x6a, 0xea, 0xde, 0x1f, 0x4a, 0xb0, 0xbe, 0xdb, 0x68, 0xee, 0xb1, 0xdc, 0x69, 0xea, + 0x98, 0xd7, 0xcc, 0x47, 0x90, 0xe1, 0x6d, 0x83, 0x18, 0xdf, 0xd3, 0xeb, 0x71, 0x1a, 0x9f, 0x48, + 0x85, 0x2c, 0xef, 0x2e, 0xa0, 0x38, 0x9f, 0xd9, 0xeb, 0xb1, 0xfa, 0xa1, 0xec, 0x21, 0xf9, 0x41, + 0x8a, 0xf1, 0xf5, 0xbd, 0x1e, 0xa7, 0x49, 0x8a, 0x3e, 0x80, 0x62, 0xd0, 0x36, 0x88, 0xfb, 0x4d, + 0xbe, 0x1e, 0xbb, 0x7d, 0xca, 0xfc, 0x07, 0xe5, 0x4a, 0xdc, 0x2f, 0xd2, 0xf5, 0xd8, 0x78, 0x88, + 0xde, 0x83, 0xbc, 0x57, 0x73, 0xc6, 0xfb, 0x6a, 0x5e, 0x8f, 0xd9, 0xda, 0x64, 0xaf, 0x4f, 0xb4, + 0x0b, 0xe2, 0xfc, 0x1a, 0x50, 0x8f, 0xd5, 0xbf, 0x45, 0xa7, 0x90, 0x93, 0x7c, 0x3c, 0xd6, 0xf7, + 0xf0, 0x7a, 0xbc, 0x86, 0x25, 0x0b, 0x72, 0xd0, 0xab, 0x89, 0xfb, 0x3b, 0x44, 0x3d, 0x76, 0xe3, + 0x1a, 0x61, 0x80, 0x50, 0xff, 0x20, 0xf6, 0x7f, 0x0e, 0xf5, 0xf8, 0x0d, 0x69, 0xf4, 0x33, 0x28, + 0xf8, 0x85, 0x5c, 0xcc, 0xff, 0x0d, 0xea, 0x71, 0x7b, 0xc2, 0xe8, 0x23, 0x28, 0x47, 0x6b, 0x97, + 0x55, 0xfe, 0x22, 0xa8, 0xaf, 0xd4, 0xec, 0x65, 0x73, 0x45, 0xcb, 0x99, 0x55, 0xfe, 0x2d, 0xa8, + 0xaf, 0xd4, 0x01, 0x46, 0x13, 0xd8, 0xb8, 0x5c, 0x74, 0xac, 0xfa, 0xc3, 0x41, 0x7d, 0xe5, 0xce, + 0x30, 0x9a, 0x02, 0x9a, 0x53, 0xb8, 0xac, 0xfc, 0x17, 0x42, 0x7d, 0xf5, 0x76, 0x71, 0xe3, 0xf8, + 0x8b, 0xa7, 0x9b, 0xc9, 0x2f, 0x9f, 0x6e, 0x26, 0xff, 0xf1, 0x74, 0x33, 0xf9, 0xd9, 0xd7, 0x9b, + 0x89, 0x2f, 0xbf, 0xde, 0x4c, 0xfc, 0xed, 0xeb, 0xcd, 0xc4, 0x4f, 0x5f, 0xef, 0x99, 0xb4, 0x3f, + 0xee, 0x32, 0x97, 0x3b, 0xc1, 0xcf, 0x5c, 0xfe, 0xef, 0x62, 0x8e, 0xb9, 0x33, 0xf7, 0xdf, 0xb1, + 0x6e, 0x8e, 0xe7, 0xd9, 0xd7, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x56, 0x6f, 0xb3, 0x98, 0x5b, + 0x26, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ABCIApplicationClient is the client API for ABCIApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ABCIApplicationClient interface { + // Echo returns back the same message it is sent. + Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) + // Flush flushes the write buffer. + Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) + // Info returns information about the application state. + Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) + // SetOption sets a parameter in the application. 
+ SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) + // DeliverTx applies a transaction. + DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) + // CheckTx validates a transaction. + CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) + // Query queries the application state. + Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) + // Commit commits a block of transactions. + Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) + // InitChain initializes the blockchain. + InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) + // BeginBlock signals the beginning of a block. + BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) + // EndBlock signals the end of a block, returns changes to the validator set. + EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) +} + +type aBCIApplicationClient struct { + cc grpc1.ClientConn +} + +func NewABCIApplicationClient(cc grpc1.ClientConn) ABCIApplicationClient { + return &aBCIApplicationClient{cc} +} + +func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { + out := new(ResponseEcho) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/Echo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { + out := new(ResponseFlush) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/Flush", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { + out := new(ResponseInfo) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) { + out := new(ResponseSetOption) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/SetOption", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { + out := new(ResponseDeliverTx) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/DeliverTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { + out := new(ResponseCheckTx) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/CheckTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { + out := new(ResponseQuery) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { + out := new(ResponseCommit) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { + out := new(ResponseInitChain) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/InitChain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { + out := new(ResponseBeginBlock) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/BeginBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { + out := new(ResponseEndBlock) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/EndBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { + out := new(ResponseListSnapshots) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) { + out := new(ResponseOfferSnapshot) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) { + out := new(ResponseLoadSnapshotChunk) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/LoadSnapshotChunk", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) { + out := new(ResponseApplySnapshotChunk) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta1.ABCIApplication/ApplySnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ABCIApplicationServer is the server API for ABCIApplication service. +type ABCIApplicationServer interface { + // Echo returns back the same message it is sent. + Echo(context.Context, *RequestEcho) (*ResponseEcho, error) + // Flush flushes the write buffer. + Flush(context.Context, *RequestFlush) (*ResponseFlush, error) + // Info returns information about the application state. + Info(context.Context, *RequestInfo) (*ResponseInfo, error) + // SetOption sets a parameter in the application. + SetOption(context.Context, *RequestSetOption) (*ResponseSetOption, error) + // DeliverTx applies a transaction. + DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) + // CheckTx validates a transaction. + CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) + // Query queries the application state. + Query(context.Context, *RequestQuery) (*ResponseQuery, error) + // Commit commits a block of transactions. + Commit(context.Context, *RequestCommit) (*ResponseCommit, error) + // InitChain initializes the blockchain. + InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) + // BeginBlock signals the beginning of a block. + BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) + // EndBlock signals the end of a block, returns changes to the validator set. + EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) +} + +// UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. 
+type UnimplementedABCIApplicationServer struct { +} + +func (*UnimplementedABCIApplicationServer) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { + return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +} +func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedABCIApplicationServer) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetOption not implemented") +} +func (*UnimplementedABCIApplicationServer) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeliverTx not implemented") +} +func (*UnimplementedABCIApplicationServer) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") +} +func (*UnimplementedABCIApplicationServer) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedABCIApplicationServer) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedABCIApplicationServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") +} +func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method BeginBlock not implemented") +} +func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") +} +func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIApplicationServer) OfferSnapshot(ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +} + +func RegisterABCIApplicationServer(s grpc1.Server, srv ABCIApplicationServer) { + s.RegisterService(&_ABCIApplication_serviceDesc, srv) +} + +func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEcho) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFlush) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_SetOption_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestSetOption) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).SetOption(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/SetOption", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).SetOption(ctx, req.(*RequestSetOption)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestDeliverTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).DeliverTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/DeliverTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCheckTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCommit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInitChain) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBeginBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).BeginBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/BeginBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEndBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).EndBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/EndBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestListSnapshots) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, req.(*RequestListSnapshots)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestOfferSnapshot) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/OfferSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, req.(*RequestOfferSnapshot)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestLoadSnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/LoadSnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, req.(*RequestLoadSnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestApplySnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta1.ABCIApplication/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, req.(*RequestApplySnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +var ABCIApplication_serviceDesc = _ABCIApplication_serviceDesc +var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.abci.v1beta1.ABCIApplication", + HandlerType: (*ABCIApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCIApplication_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCIApplication_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCIApplication_Info_Handler, + }, + { + MethodName: "SetOption", + Handler: _ABCIApplication_SetOption_Handler, + }, + { + MethodName: "DeliverTx", + Handler: _ABCIApplication_DeliverTx_Handler, + }, + { + MethodName: "CheckTx", + 
Handler: _ABCIApplication_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCIApplication_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCIApplication_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCIApplication_InitChain_Handler, + }, + { + MethodName: "BeginBlock", + Handler: _ABCIApplication_BeginBlock_Handler, + }, + { + MethodName: "EndBlock", + Handler: _ABCIApplication_EndBlock_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _ABCIApplication_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + Handler: _ABCIApplication_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCIApplication_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCIApplication_ApplySnapshotChunk_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/abci/v1beta1/types.proto", +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Request_SetOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SetOption != nil { + { + size, err := m.SetOption.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
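// Editor's note — illustrative sketch, not part of the generated file. The
// MarshalToSizedBuffer methods in this file fill dAtA back to front: i starts
// at len(dAtA), the already-sized payload is copied at the tail, its length is
// prepended as a varint via encodeVarintTypes, and finally the one-byte key
// (fieldNumber<<3 | wireType) is prepended — e.g. the 0x2a written above is
// field 5, wire type 2 (length-delimited). A self-contained demonstration of
// that framing, using only the standard library; sov and prependVarint are
// stand-ins for the generated sovTypes/encodeVarintTypes, not exports of this
// package:

package main

import "fmt"

// sov returns the varint-encoded size of v (same role as the generated sovTypes).
func sov(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// prependVarint writes v so that it ends just before index i and returns the
// new start index, mirroring encodeVarintTypes: step back by the encoded size,
// then emit the bytes forward in standard least-significant-group-first order.
func prependVarint(buf []byte, i int, v uint64) int {
	i -= sov(v)
	for j := i; ; j++ {
		if v < 1<<7 {
			buf[j] = byte(v)
			break
		}
		buf[j] = byte(v&0x7f | 0x80)
		v >>= 7
	}
	return i
}

func main() {
	payload := []byte("hello") // stands in for an already-marshaled sub-message
	buf := make([]byte, 16)
	i := len(buf)

	i -= len(payload)
	copy(buf[i:], payload) // payload at the tail
	i = prependVarint(buf, i, uint64(len(payload))) // then its length
	i--
	buf[i] = 0x2a // then the key: field 5, wire type 2 (length-delimited)

	fmt.Printf("%x\n", buf[i:]) // 2a0568656c6c6f
}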
+func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *RequestEcho) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestFlush) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestFlush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.P2PVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) + i-- + dAtA[i] = 0x18 + } + if m.BlockVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) + i-- + dAtA[i] = 0x10 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], 
m.Version) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestSetOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestSetOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestSetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppStateBytes) > 0 { + i -= len(m.AppStateBytes) + copy(dAtA[i:], m.AppStateBytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) + i-- + dAtA[i] = 0x2a + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + n17, err17 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err17 != nil { + return 0, err17 + } + i -= n17 + i = encodeVarintTypes(dAtA, i, uint64(n17)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RequestQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestQuery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Data) > 0 { + i -= 
len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RequestCommit) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x12 + } + if m.Snapshot != nil { + { + size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Chunk != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunk)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x1a + } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + 
dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exception != nil { + { + size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Response_SetOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SetOption != nil { + { + size, err := m.SetOption.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *ResponseException) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseException) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEcho) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LastBlockAppHash) > 0 { + i -= len(m.LastBlockAppHash) + copy(dAtA[i:], m.LastBlockAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) + i-- + dAtA[i] = 0x2a + } + if m.LastBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + i-- + dAtA[i] = 0x20 + } + if m.AppVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) + i-- + dAtA[i] = 0x18 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseSetOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseSetOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseSetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x1a + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codespace) > 0 { + i -= 
len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x52 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x48 + } + if m.ProofOps != nil { + { + size, err := m.ProofOps.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x3a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x32 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MempoolError) > 0 { + i -= len(m.MempoolError) + copy(dAtA[i:], m.MempoolError) + i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) + i-- + dAtA[i] = 0x5a + } + if m.Priority != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x50 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x4a + } + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + 
i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Snapshots) > 0 { + for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = 
i + var l int + _ = l + if len(m.RejectSenders) > 0 { + for iNdEx := len(m.RejectSenders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RejectSenders[iNdEx]) + copy(dAtA[i:], m.RejectSenders[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RejectSenders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.RefetchChunks) > 0 { + dAtA41 := make([]byte, len(m.RefetchChunks)*10) + var j40 int + for _, num := range m.RefetchChunks { + for num >= 1<<7 { + dAtA41[j40] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j40++ + } + dAtA41[j40] = uint8(num) + j40++ + } + i -= j40 + copy(dAtA[i:], dAtA41[:j40]) + i = encodeVarintTypes(dAtA, i, uint64(j40)) + i-- + dAtA[i] = 0x12 + } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != nil { + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Validator != nil { + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Evidence != nil { + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxGas != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) + i-- + dAtA[i] = 0x10 + } + if m.MaxBytes != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index { + i-- + if m.Index { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TxResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Validator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Power != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.Power)) + i-- + dAtA[i] = 0x18 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Power != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SignedLastBlock { + i-- + if m.SignedLastBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Evidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x28 + } + n49, err49 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err49 != nil { + return 0, err49 + } + i -= n49 + i = encodeVarintTypes(dAtA, i, uint64(n49)) + i-- + dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ 
= i + var l int + _ = l + if len(m.Metadata) > 0 { + i -= len(m.Metadata) + copy(dAtA[i:], m.Metadata) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) + i-- + dAtA[i] = 0x2a + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if m.Chunks != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Request_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_SetOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SetOption != nil { + l = m.SetOption.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_BeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_EndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = 
m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *RequestEcho) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestFlush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.BlockVersion != 0 { + n += 1 + sovTypes(uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + n += 1 + sovTypes(uint64(m.P2PVersion)) + } + return n +} + +func (m *RequestSetOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppStateBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func (m *RequestQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *RequestBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.LastCommitInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *RequestCheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + return n +} + +func (m *RequestDeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *RequestCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m 
*RequestListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Snapshot != nil { + l = m.Snapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunk != 0 { + n += 1 + sovTypes(uint64(m.Chunk)) + } + return n +} + +func (m *RequestApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Response) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Response_Exception) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exception != nil { + l = m.Exception.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_SetOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SetOption != nil { + l = m.SetOption.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_BeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_EndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m 
*Response_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ResponseException) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseEcho) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseFlush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ResponseInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.AppVersion != 0 { + n += 1 + sovTypes(uint64(m.AppVersion)) + } + if m.LastBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LastBlockHeight)) + } + l = len(m.LastBlockAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseSetOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ProofOps != nil { + l = m.ProofOps.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseCheckTx) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Priority != 0 { + n += 1 + sovTypes(uint64(m.Priority)) + } + l = len(m.MempoolError) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseDeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } + return n +} + +func (m *ResponseListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Snapshots) > 0 { + for _, e := range m.Snapshots { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + return n +} + +func (m *ResponseLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + if len(m.RefetchChunks) > 0 { + l = 0 + for _, e := range m.RefetchChunks { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.RejectSenders) > 0 { + for _, s := range m.RejectSenders { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ConsensusParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *BlockParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { + n += 1 + sovTypes(uint64(m.MaxGas)) + } + return n +} + +func (m *LastCommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *EventAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Index { + n += 2 + } + return n +} + +func (m *TxResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Result.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Validator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + return n +} + +func (m *ValidatorUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + return n +} + +func (m *VoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.SignedLastBlock { + n += 2 + } + return n +} + +func (m *Evidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) + } + return n +} + +func (m *Snapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunks != 0 { + n += 1 + sovTypes(uint64(m.Chunks)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Metadata) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Echo{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Flush{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Info{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SetOption", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestSetOption{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_SetOption{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_InitChain{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Query{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestBeginBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_BeginBlock{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_CheckTx{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_DeliverTx{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_EndBlock{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Commit{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ListSnapshots{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_OfferSnapshot{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_LoadSnapshotChunk{v} 
+ iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ApplySnapshotChunk{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestEcho) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestEcho: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestEcho: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestFlush) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestFlush: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestFlush: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + } + m.BlockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + } + m.P2PVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2PVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestSetOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestSetOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestSetOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppStateBytes == nil { + m.AppStateBytes = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= CheckTxType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := 
skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOfferSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Snapshot == nil { + m.Snapshot = &Snapshot{} + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestLoadSnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + m.Chunk = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunk |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestApplySnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseException{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Exception{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Echo{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Flush{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Info{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SetOption", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseSetOption{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_SetOption{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_InitChain{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Query{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseBeginBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_BeginBlock{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_CheckTx{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_DeliverTx{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_EndBlock{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Commit{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ListSnapshots{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_OfferSnapshot{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_LoadSnapshotChunk{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ApplySnapshotChunk{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseException) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseException: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseException: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseEcho) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEcho: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEcho: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
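+// Editorial note, not emitted by protoc-gen-gogo: every Unmarshal method in this
+// file follows the same wire-format walk. It decodes a varint key, splits it into
+// fieldNum (key >> 3) and wireType (key & 0x7), dispatches on fieldNum, and hands
+// unknown fields to skipTypes. A minimal sketch of the shared varint step follows;
+// decodeVarint is a hypothetical helper named here only for illustration, while
+// ErrIntOverflowTypes and the io import are the file's own:
+//
+//	func decodeVarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
+//		for shift := uint(0); ; shift += 7 {
+//			if shift >= 64 {
+//				return 0, 0, ErrIntOverflowTypes // varint wider than 64 bits
+//			}
+//			if iNdEx >= len(dAtA) {
+//				return 0, 0, io.ErrUnexpectedEOF // buffer ended mid-varint
+//			}
+//			b := dAtA[iNdEx]
+//			iNdEx++
+//			v |= uint64(b&0x7F) << shift // accumulate 7 payload bits per byte
+//			if b < 0x80 {                // continuation bit clear: last byte
+//				return v, iNdEx, nil
+//			}
+//		}
+//	}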
+func (m *ResponseFlush) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseFlush: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseFlush: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + } + 
m.LastBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastBlockAppHash = append(m.LastBlockAppHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastBlockAppHash == nil { + m.LastBlockAppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseSetOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseSetOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofOps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofOps == nil { + m.ProofOps = &v1.ProofOps{} + } + if err := m.ProofOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MempoolError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: 
+ iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Snapshots = append(m.Snapshots, &Snapshot{}) + if err := m.Snapshots[len(m.Snapshots)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOfferSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseApplySnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseApplySnapshotChunk_Result(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.RefetchChunks) == 0 { + m.RefetchChunks = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectSenders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectSenders = append(m.RejectSenders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &BlockParams{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Evidence == nil { + m.Evidence = &v1beta1.EvidenceParams{} + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validator == nil { + m.Validator = &v1beta1.ValidatorParams{} + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &v1beta1.VersionParams{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + } + m.MaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxGas |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LastCommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LastCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Votes = append(m.Votes, VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, EventAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Index = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedLastBlock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SignedLastBlock = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Evidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= EvidenceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + m.Chunks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunks |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata[:0], dAtA[iNdEx:postIndex]...) + if m.Metadata == nil { + m.Metadata = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/abci/v1beta2/types.go b/api/cometbft/abci/v1beta2/types.go new file mode 100644 index 00000000000..8ef412c2d71 --- /dev/null +++ b/api/cometbft/abci/v1beta2/types.go @@ -0,0 +1,38 @@ +package v1beta2 + +import ( + "bytes" + + "github.com/cosmos/gogoproto/jsonpb" +) + +// IsAccepted returns true if Code is ACCEPT +func (r ResponseProcessProposal) IsAccepted() bool { + return r.Status == ResponseProcessProposal_ACCEPT +} + +// IsStatusUnknown returns true if Code is UNKNOWN +func (r ResponseProcessProposal) IsStatusUnknown() bool { + return r.Status == ResponseProcessProposal_UNKNOWN +} + +// --------------------------------------------------------------------------- +// override JSON 
marshaling so we emit defaults (ie. disable omitempty) + +var ( + jsonpbMarshaller = jsonpb.Marshaler{ + EnumsAsInts: true, + EmitDefaults: true, + } + jsonpbUnmarshaller = jsonpb.Unmarshaler{} +) + +func (r *EventAttribute) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *EventAttribute) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} diff --git a/api/cometbft/abci/v1beta2/types.pb.go b/api/cometbft/abci/v1beta2/types.pb.go new file mode 100644 index 00000000000..10c0a5d6e37 --- /dev/null +++ b/api/cometbft/abci/v1beta2/types.pb.go @@ -0,0 +1,9912 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/abci/v1beta2/types.proto + +package v1beta2 + +import ( + context "context" + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1" + v1beta11 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + v1beta2 "github.com/cometbft/cometbft/api/cometbft/types/v1beta2" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The type of misbehavior committed by a validator. +type MisbehaviorType int32 + +const ( + // Unknown + MisbehaviorType_UNKNOWN MisbehaviorType = 0 + // Duplicate vote + MisbehaviorType_DUPLICATE_VOTE MisbehaviorType = 1 + // Light client attack + MisbehaviorType_LIGHT_CLIENT_ATTACK MisbehaviorType = 2 +) + +var MisbehaviorType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DUPLICATE_VOTE", + 2: "LIGHT_CLIENT_ATTACK", +} + +var MisbehaviorType_value = map[string]int32{ + "UNKNOWN": 0, + "DUPLICATE_VOTE": 1, + "LIGHT_CLIENT_ATTACK": 2, +} + +func (x MisbehaviorType) String() string { + return proto.EnumName(MisbehaviorType_name, int32(x)) +} + +func (MisbehaviorType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{0} +} + +// The status. 
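All of the hand-written Unmarshal methods above decode protobuf base-128 varints with the same inline loop. A standalone sketch of that loop, with decodeUvarint as a hypothetical helper name and "fmt" / "io" assumed imported:

	func decodeUvarint(b []byte) (v uint64, n int, err error) {
		// Each byte contributes its low 7 bits; the high bit flags continuation.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				// Matches the ErrIntOverflowTypes guard in the generated code.
				return 0, 0, fmt.Errorf("varint overflows uint64")
			}
			if n >= len(b) {
				return 0, 0, io.ErrUnexpectedEOF
			}
			c := b[n]
			n++
			v |= uint64(c&0x7F) << shift
			if c < 0x80 { // high bit clear: final byte
				return v, n, nil
			}
		}
	}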
+type ResponseProcessProposal_ProposalStatus int32 + +const ( + // Unknown + ResponseProcessProposal_UNKNOWN ResponseProcessProposal_ProposalStatus = 0 + // Accepted + ResponseProcessProposal_ACCEPT ResponseProcessProposal_ProposalStatus = 1 + // Rejected + ResponseProcessProposal_REJECT ResponseProcessProposal_ProposalStatus = 2 +) + +var ResponseProcessProposal_ProposalStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "REJECT", +} + +var ResponseProcessProposal_ProposalStatus_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "REJECT": 2, +} + +func (x ResponseProcessProposal_ProposalStatus) String() string { + return proto.EnumName(ResponseProcessProposal_ProposalStatus_name, int32(x)) +} + +func (ResponseProcessProposal_ProposalStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{13, 0} +} + +// Request represents a request to the ABCI application. +type Request struct { + // Sum of all possible messages. + // + // Types that are valid to be assigned to Value: + // *Request_Echo + // *Request_Flush + // *Request_Info + // *Request_InitChain + // *Request_Query + // *Request_BeginBlock + // *Request_CheckTx + // *Request_DeliverTx + // *Request_EndBlock + // *Request_Commit + // *Request_ListSnapshots + // *Request_OfferSnapshot + // *Request_LoadSnapshotChunk + // *Request_ApplySnapshotChunk + // *Request_PrepareProposal + // *Request_ProcessProposal + Value isRequest_Value `protobuf_oneof:"value"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +type isRequest_Value interface { + isRequest_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Request_Echo struct { + Echo *v1beta1.RequestEcho `protobuf:"bytes,1,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Request_Flush struct { + Flush *v1beta1.RequestFlush `protobuf:"bytes,2,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Request_Info struct { + Info *RequestInfo `protobuf:"bytes,3,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Request_InitChain struct { + InitChain *RequestInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Request_Query struct { + Query *v1beta1.RequestQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Request_BeginBlock struct { + BeginBlock *RequestBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` +} +type Request_CheckTx struct { + CheckTx *v1beta1.RequestCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type 
Request_DeliverTx struct { + DeliverTx *v1beta1.RequestDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` +} +type Request_EndBlock struct { + EndBlock *v1beta1.RequestEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` +} +type Request_Commit struct { + Commit *v1beta1.RequestCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Request_ListSnapshots struct { + ListSnapshots *v1beta1.RequestListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Request_OfferSnapshot struct { + OfferSnapshot *v1beta1.RequestOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Request_LoadSnapshotChunk struct { + LoadSnapshotChunk *v1beta1.RequestLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Request_ApplySnapshotChunk struct { + ApplySnapshotChunk *v1beta1.RequestApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} +type Request_PrepareProposal struct { + PrepareProposal *RequestPrepareProposal `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Request_ProcessProposal struct { + ProcessProposal *RequestProcessProposal `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} + +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} +func (*Request_PrepareProposal) isRequest_Value() {} +func (*Request_ProcessProposal) isRequest_Value() {} + +func (m *Request) GetValue() isRequest_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Request) GetEcho() *v1beta1.RequestEcho { + if x, ok := m.GetValue().(*Request_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Request) GetFlush() *v1beta1.RequestFlush { + if x, ok := m.GetValue().(*Request_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Request) GetInfo() *RequestInfo { + if x, ok := m.GetValue().(*Request_Info); ok { + return x.Info + } + return nil +} + +func (m *Request) GetInitChain() *RequestInitChain { + if x, ok := m.GetValue().(*Request_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Request) GetQuery() *v1beta1.RequestQuery { + if x, ok := m.GetValue().(*Request_Query); ok { + return x.Query + } + return nil +} + +func (m *Request) GetBeginBlock() *RequestBeginBlock { + if x, ok := m.GetValue().(*Request_BeginBlock); ok { + return x.BeginBlock + } + return nil +} + +func (m *Request) GetCheckTx() *v1beta1.RequestCheckTx { + if x, ok := 
m.GetValue().(*Request_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Request) GetDeliverTx() *v1beta1.RequestDeliverTx { + if x, ok := m.GetValue().(*Request_DeliverTx); ok { + return x.DeliverTx + } + return nil +} + +func (m *Request) GetEndBlock() *v1beta1.RequestEndBlock { + if x, ok := m.GetValue().(*Request_EndBlock); ok { + return x.EndBlock + } + return nil +} + +func (m *Request) GetCommit() *v1beta1.RequestCommit { + if x, ok := m.GetValue().(*Request_Commit); ok { + return x.Commit + } + return nil +} + +func (m *Request) GetListSnapshots() *v1beta1.RequestListSnapshots { + if x, ok := m.GetValue().(*Request_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Request) GetOfferSnapshot() *v1beta1.RequestOfferSnapshot { + if x, ok := m.GetValue().(*Request_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Request) GetLoadSnapshotChunk() *v1beta1.RequestLoadSnapshotChunk { + if x, ok := m.GetValue().(*Request_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Request) GetApplySnapshotChunk() *v1beta1.RequestApplySnapshotChunk { + if x, ok := m.GetValue().(*Request_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + +func (m *Request) GetPrepareProposal() *RequestPrepareProposal { + if x, ok := m.GetValue().(*Request_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Request) GetProcessProposal() *RequestProcessProposal { + if x, ok := m.GetValue().(*Request_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Request) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Request_Echo)(nil), + (*Request_Flush)(nil), + (*Request_Info)(nil), + (*Request_InitChain)(nil), + (*Request_Query)(nil), + (*Request_BeginBlock)(nil), + (*Request_CheckTx)(nil), + (*Request_DeliverTx)(nil), + (*Request_EndBlock)(nil), + (*Request_Commit)(nil), + (*Request_ListSnapshots)(nil), + (*Request_OfferSnapshot)(nil), + (*Request_LoadSnapshotChunk)(nil), + (*Request_ApplySnapshotChunk)(nil), + (*Request_PrepareProposal)(nil), + (*Request_ProcessProposal)(nil), + } +} + +// RequestInfo is a request for the ABCI application version. 
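The wrapper structs and nil-on-mismatch getters above are gogoproto's encoding of a proto oneof: Value holds exactly one wrapper, and each getter type-asserts it. A minimal dispatch sketch, assuming this package is imported as abciv1beta2 and handleEcho is a hypothetical handler:

	func route(req *abciv1beta2.Request) error {
		// Each case matches one generated wrapper type; Value holds exactly one.
		switch v := req.Value.(type) {
		case *abciv1beta2.Request_Echo:
			return handleEcho(v.Echo) // *v1beta1.RequestEcho
		case *abciv1beta2.Request_PrepareProposal:
			_ = v.PrepareProposal // *abciv1beta2.RequestPrepareProposal
			return nil
		default:
			return fmt.Errorf("unhandled request variant %T", v)
		}
	}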
+type RequestInfo struct { + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` + P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` + AbciVersion string `protobuf:"bytes,4,opt,name=abci_version,json=abciVersion,proto3" json:"abci_version,omitempty"` +} + +func (m *RequestInfo) Reset() { *m = RequestInfo{} } +func (m *RequestInfo) String() string { return proto.CompactTextString(m) } +func (*RequestInfo) ProtoMessage() {} +func (*RequestInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{1} +} +func (m *RequestInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInfo.Merge(m, src) +} +func (m *RequestInfo) XXX_Size() int { + return m.Size() +} +func (m *RequestInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInfo proto.InternalMessageInfo + +func (m *RequestInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *RequestInfo) GetBlockVersion() uint64 { + if m != nil { + return m.BlockVersion + } + return 0 +} + +func (m *RequestInfo) GetP2PVersion() uint64 { + if m != nil { + return m.P2PVersion + } + return 0 +} + +func (m *RequestInfo) GetAbciVersion() string { + if m != nil { + return m.AbciVersion + } + return "" +} + +// RequestInitChain is a request to initialize the blockchain. 
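Like every gogoproto getter, the RequestInfo accessors above check the receiver for nil before dereferencing, so callers can read fields without a guard. A sketch under the same assumed abciv1beta2 alias:

	var info *abciv1beta2.RequestInfo // deliberately nil
	_ = info.GetVersion()             // "" — the nil check is inside the getter
	_ = info.GetBlockVersion()        // 0, likewise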
+type RequestInitChain struct { + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *v1beta2.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []v1beta1.ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` +} + +func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } +func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } +func (*RequestInitChain) ProtoMessage() {} +func (*RequestInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{2} +} +func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestInitChain.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInitChain.Merge(m, src) +} +func (m *RequestInitChain) XXX_Size() int { + return m.Size() +} +func (m *RequestInitChain) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInitChain.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInitChain proto.InternalMessageInfo + +func (m *RequestInitChain) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestInitChain) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *RequestInitChain) GetConsensusParams() *v1beta2.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *RequestInitChain) GetValidators() []v1beta1.ValidatorUpdate { + if m != nil { + return m.Validators + } + return nil +} + +func (m *RequestInitChain) GetAppStateBytes() []byte { + if m != nil { + return m.AppStateBytes + } + return nil +} + +func (m *RequestInitChain) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +// RequestBeginBlock indicates the beginning of committing the block. 
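A minimal InitChain request under the field layout above; the values are illustrative only, and abciv1beta2 / v1beta1 are assumed aliases for this package and the imported v1beta1 ABCI package:

	req := &abciv1beta2.RequestInitChain{
		Time:          time.Now(),
		ChainId:       "test-chain-1", // illustrative
		InitialHeight: 1,
		Validators: []v1beta1.ValidatorUpdate{
			{Power: 10}, // PubKey omitted for brevity
		},
	}
	_ = req.GetChainId() // "test-chain-1"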
+type RequestBeginBlock struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header v1beta11.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + LastCommitInfo CommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` + ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` +} + +func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } +func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } +func (*RequestBeginBlock) ProtoMessage() {} +func (*RequestBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{3} +} +func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBeginBlock.Merge(m, src) +} +func (m *RequestBeginBlock) XXX_Size() int { + return m.Size() +} +func (m *RequestBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestBeginBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestBeginBlock proto.InternalMessageInfo + +func (m *RequestBeginBlock) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestBeginBlock) GetHeader() v1beta11.Header { + if m != nil { + return m.Header + } + return v1beta11.Header{} +} + +func (m *RequestBeginBlock) GetLastCommitInfo() CommitInfo { + if m != nil { + return m.LastCommitInfo + } + return CommitInfo{} +} + +func (m *RequestBeginBlock) GetByzantineValidators() []Misbehavior { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +// RequestPrepareProposal is a request for the ABCI application to prepare a new +// block proposal. +type RequestPrepareProposal struct { + // the modified transactions cannot exceed this size. + MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,4,rep,name=misbehavior,proto3" json:"misbehavior"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // address of the public key of the validator proposing the block. 
+ ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } +func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } +func (*RequestPrepareProposal) ProtoMessage() {} +func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{4} +} +func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPrepareProposal.Merge(m, src) +} +func (m *RequestPrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestPrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo + +func (m *RequestPrepareProposal) GetMaxTxBytes() int64 { + if m != nil { + return m.MaxTxBytes + } + return 0 +} + +func (m *RequestPrepareProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { + if m != nil { + return m.LocalLastCommit + } + return ExtendedCommitInfo{} +} + +func (m *RequestPrepareProposal) GetMisbehavior() []Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestPrepareProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestPrepareProposal) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestPrepareProposal) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestPrepareProposal) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// RequestProcessProposal is a request for the ABCI application to process proposal. +type RequestProcessProposal struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` + // hash is the merkle root hash of the fields of the proposed block. + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // address of the public key of the original proposer of the block. 
+ ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } +func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } +func (*RequestProcessProposal) ProtoMessage() {} +func (*RequestProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{5} +} +func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestProcessProposal.Merge(m, src) +} +func (m *RequestProcessProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestProcessProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestProcessProposal proto.InternalMessageInfo + +func (m *RequestProcessProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { + if m != nil { + return m.ProposedLastCommit + } + return CommitInfo{} +} + +func (m *RequestProcessProposal) GetMisbehavior() []Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestProcessProposal) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestProcessProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestProcessProposal) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestProcessProposal) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestProcessProposal) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// Response represents a response from the ABCI application. +type Response struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Value: + // *Response_Exception + // *Response_Echo + // *Response_Flush + // *Response_Info + // *Response_InitChain + // *Response_Query + // *Response_BeginBlock + // *Response_CheckTx + // *Response_DeliverTx + // *Response_EndBlock + // *Response_Commit + // *Response_ListSnapshots + // *Response_OfferSnapshot + // *Response_LoadSnapshotChunk + // *Response_ApplySnapshotChunk + // *Response_PrepareProposal + // *Response_ProcessProposal + Value isResponse_Value `protobuf_oneof:"value"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{6} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +type isResponse_Value interface { + isResponse_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Response_Exception struct { + Exception *v1beta1.ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` +} +type Response_Echo struct { + Echo *v1beta1.ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Response_Flush struct { + Flush *v1beta1.ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Response_Info struct { + Info *v1beta1.ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Response_InitChain struct { + InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Response_Query struct { + Query *v1beta1.ResponseQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Response_BeginBlock struct { + BeginBlock *ResponseBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` +} +type Response_CheckTx struct { + CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type Response_DeliverTx struct { + DeliverTx *ResponseDeliverTx `protobuf:"bytes,10,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` +} +type Response_EndBlock struct { + EndBlock *ResponseEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` +} +type Response_Commit struct { + Commit *v1beta1.ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Response_ListSnapshots struct { + ListSnapshots *v1beta1.ResponseListSnapshots `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Response_OfferSnapshot struct { + OfferSnapshot *v1beta1.ResponseOfferSnapshot 
`protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Response_LoadSnapshotChunk struct { + LoadSnapshotChunk *v1beta1.ResponseLoadSnapshotChunk `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Response_ApplySnapshotChunk struct { + ApplySnapshotChunk *v1beta1.ResponseApplySnapshotChunk `protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} +type Response_PrepareProposal struct { + PrepareProposal *ResponsePrepareProposal `protobuf:"bytes,17,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Response_ProcessProposal struct { + ProcessProposal *ResponseProcessProposal `protobuf:"bytes,18,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} +func (*Response_PrepareProposal) isResponse_Value() {} +func (*Response_ProcessProposal) isResponse_Value() {} + +func (m *Response) GetValue() isResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Response) GetException() *v1beta1.ResponseException { + if x, ok := m.GetValue().(*Response_Exception); ok { + return x.Exception + } + return nil +} + +func (m *Response) GetEcho() *v1beta1.ResponseEcho { + if x, ok := m.GetValue().(*Response_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Response) GetFlush() *v1beta1.ResponseFlush { + if x, ok := m.GetValue().(*Response_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Response) GetInfo() *v1beta1.ResponseInfo { + if x, ok := m.GetValue().(*Response_Info); ok { + return x.Info + } + return nil +} + +func (m *Response) GetInitChain() *ResponseInitChain { + if x, ok := m.GetValue().(*Response_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Response) GetQuery() *v1beta1.ResponseQuery { + if x, ok := m.GetValue().(*Response_Query); ok { + return x.Query + } + return nil +} + +func (m *Response) GetBeginBlock() *ResponseBeginBlock { + if x, ok := m.GetValue().(*Response_BeginBlock); ok { + return x.BeginBlock + } + return nil +} + +func (m *Response) GetCheckTx() *ResponseCheckTx { + if x, ok := m.GetValue().(*Response_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Response) GetDeliverTx() *ResponseDeliverTx { + if x, ok := m.GetValue().(*Response_DeliverTx); ok { + return x.DeliverTx + } + return nil +} + +func (m *Response) GetEndBlock() *ResponseEndBlock { + if x, ok := m.GetValue().(*Response_EndBlock); ok { + return x.EndBlock + } + return nil +} + +func (m *Response) GetCommit() *v1beta1.ResponseCommit { + if x, ok := m.GetValue().(*Response_Commit); ok { 
+ return x.Commit + } + return nil +} + +func (m *Response) GetListSnapshots() *v1beta1.ResponseListSnapshots { + if x, ok := m.GetValue().(*Response_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Response) GetOfferSnapshot() *v1beta1.ResponseOfferSnapshot { + if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Response) GetLoadSnapshotChunk() *v1beta1.ResponseLoadSnapshotChunk { + if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Response) GetApplySnapshotChunk() *v1beta1.ResponseApplySnapshotChunk { + if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + +func (m *Response) GetPrepareProposal() *ResponsePrepareProposal { + if x, ok := m.GetValue().(*Response_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Response) GetProcessProposal() *ResponseProcessProposal { + if x, ok := m.GetValue().(*Response_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Response) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Response_Exception)(nil), + (*Response_Echo)(nil), + (*Response_Flush)(nil), + (*Response_Info)(nil), + (*Response_InitChain)(nil), + (*Response_Query)(nil), + (*Response_BeginBlock)(nil), + (*Response_CheckTx)(nil), + (*Response_DeliverTx)(nil), + (*Response_EndBlock)(nil), + (*Response_Commit)(nil), + (*Response_ListSnapshots)(nil), + (*Response_OfferSnapshot)(nil), + (*Response_LoadSnapshotChunk)(nil), + (*Response_ApplySnapshotChunk)(nil), + (*Response_PrepareProposal)(nil), + (*Response_ProcessProposal)(nil), + } +} + +// ResponseInitChain contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. 
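Constructing the Response side mirrors the Request oneof: the wrapper type assigned to Value selects the variant, and only the matching getter returns non-nil. A sketch with the same assumed alias:

	resp := &abciv1beta2.Response{
		Value: &abciv1beta2.Response_CheckTx{
			CheckTx: &abciv1beta2.ResponseCheckTx{Code: 0}, // 0 is the conventional OK code
		},
	}
	_ = resp.GetCheckTx() // non-nil, because the CheckTx variant is set
	_ = resp.GetCommit()  // nil: a different variant was chosen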
+type ResponseInitChain struct { + ConsensusParams *v1beta2.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []v1beta1.ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } +func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } +func (*ResponseInitChain) ProtoMessage() {} +func (*ResponseInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{7} +} +func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInitChain.Merge(m, src) +} +func (m *ResponseInitChain) XXX_Size() int { + return m.Size() +} +func (m *ResponseInitChain) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseInitChain.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo + +func (m *ResponseInitChain) GetConsensusParams() *v1beta2.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *ResponseInitChain) GetValidators() []v1beta1.ValidatorUpdate { + if m != nil { + return m.Validators + } + return nil +} + +func (m *ResponseInitChain) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// ResponseBeginBlock contains a list of block-level events. +type ResponseBeginBlock struct { + Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } +func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseBeginBlock) ProtoMessage() {} +func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{8} +} +func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBeginBlock.Merge(m, src) +} +func (m *ResponseBeginBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo + +func (m *ResponseBeginBlock) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +// ResponseCheckTx shows if the transaction was deemed valid by the ABCI +// application. 
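ResponseBeginBlock carries only block-level events. A sketch that assumes Event, defined later in this file, exposes a Type string field as its v1beta1 counterpart does:

	out := abciv1beta2.ResponseBeginBlock{
		Events: []abciv1beta2.Event{{Type: "rewards"}}, // Type field assumed
	}
	_ = out.GetEvents() // one event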
+type ResponseCheckTx struct {
+ Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+ Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
+ GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"`
+ GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
+ Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
+ Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
+ Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
+ Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
+ // mempool_error is set by CometBFT.
+ // ABCI applications creating a ResponseCheckTx should not set mempool_error.
+ MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
+}
+
+func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
+func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) }
+func (*ResponseCheckTx) ProtoMessage() {}
+func (*ResponseCheckTx) Descriptor() ([]byte, []int) {
+ return fileDescriptor_84748304b8d8ccc3, []int{9}
+}
+func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ResponseCheckTx) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResponseCheckTx.Merge(m, src)
+}
+func (m *ResponseCheckTx) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResponseCheckTx) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResponseCheckTx.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseCheckTx proto.InternalMessageInfo
+
+func (m *ResponseCheckTx) GetCode() uint32 {
+ if m != nil {
+ return m.Code
+ }
+ return 0
+}
+
+func (m *ResponseCheckTx) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *ResponseCheckTx) GetLog() string {
+ if m != nil {
+ return m.Log
+ }
+ return ""
+}
+
+func (m *ResponseCheckTx) GetInfo() string {
+ if m != nil {
+ return m.Info
+ }
+ return ""
+}
+
+func (m *ResponseCheckTx) GetGasWanted() int64 {
+ if m != nil {
+ return m.GasWanted
+ }
+ return 0
+}
+
+func (m *ResponseCheckTx) GetGasUsed() int64 {
+ if m != nil {
+ return m.GasUsed
+ }
+ return 0
+}
+
+func (m *ResponseCheckTx) GetEvents() []Event {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+func (m *ResponseCheckTx) GetCodespace() string {
+ if m != nil {
+ return m.Codespace
+ }
+ return ""
+}
+
+func (m *ResponseCheckTx) GetSender() string {
+ if m != nil {
+ return m.Sender
+ }
+ return ""
+}
+
+func (m *ResponseCheckTx) GetPriority() int64 {
+ if m != nil {
+ return m.Priority
+ }
+ return 0
+}
+
+func (m *ResponseCheckTx) GetMempoolError() string {
+ if m != nil {
+ return m.MempoolError
+ }
+ return ""
+}
+
+// ResponseDeliverTx contains the result of executing the given transaction and a
+// list of events.
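+//
+// Editor's illustrative sketch (not part of the generated file): a handler
+// reporting successful execution together with one indexed event; the event
+// type and attribute names are made-up examples:
+//
+//	resp := &ResponseDeliverTx{
+//		Code:    0,     // success
+//		GasUsed: 21000, // placeholder figure
+//		Events: []Event{{
+//			Type:       "transfer",
+//			Attributes: []EventAttribute{{Key: "amount", Value: "100", Index: true}},
+//		}},
+//	}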
+type ResponseDeliverTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } +func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } +func (*ResponseDeliverTx) ProtoMessage() {} +func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{10} +} +func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseDeliverTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDeliverTx.Merge(m, src) +} +func (m *ResponseDeliverTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseDeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDeliverTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseDeliverTx proto.InternalMessageInfo + +func (m *ResponseDeliverTx) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseDeliverTx) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseDeliverTx) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseDeliverTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseDeliverTx) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseDeliverTx) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseDeliverTx) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *ResponseDeliverTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + +// ResponseEndBlock contains updates to consensus params and/or validator set changes, if any. 
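+//
+// Editor's illustrative sketch (not part of the generated file): returning
+// an empty message signals "no changes"; a voting-power update could look
+// roughly like the following, where `pubKey` is an assumed v1beta1 public
+// key for the validator being adjusted:
+//
+//	resp := &ResponseEndBlock{
+//		ValidatorUpdates: []v1beta1.ValidatorUpdate{
+//			{PubKey: pubKey, Power: 10}, // a Power of 0 removes the validator
+//		},
+//	}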
+type ResponseEndBlock struct { + ValidatorUpdates []v1beta1.ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *v1beta2.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } +func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseEndBlock) ProtoMessage() {} +func (*ResponseEndBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{11} +} +func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEndBlock.Merge(m, src) +} +func (m *ResponseEndBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseEndBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo + +func (m *ResponseEndBlock) GetValidatorUpdates() []v1beta1.ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseEndBlock) GetConsensusParamUpdates() *v1beta2.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseEndBlock) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +// ResponsePrepareProposal contains the list of transactions that will be included in the proposal. +type ResponsePrepareProposal struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } +func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } +func (*ResponsePrepareProposal) ProtoMessage() {} +func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{12} +} +func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) +} +func (m *ResponsePrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo + +func (m *ResponsePrepareProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +// ResponseProcessProposal contains the result of processing a proposal. 
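+//
+// Editor's illustrative sketch (not part of the generated file): Status
+// carries the accept/reject verdict; the ACCEPT and REJECT constants are
+// assumed to be generated alongside the UNKNOWN constant used by GetStatus
+// below:
+//
+//	resp := &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}
+//	if resp.GetStatus() != ResponseProcessProposal_ACCEPT {
+//		// the proposal was rejected (or the status was left unset)
+//	}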
+type ResponseProcessProposal struct {
+ Status ResponseProcessProposal_ProposalStatus `protobuf:"varint,1,opt,name=status,proto3,enum=cometbft.abci.v1beta2.ResponseProcessProposal_ProposalStatus" json:"status,omitempty"`
+}
+
+func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} }
+func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) }
+func (*ResponseProcessProposal) ProtoMessage() {}
+func (*ResponseProcessProposal) Descriptor() ([]byte, []int) {
+ return fileDescriptor_84748304b8d8ccc3, []int{13}
+}
+func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResponseProcessProposal.Merge(m, src)
+}
+func (m *ResponseProcessProposal) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResponseProcessProposal) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo
+
+func (m *ResponseProcessProposal) GetStatus() ResponseProcessProposal_ProposalStatus {
+ if m != nil {
+ return m.Status
+ }
+ return ResponseProcessProposal_UNKNOWN
+}
+
+// CommitInfo contains votes for the particular round.
+type CommitInfo struct {
+ Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"`
+ Votes []v1beta1.VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"`
+}
+
+func (m *CommitInfo) Reset() { *m = CommitInfo{} }
+func (m *CommitInfo) String() string { return proto.CompactTextString(m) }
+func (*CommitInfo) ProtoMessage() {}
+func (*CommitInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_84748304b8d8ccc3, []int{14}
+}
+func (m *CommitInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CommitInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitInfo.Merge(m, src)
+}
+func (m *CommitInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *CommitInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitInfo proto.InternalMessageInfo
+
+func (m *CommitInfo) GetRound() int32 {
+ if m != nil {
+ return m.Round
+ }
+ return 0
+}
+
+func (m *CommitInfo) GetVotes() []v1beta1.VoteInfo {
+ if m != nil {
+ return m.Votes
+ }
+ return nil
+}
+
+// ExtendedCommitInfo is similar to CommitInfo except that it is only used in
+// the PrepareProposal request such that CometBFT can provide vote extensions
+// to the application.
+type ExtendedCommitInfo struct {
+ // The round at which the block proposer decided in the previous height.
+ Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"`
+ // List of validators' addresses in the last validator set with their voting
+ // information, including vote extensions.
+ Votes []ExtendedVoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +} + +func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } +func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedCommitInfo) ProtoMessage() {} +func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{15} +} +func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedCommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExtendedCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedCommitInfo.Merge(m, src) +} +func (m *ExtendedCommitInfo) XXX_Size() int { + return m.Size() +} +func (m *ExtendedCommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedCommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendedCommitInfo proto.InternalMessageInfo + +func (m *ExtendedCommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo { + if m != nil { + return m.Votes + } + return nil +} + +// Event allows application developers to attach additional information to +// ResponseFinalizeBlock (defined in .v1beta3) and ResponseCheckTx. +// Up to 0.37, this could also be used in ResponseBeginBlock, ResponseEndBlock, +// and ResponseDeliverTx. +// Later, transactions may be queried using these events. +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{16} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Event) GetAttributes() []EventAttribute { + if m != nil { + return m.Attributes + } + return nil +} + +// EventAttribute is a single key-value pair, associated with an event. 
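+//
+// Editor's illustrative sketch (not part of the generated file): setting
+// Index to true asks the node's indexer to make the attribute queryable, so
+// a transaction emitting the event below could later be looked up by a query
+// along the lines of `transfer.sender='...'` (indexer behavior assumed):
+//
+//	attr := EventAttribute{Key: "sender", Value: "addr1...", Index: true}
+//	ev := Event{Type: "transfer", Attributes: []EventAttribute{attr}}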
+type EventAttribute struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *EventAttribute) Reset() { *m = EventAttribute{} } +func (m *EventAttribute) String() string { return proto.CompactTextString(m) } +func (*EventAttribute) ProtoMessage() {} +func (*EventAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{17} +} +func (m *EventAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventAttribute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventAttribute.Merge(m, src) +} +func (m *EventAttribute) XXX_Size() int { + return m.Size() +} +func (m *EventAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_EventAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_EventAttribute proto.InternalMessageInfo + +func (m *EventAttribute) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *EventAttribute) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *EventAttribute) GetIndex() bool { + if m != nil { + return m.Index + } + return false +} + +// ExtendedVoteInfo extends VoteInfo with the vote extensions (non-deterministic). +type ExtendedVoteInfo struct { + // The validator that sent the vote. + Validator v1beta1.Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + // Indicates whether the validator signed the last block, allowing for rewards based on validator availability. + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` + // Non-deterministic extension provided by the sending validator's application. 
+ VoteExtension []byte `protobuf:"bytes,3,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"`
+}
+
+func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} }
+func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) }
+func (*ExtendedVoteInfo) ProtoMessage() {}
+func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_84748304b8d8ccc3, []int{18}
+}
+func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExtendedVoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ExtendedVoteInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ExtendedVoteInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExtendedVoteInfo.Merge(m, src)
+}
+func (m *ExtendedVoteInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExtendedVoteInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExtendedVoteInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtendedVoteInfo proto.InternalMessageInfo
+
+func (m *ExtendedVoteInfo) GetValidator() v1beta1.Validator {
+ if m != nil {
+ return m.Validator
+ }
+ return v1beta1.Validator{}
+}
+
+func (m *ExtendedVoteInfo) GetSignedLastBlock() bool {
+ if m != nil {
+ return m.SignedLastBlock
+ }
+ return false
+}
+
+func (m *ExtendedVoteInfo) GetVoteExtension() []byte {
+ if m != nil {
+ return m.VoteExtension
+ }
+ return nil
+}
+
+// Misbehavior represents an instance of misbehavior committed by a validator.
+type Misbehavior struct {
+ Type MisbehaviorType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.abci.v1beta2.MisbehaviorType" json:"type,omitempty"`
+ // The offending validator
+ Validator v1beta1.Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"`
+ // The height when the offense occurred
+ Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+ // The corresponding time when the offense occurred
+ Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"`
+ // Total voting power of the validator set in case the ABCI application does
+ // not store historical validators.
+ // https://github.com/tendermint/tendermint/issues/4581 + TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +} + +func (m *Misbehavior) Reset() { *m = Misbehavior{} } +func (m *Misbehavior) String() string { return proto.CompactTextString(m) } +func (*Misbehavior) ProtoMessage() {} +func (*Misbehavior) Descriptor() ([]byte, []int) { + return fileDescriptor_84748304b8d8ccc3, []int{19} +} +func (m *Misbehavior) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehavior.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Misbehavior) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehavior.Merge(m, src) +} +func (m *Misbehavior) XXX_Size() int { + return m.Size() +} +func (m *Misbehavior) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehavior.DiscardUnknown(m) +} + +var xxx_messageInfo_Misbehavior proto.InternalMessageInfo + +func (m *Misbehavior) GetType() MisbehaviorType { + if m != nil { + return m.Type + } + return MisbehaviorType_UNKNOWN +} + +func (m *Misbehavior) GetValidator() v1beta1.Validator { + if m != nil { + return m.Validator + } + return v1beta1.Validator{} +} + +func (m *Misbehavior) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Misbehavior) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *Misbehavior) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func init() { + proto.RegisterEnum("cometbft.abci.v1beta2.MisbehaviorType", MisbehaviorType_name, MisbehaviorType_value) + proto.RegisterEnum("cometbft.abci.v1beta2.ResponseProcessProposal_ProposalStatus", ResponseProcessProposal_ProposalStatus_name, ResponseProcessProposal_ProposalStatus_value) + proto.RegisterType((*Request)(nil), "cometbft.abci.v1beta2.Request") + proto.RegisterType((*RequestInfo)(nil), "cometbft.abci.v1beta2.RequestInfo") + proto.RegisterType((*RequestInitChain)(nil), "cometbft.abci.v1beta2.RequestInitChain") + proto.RegisterType((*RequestBeginBlock)(nil), "cometbft.abci.v1beta2.RequestBeginBlock") + proto.RegisterType((*RequestPrepareProposal)(nil), "cometbft.abci.v1beta2.RequestPrepareProposal") + proto.RegisterType((*RequestProcessProposal)(nil), "cometbft.abci.v1beta2.RequestProcessProposal") + proto.RegisterType((*Response)(nil), "cometbft.abci.v1beta2.Response") + proto.RegisterType((*ResponseInitChain)(nil), "cometbft.abci.v1beta2.ResponseInitChain") + proto.RegisterType((*ResponseBeginBlock)(nil), "cometbft.abci.v1beta2.ResponseBeginBlock") + proto.RegisterType((*ResponseCheckTx)(nil), "cometbft.abci.v1beta2.ResponseCheckTx") + proto.RegisterType((*ResponseDeliverTx)(nil), "cometbft.abci.v1beta2.ResponseDeliverTx") + proto.RegisterType((*ResponseEndBlock)(nil), "cometbft.abci.v1beta2.ResponseEndBlock") + proto.RegisterType((*ResponsePrepareProposal)(nil), "cometbft.abci.v1beta2.ResponsePrepareProposal") + proto.RegisterType((*ResponseProcessProposal)(nil), "cometbft.abci.v1beta2.ResponseProcessProposal") + proto.RegisterType((*CommitInfo)(nil), "cometbft.abci.v1beta2.CommitInfo") + proto.RegisterType((*ExtendedCommitInfo)(nil), "cometbft.abci.v1beta2.ExtendedCommitInfo") + proto.RegisterType((*Event)(nil), 
"cometbft.abci.v1beta2.Event") + proto.RegisterType((*EventAttribute)(nil), "cometbft.abci.v1beta2.EventAttribute") + proto.RegisterType((*ExtendedVoteInfo)(nil), "cometbft.abci.v1beta2.ExtendedVoteInfo") + proto.RegisterType((*Misbehavior)(nil), "cometbft.abci.v1beta2.Misbehavior") +} + +func init() { proto.RegisterFile("cometbft/abci/v1beta2/types.proto", fileDescriptor_84748304b8d8ccc3) } + +var fileDescriptor_84748304b8d8ccc3 = []byte{ + // 2262 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4f, 0x93, 0x1b, 0x47, + 0x15, 0xd7, 0xdf, 0x95, 0xf4, 0xa4, 0xd5, 0x6a, 0x3b, 0x9b, 0x58, 0x99, 0x72, 0xed, 0xda, 0x32, + 0x4e, 0x9c, 0xc4, 0x68, 0xe3, 0xa5, 0xa0, 0x08, 0x31, 0x50, 0x2b, 0x59, 0x41, 0x6b, 0x2f, 0xeb, + 0xcd, 0x58, 0x6b, 0x48, 0xb6, 0x8a, 0xa9, 0x96, 0xa6, 0x57, 0x1a, 0x2c, 0x4d, 0x4f, 0x66, 0x5a, + 0x8a, 0xc4, 0x89, 0x3b, 0x07, 0x7c, 0x80, 0x2b, 0x27, 0xaa, 0x38, 0xf0, 0x0d, 0xe0, 0x0b, 0xe4, + 0xe8, 0x63, 0x4e, 0x09, 0x65, 0xdf, 0xf8, 0x14, 0x54, 0xf7, 0xf4, 0x8c, 0x66, 0xf4, 0x77, 0x44, + 0x7c, 0xa1, 0xb8, 0x75, 0xbf, 0x79, 0xef, 0x75, 0xf7, 0xeb, 0xee, 0xdf, 0xfb, 0x3d, 0xb5, 0xe0, + 0x66, 0x87, 0x0e, 0x08, 0x6b, 0x5f, 0xb1, 0x43, 0xdc, 0xee, 0x18, 0x87, 0xa3, 0x7b, 0x6d, 0xc2, + 0xf0, 0xd1, 0x21, 0x9b, 0x58, 0xc4, 0xa9, 0x5a, 0x36, 0x65, 0x14, 0xbd, 0xe9, 0xa9, 0x54, 0xb9, + 0x4a, 0x55, 0xaa, 0x28, 0x7b, 0x5d, 0xda, 0xa5, 0x42, 0xe3, 0x90, 0xb7, 0x5c, 0x65, 0x65, 0xa1, + 0xbf, 0x7b, 0x41, 0x7f, 0x4a, 0xc5, 0x57, 0x11, 0xd2, 0x85, 0x3a, 0xb7, 0x16, 0xea, 0x1c, 0x1d, + 0x5a, 0xd8, 0xc6, 0x03, 0x4f, 0xe9, 0xa0, 0x4b, 0x69, 0xb7, 0x4f, 0x0e, 0x45, 0xaf, 0x3d, 0xbc, + 0x3a, 0x64, 0xc6, 0x80, 0x38, 0x0c, 0x0f, 0x2c, 0x57, 0xa1, 0xf2, 0x6d, 0x0e, 0x32, 0x2a, 0xf9, + 0x62, 0x48, 0x1c, 0x86, 0x7e, 0x0c, 0x29, 0xd2, 0xe9, 0xd1, 0x72, 0xfc, 0x46, 0xfc, 0x4e, 0xfe, + 0xa8, 0x52, 0x5d, 0xb4, 0xa8, 0x7b, 0x55, 0xa9, 0xdd, 0xe8, 0xf4, 0x68, 0x33, 0xa6, 0x0a, 0x0b, + 0xf4, 0x31, 0xa4, 0xaf, 0xfa, 0x43, 0xa7, 0x57, 0x4e, 0x08, 0xd3, 0x5b, 0xab, 0x4d, 0x3f, 0xe1, + 0xaa, 0xcd, 0x98, 0xea, 0xda, 0xf0, 0x61, 0x0d, 0xf3, 0x8a, 0x96, 0x93, 0x2b, 0x86, 0x3d, 0xf2, + 0x6c, 0x4f, 0xcc, 0x2b, 0x31, 0x2c, 0xb7, 0x40, 0x4d, 0x00, 0xc3, 0x34, 0x98, 0xd6, 0xe9, 0x61, + 0xc3, 0x2c, 0xa7, 0x85, 0xfd, 0xbb, 0xeb, 0xec, 0x0d, 0x56, 0xe7, 0xea, 0xcd, 0x98, 0x9a, 0x33, + 0xbc, 0x0e, 0x5f, 0xc0, 0x17, 0x43, 0x62, 0x4f, 0xca, 0x5b, 0x51, 0x16, 0xf0, 0x29, 0x57, 0xe5, + 0x0b, 0x10, 0x36, 0xe8, 0x11, 0xe4, 0xdb, 0xa4, 0x6b, 0x98, 0x5a, 0xbb, 0x4f, 0x3b, 0xcf, 0xca, + 0x19, 0xe1, 0xe2, 0xce, 0xea, 0x79, 0xd4, 0xb8, 0x41, 0x8d, 0xeb, 0x37, 0x63, 0x2a, 0xb4, 0xfd, + 0x1e, 0xaa, 0x41, 0xb6, 0xd3, 0x23, 0x9d, 0x67, 0x1a, 0x1b, 0x97, 0xb3, 0xc2, 0xd3, 0xed, 0xd5, + 0x93, 0xa9, 0x73, 0xed, 0xd6, 0xb8, 0x19, 0x53, 0x33, 0x1d, 0xb7, 0xc9, 0xe3, 0xa2, 0x93, 0xbe, + 0x31, 0x22, 0x36, 0xf7, 0x92, 0x5b, 0x11, 0x17, 0xdf, 0xcb, 0x03, 0x57, 0x5f, 0xf8, 0xc9, 0xe9, + 0x5e, 0x07, 0x35, 0x20, 0x47, 0x4c, 0x5d, 0x2e, 0x0c, 0x84, 0xa3, 0x77, 0xd6, 0x9c, 0x0b, 0x53, + 0xf7, 0x96, 0x95, 0x25, 0xb2, 0x8d, 0x7e, 0x06, 0x5b, 0x1d, 0x3a, 0x18, 0x18, 0xac, 0x9c, 0x17, + 0x3e, 0xbe, 0xb7, 0x66, 0x49, 0x42, 0xb7, 0x19, 0x53, 0xa5, 0x15, 0x6a, 0x41, 0xb1, 0x6f, 0x38, + 0x4c, 0x73, 0x4c, 0x6c, 0x39, 0x3d, 0xca, 0x9c, 0x72, 0x41, 0xf8, 0xf9, 0x60, 0xb5, 0x9f, 0x53, + 0xc3, 0x61, 0x4f, 0x3c, 0x93, 0x66, 0x4c, 0xdd, 0xee, 0x07, 0x05, 0xdc, 0x2b, 0xbd, 0xba, 0x22, + 0xb6, 0xef, 0xb6, 0xbc, 0x1d, 0xc5, 0xeb, 0x63, 0x6e, 0xe3, 0x79, 0xe1, 0x5e, 0x69, 0x50, 0x80, + 0x30, 0xbc, 0xd1, 
0xa7, 0x58, 0xf7, 0x9d, 0x6a, 0x9d, 0xde, 0xd0, 0x7c, 0x56, 0x2e, 0x0a, 0xd7, + 0x87, 0x6b, 0x26, 0x4c, 0xb1, 0xee, 0x39, 0xaa, 0x73, 0xb3, 0x66, 0x4c, 0xdd, 0xed, 0xcf, 0x0a, + 0x91, 0x0e, 0x7b, 0xd8, 0xb2, 0xfa, 0x93, 0xd9, 0x31, 0x76, 0xc4, 0x18, 0x1f, 0xae, 0x1e, 0xe3, + 0x98, 0x5b, 0xce, 0x0e, 0x82, 0xf0, 0x9c, 0x14, 0x7d, 0x0e, 0x25, 0xcb, 0x26, 0x16, 0xb6, 0x89, + 0x66, 0xd9, 0xd4, 0xa2, 0x0e, 0xee, 0x97, 0x4b, 0x62, 0x84, 0xef, 0xaf, 0x3e, 0xdb, 0xe7, 0xae, + 0xd5, 0xb9, 0x34, 0x6a, 0xc6, 0xd4, 0x1d, 0x2b, 0x2c, 0x72, 0x7d, 0xd3, 0x0e, 0x71, 0x9c, 0xa9, + 0xef, 0xdd, 0x68, 0xbe, 0x85, 0x55, 0xd8, 0x77, 0x48, 0x54, 0xcb, 0x40, 0x7a, 0x84, 0xfb, 0x43, + 0xf2, 0x30, 0x95, 0x4d, 0x95, 0xd2, 0x95, 0xe7, 0x71, 0xc8, 0x07, 0xc0, 0x03, 0x95, 0x21, 0x33, + 0x22, 0xb6, 0x63, 0x50, 0x53, 0x00, 0x5d, 0x4e, 0xf5, 0xba, 0xe8, 0x16, 0x6c, 0x8b, 0x83, 0xae, + 0x79, 0xdf, 0x39, 0x9a, 0xa5, 0xd4, 0x82, 0x10, 0x3e, 0x95, 0x4a, 0x07, 0x90, 0xb7, 0x8e, 0x2c, + 0x5f, 0x25, 0x29, 0x54, 0xc0, 0x3a, 0xb2, 0x3c, 0x85, 0x9b, 0x50, 0xe0, 0x13, 0xf7, 0x35, 0x52, + 0x62, 0x90, 0x3c, 0x97, 0x49, 0x95, 0xca, 0xd7, 0x09, 0x28, 0xcd, 0xe2, 0x11, 0x87, 0x41, 0x0e, + 0xce, 0x12, 0x7d, 0x95, 0xaa, 0x8b, 0xdc, 0x55, 0x0f, 0xb9, 0xab, 0x2d, 0x0f, 0xb9, 0x6b, 0xd9, + 0xaf, 0xbe, 0x39, 0x88, 0x3d, 0xff, 0xf6, 0x20, 0xae, 0x0a, 0x0b, 0xf4, 0x36, 0x87, 0x0c, 0x6c, + 0x98, 0x9a, 0xa1, 0x8b, 0x29, 0xe7, 0x38, 0x12, 0x60, 0xc3, 0x3c, 0xd1, 0x91, 0x0a, 0xa5, 0x0e, + 0x35, 0x1d, 0x62, 0x3a, 0x43, 0x47, 0x73, 0x33, 0x83, 0xc4, 0xd9, 0x00, 0x1e, 0xb8, 0x59, 0xc5, + 0x0b, 0x74, 0xdd, 0xd3, 0x3f, 0x17, 0xea, 0xea, 0x4e, 0x27, 0x2c, 0x40, 0xa7, 0x00, 0x23, 0xdc, + 0x37, 0x74, 0xcc, 0xa8, 0xed, 0x94, 0x53, 0x37, 0x92, 0x2b, 0x40, 0xe1, 0xa9, 0xa7, 0x78, 0x61, + 0xe9, 0x98, 0x91, 0x5a, 0x8a, 0x4f, 0x5d, 0x0d, 0xd8, 0xa3, 0x77, 0x60, 0x07, 0x5b, 0x96, 0xe6, + 0x30, 0xcc, 0x88, 0xd6, 0x9e, 0x30, 0xe2, 0x08, 0x20, 0x2f, 0xa8, 0xdb, 0xd8, 0xb2, 0x9e, 0x70, + 0x69, 0x8d, 0x0b, 0xd1, 0x6d, 0x28, 0x72, 0xb8, 0x36, 0x70, 0x5f, 0xeb, 0x11, 0xa3, 0xdb, 0x63, + 0x02, 0xaa, 0x93, 0xea, 0xb6, 0x94, 0x36, 0x85, 0xb0, 0xf2, 0xe7, 0x04, 0xec, 0xce, 0x41, 0x2c, + 0x42, 0x90, 0xea, 0x61, 0xa7, 0x27, 0x62, 0x5b, 0x50, 0x45, 0x1b, 0xdd, 0x87, 0xad, 0x1e, 0xc1, + 0x3a, 0xb1, 0x65, 0xd2, 0xda, 0x5f, 0x1c, 0x90, 0x7b, 0xd5, 0xa6, 0xd0, 0x92, 0x53, 0x97, 0x36, + 0xe8, 0x53, 0x28, 0xf5, 0xb1, 0xc3, 0x34, 0x17, 0xa0, 0xb4, 0x40, 0x02, 0xbb, 0xb9, 0xe4, 0x00, + 0xbb, 0xa0, 0xc6, 0x8f, 0xa0, 0x74, 0x55, 0xe4, 0x0e, 0xa6, 0x52, 0x74, 0x09, 0x7b, 0xed, 0xc9, + 0xef, 0xb0, 0xc9, 0x0c, 0x93, 0x68, 0x73, 0x11, 0x5e, 0x96, 0x17, 0x7f, 0x69, 0x38, 0x6d, 0xd2, + 0xc3, 0x23, 0x83, 0x7a, 0x53, 0x7c, 0xc3, 0xf7, 0xe2, 0x47, 0xdf, 0xa9, 0xfc, 0x35, 0x09, 0x6f, + 0x2d, 0xbe, 0x9e, 0xe8, 0x06, 0x14, 0x06, 0x78, 0xac, 0xb1, 0xb1, 0x0c, 0x7f, 0x5c, 0xc4, 0x15, + 0x06, 0x78, 0xdc, 0x1a, 0xbb, 0xb1, 0x2f, 0x41, 0x92, 0x8d, 0x9d, 0x72, 0xe2, 0x46, 0xf2, 0x4e, + 0x41, 0xe5, 0x4d, 0x74, 0x09, 0xbb, 0x7d, 0xda, 0xc1, 0x7d, 0x2d, 0x10, 0x04, 0xb9, 0xfe, 0xf7, + 0x96, 0x4c, 0xb4, 0x31, 0x66, 0xc4, 0xd4, 0x89, 0x3e, 0x17, 0x87, 0x1d, 0xe1, 0xe9, 0xd4, 0x0f, + 0x06, 0x7a, 0x08, 0xf9, 0xc1, 0x74, 0x55, 0x1b, 0xaf, 0x3f, 0x68, 0x8c, 0xde, 0xe2, 0xbb, 0x2c, + 0x8e, 0x4b, 0x5a, 0x2c, 0x4b, 0xf6, 0xfc, 0xdb, 0xb6, 0xb5, 0xf1, 0x6d, 0xfb, 0x10, 0xf6, 0x4c, + 0x32, 0x66, 0x81, 0x1d, 0xd2, 0xc4, 0xd9, 0xca, 0x88, 0xb3, 0x85, 0xf8, 0xb7, 0x69, 0xdc, 0x9b, + 0xfc, 0xa4, 0xbd, 0x27, 0xc0, 0xce, 0xa2, 0x0e, 0xb1, 0x35, 0xac, 0xeb, 0x36, 0x71, 0x1c, 0x91, + 0xda, 0x0b, 0x02, 0xbb, 0x84, 0xfc, 0xd8, 
0x15, 0x57, 0xfe, 0x14, 0xdc, 0xa6, 0x10, 0xac, 0x79, + 0x9b, 0x10, 0x9f, 0x6e, 0xc2, 0x67, 0xb0, 0x27, 0xed, 0xf5, 0xd0, 0x3e, 0x24, 0x36, 0x3b, 0x87, + 0xc8, 0x73, 0xb2, 0x7c, 0x0b, 0x92, 0xdf, 0x65, 0x0b, 0xbc, 0xcb, 0x97, 0x0a, 0x5c, 0xbe, 0xff, + 0xb1, 0x6d, 0xf9, 0x07, 0x40, 0x56, 0x25, 0x8e, 0xc5, 0x91, 0x10, 0x35, 0x21, 0x47, 0xc6, 0x1d, + 0x62, 0x31, 0x2f, 0x85, 0x2c, 0x23, 0x7b, 0x3c, 0xe5, 0xba, 0x36, 0x0d, 0x4f, 0x9f, 0xb3, 0x2b, + 0xdf, 0x18, 0x7d, 0x24, 0x09, 0xf7, 0x3a, 0xd6, 0x2c, 0x9d, 0x04, 0x19, 0xf7, 0x7d, 0x8f, 0x71, + 0x27, 0xd7, 0x10, 0x2a, 0xd7, 0x76, 0x86, 0x72, 0x7f, 0x24, 0x29, 0x77, 0x2a, 0xd2, 0xc0, 0x21, + 0xce, 0x7d, 0x12, 0xe2, 0xdc, 0x5b, 0x6b, 0xb8, 0xae, 0xe7, 0x60, 0x21, 0xe9, 0xbe, 0xef, 0x91, + 0xee, 0x4c, 0xa4, 0x35, 0xcc, 0xb0, 0xee, 0xd3, 0x30, 0xeb, 0xce, 0xae, 0x04, 0x1f, 0xcf, 0xc7, + 0x52, 0xda, 0x5d, 0x0f, 0xd0, 0xee, 0xdc, 0x0a, 0x9e, 0x3b, 0x75, 0xb5, 0x80, 0x77, 0x9f, 0x84, + 0x78, 0x37, 0x44, 0x8a, 0xcd, 0x12, 0xe2, 0xfd, 0x49, 0x90, 0x78, 0xe7, 0xd7, 0x54, 0x36, 0xf2, + 0x7c, 0x2c, 0x62, 0xde, 0x3f, 0xf7, 0x99, 0x77, 0x61, 0x4d, 0x31, 0x21, 0x57, 0x35, 0x4b, 0xbd, + 0x2f, 0xe6, 0xa8, 0xb7, 0x4b, 0x92, 0xef, 0xae, 0x71, 0xb4, 0x86, 0x7b, 0x5f, 0xcc, 0x71, 0xef, + 0x62, 0x24, 0xb7, 0x6b, 0xc8, 0x77, 0x7b, 0x31, 0xf9, 0x5e, 0x47, 0x8c, 0xe5, 0x94, 0xa3, 0xb1, + 0x6f, 0xb2, 0x84, 0x7d, 0xbb, 0xdc, 0xf8, 0xde, 0x9a, 0x41, 0x22, 0xd3, 0xef, 0xcb, 0x05, 0xf4, + 0xdb, 0xa5, 0xc8, 0xd5, 0x35, 0x07, 0x21, 0x02, 0xff, 0xbe, 0x5c, 0xc0, 0xbf, 0x51, 0x44, 0xe7, + 0x9b, 0x10, 0xf0, 0x74, 0x69, 0xab, 0xf2, 0x22, 0xce, 0x29, 0xd9, 0x0c, 0x12, 0x2c, 0x64, 0xa6, + 0xf1, 0xd7, 0xca, 0x4c, 0x13, 0xdf, 0x91, 0x99, 0xbe, 0x0d, 0x59, 0xce, 0x4c, 0x45, 0x16, 0x49, + 0x8a, 0xbc, 0x90, 0xc1, 0x96, 0xc5, 0x53, 0x47, 0x45, 0x07, 0x34, 0x8f, 0x28, 0xe8, 0x0c, 0xb6, + 0xc8, 0x88, 0x98, 0xcc, 0x4d, 0xd2, 0xf9, 0xa3, 0xeb, 0xcb, 0x98, 0x10, 0x57, 0xaa, 0x95, 0xf9, + 0x80, 0xff, 0xfe, 0xe6, 0xa0, 0xe4, 0xda, 0xdc, 0xa5, 0x03, 0x83, 0x91, 0x81, 0xc5, 0x26, 0xaa, + 0xf4, 0x52, 0x79, 0x99, 0x80, 0x9d, 0x19, 0xb4, 0xe1, 0xc9, 0xb4, 0x43, 0x75, 0xb7, 0x4a, 0xd8, + 0x56, 0x45, 0x9b, 0xcb, 0x74, 0xcc, 0xb0, 0x48, 0x23, 0x05, 0x55, 0xb4, 0x39, 0x5b, 0xe8, 0xd3, + 0xae, 0x98, 0x77, 0x4e, 0xe5, 0x4d, 0xae, 0xe5, 0x63, 0x7e, 0x4e, 0x82, 0xf9, 0x3e, 0x40, 0x17, + 0x3b, 0xda, 0x97, 0xd8, 0x64, 0x44, 0x97, 0xa9, 0x38, 0x20, 0x41, 0x0a, 0x64, 0x79, 0x6f, 0xe8, + 0x10, 0x5d, 0xd2, 0x6d, 0xbf, 0x1f, 0x58, 0x6d, 0xe6, 0x75, 0xac, 0x16, 0x5d, 0x87, 0x1c, 0x5f, + 0x8d, 0x63, 0xe1, 0x0e, 0x11, 0x68, 0x9e, 0x53, 0xa7, 0x02, 0x4e, 0x18, 0x1c, 0x4e, 0x1f, 0x6d, + 0x81, 0xce, 0x39, 0x55, 0xf6, 0xf8, 0x0c, 0x2d, 0xdb, 0xa0, 0xb6, 0xc1, 0x26, 0x02, 0x70, 0x93, + 0xaa, 0xdf, 0xe7, 0xf5, 0xdc, 0x80, 0x0c, 0x2c, 0x4a, 0xfb, 0x1a, 0xb1, 0x6d, 0x6a, 0x0b, 0x1c, + 0xcd, 0xa9, 0x05, 0x29, 0x6c, 0x70, 0x59, 0xe5, 0x0f, 0x89, 0xe9, 0xe9, 0xf4, 0xb1, 0xf8, 0xff, + 0x35, 0xcc, 0x95, 0xbf, 0x88, 0xca, 0x34, 0x9c, 0x4f, 0xd0, 0x67, 0xb0, 0xeb, 0x5f, 0x0b, 0x6d, + 0x28, 0xae, 0x8b, 0x77, 0xc4, 0x37, 0xbb, 0x5d, 0xa5, 0x51, 0x58, 0xec, 0x20, 0x0d, 0xae, 0xcd, + 0xa0, 0x80, 0x3f, 0x40, 0x62, 0x33, 0x30, 0x78, 0x33, 0x0c, 0x06, 0xde, 0x00, 0xd3, 0xf0, 0x25, + 0x5f, 0xcb, 0x9d, 0xfc, 0x00, 0xae, 0x2d, 0x81, 0xd9, 0x79, 0x82, 0x5e, 0xf9, 0x5b, 0x3c, 0xa8, + 0x1d, 0xa6, 0xf3, 0x17, 0xb0, 0xc5, 0x6b, 0xde, 0xa1, 0x8b, 0x7a, 0xc5, 0xa3, 0x9f, 0x6e, 0x86, + 0xbb, 0x55, 0xaf, 0xf1, 0x44, 0x38, 0x51, 0xa5, 0xb3, 0xca, 0x0f, 0xa1, 0x18, 0xfe, 0x82, 0xf2, + 0x90, 0xb9, 0x38, 0x7b, 0x74, 0xf6, 0xf8, 0x57, 0x67, 0xa5, 0x18, 
0x02, 0xd8, 0x3a, 0xae, 0xd7, + 0x1b, 0xe7, 0xad, 0x52, 0x9c, 0xb7, 0xd5, 0xc6, 0xc3, 0x46, 0xbd, 0x55, 0x4a, 0x54, 0x34, 0x80, + 0x40, 0x25, 0xba, 0x07, 0x69, 0x9b, 0x0e, 0x4d, 0x5d, 0x4c, 0x2d, 0xad, 0xba, 0x1d, 0xf4, 0x31, + 0xa4, 0x47, 0xd4, 0xdd, 0x19, 0x1e, 0xc9, 0x83, 0x65, 0x5b, 0x4f, 0x19, 0x09, 0x54, 0x17, 0xae, + 0x4d, 0x85, 0x02, 0x9a, 0x2f, 0x00, 0x97, 0x0c, 0x54, 0x0f, 0x0f, 0xf4, 0xee, 0x9a, 0x82, 0x72, + 0xf1, 0x80, 0xbf, 0x8f, 0x43, 0x5a, 0x6c, 0x2a, 0xbf, 0x91, 0xfc, 0xe8, 0xc8, 0x5f, 0x7b, 0x44, + 0x1b, 0x75, 0x00, 0x30, 0x63, 0xb6, 0xd1, 0x1e, 0x4e, 0xc7, 0xb9, 0xbd, 0xea, 0x68, 0x1c, 0x7b, + 0xda, 0xb5, 0xeb, 0xf2, 0x8c, 0xec, 0x4d, 0x1d, 0x04, 0xce, 0x49, 0xc0, 0x6d, 0xe5, 0x0c, 0x8a, + 0x61, 0x5b, 0x7e, 0x44, 0x9e, 0x91, 0x89, 0x9c, 0x09, 0x6f, 0xf2, 0x08, 0x88, 0x5c, 0x29, 0x7f, + 0xb8, 0x71, 0x3b, 0x5c, 0x6a, 0x98, 0x3a, 0x19, 0x0b, 0x60, 0xc9, 0xaa, 0x6e, 0xa7, 0xf2, 0xf7, + 0x38, 0x94, 0x66, 0x17, 0x8d, 0x1e, 0x40, 0xce, 0xbf, 0x55, 0x32, 0x81, 0xde, 0x58, 0x77, 0x29, + 0x65, 0xa4, 0xa6, 0x86, 0xe8, 0x7d, 0xd8, 0x75, 0x8c, 0xae, 0xe9, 0x15, 0x92, 0x2e, 0xed, 0x4c, + 0x88, 0xc1, 0x77, 0xdc, 0x0f, 0xbc, 0x38, 0x74, 0xe1, 0xe0, 0x36, 0x14, 0x79, 0x88, 0x35, 0xc2, + 0xa7, 0xe2, 0xff, 0x08, 0x56, 0x50, 0xb7, 0xb9, 0xb4, 0xe1, 0x09, 0x2b, 0x7f, 0x4c, 0x40, 0x3e, + 0x50, 0x19, 0xa2, 0x9f, 0x04, 0xb6, 0xa1, 0xb8, 0x94, 0x5d, 0x07, 0x2c, 0x5a, 0x13, 0x8b, 0xc8, + 0xed, 0x0a, 0x2d, 0x32, 0xf1, 0xdf, 0x2e, 0x72, 0x5a, 0x74, 0x26, 0x17, 0x16, 0x9d, 0xa9, 0x8d, + 0x8b, 0xce, 0xbb, 0x80, 0x18, 0x65, 0xb8, 0xaf, 0x8d, 0x28, 0x33, 0xcc, 0xae, 0x66, 0xd1, 0x2f, + 0x89, 0x2d, 0x01, 0xbe, 0x24, 0xbe, 0x3c, 0x15, 0x1f, 0xce, 0xb9, 0xfc, 0xfd, 0x47, 0xb0, 0x33, + 0xb3, 0xbc, 0xf0, 0xe5, 0x44, 0x50, 0x7c, 0x70, 0x71, 0x7e, 0x7a, 0x52, 0x3f, 0x6e, 0x35, 0xb4, + 0xa7, 0x8f, 0x5b, 0x8d, 0x52, 0x1c, 0x5d, 0x83, 0x37, 0x4e, 0x4f, 0x7e, 0xd1, 0x6c, 0x69, 0xf5, + 0xd3, 0x93, 0xc6, 0x59, 0x4b, 0x3b, 0x6e, 0xb5, 0x8e, 0xeb, 0x8f, 0x4a, 0x89, 0xa3, 0x7f, 0x16, + 0x60, 0xe7, 0xb8, 0x56, 0x3f, 0xe1, 0x7c, 0xd2, 0xe8, 0x60, 0x51, 0x4f, 0x3e, 0x86, 0x14, 0x2f, + 0x12, 0x51, 0x84, 0xa7, 0x1b, 0x25, 0x4a, 0xb5, 0x89, 0x54, 0x48, 0x8b, 0xca, 0x11, 0x45, 0x79, + 0xd1, 0x51, 0x22, 0x15, 0xa1, 0x7c, 0x92, 0xe2, 0xe0, 0x46, 0x78, 0xe8, 0x51, 0xa2, 0x54, 0xa6, + 0xe8, 0x37, 0x90, 0x9b, 0x26, 0xee, 0xa8, 0xcf, 0x1c, 0x4a, 0xe4, 0xba, 0x0c, 0xfd, 0x1a, 0x32, + 0x1e, 0xfb, 0x8a, 0xf6, 0x14, 0xa3, 0x44, 0x2c, 0x1d, 0x79, 0x78, 0x45, 0x51, 0x8b, 0xa2, 0xbc, + 0x37, 0x29, 0x91, 0xea, 0x63, 0x9e, 0x57, 0xe4, 0x6f, 0x38, 0x91, 0x1e, 0x59, 0x94, 0x68, 0x05, + 0x21, 0x0f, 0xf2, 0x94, 0xbb, 0x47, 0x7d, 0x63, 0x53, 0x22, 0xff, 0x30, 0x80, 0x30, 0x40, 0x80, + 0x49, 0x47, 0x7e, 0x3c, 0x53, 0xa2, 0x17, 0xfc, 0xe8, 0x12, 0xb2, 0x3e, 0xa5, 0x89, 0xf8, 0x88, + 0xa5, 0x44, 0xad, 0xb9, 0xd1, 0x6f, 0x61, 0x3b, 0x54, 0xf1, 0xa2, 0x4d, 0x9e, 0xa6, 0x94, 0x8d, + 0x8a, 0x69, 0x3e, 0x56, 0xa8, 0x0c, 0x46, 0x9b, 0x3c, 0x58, 0x29, 0x1b, 0x55, 0xd8, 0x68, 0x04, + 0xbb, 0x73, 0x65, 0x31, 0xda, 0xf4, 0x15, 0x4b, 0xd9, 0xb8, 0xf2, 0x46, 0x13, 0x40, 0xf3, 0x95, + 0x32, 0xda, 0xf8, 0x69, 0x4b, 0xd9, 0xbc, 0x1c, 0x47, 0x16, 0xec, 0xcc, 0x52, 0xbb, 0xcd, 0x1e, + 0xbc, 0x94, 0x0d, 0x0b, 0x74, 0x77, 0xc4, 0x30, 0x3d, 0xdc, 0xec, 0x19, 0x4c, 0xd9, 0xb0, 0x6a, + 0xaf, 0x9d, 0x7f, 0xf5, 0x72, 0x3f, 0xfe, 0xe2, 0xe5, 0x7e, 0xfc, 0x5f, 0x2f, 0xf7, 0xe3, 0xcf, + 0x5f, 0xed, 0xc7, 0x5e, 0xbc, 0xda, 0x8f, 0x7d, 0xfd, 0x6a, 0x3f, 0xf6, 0xf9, 0x8f, 0xba, 0x06, + 0xeb, 0x0d, 0xdb, 0xdc, 0xdf, 0xa1, 0xff, 0x0f, 0x83, 0xe9, 0x3f, 0x16, 0x2c, 0xe3, 0x70, 
0xe1, + 0xdf, 0x21, 0xda, 0x5b, 0x22, 0x5d, 0xfe, 0xe0, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6e, + 0x78, 0x98, 0x2e, 0x21, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ABCIApplicationClient is the client API for ABCIApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ABCIApplicationClient interface { + // Echo returns back the same message it is sent. + Echo(ctx context.Context, in *v1beta1.RequestEcho, opts ...grpc.CallOption) (*v1beta1.ResponseEcho, error) + // Flush flushes the write buffer. + Flush(ctx context.Context, in *v1beta1.RequestFlush, opts ...grpc.CallOption) (*v1beta1.ResponseFlush, error) + // Info returns information about the application state. + Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*v1beta1.ResponseInfo, error) + // DeliverTx applies a transaction. + DeliverTx(ctx context.Context, in *v1beta1.RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) + // CheckTx validates a transaction. + CheckTx(ctx context.Context, in *v1beta1.RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) + // Query queries the application state. + Query(ctx context.Context, in *v1beta1.RequestQuery, opts ...grpc.CallOption) (*v1beta1.ResponseQuery, error) + // Commit commits a block of transactions. + Commit(ctx context.Context, in *v1beta1.RequestCommit, opts ...grpc.CallOption) (*v1beta1.ResponseCommit, error) + // InitChain initializes the blockchain. + InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) + // BeginBlock signals the beginning of a block. + BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) + // EndBlock signals the end of a block, returns changes to the validator set. + EndBlock(ctx context.Context, in *v1beta1.RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(ctx context.Context, in *v1beta1.RequestListSnapshots, opts ...grpc.CallOption) (*v1beta1.ResponseListSnapshots, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(ctx context.Context, in *v1beta1.RequestOfferSnapshot, opts ...grpc.CallOption) (*v1beta1.ResponseOfferSnapshot, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(ctx context.Context, in *v1beta1.RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseLoadSnapshotChunk, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(ctx context.Context, in *v1beta1.RequestApplySnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseApplySnapshotChunk, error) + // PrepareProposal returns a proposal for the next block. + PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) + // ProcessProposal validates a proposal. 
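+ //
+ // Editor's illustrative call-site sketch (not part of the generated
+ // interface); `conn` stands for an assumed, already-dialed connection
+ // compatible with grpc1.ClientConn, and `ctx` for a caller-supplied
+ // context:
+ //
+ //	client := NewABCIApplicationClient(conn)
+ //	resp, err := client.ProcessProposal(ctx, &RequestProcessProposal{})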
+ ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) +} + +type aBCIApplicationClient struct { + cc grpc1.ClientConn +} + +func NewABCIApplicationClient(cc grpc1.ClientConn) ABCIApplicationClient { + return &aBCIApplicationClient{cc} +} + +func (c *aBCIApplicationClient) Echo(ctx context.Context, in *v1beta1.RequestEcho, opts ...grpc.CallOption) (*v1beta1.ResponseEcho, error) { + out := new(v1beta1.ResponseEcho) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/Echo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Flush(ctx context.Context, in *v1beta1.RequestFlush, opts ...grpc.CallOption) (*v1beta1.ResponseFlush, error) { + out := new(v1beta1.ResponseFlush) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/Flush", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*v1beta1.ResponseInfo, error) { + out := new(v1beta1.ResponseInfo) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *v1beta1.RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { + out := new(ResponseDeliverTx) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/DeliverTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *v1beta1.RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { + out := new(ResponseCheckTx) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/CheckTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Query(ctx context.Context, in *v1beta1.RequestQuery, opts ...grpc.CallOption) (*v1beta1.ResponseQuery, error) { + out := new(v1beta1.ResponseQuery) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) Commit(ctx context.Context, in *v1beta1.RequestCommit, opts ...grpc.CallOption) (*v1beta1.ResponseCommit, error) { + out := new(v1beta1.ResponseCommit) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { + out := new(ResponseInitChain) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/InitChain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { + out := new(ResponseBeginBlock) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/BeginBlock", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *v1beta1.RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { + out := new(ResponseEndBlock) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/EndBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *v1beta1.RequestListSnapshots, opts ...grpc.CallOption) (*v1beta1.ResponseListSnapshots, error) { + out := new(v1beta1.ResponseListSnapshots) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) OfferSnapshot(ctx context.Context, in *v1beta1.RequestOfferSnapshot, opts ...grpc.CallOption) (*v1beta1.ResponseOfferSnapshot, error) { + out := new(v1beta1.ResponseOfferSnapshot) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) LoadSnapshotChunk(ctx context.Context, in *v1beta1.RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseLoadSnapshotChunk, error) { + out := new(v1beta1.ResponseLoadSnapshotChunk) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/LoadSnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *v1beta1.RequestApplySnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseApplySnapshotChunk, error) { + out := new(v1beta1.ResponseApplySnapshotChunk) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/ApplySnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) { + out := new(ResponsePrepareProposal) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/PrepareProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) { + out := new(ResponseProcessProposal) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta2.ABCIApplication/ProcessProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ABCIApplicationServer is the server API for ABCIApplication service. +type ABCIApplicationServer interface { + // Echo returns back the same message it is sent. + Echo(context.Context, *v1beta1.RequestEcho) (*v1beta1.ResponseEcho, error) + // Flush flushes the write buffer. + Flush(context.Context, *v1beta1.RequestFlush) (*v1beta1.ResponseFlush, error) + // Info returns information about the application state. + Info(context.Context, *RequestInfo) (*v1beta1.ResponseInfo, error) + // DeliverTx applies a transaction. + DeliverTx(context.Context, *v1beta1.RequestDeliverTx) (*ResponseDeliverTx, error) + // CheckTx validates a transaction. + CheckTx(context.Context, *v1beta1.RequestCheckTx) (*ResponseCheckTx, error) + // Query queries the application state. 
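+ //
+ // Editor's illustrative sketch (not part of the generated interface): a
+ // concrete application type (here the hypothetical `MyApp`) would satisfy
+ // this method roughly as
+ //
+ //	func (app *MyApp) Query(ctx context.Context, req *v1beta1.RequestQuery) (*v1beta1.ResponseQuery, error) {
+ //		return &v1beta1.ResponseQuery{Code: 0}, nil
+ //	}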
+ Query(context.Context, *v1beta1.RequestQuery) (*v1beta1.ResponseQuery, error) + // Commit commits a block of transactions. + Commit(context.Context, *v1beta1.RequestCommit) (*v1beta1.ResponseCommit, error) + // InitChain initializes the blockchain. + InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) + // BeginBlock signals the beginning of a block. + BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) + // EndBlock signals the end of a block, returns changes to the validator set. + EndBlock(context.Context, *v1beta1.RequestEndBlock) (*ResponseEndBlock, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(context.Context, *v1beta1.RequestListSnapshots) (*v1beta1.ResponseListSnapshots, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(context.Context, *v1beta1.RequestOfferSnapshot) (*v1beta1.ResponseOfferSnapshot, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(context.Context, *v1beta1.RequestLoadSnapshotChunk) (*v1beta1.ResponseLoadSnapshotChunk, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(context.Context, *v1beta1.RequestApplySnapshotChunk) (*v1beta1.ResponseApplySnapshotChunk, error) + // PrepareProposal returns a proposal for the next block. + PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) + // ProcessProposal validates a proposal. + ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) +} + +// UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. +type UnimplementedABCIApplicationServer struct { +} + +func (*UnimplementedABCIApplicationServer) Echo(ctx context.Context, req *v1beta1.RequestEcho) (*v1beta1.ResponseEcho, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *v1beta1.RequestFlush) (*v1beta1.ResponseFlush, error) { + return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +} +func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*v1beta1.ResponseInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedABCIApplicationServer) DeliverTx(ctx context.Context, req *v1beta1.RequestDeliverTx) (*ResponseDeliverTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeliverTx not implemented") +} +func (*UnimplementedABCIApplicationServer) CheckTx(ctx context.Context, req *v1beta1.RequestCheckTx) (*ResponseCheckTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") +} +func (*UnimplementedABCIApplicationServer) Query(ctx context.Context, req *v1beta1.RequestQuery) (*v1beta1.ResponseQuery, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedABCIApplicationServer) Commit(ctx context.Context, req *v1beta1.RequestCommit) (*v1beta1.ResponseCommit, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedABCIApplicationServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") +} +func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req *RequestBeginBlock) 
(*ResponseBeginBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method BeginBlock not implemented") +} +func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *v1beta1.RequestEndBlock) (*ResponseEndBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") +} +func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *v1beta1.RequestListSnapshots) (*v1beta1.ResponseListSnapshots, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIApplicationServer) OfferSnapshot(ctx context.Context, req *v1beta1.RequestOfferSnapshot) (*v1beta1.ResponseOfferSnapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context, req *v1beta1.RequestLoadSnapshotChunk) (*v1beta1.ResponseLoadSnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *v1beta1.RequestApplySnapshotChunk) (*v1beta1.ResponseApplySnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) PrepareProposal(ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") +} +func (*UnimplementedABCIApplicationServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") +} + +func RegisterABCIApplicationServer(s grpc1.Server, srv ABCIApplicationServer) { + s.RegisterService(&_ABCIApplication_serviceDesc, srv) +} + +func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestEcho) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Echo(ctx, req.(*v1beta1.RequestEcho)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestFlush) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Flush(ctx, req.(*v1beta1.RequestFlush)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInfo) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestDeliverTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).DeliverTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/DeliverTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*v1beta1.RequestDeliverTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestCheckTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*v1beta1.RequestCheckTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Query(ctx, req.(*v1beta1.RequestQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestCommit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Commit(ctx, req.(*v1beta1.RequestCommit)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInitChain) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/cometbft.abci.v1beta2.ABCIApplication/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBeginBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).BeginBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/BeginBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestEndBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).EndBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/EndBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*v1beta1.RequestEndBlock)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestListSnapshots) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, req.(*v1beta1.RequestListSnapshots)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestOfferSnapshot) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/OfferSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, req.(*v1beta1.RequestOfferSnapshot)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestLoadSnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/LoadSnapshotChunk", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, req.(*v1beta1.RequestLoadSnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestApplySnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, req.(*v1beta1.RequestApplySnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPrepareProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/PrepareProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, req.(*RequestPrepareProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestProcessProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta2.ABCIApplication/ProcessProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, req.(*RequestProcessProposal)) + } + return interceptor(ctx, in, info, handler) +} + +var ABCIApplication_serviceDesc = _ABCIApplication_serviceDesc +var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.abci.v1beta2.ABCIApplication", + HandlerType: (*ABCIApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCIApplication_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCIApplication_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCIApplication_Info_Handler, + }, + { + MethodName: "DeliverTx", + Handler: _ABCIApplication_DeliverTx_Handler, + }, + { + MethodName: "CheckTx", + Handler: _ABCIApplication_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCIApplication_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCIApplication_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCIApplication_InitChain_Handler, + }, + { + MethodName: "BeginBlock", + Handler: _ABCIApplication_BeginBlock_Handler, + }, + { + MethodName: "EndBlock", + Handler: _ABCIApplication_EndBlock_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _ABCIApplication_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + 
Handler: _ABCIApplication_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCIApplication_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCIApplication_ApplySnapshotChunk_Handler, + }, + { + MethodName: "PrepareProposal", + Handler: _ABCIApplication_PrepareProposal_Handler, + }, + { + MethodName: "ProcessProposal", + Handler: _ABCIApplication_ProcessProposal_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/abci/v1beta2/types.proto", +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if 
m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Request_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Request_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *RequestInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AbciVersion) > 0 { + i -= len(m.AbciVersion) + copy(dAtA[i:], m.AbciVersion) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AbciVersion))) + i-- + dAtA[i] = 0x22 + } + if m.P2PVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) + i-- + dAtA[i] = 0x18 + } + if m.BlockVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) + i-- + dAtA[i] = 0x10 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppStateBytes) > 0 { + i -= len(m.AppStateBytes) + copy(dAtA[i:], 
m.AppStateBytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) + i-- + dAtA[i] = 0x2a + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + n18, err18 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err18 != nil { + return 0, err18 + } + i -= n18 + i = encodeVarintTypes(dAtA, i, uint64(n18)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + n21, err21 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err21 != nil { + return 0, err21 + } + i -= n21 + i = 
encodeVarintTypes(dAtA, i, uint64(n21)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.LocalLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.MaxTxBytes != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + n23, err23 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err23 != nil { + return 0, err23 + } + i -= n23 + i = encodeVarintTypes(dAtA, i, uint64(n23)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ProposedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exception != nil { + { + size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m 
*Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Response_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Response_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x1a + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MempoolError) > 0 { + i -= len(m.MempoolError) + copy(dAtA[i:], m.MempoolError) + i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) + i-- + dAtA[i] = 0x5a + } + if m.Priority != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x50 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x4a + } + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, 
i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExtendedCommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedCommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index { + i-- + if m.Index { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtendedVoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedVoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedVoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + 
i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0x1a + } + if m.SignedLastBlock { + i-- + if m.SignedLastBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Misbehavior) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehavior) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x28 + } + n45, err45 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err45 != nil { + return 0, err45 + } + i -= n45 + i = encodeVarintTypes(dAtA, i, uint64(n45)) + i-- + dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Request_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_BeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l 
= m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_EndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *RequestInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.BlockVersion != 0 { + n += 1 + sovTypes(uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + n += 1 + sovTypes(uint64(m.P2PVersion)) + } + l = len(m.AbciVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppStateBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func (m *RequestBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.LastCommitInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *RequestPrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxTxBytes)) + } + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + 
sovTypes(uint64(l)) + } + } + l = m.LocalLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.ProposedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Response) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Response_Exception) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exception != nil { + l = m.Exception.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_BeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_EndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Commit) Size() (n int) { + if m == nil { + return 0 
+ } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ResponseInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseCheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Priority != 0 { + n += 1 + sovTypes(uint64(m.Priority)) + } + l = len(m.MempoolError) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseDeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events 
{ + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponsePrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovTypes(uint64(m.Status)) + } + return n +} + +func (m *CommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ExtendedCommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *EventAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Index { + n += 2 + } + return n +} + +func (m *ExtendedVoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.SignedLastBlock { + n += 2 + } + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Misbehavior) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Echo{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Flush{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Info{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_InitChain{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Query{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestBeginBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_BeginBlock{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_CheckTx{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_DeliverTx{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_EndBlock{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + 
} + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Commit{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ListSnapshots{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_OfferSnapshot{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_LoadSnapshotChunk{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ApplySnapshotChunk{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestPrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_PrepareProposal{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ProcessProposal{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + } + m.BlockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + } + m.P2PVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2PVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AbciVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &v1beta2.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, v1beta1.ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) 
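// All of these inner for-loops decode protobuf varints: each byte contributes
// its low seven bits (b & 0x7F) at the current shift, and a clear high bit
// (b < 0x80) terminates the value, so 0xAC 0x02 decodes to 44 + 256 = 300.
// The shift >= 64 guard rejects encodings longer than ten bytes, which would
// otherwise overflow a uint64. A standalone sketch of the same loop (a
// hypothetical helper, not part of this file):
//
//	func decodeUvarint(buf []byte) (x uint64, n int) {
//		for shift := uint(0); shift < 64; shift += 7 {
//			b := buf[n]
//			n++
//			x |= uint64(b&0x7F) << shift
//			if b < 0x80 {
//				break
//			}
//		}
//		return x, n
//	}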
+ if m.AppStateBytes == nil { + m.AppStateBytes = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
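// append(m.Hash[:0], ...) copies the payload while reusing the field's
// existing backing array when its capacity allows, so repeated unmarshals
// into the same message avoid a fresh allocation; the nil check that follows
// normalizes a present-but-empty bytes field to []byte{}, keeping it
// distinguishable from an absent (nil) field.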
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTxBytes", wireType) + } + m.MaxTxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.MaxTxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestProcessProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposedLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ProposedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Misbehavior = append(m.Misbehavior, Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
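// Both bounds checks around every length-delimited read matter: postIndex > l
// catches truncated input, while postIndex < 0 catches integer overflow from
// a maliciously large length prefix (iNdEx + byteLen wrapping negative)
// before the result is ever used to slice dAtA.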
+ if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseException{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Exception{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Echo{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Flush{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Info{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_InitChain{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Query{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseBeginBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_BeginBlock{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_CheckTx{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_DeliverTx{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_EndBlock{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Commit{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ListSnapshots{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_OfferSnapshot{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_LoadSnapshotChunk{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ApplySnapshotChunk{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponsePrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_PrepareProposal{v} + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ProcessProposal{v} 
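// gogoproto models the Response oneof as a single interface-valued field:
// each case decodes into a generated wrapper and assigns it to m.Value, so if
// several oneof alternatives appear on the wire, the last one decoded wins,
// matching protobuf's last-value-wins oneof semantics. A trimmed sketch of
// the generated shape (abridged, for orientation only):
//
//	type isResponse_Value interface{ isResponse_Value() }
//	type Response struct{ Value isResponse_Value }
//	type Response_Commit struct{ Commit *v1beta1.ResponseCommit }
//	func (*Response_Commit) isResponse_Value() {}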
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &v1beta2.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, v1beta1.ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
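// The default arm above hands any unrecognized field number to skipTypes,
// which advances past the value according to its wire type instead of
// failing. Tolerating unknown fields is what lets a decoder built against
// this schema read messages produced by newer revisions that add fields.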
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
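Wire type 2 fields, which cover every string and bytes member in these messages (Data, Log, Info, Codespace, and the rest), all follow one shape: decode a varint length, guard against negative lengths and integer-overflowed offsets, bounds-check against the buffer, then copy. The copy matters: append(m.Data[:0], ...) and string(...) both duplicate the payload so the decoded message never aliases the caller's input slice. A condensed sketch, reusing the hypothetical decodeVarint above and assuming the io and fmt imports this file already has:

func readLengthDelimited(buf []byte, i int) (field []byte, next int, err error) {
	length, n := decodeVarint(buf[i:])
	if n == 0 {
		return nil, 0, io.ErrUnexpectedEOF
	}
	i += n
	end := i + int(length)
	// Mirrors the ErrInvalidLengthTypes guards in the generated code.
	if int(length) < 0 || end < 0 {
		return nil, 0, fmt.Errorf("proto: negative length")
	}
	if end > len(buf) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return append([]byte(nil), buf[i:end]...), end, nil
}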
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MempoolError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, v1beta1.ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &v1beta2.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex 
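Embedded messages reuse the same length-delimited framing and then recurse: the payload slice is handed to the field's own Unmarshal. Two shapes appear in these methods: optional message fields such as ConsensusParamUpdates are allocated on first occurrence and decoded into, while repeated fields such as Events and ValidatorUpdates append a zero value and decode into the new element, so a field that repeats on the wire simply keeps appending. A hypothetical condensation of the two cases:

func readEmbedded(m *ResponseEndBlock, field int, payload []byte) error {
	switch field {
	case 2: // optional: allocate once, then decode into it
		if m.ConsensusParamUpdates == nil {
			m.ConsensusParamUpdates = &v1beta2.ConsensusParams{}
		}
		return m.ConsensusParamUpdates.Unmarshal(payload)
	case 3: // repeated: append a zero element, decode in place
		m.Events = append(m.Events, Event{})
		return m.Events[len(m.Events)-1].Unmarshal(payload)
	default:
		return nil
	}
}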
< 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseProcessProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= ResponseProcessProposal_ProposalStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Votes = append(m.Votes, v1beta1.VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedCommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Votes = append(m.Votes, ExtendedVoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, EventAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Index = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedVoteInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedVoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedLastBlock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SignedLastBlock = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) + if m.VoteExtension == nil { + m.VoteExtension = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehavior) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Misbehavior: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Misbehavior: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MisbehaviorType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/abci/v1beta3/types.go b/api/cometbft/abci/v1beta3/types.go new file mode 100644 index 00000000000..5a1266845af --- /dev/null +++ b/api/cometbft/abci/v1beta3/types.go @@ -0,0 +1,81 @@ +package v1beta3 + +import ( + 
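skipTypes, defined at the end of the file closed just above, is what gives every generated Unmarshal its forward compatibility: a field number the switch does not recognize is measured purely from its wire type and stepped over, so payloads produced by a newer schema still decode. Summarized from the code above:

// How skipTypes advances past an unknown field, by wire type:
//   0 (varint)            consume bytes until one has its high bit clear
//   1 (fixed64)           advance 8 bytes
//   2 (length-delimited)  read a varint length, advance that many bytes
//   3 (group start)       depth++ (legacy groups)
//   4 (group end)         depth-- (an unmatched end is ErrUnexpectedEndOfGroupTypes)
//   5 (fixed32)           advance 4 bytes
// The byte count is returned only once depth is back to zero, so whole
// nested groups are skipped as a unit.

The hand-written types.go opening here adds JSON overrides because the generated struct tags carry omitempty: under encoding/json, a successful ResponseCheckTx (Code == 0) would serialize as {}, and RPC clients could not tell "ok" from "field absent". A minimal, runnable illustration of the failure mode that jsonpb.Marshaler{EmitDefaults: true} avoids, using a stand-in struct rather than the generated type:

package main

import (
	"encoding/json"
	"fmt"
)

// respShape mimics the generated omitempty tags on ResponseCheckTx.
type respShape struct {
	Code uint32 `json:"code,omitempty"`
	Log  string `json:"log,omitempty"`
}

func main() {
	b, _ := json.Marshal(respShape{Code: 0, Log: ""})
	fmt.Println(string(b)) // prints {} -- the zero Code is silently dropped
	// jsonpb with EmitDefaults: true yields {"code":0,"log":""} instead,
	// and EnumsAsInts keeps enum fields numeric rather than string names.
}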
"bytes" + + "github.com/cosmos/gogoproto/jsonpb" +) + +const ( + CodeTypeOK uint32 = 0 +) + +// IsOK returns true if Code is OK. +func (r ResponseCheckTx) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r ResponseCheckTx) IsErr() bool { + return r.Code != CodeTypeOK +} + +// IsOK returns true if Code is OK. +func (r ExecTxResult) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r ExecTxResult) IsErr() bool { + return r.Code != CodeTypeOK +} + +func (r ResponseVerifyVoteExtension) IsAccepted() bool { + return r.Status == ResponseVerifyVoteExtension_ACCEPT +} + +// IsStatusUnknown returns true if Code is Unknown +func (r ResponseVerifyVoteExtension) IsStatusUnknown() bool { + return r.Status == ResponseVerifyVoteExtension_UNKNOWN +} + +// --------------------------------------------------------------------------- +// override JSON marshaling so we emit defaults (ie. disable omitempty) + +var ( + jsonpbMarshaller = jsonpb.Marshaler{ + EnumsAsInts: true, + EmitDefaults: true, + } + jsonpbUnmarshaller = jsonpb.Unmarshaler{} +) + +func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ResponseCommit) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ResponseCommit) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + +func (r *ExecTxResult) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *ExecTxResult) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} diff --git a/api/cometbft/abci/v1beta3/types.pb.go b/api/cometbft/abci/v1beta3/types.pb.go new file mode 100644 index 00000000000..723b13e8ec3 --- /dev/null +++ b/api/cometbft/abci/v1beta3/types.pb.go @@ -0,0 +1,10328 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/abci/v1beta3/types.proto + +package v1beta3 + +import ( + context "context" + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1" + v1beta2 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta2" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" + v1beta11 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Verification status. +type ResponseVerifyVoteExtension_VerifyStatus int32 + +const ( + // Unknown + ResponseVerifyVoteExtension_UNKNOWN ResponseVerifyVoteExtension_VerifyStatus = 0 + // Accepted + ResponseVerifyVoteExtension_ACCEPT ResponseVerifyVoteExtension_VerifyStatus = 1 + // Rejecting the vote extension will reject the entire precommit by the sender. + // Incorrectly implementing this thus has liveness implications as it may affect + // CometBFT's ability to receive 2/3+ valid votes to finalize the block. + // Honest nodes should never be rejected. + ResponseVerifyVoteExtension_REJECT ResponseVerifyVoteExtension_VerifyStatus = 2 +) + +var ResponseVerifyVoteExtension_VerifyStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "REJECT", +} + +var ResponseVerifyVoteExtension_VerifyStatus_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "REJECT": 2, +} + +func (x ResponseVerifyVoteExtension_VerifyStatus) String() string { + return proto.EnumName(ResponseVerifyVoteExtension_VerifyStatus_name, int32(x)) +} + +func (ResponseVerifyVoteExtension_VerifyStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{12, 0} +} + +// Request represents a request to the ABCI application. +type Request struct { + // Sum of all possible messages. + // + // Types that are valid to be assigned to Value: + // *Request_Echo + // *Request_Flush + // *Request_Info + // *Request_InitChain + // *Request_Query + // *Request_CheckTx + // *Request_Commit + // *Request_ListSnapshots + // *Request_OfferSnapshot + // *Request_LoadSnapshotChunk + // *Request_ApplySnapshotChunk + // *Request_PrepareProposal + // *Request_ProcessProposal + // *Request_ExtendVote + // *Request_VerifyVoteExtension + // *Request_FinalizeBlock + Value isRequest_Value `protobuf_oneof:"value"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +type isRequest_Value interface { + isRequest_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Request_Echo struct { + Echo *v1beta1.RequestEcho `protobuf:"bytes,1,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Request_Flush struct { + Flush *v1beta1.RequestFlush `protobuf:"bytes,2,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Request_Info struct { + Info *v1beta2.RequestInfo `protobuf:"bytes,3,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Request_InitChain struct { + InitChain *RequestInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Request_Query struct { + Query 
*v1beta1.RequestQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Request_CheckTx struct { + CheckTx *v1beta1.RequestCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type Request_Commit struct { + Commit *v1beta1.RequestCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Request_ListSnapshots struct { + ListSnapshots *v1beta1.RequestListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Request_OfferSnapshot struct { + OfferSnapshot *v1beta1.RequestOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Request_LoadSnapshotChunk struct { + LoadSnapshotChunk *v1beta1.RequestLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Request_ApplySnapshotChunk struct { + ApplySnapshotChunk *v1beta1.RequestApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} +type Request_PrepareProposal struct { + PrepareProposal *RequestPrepareProposal `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Request_ProcessProposal struct { + ProcessProposal *RequestProcessProposal `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} +type Request_ExtendVote struct { + ExtendVote *RequestExtendVote `protobuf:"bytes,18,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` +} +type Request_VerifyVoteExtension struct { + VerifyVoteExtension *RequestVerifyVoteExtension `protobuf:"bytes,19,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` +} +type Request_FinalizeBlock struct { + FinalizeBlock *RequestFinalizeBlock `protobuf:"bytes,20,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` +} + +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} +func (*Request_PrepareProposal) isRequest_Value() {} +func (*Request_ProcessProposal) isRequest_Value() {} +func (*Request_ExtendVote) isRequest_Value() {} +func (*Request_VerifyVoteExtension) isRequest_Value() {} +func (*Request_FinalizeBlock) isRequest_Value() {} + +func (m *Request) GetValue() isRequest_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Request) GetEcho() *v1beta1.RequestEcho { + if x, ok := m.GetValue().(*Request_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Request) GetFlush() *v1beta1.RequestFlush { + if x, ok := m.GetValue().(*Request_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Request) GetInfo() *v1beta2.RequestInfo { + if x, ok := m.GetValue().(*Request_Info); ok { + return x.Info + } + return nil +} + +func 
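Request is a tagged union: exactly one of the sixteen wrapper types above occupies Value at a time, and each generated Get* accessor returns nil unless its variant is the one set. Consumers typically dispatch with a type switch over Value; a hypothetical sketch (route is illustrative, not part of this API, and assumes the Message field on v1beta1.RequestEcho):

func route(req *Request) string {
	switch v := req.Value.(type) {
	case *Request_Echo:
		return "echo: " + v.Echo.Message
	case *Request_FinalizeBlock:
		return "finalize_block"
	case nil:
		return "empty request"
	default:
		return "other"
	}
}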
(m *Request) GetInitChain() *RequestInitChain { + if x, ok := m.GetValue().(*Request_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Request) GetQuery() *v1beta1.RequestQuery { + if x, ok := m.GetValue().(*Request_Query); ok { + return x.Query + } + return nil +} + +func (m *Request) GetCheckTx() *v1beta1.RequestCheckTx { + if x, ok := m.GetValue().(*Request_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Request) GetCommit() *v1beta1.RequestCommit { + if x, ok := m.GetValue().(*Request_Commit); ok { + return x.Commit + } + return nil +} + +func (m *Request) GetListSnapshots() *v1beta1.RequestListSnapshots { + if x, ok := m.GetValue().(*Request_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Request) GetOfferSnapshot() *v1beta1.RequestOfferSnapshot { + if x, ok := m.GetValue().(*Request_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Request) GetLoadSnapshotChunk() *v1beta1.RequestLoadSnapshotChunk { + if x, ok := m.GetValue().(*Request_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Request) GetApplySnapshotChunk() *v1beta1.RequestApplySnapshotChunk { + if x, ok := m.GetValue().(*Request_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + +func (m *Request) GetPrepareProposal() *RequestPrepareProposal { + if x, ok := m.GetValue().(*Request_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Request) GetProcessProposal() *RequestProcessProposal { + if x, ok := m.GetValue().(*Request_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +func (m *Request) GetExtendVote() *RequestExtendVote { + if x, ok := m.GetValue().(*Request_ExtendVote); ok { + return x.ExtendVote + } + return nil +} + +func (m *Request) GetVerifyVoteExtension() *RequestVerifyVoteExtension { + if x, ok := m.GetValue().(*Request_VerifyVoteExtension); ok { + return x.VerifyVoteExtension + } + return nil +} + +func (m *Request) GetFinalizeBlock() *RequestFinalizeBlock { + if x, ok := m.GetValue().(*Request_FinalizeBlock); ok { + return x.FinalizeBlock + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Request) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Request_Echo)(nil), + (*Request_Flush)(nil), + (*Request_Info)(nil), + (*Request_InitChain)(nil), + (*Request_Query)(nil), + (*Request_CheckTx)(nil), + (*Request_Commit)(nil), + (*Request_ListSnapshots)(nil), + (*Request_OfferSnapshot)(nil), + (*Request_LoadSnapshotChunk)(nil), + (*Request_ApplySnapshotChunk)(nil), + (*Request_PrepareProposal)(nil), + (*Request_ProcessProposal)(nil), + (*Request_ExtendVote)(nil), + (*Request_VerifyVoteExtension)(nil), + (*Request_FinalizeBlock)(nil), + } +} + +// RequestInitChain is a request to initialize the blockchain. 
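// It is delivered once, before any blocks: Time, ChainId, ConsensusParams,
// Validators and AppStateBytes mirror the genesis document, and
// InitialHeight tells the application the height at which the chain starts.
// A hypothetical handler shape (storeGenesis is illustrative only):
//
//	func initChain(req *RequestInitChain) {
//		storeGenesis(req.ChainId, req.InitialHeight, req.AppStateBytes)
//	}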
+type RequestInitChain struct { + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *v1.ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []v1beta1.ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` +} + +func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } +func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } +func (*RequestInitChain) ProtoMessage() {} +func (*RequestInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{1} +} +func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestInitChain.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInitChain.Merge(m, src) +} +func (m *RequestInitChain) XXX_Size() int { + return m.Size() +} +func (m *RequestInitChain) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInitChain.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInitChain proto.InternalMessageInfo + +func (m *RequestInitChain) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestInitChain) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *RequestInitChain) GetConsensusParams() *v1.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *RequestInitChain) GetValidators() []v1beta1.ValidatorUpdate { + if m != nil { + return m.Validators + } + return nil +} + +func (m *RequestInitChain) GetAppStateBytes() []byte { + if m != nil { + return m.AppStateBytes + } + return nil +} + +func (m *RequestInitChain) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +// RequestPrepareProposal is a request for the ABCI application to prepare a new +// block proposal. +type RequestPrepareProposal struct { + // the modified transactions cannot exceed this size. + MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. 
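// The app may reorder, add or drop entries, but whatever it returns must
// respect the MaxTxBytes budget above. A hypothetical helper enforcing the
// budget (trimToFit is illustrative, not part of this API):
//
//	func trimToFit(txs [][]byte, maxTxBytes int64) [][]byte {
//		var total int64
//		kept := make([][]byte, 0, len(txs))
//		for _, tx := range txs {
//			if total+int64(len(tx)) > maxTxBytes {
//				break // budget exhausted; drop the remainder
//			}
//			total += int64(len(tx))
//			kept = append(kept, tx)
//		}
//		return kept
//	}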
+ Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + Misbehavior []v1beta2.Misbehavior `protobuf:"bytes,4,rep,name=misbehavior,proto3" json:"misbehavior"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // address of the public key of the validator proposing the block. + ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } +func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } +func (*RequestPrepareProposal) ProtoMessage() {} +func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{2} +} +func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPrepareProposal.Merge(m, src) +} +func (m *RequestPrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestPrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo + +func (m *RequestPrepareProposal) GetMaxTxBytes() int64 { + if m != nil { + return m.MaxTxBytes + } + return 0 +} + +func (m *RequestPrepareProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { + if m != nil { + return m.LocalLastCommit + } + return ExtendedCommitInfo{} +} + +func (m *RequestPrepareProposal) GetMisbehavior() []v1beta2.Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestPrepareProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestPrepareProposal) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestPrepareProposal) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestPrepareProposal) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// RequestProcessProposal is a request for the ABCI application to process a proposal. +type RequestProcessProposal struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []v1beta2.Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` + // hash is the merkle root hash of the fields of the proposed block.
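// The application answers with a ProposalStatus verdict on this exact block.
// A hypothetical handler sketch: the emptiness check stands in for
// app-specific validity rules, and the generated ACCEPT/REJECT constants are
// assumed from the ProposalStatus enum decoded earlier in this diff:
//
//	func processProposal(req *RequestProcessProposal) *ResponseProcessProposal {
//		for _, tx := range req.Txs {
//			if len(tx) == 0 {
//				return &ResponseProcessProposal{Status: ResponseProcessProposal_REJECT}
//			}
//		}
//		return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}
//	}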
+ Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // address of the public key of the original proposer of the block. + ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } +func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } +func (*RequestProcessProposal) ProtoMessage() {} +func (*RequestProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{3} +} +func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestProcessProposal.Merge(m, src) +} +func (m *RequestProcessProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestProcessProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestProcessProposal proto.InternalMessageInfo + +func (m *RequestProcessProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { + if m != nil { + return m.ProposedLastCommit + } + return CommitInfo{} +} + +func (m *RequestProcessProposal) GetMisbehavior() []v1beta2.Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestProcessProposal) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestProcessProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestProcessProposal) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestProcessProposal) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestProcessProposal) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// Extends a vote with application-injected data +type RequestExtendVote struct { + // the hash of the block that this vote may be referring to + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + // the height of the extended vote + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + // info of the block that this vote may be referring to + Time time.Time `protobuf:"bytes,3,opt,name=time,proto3,stdtime" json:"time"` + Txs [][]byte `protobuf:"bytes,4,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,5,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + Misbehavior []v1beta2.Misbehavior `protobuf:"bytes,6,rep,name=misbehavior,proto3" json:"misbehavior"` + NextValidatorsHash []byte 
`protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // address of the public key of the original proposer of the block. + ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } +func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } +func (*RequestExtendVote) ProtoMessage() {} +func (*RequestExtendVote) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{4} +} +func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestExtendVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestExtendVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestExtendVote.Merge(m, src) +} +func (m *RequestExtendVote) XXX_Size() int { + return m.Size() +} +func (m *RequestExtendVote) XXX_DiscardUnknown() { + xxx_messageInfo_RequestExtendVote.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestExtendVote proto.InternalMessageInfo + +func (m *RequestExtendVote) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestExtendVote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestExtendVote) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestExtendVote) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestExtendVote) GetProposedLastCommit() CommitInfo { + if m != nil { + return m.ProposedLastCommit + } + return CommitInfo{} +} + +func (m *RequestExtendVote) GetMisbehavior() []v1beta2.Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestExtendVote) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestExtendVote) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// Verify the vote extension +type RequestVerifyVoteExtension struct { + // the hash of the block that this received vote corresponds to + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + // the validator that signed the vote extension + ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + VoteExtension []byte `protobuf:"bytes,4,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` +} + +func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExtension{} } +func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } +func (*RequestVerifyVoteExtension) ProtoMessage() {} +func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{5} +} +func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_RequestVerifyVoteExtension.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestVerifyVoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestVerifyVoteExtension.Merge(m, src) +} +func (m *RequestVerifyVoteExtension) XXX_Size() int { + return m.Size() +} +func (m *RequestVerifyVoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RequestVerifyVoteExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestVerifyVoteExtension proto.InternalMessageInfo + +func (m *RequestVerifyVoteExtension) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestVerifyVoteExtension) GetValidatorAddress() []byte { + if m != nil { + return m.ValidatorAddress + } + return nil +} + +func (m *RequestVerifyVoteExtension) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestVerifyVoteExtension) GetVoteExtension() []byte { + if m != nil { + return m.VoteExtension + } + return nil +} + +// RequestFinalizeBlock is a request to finalize the block. +type RequestFinalizeBlock struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` + Misbehavior []v1beta2.Misbehavior `protobuf:"bytes,3,rep,name=misbehavior,proto3" json:"misbehavior"` + // hash is the merkle root hash of the fields of the decided block. + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + // proposer_address is the address of the public key of the original proposer of the block. 
+ ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } +func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*RequestFinalizeBlock) ProtoMessage() {} +func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{6} +} +func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestFinalizeBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestFinalizeBlock.Merge(m, src) +} +func (m *RequestFinalizeBlock) XXX_Size() int { + return m.Size() +} +func (m *RequestFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestFinalizeBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestFinalizeBlock proto.InternalMessageInfo + +func (m *RequestFinalizeBlock) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { + if m != nil { + return m.DecidedLastCommit + } + return CommitInfo{} +} + +func (m *RequestFinalizeBlock) GetMisbehavior() []v1beta2.Misbehavior { + if m != nil { + return m.Misbehavior + } + return nil +} + +func (m *RequestFinalizeBlock) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *RequestFinalizeBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestFinalizeBlock) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *RequestFinalizeBlock) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *RequestFinalizeBlock) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// Response represents a response from the ABCI application. +type Response struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Value: + // *Response_Exception + // *Response_Echo + // *Response_Flush + // *Response_Info + // *Response_InitChain + // *Response_Query + // *Response_CheckTx + // *Response_Commit + // *Response_ListSnapshots + // *Response_OfferSnapshot + // *Response_LoadSnapshotChunk + // *Response_ApplySnapshotChunk + // *Response_PrepareProposal + // *Response_ProcessProposal + // *Response_ExtendVote + // *Response_VerifyVoteExtension + // *Response_FinalizeBlock + Value isResponse_Value `protobuf_oneof:"value"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{7} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +type isResponse_Value interface { + isResponse_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Response_Exception struct { + Exception *v1beta1.ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` +} +type Response_Echo struct { + Echo *v1beta1.ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Response_Flush struct { + Flush *v1beta1.ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Response_Info struct { + Info *v1beta1.ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Response_InitChain struct { + InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Response_Query struct { + Query *v1beta1.ResponseQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Response_CheckTx struct { + CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type Response_Commit struct { + Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Response_ListSnapshots struct { + ListSnapshots *v1beta1.ResponseListSnapshots `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Response_OfferSnapshot struct { + OfferSnapshot *v1beta1.ResponseOfferSnapshot `protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Response_LoadSnapshotChunk struct { + LoadSnapshotChunk *v1beta1.ResponseLoadSnapshotChunk `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Response_ApplySnapshotChunk struct { + ApplySnapshotChunk *v1beta1.ResponseApplySnapshotChunk 
`protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} +type Response_PrepareProposal struct { + PrepareProposal *v1beta2.ResponsePrepareProposal `protobuf:"bytes,17,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Response_ProcessProposal struct { + ProcessProposal *v1beta2.ResponseProcessProposal `protobuf:"bytes,18,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} +type Response_ExtendVote struct { + ExtendVote *ResponseExtendVote `protobuf:"bytes,19,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` +} +type Response_VerifyVoteExtension struct { + VerifyVoteExtension *ResponseVerifyVoteExtension `protobuf:"bytes,20,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` +} +type Response_FinalizeBlock struct { + FinalizeBlock *ResponseFinalizeBlock `protobuf:"bytes,21,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} +func (*Response_PrepareProposal) isResponse_Value() {} +func (*Response_ProcessProposal) isResponse_Value() {} +func (*Response_ExtendVote) isResponse_Value() {} +func (*Response_VerifyVoteExtension) isResponse_Value() {} +func (*Response_FinalizeBlock) isResponse_Value() {} + +func (m *Response) GetValue() isResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Response) GetException() *v1beta1.ResponseException { + if x, ok := m.GetValue().(*Response_Exception); ok { + return x.Exception + } + return nil +} + +func (m *Response) GetEcho() *v1beta1.ResponseEcho { + if x, ok := m.GetValue().(*Response_Echo); ok { + return x.Echo + } + return nil +} + +func (m *Response) GetFlush() *v1beta1.ResponseFlush { + if x, ok := m.GetValue().(*Response_Flush); ok { + return x.Flush + } + return nil +} + +func (m *Response) GetInfo() *v1beta1.ResponseInfo { + if x, ok := m.GetValue().(*Response_Info); ok { + return x.Info + } + return nil +} + +func (m *Response) GetInitChain() *ResponseInitChain { + if x, ok := m.GetValue().(*Response_InitChain); ok { + return x.InitChain + } + return nil +} + +func (m *Response) GetQuery() *v1beta1.ResponseQuery { + if x, ok := m.GetValue().(*Response_Query); ok { + return x.Query + } + return nil +} + +func (m *Response) GetCheckTx() *ResponseCheckTx { + if x, ok := m.GetValue().(*Response_CheckTx); ok { + return x.CheckTx + } + return nil +} + +func (m *Response) GetCommit() *ResponseCommit { + if x, ok := m.GetValue().(*Response_Commit); ok { + return x.Commit + } + return nil +} + +func (m *Response) GetListSnapshots() *v1beta1.ResponseListSnapshots { + if x, ok := m.GetValue().(*Response_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Response) GetOfferSnapshot() 
*v1beta1.ResponseOfferSnapshot { + if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Response) GetLoadSnapshotChunk() *v1beta1.ResponseLoadSnapshotChunk { + if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Response) GetApplySnapshotChunk() *v1beta1.ResponseApplySnapshotChunk { + if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + +func (m *Response) GetPrepareProposal() *v1beta2.ResponsePrepareProposal { + if x, ok := m.GetValue().(*Response_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Response) GetProcessProposal() *v1beta2.ResponseProcessProposal { + if x, ok := m.GetValue().(*Response_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +func (m *Response) GetExtendVote() *ResponseExtendVote { + if x, ok := m.GetValue().(*Response_ExtendVote); ok { + return x.ExtendVote + } + return nil +} + +func (m *Response) GetVerifyVoteExtension() *ResponseVerifyVoteExtension { + if x, ok := m.GetValue().(*Response_VerifyVoteExtension); ok { + return x.VerifyVoteExtension + } + return nil +} + +func (m *Response) GetFinalizeBlock() *ResponseFinalizeBlock { + if x, ok := m.GetValue().(*Response_FinalizeBlock); ok { + return x.FinalizeBlock + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Response) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Response_Exception)(nil), + (*Response_Echo)(nil), + (*Response_Flush)(nil), + (*Response_Info)(nil), + (*Response_InitChain)(nil), + (*Response_Query)(nil), + (*Response_CheckTx)(nil), + (*Response_Commit)(nil), + (*Response_ListSnapshots)(nil), + (*Response_OfferSnapshot)(nil), + (*Response_LoadSnapshotChunk)(nil), + (*Response_ApplySnapshotChunk)(nil), + (*Response_PrepareProposal)(nil), + (*Response_ProcessProposal)(nil), + (*Response_ExtendVote)(nil), + (*Response_VerifyVoteExtension)(nil), + (*Response_FinalizeBlock)(nil), + } +} + +// ResponseInitChain contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. 
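// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: the Response oneof above
// follows the standard gogoproto wrapper pattern. Each variant is a one-field
// struct implementing isResponse_Value, and each typed accessor returns a
// non-nil value only for the variant actually stored in Value. The function
// name below is hypothetical; the types are the ones declared in this file.
// (The ResponseInitChain declaration continues after this aside.)
// ---------------------------------------------------------------------------
func exampleWrapResponse() {
	// Wrap a CheckTx result in the Response oneof.
	res := &Response{
		Value: &Response_CheckTx{CheckTx: &ResponseCheckTx{Code: 0}},
	}
	if ct := res.GetCheckTx(); ct != nil {
		_ = ct.Code // set variant: accessor returns the payload
	}
	_ = res.GetFlush() // unset variant: accessor returns nil
}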
+type ResponseInitChain struct { + ConsensusParams *v1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []v1beta1.ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } +func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } +func (*ResponseInitChain) ProtoMessage() {} +func (*ResponseInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{8} +} +func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInitChain.Merge(m, src) +} +func (m *ResponseInitChain) XXX_Size() int { + return m.Size() +} +func (m *ResponseInitChain) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseInitChain.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo + +func (m *ResponseInitChain) GetConsensusParams() *v1.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return nil +} + +func (m *ResponseInitChain) GetValidators() []v1beta1.ValidatorUpdate { + if m != nil { + return m.Validators + } + return nil +} + +func (m *ResponseInitChain) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// ResponseCheckTx shows if the transaction was deemed valid by the ABCI +// application. 
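// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: per ABCI semantics, an
// application answers InitChain with a ResponseInitChain; a nil
// ConsensusParams and an empty Validators slice signal that the values from
// the genesis request should be kept. The function name is hypothetical.
// ---------------------------------------------------------------------------
func exampleInitChainResponse(appHash []byte) *ResponseInitChain {
	return &ResponseInitChain{
		ConsensusParams: nil,                         // keep the genesis consensus params
		Validators:      []v1beta1.ValidatorUpdate{}, // keep the genesis validator set
		AppHash:         appHash,                     // hash of the initial application state
	}
}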
+type ResponseCheckTx struct {
+	Code      uint32          `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	Data      []byte          `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	Log       string          `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+	Info      string          `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
+	GasWanted int64           `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"`
+	GasUsed   int64           `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
+	Events    []v1beta2.Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
+	Codespace string          `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
+}
+
+func (m *ResponseCheckTx) Reset()         { *m = ResponseCheckTx{} }
+func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) }
+func (*ResponseCheckTx) ProtoMessage()    {}
+func (*ResponseCheckTx) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1cabe0dccee1dedf, []int{9}
+}
+func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseCheckTx) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseCheckTx.Merge(m, src)
+}
+func (m *ResponseCheckTx) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseCheckTx) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseCheckTx.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseCheckTx proto.InternalMessageInfo
+
+func (m *ResponseCheckTx) GetCode() uint32 {
+	if m != nil {
+		return m.Code
+	}
+	return 0
+}
+
+func (m *ResponseCheckTx) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *ResponseCheckTx) GetLog() string {
+	if m != nil {
+		return m.Log
+	}
+	return ""
+}
+
+func (m *ResponseCheckTx) GetInfo() string {
+	if m != nil {
+		return m.Info
+	}
+	return ""
+}
+
+func (m *ResponseCheckTx) GetGasWanted() int64 {
+	if m != nil {
+		return m.GasWanted
+	}
+	return 0
+}
+
+func (m *ResponseCheckTx) GetGasUsed() int64 {
+	if m != nil {
+		return m.GasUsed
+	}
+	return 0
+}
+
+func (m *ResponseCheckTx) GetEvents() []v1beta2.Event {
+	if m != nil {
+		return m.Events
+	}
+	return nil
+}
+
+func (m *ResponseCheckTx) GetCodespace() string {
+	if m != nil {
+		return m.Codespace
+	}
+	return ""
+}
+
+// ResponseCommit indicates how many blocks CometBFT should retain.
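// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: every GetX method in
// this file guards against a nil receiver, so callers can read fields of a
// possibly-nil message without a panic. Demonstrated here on ResponseCheckTx;
// the function name is hypothetical. (The ResponseCommit declaration
// continues after this aside.)
// ---------------------------------------------------------------------------
func exampleNilSafeGetters() {
	var res *ResponseCheckTx // deliberately nil
	_ = res.GetCode()        // returns the zero value 0 instead of panicking
	_ = res.GetEvents()      // returns nil instead of panicking
}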
+type ResponseCommit struct { + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` +} + +func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } +func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } +func (*ResponseCommit) ProtoMessage() {} +func (*ResponseCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{10} +} +func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCommit.Merge(m, src) +} +func (m *ResponseCommit) XXX_Size() int { + return m.Size() +} +func (m *ResponseCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCommit.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo + +func (m *ResponseCommit) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 +} + +// ResponseExtendVote is the result of extending a vote with application-injected data. +type ResponseExtendVote struct { + VoteExtension []byte `protobuf:"bytes,1,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` +} + +func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } +func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } +func (*ResponseExtendVote) ProtoMessage() {} +func (*ResponseExtendVote) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{11} +} +func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseExtendVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseExtendVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseExtendVote.Merge(m, src) +} +func (m *ResponseExtendVote) XXX_Size() int { + return m.Size() +} +func (m *ResponseExtendVote) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseExtendVote.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseExtendVote proto.InternalMessageInfo + +func (m *ResponseExtendVote) GetVoteExtension() []byte { + if m != nil { + return m.VoteExtension + } + return nil +} + +// ResponseVerifyVoteExtension is the result of verifying a vote extension. 
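// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: two of the smaller
// response types above are easy to misread. ResponseCommit.RetainHeight names
// the lowest block height CometBFT must keep (0 disables pruning), and
// ResponseExtendVote.VoteExtension is opaque application data that CometBFT
// merely transports. Function names and the keepRecent window are
// hypothetical.
// ---------------------------------------------------------------------------
func exampleCommitResponse(currentHeight, keepRecent int64) *ResponseCommit {
	retain := currentHeight - keepRecent
	if retain < 0 {
		retain = 0 // keep everything until the retention window fills
	}
	return &ResponseCommit{RetainHeight: retain}
}

func exampleExtendVote(appData []byte) *ResponseExtendVote {
	return &ResponseExtendVote{VoteExtension: appData}
}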
+type ResponseVerifyVoteExtension struct {
+	Status ResponseVerifyVoteExtension_VerifyStatus `protobuf:"varint,1,opt,name=status,proto3,enum=cometbft.abci.v1beta3.ResponseVerifyVoteExtension_VerifyStatus" json:"status,omitempty"`
+}
+
+func (m *ResponseVerifyVoteExtension) Reset()         { *m = ResponseVerifyVoteExtension{} }
+func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) }
+func (*ResponseVerifyVoteExtension) ProtoMessage()    {}
+func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1cabe0dccee1dedf, []int{12}
+}
+func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseVerifyVoteExtension.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseVerifyVoteExtension) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseVerifyVoteExtension.Merge(m, src)
+}
+func (m *ResponseVerifyVoteExtension) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseVerifyVoteExtension) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseVerifyVoteExtension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseVerifyVoteExtension proto.InternalMessageInfo
+
+func (m *ResponseVerifyVoteExtension) GetStatus() ResponseVerifyVoteExtension_VerifyStatus {
+	if m != nil {
+		return m.Status
+	}
+	return ResponseVerifyVoteExtension_UNKNOWN
+}
+
+// ResponseFinalizeBlock contains the result of executing the block.
+type ResponseFinalizeBlock struct {
+	// set of block events emitted as part of executing the block
+	Events []v1beta2.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"`
+	// the result of executing each transaction including the events
+	// the particular transaction emitted. This should match the order
+	// of the transactions delivered in the block itself
+	TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"`
+	// a list of updates to the validator set. These will reflect the validator set at current height + 2.
+	ValidatorUpdates []v1beta1.ValidatorUpdate `protobuf:"bytes,3,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"`
+	// updates to the consensus params, if any.
+	ConsensusParamUpdates *v1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"`
+	// app_hash is the hash of the application's state, which is used to confirm
+	// that execution of the transactions was deterministic.
+	// It is up to the application to decide which algorithm to use.
+ AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } +func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseFinalizeBlock) ProtoMessage() {} +func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{13} +} +func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseFinalizeBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFinalizeBlock.Merge(m, src) +} +func (m *ResponseFinalizeBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFinalizeBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo + +func (m *ResponseFinalizeBlock) GetEvents() []v1beta2.Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *ResponseFinalizeBlock) GetTxResults() []*ExecTxResult { + if m != nil { + return m.TxResults + } + return nil +} + +func (m *ResponseFinalizeBlock) GetValidatorUpdates() []v1beta1.ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *v1.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseFinalizeBlock) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// VoteInfo contains the information about the vote. +type VoteInfo struct { + Validator v1beta1.Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + BlockIdFlag v1beta11.BlockIDFlag `protobuf:"varint,3,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1beta1.BlockIDFlag" json:"block_id_flag,omitempty"` +} + +func (m *VoteInfo) Reset() { *m = VoteInfo{} } +func (m *VoteInfo) String() string { return proto.CompactTextString(m) } +func (*VoteInfo) ProtoMessage() {} +func (*VoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{14} +} +func (m *VoteInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(m, src) +} +func (m *VoteInfo) XXX_Size() int { + return m.Size() +} +func (m *VoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_VoteInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteInfo proto.InternalMessageInfo + +func (m *VoteInfo) GetValidator() v1beta1.Validator { + if m != nil { + return m.Validator + } + return v1beta1.Validator{} +} + +func (m *VoteInfo) GetBlockIdFlag() v1beta11.BlockIDFlag { + if m != nil { + return m.BlockIdFlag + } + return v1beta11.BlockIDFlagUnknown +} + +// ExtendedVoteInfo extends VoteInfo with the vote extensions (non-deterministic). 
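// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: as the field comments on
// ResponseFinalizeBlock above note, TxResults must match, one-to-one and in
// order, the Txs of the RequestFinalizeBlock being answered. Skeleton only;
// execute stands in for the application's own transaction execution.
// (The ExtendedVoteInfo declaration continues after this aside.)
// ---------------------------------------------------------------------------
func exampleFinalizeBlock(req *RequestFinalizeBlock, execute func(tx []byte) *ExecTxResult) *ResponseFinalizeBlock {
	results := make([]*ExecTxResult, 0, len(req.GetTxs()))
	for _, tx := range req.GetTxs() {
		results = append(results, execute(tx)) // preserve the order of req.Txs
	}
	return &ResponseFinalizeBlock{
		TxResults: results,
		// Events, ValidatorUpdates, ConsensusParamUpdates and AppHash are
		// filled in by the application as appropriate.
	}
}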
+type ExtendedVoteInfo struct { + // The validator that sent the vote. + Validator v1beta1.Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + // Non-deterministic extension provided by the sending validator's application. + VoteExtension []byte `protobuf:"bytes,3,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` + // Vote extension signature created by CometBFT + ExtensionSignature []byte `protobuf:"bytes,4,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"` + // block_id_flag indicates whether the validator voted for a block, nil, or did not vote at all + BlockIdFlag v1beta11.BlockIDFlag `protobuf:"varint,5,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1beta1.BlockIDFlag" json:"block_id_flag,omitempty"` +} + +func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } +func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedVoteInfo) ProtoMessage() {} +func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{15} +} +func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedVoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedVoteInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExtendedVoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedVoteInfo.Merge(m, src) +} +func (m *ExtendedVoteInfo) XXX_Size() int { + return m.Size() +} +func (m *ExtendedVoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedVoteInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendedVoteInfo proto.InternalMessageInfo + +func (m *ExtendedVoteInfo) GetValidator() v1beta1.Validator { + if m != nil { + return m.Validator + } + return v1beta1.Validator{} +} + +func (m *ExtendedVoteInfo) GetVoteExtension() []byte { + if m != nil { + return m.VoteExtension + } + return nil +} + +func (m *ExtendedVoteInfo) GetExtensionSignature() []byte { + if m != nil { + return m.ExtensionSignature + } + return nil +} + +func (m *ExtendedVoteInfo) GetBlockIdFlag() v1beta11.BlockIDFlag { + if m != nil { + return m.BlockIdFlag + } + return v1beta11.BlockIDFlagUnknown +} + +// CommitInfo contains votes for the particular round. 
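// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: an ExtendedVoteInfo
// carries a usable vote extension only when its validator committed to a
// block. This assumes the BlockIDFlag enum exposes a commit value named
// BlockIDFlagCommit, following the BlockIDFlagUnknown naming used above; the
// function name is hypothetical. (The CommitInfo declaration continues after
// this aside.)
// ---------------------------------------------------------------------------
func exampleCollectExtensions(votes []ExtendedVoteInfo) [][]byte {
	var exts [][]byte
	for _, v := range votes {
		if v.BlockIdFlag == v1beta11.BlockIDFlagCommit && len(v.VoteExtension) > 0 {
			exts = append(exts, v.VoteExtension)
		}
	}
	return exts
}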
+type CommitInfo struct { + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +} + +func (m *CommitInfo) Reset() { *m = CommitInfo{} } +func (m *CommitInfo) String() string { return proto.CompactTextString(m) } +func (*CommitInfo) ProtoMessage() {} +func (*CommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{16} +} +func (m *CommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitInfo.Merge(m, src) +} +func (m *CommitInfo) XXX_Size() int { + return m.Size() +} +func (m *CommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitInfo proto.InternalMessageInfo + +func (m *CommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CommitInfo) GetVotes() []VoteInfo { + if m != nil { + return m.Votes + } + return nil +} + +// ExtendedCommitInfo is similar to CommitInfo except that it is only used in +// the PrepareProposal request such that Tendermint can provide vote extensions +// to the application. +type ExtendedCommitInfo struct { + // The round at which the block proposer decided in the previous height. + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + // List of validators' addresses in the last validator set with their voting + // information, including vote extensions. + Votes []ExtendedVoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +} + +func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } +func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedCommitInfo) ProtoMessage() {} +func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{17} +} +func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedCommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExtendedCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedCommitInfo.Merge(m, src) +} +func (m *ExtendedCommitInfo) XXX_Size() int { + return m.Size() +} +func (m *ExtendedCommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedCommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendedCommitInfo proto.InternalMessageInfo + +func (m *ExtendedCommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo { + if m != nil { + return m.Votes + } + return nil +} + +// ExecTxResult contains results of executing one individual transaction. 
+// +// * Its structure is equivalent to #ResponseDeliverTx which will be deprecated/deleted +type ExecTxResult struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []v1beta2.Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } +func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } +func (*ExecTxResult) ProtoMessage() {} +func (*ExecTxResult) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{18} +} +func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecTxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecTxResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecTxResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTxResult.Merge(m, src) +} +func (m *ExecTxResult) XXX_Size() int { + return m.Size() +} +func (m *ExecTxResult) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTxResult.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTxResult proto.InternalMessageInfo + +func (m *ExecTxResult) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ExecTxResult) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ExecTxResult) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ExecTxResult) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ExecTxResult) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ExecTxResult) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ExecTxResult) GetEvents() []v1beta2.Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *ExecTxResult) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. 
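// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated patch: TxResult (declared just
// below) bundles a transaction with its ExecTxResult plus the block height
// and intra-block index, which together form a natural indexing key. Assumes
// "fmt" is imported; the function name is hypothetical.
// ---------------------------------------------------------------------------
func exampleTxIndexKey(tr *TxResult) string {
	// Unique per transaction: height and index identify it within the chain.
	return fmt.Sprintf("%d/%d", tr.GetHeight(), tr.GetIndex())
}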
+type TxResult struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` + Result ExecTxResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` +} + +func (m *TxResult) Reset() { *m = TxResult{} } +func (m *TxResult) String() string { return proto.CompactTextString(m) } +func (*TxResult) ProtoMessage() {} +func (*TxResult) Descriptor() ([]byte, []int) { + return fileDescriptor_1cabe0dccee1dedf, []int{19} +} +func (m *TxResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxResult.Merge(m, src) +} +func (m *TxResult) XXX_Size() int { + return m.Size() +} +func (m *TxResult) XXX_DiscardUnknown() { + xxx_messageInfo_TxResult.DiscardUnknown(m) +} + +var xxx_messageInfo_TxResult proto.InternalMessageInfo + +func (m *TxResult) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *TxResult) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *TxResult) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +func (m *TxResult) GetResult() ExecTxResult { + if m != nil { + return m.Result + } + return ExecTxResult{} +} + +func init() { + proto.RegisterEnum("cometbft.abci.v1beta3.ResponseVerifyVoteExtension_VerifyStatus", ResponseVerifyVoteExtension_VerifyStatus_name, ResponseVerifyVoteExtension_VerifyStatus_value) + proto.RegisterType((*Request)(nil), "cometbft.abci.v1beta3.Request") + proto.RegisterType((*RequestInitChain)(nil), "cometbft.abci.v1beta3.RequestInitChain") + proto.RegisterType((*RequestPrepareProposal)(nil), "cometbft.abci.v1beta3.RequestPrepareProposal") + proto.RegisterType((*RequestProcessProposal)(nil), "cometbft.abci.v1beta3.RequestProcessProposal") + proto.RegisterType((*RequestExtendVote)(nil), "cometbft.abci.v1beta3.RequestExtendVote") + proto.RegisterType((*RequestVerifyVoteExtension)(nil), "cometbft.abci.v1beta3.RequestVerifyVoteExtension") + proto.RegisterType((*RequestFinalizeBlock)(nil), "cometbft.abci.v1beta3.RequestFinalizeBlock") + proto.RegisterType((*Response)(nil), "cometbft.abci.v1beta3.Response") + proto.RegisterType((*ResponseInitChain)(nil), "cometbft.abci.v1beta3.ResponseInitChain") + proto.RegisterType((*ResponseCheckTx)(nil), "cometbft.abci.v1beta3.ResponseCheckTx") + proto.RegisterType((*ResponseCommit)(nil), "cometbft.abci.v1beta3.ResponseCommit") + proto.RegisterType((*ResponseExtendVote)(nil), "cometbft.abci.v1beta3.ResponseExtendVote") + proto.RegisterType((*ResponseVerifyVoteExtension)(nil), "cometbft.abci.v1beta3.ResponseVerifyVoteExtension") + proto.RegisterType((*ResponseFinalizeBlock)(nil), "cometbft.abci.v1beta3.ResponseFinalizeBlock") + proto.RegisterType((*VoteInfo)(nil), "cometbft.abci.v1beta3.VoteInfo") + proto.RegisterType((*ExtendedVoteInfo)(nil), "cometbft.abci.v1beta3.ExtendedVoteInfo") + proto.RegisterType((*CommitInfo)(nil), "cometbft.abci.v1beta3.CommitInfo") + proto.RegisterType((*ExtendedCommitInfo)(nil), "cometbft.abci.v1beta3.ExtendedCommitInfo") + proto.RegisterType((*ExecTxResult)(nil), 
"cometbft.abci.v1beta3.ExecTxResult") + proto.RegisterType((*TxResult)(nil), "cometbft.abci.v1beta3.TxResult") +} + +func init() { proto.RegisterFile("cometbft/abci/v1beta3/types.proto", fileDescriptor_1cabe0dccee1dedf) } + +var fileDescriptor_1cabe0dccee1dedf = []byte{ + // 2233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x9a, 0x4f, 0x8f, 0xdb, 0xc6, + 0x15, 0xc0, 0x45, 0xfd, 0x5b, 0xe9, 0xad, 0xb4, 0xcb, 0x9d, 0x5d, 0xa7, 0x8a, 0x1a, 0xac, 0x1d, + 0xb9, 0x76, 0x1c, 0x24, 0x95, 0xb2, 0x6b, 0xa0, 0x68, 0xe0, 0xa0, 0xc1, 0xee, 0x66, 0x5d, 0xed, + 0xc6, 0xb1, 0x5d, 0x7a, 0xd7, 0x6e, 0x6d, 0xa0, 0xc4, 0x88, 0x1c, 0x49, 0xac, 0x29, 0x91, 0x21, + 0x47, 0xaa, 0xb6, 0xe8, 0xb9, 0x97, 0x1e, 0xea, 0x43, 0x8f, 0x05, 0x72, 0xe9, 0xad, 0x97, 0x7e, + 0x8c, 0x00, 0xbd, 0x04, 0xe8, 0xa5, 0x97, 0xa6, 0x85, 0x7d, 0xeb, 0x47, 0x28, 0x50, 0xb4, 0x98, + 0xe1, 0x90, 0x22, 0x25, 0x4a, 0x22, 0xbb, 0xee, 0xa1, 0x68, 0x6f, 0xe4, 0xe3, 0xfb, 0x33, 0x33, + 0x6f, 0xe6, 0xcd, 0xfc, 0x34, 0x82, 0xb7, 0x35, 0x6b, 0x40, 0x68, 0xa7, 0x4b, 0x5b, 0xb8, 0xa3, + 0x19, 0xad, 0xf1, 0x5e, 0x87, 0x50, 0x7c, 0xbb, 0x45, 0x2f, 0x6c, 0xe2, 0x36, 0x6d, 0xc7, 0xa2, + 0x16, 0xba, 0xe2, 0xab, 0x34, 0x99, 0x4a, 0x53, 0xa8, 0xd4, 0x63, 0x2d, 0xf7, 0xc2, 0x96, 0xf1, + 0x2a, 0xfb, 0x11, 0x95, 0xdd, 0x40, 0x85, 0x4b, 0x5b, 0xe3, 0xbd, 0x96, 0x8d, 0x1d, 0x3c, 0xf0, + 0xbf, 0xdf, 0x9c, 0xfb, 0xee, 0x85, 0x19, 0x63, 0xd3, 0xd0, 0x31, 0xb5, 0x1c, 0xa1, 0xb7, 0xd3, + 0xb3, 0x7a, 0x16, 0x7f, 0x6c, 0xb1, 0x27, 0x21, 0xbd, 0xda, 0xb3, 0xac, 0x9e, 0x49, 0x5a, 0xfc, + 0xad, 0x33, 0xea, 0xb6, 0xa8, 0x31, 0x20, 0x2e, 0xc5, 0x03, 0xdb, 0x53, 0x68, 0x7c, 0x01, 0xb0, + 0xa6, 0x90, 0xcf, 0x47, 0xc4, 0xa5, 0xe8, 0xbb, 0x90, 0x27, 0x5a, 0xdf, 0xaa, 0x49, 0xd7, 0xa4, + 0x5b, 0xeb, 0xfb, 0x8d, 0x66, 0x5c, 0xb7, 0xf7, 0x9a, 0x42, 0xfb, 0x58, 0xeb, 0x5b, 0xed, 0x8c, + 0xc2, 0x2d, 0xd0, 0x1d, 0x28, 0x74, 0xcd, 0x91, 0xdb, 0xaf, 0x65, 0xb9, 0xe9, 0xf5, 0xe5, 0xa6, + 0x77, 0x99, 0x6a, 0x3b, 0xa3, 0x78, 0x36, 0x2c, 0xac, 0x31, 0xec, 0x5a, 0xb5, 0xdc, 0x92, 0xb0, + 0xfb, 0xbe, 0xed, 0xc9, 0xb0, 0xcb, 0xc3, 0x32, 0x0b, 0xd4, 0x06, 0x30, 0x86, 0x06, 0x55, 0xb5, + 0x3e, 0x36, 0x86, 0xb5, 0x02, 0xb7, 0x7f, 0x27, 0xd6, 0xfe, 0xf6, 0xd4, 0xde, 0xa0, 0x47, 0x4c, + 0xbd, 0x9d, 0x51, 0xca, 0x86, 0xff, 0xc2, 0x3a, 0xf0, 0xf9, 0x88, 0x38, 0x17, 0xb5, 0x62, 0x92, + 0x0e, 0xfc, 0x80, 0xa9, 0xb2, 0x0e, 0x70, 0x1b, 0x74, 0x08, 0x25, 0xad, 0x4f, 0xb4, 0xe7, 0x2a, + 0x9d, 0xd4, 0x4a, 0xdc, 0xfe, 0xc6, 0x72, 0xfb, 0x23, 0xa6, 0x7d, 0x36, 0x69, 0x67, 0x94, 0x35, + 0xcd, 0x7b, 0x44, 0xdf, 0x83, 0xa2, 0x66, 0x0d, 0x06, 0x06, 0xad, 0xad, 0x73, 0x0f, 0xdf, 0x5a, + 0xe1, 0x81, 0xeb, 0xb6, 0x33, 0x8a, 0xb0, 0x42, 0x67, 0xb0, 0x61, 0x1a, 0x2e, 0x55, 0xdd, 0x21, + 0xb6, 0xdd, 0xbe, 0x45, 0xdd, 0x5a, 0x85, 0xfb, 0x79, 0x6f, 0xb9, 0x9f, 0x7b, 0x86, 0x4b, 0x1f, + 0xf9, 0x26, 0xed, 0x8c, 0x52, 0x35, 0xc3, 0x02, 0xe6, 0xd5, 0xea, 0x76, 0x89, 0x13, 0xb8, 0xad, + 0x55, 0x93, 0x78, 0x7d, 0xc0, 0x6c, 0x7c, 0x2f, 0xcc, 0xab, 0x15, 0x16, 0x20, 0x0c, 0xdb, 0xa6, + 0x85, 0xf5, 0xc0, 0xa9, 0xaa, 0xf5, 0x47, 0xc3, 0xe7, 0xb5, 0x0d, 0xee, 0xba, 0xb5, 0xa2, 0xc1, + 0x16, 0xd6, 0x7d, 0x47, 0x47, 0xcc, 0xac, 0x9d, 0x51, 0xb6, 0xcc, 0x59, 0x21, 0xd2, 0x61, 0x07, + 0xdb, 0xb6, 0x79, 0x31, 0x1b, 0x63, 0x93, 0xc7, 0xf8, 0x60, 0x79, 0x8c, 0x03, 0x66, 0x39, 0x1b, + 0x04, 0xe1, 0x39, 0x29, 0x7a, 0x0a, 0xb2, 0xed, 0x10, 0x1b, 0x3b, 0x44, 0xb5, 0x1d, 0xcb, 0xb6, + 0x5c, 0x6c, 0xd6, 0x64, 0x1e, 0xe1, 0xdb, 0xcb, 0x67, 0xe1, 0x43, 0xcf, 0xea, 0xa1, 0x30, 0x6a, 
+ 0x67, 0x94, 0x4d, 0x3b, 0x2a, 0xf2, 0x7c, 0x5b, 0x1a, 0x71, 0xdd, 0xa9, 0xef, 0xad, 0x64, 0xbe, + 0xb9, 0x55, 0xd4, 0x77, 0x44, 0x84, 0x3e, 0x85, 0x75, 0x32, 0xa1, 0x64, 0xa8, 0xab, 0x63, 0x8b, + 0x92, 0x1a, 0xe2, 0x6e, 0x6f, 0x2d, 0x77, 0x7b, 0xcc, 0x0d, 0x1e, 0x5b, 0x94, 0xb4, 0x33, 0x0a, + 0x90, 0xe0, 0x0d, 0xf5, 0xe0, 0xca, 0x98, 0x38, 0x46, 0xf7, 0x82, 0x3b, 0x53, 0xf9, 0x17, 0xd7, + 0xb0, 0x86, 0xb5, 0x6d, 0xee, 0x76, 0x6f, 0xb9, 0xdb, 0xc7, 0xdc, 0x94, 0x39, 0x3a, 0xf6, 0x0d, + 0xdb, 0x19, 0x65, 0x7b, 0x3c, 0x2f, 0x66, 0x93, 0xb1, 0x6b, 0x0c, 0xb1, 0x69, 0xfc, 0x8c, 0xa8, + 0x1d, 0xd3, 0xd2, 0x9e, 0xd7, 0x76, 0x96, 0x4c, 0xc6, 0x20, 0xc2, 0x5d, 0x61, 0x73, 0xc8, 0x4c, + 0xd8, 0x64, 0xec, 0x86, 0x05, 0x87, 0x6b, 0x50, 0x18, 0x63, 0x73, 0x44, 0x4e, 0xf3, 0xa5, 0xbc, + 0x5c, 0x38, 0xcd, 0x97, 0xd6, 0xe4, 0xd2, 0x69, 0xbe, 0x54, 0x96, 0xe1, 0x34, 0x5f, 0x02, 0x79, + 0xbd, 0xf1, 0xc7, 0x2c, 0xc8, 0xb3, 0xc5, 0x83, 0xd5, 0x2c, 0x56, 0x49, 0x45, 0xa9, 0xac, 0x37, + 0xbd, 0x32, 0xdb, 0xf4, 0xcb, 0x6c, 0xf3, 0xcc, 0x2f, 0xb3, 0x87, 0xa5, 0x2f, 0xbf, 0xbe, 0x9a, + 0x79, 0xf1, 0x97, 0xab, 0x92, 0xc2, 0x2d, 0xd0, 0x9b, 0xac, 0x58, 0x60, 0x63, 0xa8, 0x1a, 0x3a, + 0xaf, 0x96, 0x65, 0x56, 0x03, 0xb0, 0x31, 0x3c, 0xd1, 0xd1, 0x67, 0x20, 0x6b, 0xd6, 0xd0, 0x25, + 0x43, 0x77, 0xe4, 0xaa, 0xde, 0x26, 0x30, 0x5f, 0x14, 0xbd, 0xbd, 0x63, 0xbc, 0xd7, 0x3c, 0xf2, + 0x55, 0x1f, 0x72, 0x4d, 0x65, 0x53, 0x8b, 0x0a, 0xd0, 0x3d, 0x80, 0x60, 0x93, 0x70, 0x6b, 0xf9, + 0x6b, 0xb9, 0x5b, 0xeb, 0xfb, 0x37, 0x17, 0xcc, 0xfc, 0xc7, 0xbe, 0xe2, 0xb9, 0xad, 0x63, 0x4a, + 0x0e, 0xf3, 0xac, 0xd5, 0x4a, 0xc8, 0x1e, 0xdd, 0x84, 0x4d, 0x6c, 0xdb, 0xaa, 0x4b, 0x31, 0x25, + 0x6a, 0xe7, 0x82, 0x12, 0x97, 0x17, 0xdc, 0x8a, 0x52, 0xc5, 0xb6, 0xfd, 0x88, 0x49, 0x0f, 0x99, + 0x10, 0xdd, 0x80, 0x0d, 0x56, 0x56, 0x0d, 0x6c, 0xaa, 0x7d, 0x62, 0xf4, 0xfa, 0x94, 0x97, 0xd4, + 0x9c, 0x52, 0x15, 0xd2, 0x36, 0x17, 0x36, 0x7e, 0x9b, 0x83, 0x37, 0xe2, 0x17, 0x03, 0xba, 0x06, + 0x95, 0x01, 0x9e, 0xa8, 0x74, 0x22, 0xc2, 0x48, 0xdc, 0x1e, 0x06, 0x78, 0x72, 0x36, 0xf1, 0x62, + 0xc8, 0x90, 0xa3, 0x13, 0xb7, 0x96, 0xbd, 0x96, 0xbb, 0x55, 0x51, 0xd8, 0x23, 0x7a, 0x06, 0x5b, + 0xa6, 0xa5, 0x61, 0x53, 0x35, 0xb1, 0x4b, 0x55, 0x51, 0x49, 0xbd, 0xb1, 0x7b, 0x77, 0xc1, 0xf4, + 0xf0, 0x26, 0x34, 0xd1, 0xbd, 0x52, 0xca, 0xf6, 0x15, 0xd1, 0xeb, 0x4d, 0xee, 0xe9, 0x1e, 0xf6, + 0xab, 0x2c, 0x3a, 0x85, 0xf5, 0x81, 0xe1, 0x76, 0x48, 0x1f, 0x8f, 0x0d, 0xcb, 0x11, 0x23, 0xb9, + 0x68, 0x9f, 0xfa, 0x6c, 0xaa, 0x29, 0xfc, 0x85, 0x8d, 0xd1, 0x1b, 0x50, 0x14, 0xc3, 0x52, 0xe0, + 0xdd, 0x12, 0x6f, 0xc1, 0x84, 0x2a, 0xa6, 0x9e, 0x50, 0x1f, 0xc0, 0xce, 0x90, 0x4c, 0xa8, 0x3a, + 0xcd, 0x95, 0xda, 0xc7, 0x6e, 0xbf, 0xb6, 0xc6, 0xb3, 0x83, 0xd8, 0xb7, 0x20, 0xbb, 0x6e, 0x1b, + 0xbb, 0x7d, 0xf4, 0x2e, 0x2f, 0x2d, 0xb6, 0xe5, 0x12, 0x47, 0xc5, 0xba, 0xee, 0x10, 0xd7, 0xe5, + 0xfb, 0x56, 0x85, 0x57, 0x0a, 0x2e, 0x3f, 0xf0, 0xc4, 0x8d, 0x5f, 0x87, 0xd3, 0x14, 0x2d, 0x22, + 0x22, 0x09, 0xd2, 0x34, 0x09, 0x3f, 0x82, 0x1d, 0x61, 0xaf, 0x47, 0xf2, 0xe0, 0x1d, 0x0a, 0xde, + 0x5e, 0x90, 0x87, 0xb9, 0xf1, 0x47, 0xbe, 0x93, 0xc5, 0x29, 0xc8, 0x5d, 0x26, 0x05, 0x08, 0xf2, + 0x7c, 0x80, 0xf2, 0xbc, 0xcb, 0xfc, 0xf9, 0xbf, 0x2d, 0x2d, 0xbf, 0xca, 0xc1, 0xd6, 0x5c, 0x5d, + 0x0e, 0x3a, 0x26, 0xc5, 0x76, 0x2c, 0x1b, 0xdb, 0xb1, 0x5c, 0xea, 0x8e, 0x89, 0xbc, 0xe7, 0x57, + 0xe7, 0xbd, 0xf0, 0xda, 0xf3, 0x5e, 0xbc, 0x4c, 0xde, 0xff, 0xa3, 0x19, 0xf9, 0x8d, 0x04, 0xf5, + 0xc5, 0x5b, 0x5a, 0x6c, 0x6a, 0xde, 0x83, 0xad, 0xa0, 0x29, 0x81, 0xfb, 0x2c, 0x57, 0x90, 0x83, + 0x0f, 0xc2, 0x7f, 0x28, 
0x8f, 0xb9, 0x48, 0x1e, 0x6f, 0xc0, 0xc6, 0xcc, 0xb6, 0xeb, 0x4d, 0xeb, + 0xea, 0x38, 0x1c, 0xbf, 0xf1, 0x22, 0x07, 0x3b, 0x71, 0xfb, 0x61, 0xcc, 0x2a, 0x7e, 0x02, 0xdb, + 0x3a, 0xd1, 0x0c, 0xfd, 0x72, 0x8b, 0x78, 0x4b, 0xf8, 0xf8, 0xff, 0x1a, 0x5e, 0x34, 0x63, 0xfe, + 0x0e, 0x50, 0x52, 0x88, 0x6b, 0xb3, 0x5d, 0x1b, 0xb5, 0xa1, 0x4c, 0x26, 0x1a, 0xb1, 0x29, 0xcb, + 0xa0, 0xb4, 0xe4, 0x3c, 0xc6, 0x0e, 0xa9, 0x9e, 0xcd, 0xb1, 0xaf, 0xcf, 0x48, 0x26, 0x30, 0x46, + 0x1f, 0x0a, 0x88, 0x5b, 0x45, 0x62, 0xc2, 0x49, 0x98, 0xe2, 0x3e, 0xf2, 0x29, 0x2e, 0xb7, 0x02, + 0x41, 0x3c, 0xdb, 0x19, 0x8c, 0xfb, 0x50, 0x60, 0x5c, 0x3e, 0x51, 0xe0, 0x08, 0xc7, 0x9d, 0x44, + 0x38, 0xae, 0xb8, 0xe2, 0x38, 0xea, 0x3b, 0x88, 0x05, 0xb9, 0x8f, 0x7c, 0x90, 0x5b, 0x4b, 0xd4, + 0x87, 0x19, 0x92, 0x3b, 0x0a, 0x91, 0x5c, 0x99, 0x3b, 0xb8, 0xb9, 0xa2, 0x19, 0x31, 0x28, 0xf7, + 0x71, 0x80, 0x72, 0x95, 0x25, 0x30, 0x18, 0x72, 0x31, 0xcb, 0x72, 0xe7, 0x73, 0x2c, 0xe7, 0x51, + 0xd7, 0xfb, 0x2b, 0x3a, 0xb3, 0x02, 0xe6, 0xce, 0xe7, 0x60, 0x6e, 0x23, 0x91, 0xdb, 0x15, 0x34, + 0xd7, 0x89, 0xa7, 0xb9, 0x55, 0xa4, 0x25, 0x9a, 0x9c, 0x0c, 0xe7, 0xc8, 0x02, 0x9c, 0x93, 0x97, + 0x20, 0xc6, 0x34, 0x48, 0x62, 0x9e, 0x7b, 0x16, 0xc3, 0x73, 0x1e, 0x73, 0x35, 0x17, 0xfe, 0x2a, + 0xe1, 0x85, 0x48, 0x00, 0x74, 0xcf, 0x62, 0x80, 0x0e, 0x25, 0x74, 0xbe, 0x92, 0xe8, 0xee, 0x45, + 0x89, 0x6e, 0x7b, 0xe9, 0xc9, 0x77, 0x5a, 0x41, 0x16, 0x20, 0x5d, 0x7f, 0x11, 0xd2, 0x79, 0xc0, + 0xb5, 0xbf, 0xc2, 0x6f, 0x0a, 0xa6, 0x3b, 0x9f, 0x63, 0xba, 0x2b, 0x4b, 0xe6, 0xe4, 0x34, 0x44, + 0x72, 0xa8, 0x2b, 0xc8, 0xc5, 0xd3, 0x7c, 0xa9, 0x24, 0x97, 0x3d, 0x9c, 0x3b, 0xcd, 0x97, 0xd6, + 0xe5, 0x4a, 0xe3, 0x0f, 0x12, 0x3b, 0x40, 0xcd, 0x54, 0x92, 0x58, 0x00, 0x93, 0x5e, 0x17, 0x80, + 0x65, 0x2f, 0x09, 0x60, 0x6f, 0x42, 0x89, 0x01, 0x18, 0xdf, 0x80, 0x72, 0x7c, 0x4b, 0x59, 0xc3, + 0xb6, 0xcd, 0x76, 0x9d, 0xc6, 0xef, 0xb3, 0xb0, 0x39, 0x53, 0x90, 0xd8, 0x0e, 0xa9, 0x59, 0xba, + 0x47, 0xa8, 0x55, 0x85, 0x3f, 0x33, 0x99, 0x8e, 0x29, 0x16, 0x87, 0x0c, 0xfe, 0xcc, 0x0e, 0x00, + 0xa6, 0xd5, 0xe3, 0x1e, 0xcb, 0x0a, 0x7b, 0x64, 0x5a, 0x41, 0x21, 0x2f, 0x8b, 0x0a, 0xbd, 0x0b, + 0xd0, 0xc3, 0xae, 0xfa, 0x53, 0x3c, 0xa4, 0x44, 0x17, 0xfb, 0x6b, 0x48, 0x82, 0xea, 0x50, 0x62, + 0x6f, 0x23, 0x97, 0xe8, 0x82, 0xf7, 0x82, 0x77, 0x74, 0x1f, 0x8a, 0x64, 0x4c, 0x86, 0xd4, 0xad, + 0xad, 0xf1, 0x21, 0x78, 0x6b, 0xc1, 0x74, 0x3f, 0x66, 0x4a, 0x87, 0x35, 0xd6, 0xf1, 0xbf, 0x7d, + 0x7d, 0x55, 0xf6, 0x6c, 0xde, 0xb7, 0x06, 0x06, 0x25, 0x03, 0x9b, 0x5e, 0x28, 0xc2, 0x0b, 0x7a, + 0x0b, 0xca, 0xac, 0x37, 0xae, 0x8d, 0x35, 0xc2, 0x37, 0xd7, 0xb2, 0x32, 0x15, 0x70, 0x74, 0xaf, + 0x28, 0x45, 0x97, 0xb1, 0x9d, 0xa3, 0x94, 0x6c, 0xc7, 0xb0, 0x1c, 0x83, 0x5e, 0x28, 0xd5, 0x01, + 0x19, 0xd8, 0x96, 0x65, 0xaa, 0xc4, 0x71, 0x2c, 0xa7, 0x71, 0x00, 0x1b, 0xd1, 0xfa, 0x8b, 0xae, + 0x43, 0xd5, 0x21, 0x94, 0x91, 0x79, 0xe4, 0xa0, 0x55, 0xf1, 0x84, 0x1e, 0xb6, 0x9e, 0xe6, 0x4b, + 0x92, 0x9c, 0x3d, 0xcd, 0x97, 0xb2, 0x72, 0xae, 0x71, 0x07, 0xd0, 0xfc, 0x4a, 0x8a, 0x39, 0x90, + 0x49, 0x71, 0x07, 0xb2, 0xdf, 0x49, 0xf0, 0xcd, 0x25, 0xeb, 0x05, 0x3d, 0x81, 0x22, 0x43, 0xed, + 0x91, 0x37, 0x01, 0x37, 0xf6, 0x3f, 0x4e, 0xbf, 0xe6, 0x9a, 0x9e, 0xec, 0x11, 0x77, 0xa3, 0x08, + 0x77, 0x8d, 0xdb, 0x50, 0x09, 0xcb, 0xd1, 0x3a, 0xac, 0x9d, 0xdf, 0xff, 0xf4, 0xfe, 0x83, 0x27, + 0xf7, 0xe5, 0x0c, 0x02, 0x28, 0x1e, 0x1c, 0x1d, 0x1d, 0x3f, 0x3c, 0x93, 0x25, 0xf6, 0xac, 0x1c, + 0x9f, 0x1e, 0x1f, 0x9d, 0xc9, 0xd9, 0xc6, 0x3f, 0xb2, 0x70, 0x25, 0x76, 0xe9, 0x85, 0x92, 0x2b, + 0xbd, 0x96, 0xe4, 0x1e, 0x02, 0xd0, 0x89, 0xea, 
0x10, 0x77, 0x64, 0x52, 0x7f, 0xcd, 0x5c, 0x5f, + 0x48, 0xf0, 0x44, 0x3b, 0x9b, 0x28, 0x5c, 0x57, 0x29, 0x53, 0xf1, 0xc4, 0x78, 0x24, 0x74, 0xb0, + 0x1e, 0xf1, 0xf5, 0xe4, 0x8a, 0xe3, 0x66, 0xba, 0xe5, 0x37, 0x3d, 0x86, 0x7b, 0x62, 0x17, 0x3d, + 0x85, 0x6f, 0xcc, 0x54, 0x88, 0x20, 0x40, 0x3e, 0x71, 0xa1, 0xb8, 0x12, 0x2d, 0x14, 0xbe, 0xef, + 0xf0, 0x02, 0x2f, 0x44, 0x17, 0xf8, 0x17, 0x12, 0x94, 0x58, 0x6e, 0xd9, 0xa9, 0x09, 0x7d, 0x02, + 0xe5, 0xa0, 0x5d, 0xa2, 0x3c, 0x5d, 0x5b, 0xd5, 0x2d, 0xd1, 0xa1, 0xa9, 0x21, 0xfa, 0x3e, 0x54, + 0x79, 0xc1, 0x55, 0x0d, 0x5d, 0xed, 0x9a, 0xd8, 0xab, 0x00, 0x1b, 0xe1, 0xb1, 0xf6, 0xdb, 0xef, + 0xb9, 0xe2, 0xe9, 0x3e, 0xf9, 0xe4, 0xae, 0x89, 0x7b, 0xca, 0x3a, 0xb7, 0x3c, 0xd1, 0xd9, 0x8b, + 0x58, 0x0c, 0xff, 0x94, 0x40, 0xf6, 0x7f, 0x51, 0x79, 0xcd, 0x2d, 0x9d, 0x5f, 0x51, 0xb9, 0x98, + 0x15, 0x85, 0x5a, 0xb0, 0x1d, 0x68, 0xa8, 0xae, 0xd1, 0x1b, 0x62, 0x3a, 0x72, 0x88, 0x20, 0x04, + 0x14, 0x7c, 0x7a, 0xe4, 0x7f, 0x99, 0x1f, 0x81, 0xc2, 0xa5, 0x46, 0x40, 0x05, 0x98, 0x52, 0x10, + 0xda, 0x81, 0x82, 0x63, 0x8d, 0x86, 0x3a, 0xef, 0x76, 0x41, 0xf1, 0x5e, 0xd0, 0x1d, 0x28, 0xb0, + 0x46, 0xfb, 0x13, 0xfb, 0xea, 0x82, 0x89, 0xed, 0x0f, 0xa0, 0x18, 0x0b, 0xcf, 0xa6, 0x61, 0x01, + 0x9a, 0xff, 0xcd, 0x6a, 0x41, 0xa0, 0xa3, 0x68, 0xa0, 0x77, 0x56, 0xfc, 0x06, 0x16, 0x1f, 0xf0, + 0x17, 0x59, 0xa8, 0x84, 0xd7, 0xd8, 0xff, 0xea, 0x9e, 0xd2, 0xf8, 0xa5, 0x04, 0xa5, 0x60, 0x10, + 0xa6, 0x98, 0x29, 0x45, 0x30, 0x73, 0x07, 0x0a, 0xc6, 0x50, 0x27, 0x13, 0x3e, 0x12, 0x55, 0xc5, + 0x7b, 0x41, 0x1b, 0x90, 0xa5, 0x13, 0x31, 0x61, 0xb3, 0x74, 0x82, 0x0e, 0xa0, 0xe8, 0x15, 0xb7, + 0xa5, 0x9c, 0x14, 0xad, 0x6d, 0x22, 0x2b, 0xc2, 0x70, 0xff, 0xcf, 0x15, 0xc8, 0x1f, 0x1c, 0x1e, + 0x9d, 0xa0, 0x07, 0x90, 0x67, 0xfc, 0x86, 0x12, 0xdc, 0xd4, 0xd5, 0x93, 0x80, 0x20, 0x52, 0xa0, + 0xc0, 0xa1, 0x0e, 0x25, 0xb9, 0xc0, 0xab, 0x27, 0xe2, 0x43, 0xd6, 0x48, 0x3e, 0x4f, 0x13, 0xdc, + 0xeb, 0xd5, 0x93, 0x40, 0x23, 0xfa, 0x21, 0xac, 0xf9, 0x67, 0x9c, 0x64, 0xd7, 0x6c, 0xf5, 0x84, + 0x0c, 0xc7, 0xba, 0xcf, 0x79, 0x10, 0x25, 0xb9, 0xfe, 0xab, 0x27, 0x42, 0x4b, 0x74, 0x0e, 0x45, + 0x71, 0xbe, 0x48, 0x74, 0xa3, 0x57, 0x4f, 0x06, 0x8b, 0xe8, 0xc7, 0x50, 0x9e, 0x1e, 0x5b, 0x93, + 0x5e, 0x79, 0xd6, 0x13, 0x33, 0x35, 0xfa, 0x09, 0x54, 0x23, 0x34, 0x89, 0xd2, 0xdc, 0x23, 0xd6, + 0x53, 0x81, 0x2a, 0x8b, 0x15, 0x41, 0x4c, 0x94, 0xe6, 0x76, 0xb1, 0x9e, 0x8a, 0x5e, 0xd1, 0x18, + 0xb6, 0xe6, 0x90, 0x13, 0xa5, 0xbd, 0x72, 0xac, 0xa7, 0xa6, 0x5a, 0x74, 0x01, 0x68, 0x9e, 0x42, + 0x51, 0xea, 0x7b, 0xc8, 0x7a, 0x7a, 0xd4, 0x45, 0x36, 0x6c, 0xce, 0xde, 0xb0, 0xa4, 0xbb, 0x9d, + 0xac, 0xa7, 0x84, 0x5f, 0x2f, 0x62, 0x94, 0x4f, 0xd3, 0xdd, 0x59, 0xd6, 0x53, 0x12, 0x31, 0xc2, + 0x00, 0xa1, 0x23, 0x78, 0xe2, 0x9b, 0xcc, 0x7a, 0x72, 0x42, 0x46, 0x3f, 0x87, 0xed, 0xb8, 0x73, + 0x7a, 0xfa, 0xeb, 0xcd, 0xfa, 0xbf, 0x81, 0xcf, 0x6c, 0x8d, 0x44, 0xcf, 0xdd, 0x69, 0x2e, 0x3d, + 0xeb, 0xa9, 0x68, 0xfa, 0xf0, 0xe1, 0x97, 0x2f, 0x77, 0xa5, 0xaf, 0x5e, 0xee, 0x4a, 0x7f, 0x7d, + 0xb9, 0x2b, 0xbd, 0x78, 0xb5, 0x9b, 0xf9, 0xea, 0xd5, 0x6e, 0xe6, 0x4f, 0xaf, 0x76, 0x33, 0x4f, + 0xbf, 0xd3, 0x33, 0x68, 0x7f, 0xd4, 0x61, 0xde, 0x5a, 0xc1, 0xdf, 0x52, 0xa6, 0x7f, 0x71, 0xb1, + 0x8d, 0x56, 0xec, 0x9f, 0x69, 0x3a, 0x45, 0xfe, 0x5b, 0xeb, 0xed, 0x7f, 0x05, 0x00, 0x00, 0xff, + 0xff, 0x0a, 0xda, 0xa4, 0xf8, 0x6c, 0x23, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ABCIClient is the client API for ABCI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ABCIClient interface { + // Echo returns back the same message it is sent. + Echo(ctx context.Context, in *v1beta1.RequestEcho, opts ...grpc.CallOption) (*v1beta1.ResponseEcho, error) + // Flush flushes the write buffer. + Flush(ctx context.Context, in *v1beta1.RequestFlush, opts ...grpc.CallOption) (*v1beta1.ResponseFlush, error) + // Info returns information about the application state. + Info(ctx context.Context, in *v1beta2.RequestInfo, opts ...grpc.CallOption) (*v1beta1.ResponseInfo, error) + // CheckTx validates a transaction. + CheckTx(ctx context.Context, in *v1beta1.RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) + // Query queries the application state. + Query(ctx context.Context, in *v1beta1.RequestQuery, opts ...grpc.CallOption) (*v1beta1.ResponseQuery, error) + // Commit commits a block of transactions. + Commit(ctx context.Context, in *v1beta1.RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) + // InitChain initializes the blockchain. + InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(ctx context.Context, in *v1beta1.RequestListSnapshots, opts ...grpc.CallOption) (*v1beta1.ResponseListSnapshots, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(ctx context.Context, in *v1beta1.RequestOfferSnapshot, opts ...grpc.CallOption) (*v1beta1.ResponseOfferSnapshot, error) + // LoadSnapshotChunk returns a chunk of snapshot. + LoadSnapshotChunk(ctx context.Context, in *v1beta1.RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseLoadSnapshotChunk, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(ctx context.Context, in *v1beta1.RequestApplySnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseApplySnapshotChunk, error) + // PrepareProposal returns a proposal for the next block. + PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*v1beta2.ResponsePrepareProposal, error) + // ProcessProposal validates a proposal. + ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*v1beta2.ResponseProcessProposal, error) + // ExtendVote extends a vote with application-injected data (vote extensions). + ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) + // VerifyVoteExtension verifies a vote extension. + VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) + // FinalizeBlock finalizes a block. 
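+	// (FinalizeBlock delivers the decided block to the application in a single
+	// call, replacing the BeginBlock/DeliverTx/EndBlock sequence of earlier
+	// ABCI versions.)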
+ FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) +} + +type aBCIClient struct { + cc grpc1.ClientConn +} + +func NewABCIClient(cc grpc1.ClientConn) ABCIClient { + return &aBCIClient{cc} +} + +func (c *aBCIClient) Echo(ctx context.Context, in *v1beta1.RequestEcho, opts ...grpc.CallOption) (*v1beta1.ResponseEcho, error) { + out := new(v1beta1.ResponseEcho) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/Echo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) Flush(ctx context.Context, in *v1beta1.RequestFlush, opts ...grpc.CallOption) (*v1beta1.ResponseFlush, error) { + out := new(v1beta1.ResponseFlush) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/Flush", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) Info(ctx context.Context, in *v1beta2.RequestInfo, opts ...grpc.CallOption) (*v1beta1.ResponseInfo, error) { + out := new(v1beta1.ResponseInfo) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) CheckTx(ctx context.Context, in *v1beta1.RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { + out := new(ResponseCheckTx) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/CheckTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) Query(ctx context.Context, in *v1beta1.RequestQuery, opts ...grpc.CallOption) (*v1beta1.ResponseQuery, error) { + out := new(v1beta1.ResponseQuery) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) Commit(ctx context.Context, in *v1beta1.RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { + out := new(ResponseCommit) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { + out := new(ResponseInitChain) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/InitChain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) ListSnapshots(ctx context.Context, in *v1beta1.RequestListSnapshots, opts ...grpc.CallOption) (*v1beta1.ResponseListSnapshots, error) { + out := new(v1beta1.ResponseListSnapshots) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) OfferSnapshot(ctx context.Context, in *v1beta1.RequestOfferSnapshot, opts ...grpc.CallOption) (*v1beta1.ResponseOfferSnapshot, error) { + out := new(v1beta1.ResponseOfferSnapshot) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) LoadSnapshotChunk(ctx context.Context, in *v1beta1.RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseLoadSnapshotChunk, error) { + out := new(v1beta1.ResponseLoadSnapshotChunk) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/LoadSnapshotChunk", in, out, opts...) 
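+	// NOTE (illustrative, not part of the generated code): every unary stub on
+	// aBCIClient has this same shape: allocate the typed response, delegate to
+	// cc.Invoke with the fully-qualified method name, and return the response
+	// once the error is checked below. A minimal caller-side sketch, assuming
+	// conn is a *grpc.ClientConn obtained elsewhere (hypothetical endpoint):
+	//
+	//	client := NewABCIClient(conn)
+	//	res, err := client.Echo(context.Background(), &v1beta1.RequestEcho{Message: "ping"})
+	//	if err != nil {
+	//		// handle the RPC failure
+	//	}
+	//	_ = res.Message // the service echoes "ping" back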
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) ApplySnapshotChunk(ctx context.Context, in *v1beta1.RequestApplySnapshotChunk, opts ...grpc.CallOption) (*v1beta1.ResponseApplySnapshotChunk, error) { + out := new(v1beta1.ResponseApplySnapshotChunk) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/ApplySnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*v1beta2.ResponsePrepareProposal, error) { + out := new(v1beta2.ResponsePrepareProposal) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/PrepareProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*v1beta2.ResponseProcessProposal, error) { + out := new(v1beta2.ResponseProcessProposal) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/ProcessProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) { + out := new(ResponseExtendVote) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/ExtendVote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) { + out := new(ResponseVerifyVoteExtension) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/VerifyVoteExtension", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIClient) FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) { + out := new(ResponseFinalizeBlock) + err := c.cc.Invoke(ctx, "/cometbft.abci.v1beta3.ABCI/FinalizeBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ABCIServer is the server API for ABCI service. +type ABCIServer interface { + // Echo returns back the same message it is sent. + Echo(context.Context, *v1beta1.RequestEcho) (*v1beta1.ResponseEcho, error) + // Flush flushes the write buffer. + Flush(context.Context, *v1beta1.RequestFlush) (*v1beta1.ResponseFlush, error) + // Info returns information about the application state. + Info(context.Context, *v1beta2.RequestInfo) (*v1beta1.ResponseInfo, error) + // CheckTx validates a transaction. + CheckTx(context.Context, *v1beta1.RequestCheckTx) (*ResponseCheckTx, error) + // Query queries the application state. + Query(context.Context, *v1beta1.RequestQuery) (*v1beta1.ResponseQuery, error) + // Commit commits a block of transactions. + Commit(context.Context, *v1beta1.RequestCommit) (*ResponseCommit, error) + // InitChain initializes the blockchain. + InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) + // ListSnapshots lists all the available snapshots. + ListSnapshots(context.Context, *v1beta1.RequestListSnapshots) (*v1beta1.ResponseListSnapshots, error) + // OfferSnapshot sends a snapshot offer. + OfferSnapshot(context.Context, *v1beta1.RequestOfferSnapshot) (*v1beta1.ResponseOfferSnapshot, error) + // LoadSnapshotChunk returns a chunk of snapshot. 
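+	// (A chunk is addressed by snapshot height, snapshot format, and chunk
+	// index; see the fields of v1beta1.RequestLoadSnapshotChunk.)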
+ LoadSnapshotChunk(context.Context, *v1beta1.RequestLoadSnapshotChunk) (*v1beta1.ResponseLoadSnapshotChunk, error) + // ApplySnapshotChunk applies a chunk of snapshot. + ApplySnapshotChunk(context.Context, *v1beta1.RequestApplySnapshotChunk) (*v1beta1.ResponseApplySnapshotChunk, error) + // PrepareProposal returns a proposal for the next block. + PrepareProposal(context.Context, *RequestPrepareProposal) (*v1beta2.ResponsePrepareProposal, error) + // ProcessProposal validates a proposal. + ProcessProposal(context.Context, *RequestProcessProposal) (*v1beta2.ResponseProcessProposal, error) + // ExtendVote extends a vote with application-injected data (vote extensions). + ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) + // VerifyVoteExtension verifies a vote extension. + VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) + // FinalizeBlock finalizes a block. + FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) +} + +// UnimplementedABCIServer can be embedded to have forward compatible implementations. +type UnimplementedABCIServer struct { +} + +func (*UnimplementedABCIServer) Echo(ctx context.Context, req *v1beta1.RequestEcho) (*v1beta1.ResponseEcho, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (*UnimplementedABCIServer) Flush(ctx context.Context, req *v1beta1.RequestFlush) (*v1beta1.ResponseFlush, error) { + return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +} +func (*UnimplementedABCIServer) Info(ctx context.Context, req *v1beta2.RequestInfo) (*v1beta1.ResponseInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedABCIServer) CheckTx(ctx context.Context, req *v1beta1.RequestCheckTx) (*ResponseCheckTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") +} +func (*UnimplementedABCIServer) Query(ctx context.Context, req *v1beta1.RequestQuery) (*v1beta1.ResponseQuery, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedABCIServer) Commit(ctx context.Context, req *v1beta1.RequestCommit) (*ResponseCommit, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedABCIServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") +} +func (*UnimplementedABCIServer) ListSnapshots(ctx context.Context, req *v1beta1.RequestListSnapshots) (*v1beta1.ResponseListSnapshots, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIServer) OfferSnapshot(ctx context.Context, req *v1beta1.RequestOfferSnapshot) (*v1beta1.ResponseOfferSnapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIServer) LoadSnapshotChunk(ctx context.Context, req *v1beta1.RequestLoadSnapshotChunk) (*v1beta1.ResponseLoadSnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIServer) ApplySnapshotChunk(ctx context.Context, req *v1beta1.RequestApplySnapshotChunk) (*v1beta1.ResponseApplySnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
ApplySnapshotChunk not implemented") +} +func (*UnimplementedABCIServer) PrepareProposal(ctx context.Context, req *RequestPrepareProposal) (*v1beta2.ResponsePrepareProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") +} +func (*UnimplementedABCIServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*v1beta2.ResponseProcessProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") +} +func (*UnimplementedABCIServer) ExtendVote(ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExtendVote not implemented") +} +func (*UnimplementedABCIServer) VerifyVoteExtension(ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") +} +func (*UnimplementedABCIServer) FinalizeBlock(ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") +} + +func RegisterABCIServer(s grpc1.Server, srv ABCIServer) { + s.RegisterService(&_ABCI_serviceDesc, srv) +} + +func _ABCI_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestEcho) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).Echo(ctx, req.(*v1beta1.RequestEcho)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestFlush) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).Flush(ctx, req.(*v1beta1.RequestFlush)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta2.RequestInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).Info(ctx, req.(*v1beta2.RequestInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestCheckTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/cometbft.abci.v1beta3.ABCI/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).CheckTx(ctx, req.(*v1beta1.RequestCheckTx)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).Query(ctx, req.(*v1beta1.RequestQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestCommit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).Commit(ctx, req.(*v1beta1.RequestCommit)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInitChain) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).InitChain(ctx, req.(*RequestInitChain)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestListSnapshots) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).ListSnapshots(ctx, req.(*v1beta1.RequestListSnapshots)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestOfferSnapshot) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).OfferSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/OfferSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).OfferSnapshot(ctx, req.(*v1beta1.RequestOfferSnapshot)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestLoadSnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).LoadSnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/LoadSnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).LoadSnapshotChunk(ctx, req.(*v1beta1.RequestLoadSnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestApplySnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).ApplySnapshotChunk(ctx, req.(*v1beta1.RequestApplySnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPrepareProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).PrepareProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/PrepareProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).PrepareProposal(ctx, req.(*RequestPrepareProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestProcessProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).ProcessProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/ProcessProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).ProcessProposal(ctx, req.(*RequestProcessProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestExtendVote) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).ExtendVote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/ExtendVote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).ExtendVote(ctx, req.(*RequestExtendVote)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestVerifyVoteExtension) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
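+		// NOTE (illustrative, not part of the generated code): with no unary
+		// interceptor configured, the handler invokes the ABCIServer
+		// implementation directly (next line); otherwise the call is routed
+		// through the interceptor, with UnaryServerInfo identifying the method.
+		// A minimal server-side sketch, assuming a hypothetical myABCI type:
+		//
+		//	type myABCI struct {
+		//		UnimplementedABCIServer // forward-compatible: unoverridden methods return Unimplemented
+		//	}
+		//
+		//	func (myABCI) Echo(_ context.Context, req *v1beta1.RequestEcho) (*v1beta1.ResponseEcho, error) {
+		//		return &v1beta1.ResponseEcho{Message: req.Message}, nil
+		//	}
+		//
+		//	// RegisterABCIServer(s, myABCI{}) then exposes it on a gRPC server s.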
return srv.(ABCIServer).VerifyVoteExtension(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/VerifyVoteExtension", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).VerifyVoteExtension(ctx, req.(*RequestVerifyVoteExtension)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCI_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFinalizeBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIServer).FinalizeBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.abci.v1beta3.ABCI/FinalizeBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIServer).FinalizeBlock(ctx, req.(*RequestFinalizeBlock)) + } + return interceptor(ctx, in, info, handler) +} + +var ABCI_serviceDesc = _ABCI_serviceDesc +var _ABCI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.abci.v1beta3.ABCI", + HandlerType: (*ABCIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCI_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCI_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCI_Info_Handler, + }, + { + MethodName: "CheckTx", + Handler: _ABCI_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCI_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCI_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCI_InitChain_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _ABCI_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + Handler: _ABCI_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCI_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCI_ApplySnapshotChunk_Handler, + }, + { + MethodName: "PrepareProposal", + Handler: _ABCI_PrepareProposal_Handler, + }, + { + MethodName: "ProcessProposal", + Handler: _ABCI_ProcessProposal_Handler, + }, + { + MethodName: "ExtendVote", + Handler: _ABCI_ExtendVote_Handler, + }, + { + MethodName: "VerifyVoteExtension", + Handler: _ABCI_VerifyVoteExtension_Handler, + }, + { + MethodName: "FinalizeBlock", + Handler: _ABCI_FinalizeBlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/abci/v1beta3/types.proto", +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Request_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Request_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Request_ExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendVote != nil { + { + size, err := m.ExtendVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Request_VerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VerifyVoteExtension != nil { + { + size, err := m.VerifyVoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} +func (m *Request_FinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
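+	// NOTE (illustrative, not part of the generated code): Request_FinalizeBlock
+	// and the other Request_* wrappers above implement the Request.Value oneof;
+	// Request.MarshalToSizedBuffer delegates to whichever wrapper is set.
+	// Because encoding runs back to front, each wrapper writes its payload
+	// first and prepends its field tag afterwards.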
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FinalizeBlock != nil { + { + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + return len(dAtA) - i, nil +} +func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppStateBytes) > 0 { + i -= len(m.AppStateBytes) + copy(dAtA[i:], m.AppStateBytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) + i-- + dAtA[i] = 0x2a + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + n18, err18 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err18 != nil { + return 0, err18 + } + i -= n18 + i = encodeVarintTypes(dAtA, i, uint64(n18)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + n19, err19 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err19 != nil { + return 0, err19 + } + i -= n19 + i = encodeVarintTypes(dAtA, i, uint64(n19)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.LocalLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.MaxTxBytes != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + n21, err21 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err21 != nil { + return 0, err21 + } + i -= n21 + i = encodeVarintTypes(dAtA, i, uint64(n21)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ProposedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RequestExtendVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + 
dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + { + size, err := m.ProposedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + n24, err24 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err24 != nil { + return 0, err24 + } + i -= n24 + i = encodeVarintTypes(dAtA, i, uint64(n24)) + i-- + dAtA[i] = 0x1a + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestVerifyVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0x22 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x42 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a + } + n25, 
err25 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err25 != nil { + return 0, err25 + } + i -= n25 + i = encodeVarintTypes(dAtA, i, uint64(n25)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if len(m.Misbehavior) > 0 { + for iNdEx := len(m.Misbehavior) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Misbehavior[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.DecidedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exception != nil { + { + size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Info != nil { + { + size, err := 
m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } 
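+	// NOTE (illustrative, not part of the generated code): the tag byte written
+	// above is the protobuf field key: 0x7a == (15 << 3) | 2, i.e. field number
+	// 15 with wire type 2 (length-delimited). Field numbers 16 and up need a
+	// two-byte varint key, which is why the following wrappers emit byte pairs
+	// such as 0x82 0x1 (field 16) and 0x8a 0x1 (field 17).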
+ return len(dAtA) - i, nil +} +func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Response_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Response_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Response_ExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendVote != nil { + { + size, err := m.ExtendVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} +func (m *Response_VerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VerifyVoteExtension != nil { + { + size, err := m.VerifyVoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + return len(dAtA) - i, nil +} +func (m *Response_FinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FinalizeBlock != nil { + { + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + return len(dAtA) - i, nil +} +func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
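+// NOTE (illustrative, not part of the generated code): every
+// MarshalToSizedBuffer in this file encodes back to front: the cursor i starts
+// at len(dAtA) and moves toward zero, so a length-delimited field can be
+// framed by writing its payload first and then prepending the varint length
+// and key, with no copying. A sketch of the backward varint writer this relies
+// on (the generated helper encodeVarintTypes, defined later in this file, has
+// this shape):
+//
+//	func prependVarint(dAtA []byte, offset int, v uint64) int {
+//		offset -= varintLen(v) // varintLen: number of bytes v occupies as a varint
+//		base := offset
+//		for v >= 1<<7 {
+//			dAtA[offset] = uint8(v&0x7f | 0x80)
+//			v >>= 7
+//			offset++
+//		}
+//		dAtA[offset] = uint8(v)
+//		return base
+//	}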
+func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x1a + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x18 + } + return len(dAtA) - i, nil +} + +func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFinalizeBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x2a + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil +} + +func (m *ExtendedVoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedVoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedVoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- + dAtA[i] = 0x28 + } + if len(m.ExtensionSignature) > 0 { + i -= len(m.ExtensionSignature) + copy(dAtA[i:], m.ExtensionSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) + i-- + dAtA[i] = 0x22 + } + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExtendedCommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedCommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExecTxResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecTxResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecTxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = 
encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TxResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Request_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + 
var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendVote != nil { + l = m.ExtendVote.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_VerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VerifyVoteExtension != nil { + l = m.VerifyVoteExtension.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_FinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *RequestInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppStateBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func (m *RequestPrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxTxBytes)) + } + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.LocalLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + 
sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.ProposedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.ProposedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestVerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.DecidedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Misbehavior) > 0 { + for _, e := range m.Misbehavior { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Response) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Response_Exception) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exception != nil { + l = m.Exception.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} +func (m *Response_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendVote != nil { + l = m.ExtendVote.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_VerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VerifyVoteExtension != nil { + l = m.VerifyVoteExtension.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_FinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ResponseInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseCheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } + return n +} + +func (m *ResponseExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseVerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovTypes(uint64(m.Status)) + } + return n +} + +func (m *ResponseFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *VoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + return n +} + +func (m *ExtendedVoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ExtensionSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + return n +} + +func (m *CommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ExtendedCommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ExecTxResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if 
m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *TxResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Result.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Echo{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Flush{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta2.RequestInfo{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Info{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_InitChain{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Query{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_CheckTx{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Commit{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + v := &v1beta1.RequestListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ListSnapshots{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_OfferSnapshot{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_LoadSnapshotChunk{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.RequestApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ApplySnapshotChunk{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestPrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_PrepareProposal{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ProcessProposal{v} + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestExtendVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ExtendVote{v} + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestVerifyVoteExtension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_VerifyVoteExtension{v} + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestFinalizeBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_FinalizeBlock{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &v1.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, v1beta1.ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppStateBytes = 
append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) + if m.AppStateBytes == nil { + m.AppStateBytes = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTxBytes", wireType) + } + m.MaxTxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Misbehavior = append(m.Misbehavior, v1beta2.Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestProcessProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposedLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProposedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Misbehavior = append(m.Misbehavior, v1beta2.Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestExtendVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
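// A standalone sketch (illustrative; readUvarint is not a name from this
// file) of the varint loop that every generated Unmarshal method inlines
// for each tag and length prefix: seven payload bits per byte, the high
// bit marks continuation, and the shift guard rejects values wider than
// 64 bits, mirroring the ErrIntOverflowTypes check above.
package main

import (
	"fmt"
	"io"
)

func readUvarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows a uint64") // cf. ErrIntOverflowTypes
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF // truncated input, as in the generated code
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of this varint
			return v, iNdEx, nil
		}
	}
}

func main() {
	// 0x2a decodes to field 5, wire type 2 — the ProposedLastCommit case
	// that follows this point in RequestExtendVote.Unmarshal.
	wire, next, _ := readUvarint([]byte{0x2a}, 0)
	fmt.Println(wire>>3, wire&0x7, next) // prints: 5 2 1
}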
case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposedLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProposedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Misbehavior = append(m.Misbehavior, v1beta2.Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
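+ // [editorial note, illustrative] Scalar bytes fields reuse any existing backing array via + // append(m.X[:0], src...) while still copying out of dAtA; the nil check below then + // normalizes a present-but-empty field to a non-nil empty slice.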
+ if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestVerifyVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) 
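+ // [editorial note, illustrative] Every field starts with a varint tag split as + // fieldNum := wire >> 3 and wireType := wire & 0x7: type 0 is a varint, type 2 is + // length-delimited, and the obsolete end-group marker (type 4) is rejected outright; + // e.g. tag byte 0x12 selects field 2 with wire type 2.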
+ if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) + if m.VoteExtension == nil { + m.VoteExtension = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestFinalizeBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DecidedLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DecidedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Misbehavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Misbehavior = append(m.Misbehavior, v1beta2.Misbehavior{}) + if err := m.Misbehavior[len(m.Misbehavior)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
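+ // [editorial note, illustrative] The Time field in case 6 above uses gogoproto's stdtime + // mapping: the embedded google.protobuf.Timestamp payload is decoded directly into a Go + // time.Time by github_com_cosmos_gogoproto_types.StdTimeUnmarshal instead of a generated struct.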
+ if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseException{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Exception{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Echo{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Flush{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Info{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_InitChain{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Query{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_CheckTx{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
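+ // [editorial note, illustrative] Response is a protobuf oneof: each case allocates the + // concrete wrapper and stores it in the shared interface field, e.g. m.Value = &Response_Commit{v} + // below, so at most one variant survives decoding; the gaps in the case numbers (5, 8, 10, 11) + // correspond to fields presumably reserved when ABCI 2.0 dropped SetOption, BeginBlock, + // DeliverTx and EndBlock.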
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Commit{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ListSnapshots{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_OfferSnapshot{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_LoadSnapshotChunk{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta1.ResponseApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ApplySnapshotChunk{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field PrepareProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta2.ResponsePrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_PrepareProposal{v} + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1beta2.ResponseProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ProcessProposal{v} + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseExtendVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ExtendVote{v} + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseVerifyVoteExtension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_VerifyVoteExtension{v} + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseFinalizeBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + m.Value = &Response_FinalizeBlock{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &v1.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, v1beta1.ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
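+ // [editorial note, illustrative] Three guards protect every length-delimited read: a negative + // decoded length rejects varints that overflow int, postIndex < 0 rejects integer overflow in + // iNdEx + byteLen, and postIndex > l reports a truncated buffer as io.ErrUnexpectedEOF.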
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
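+ // [editorial note, illustrative] Field types deliberately mix proto package versions: this + // message still decodes into v1beta1 and v1beta2 types (e.g. Events below uses v1beta2.Event), + // which is how the versioned CometBFT proto packages stay wire-compatible across ABCI revisions.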
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, v1beta2.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + 
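+ // [editorial note, illustrative] String fields are materialized with string(dAtA[iNdEx:postIndex]), + // which copies the bytes, so decoded strings never alias the caller's buffer.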
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseExtendVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) 
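+ // [editorial note, illustrative] ResponseCommit above parses only field 3 (RetainHeight); the + // lower field numbers are presumably reserved from the dropped data field, and any tag the + // switch does not recognize falls through to skipTypes in the default branch.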
+ if m.VoteExtension == nil { + m.VoteExtension = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseVerifyVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= ResponseVerifyVoteExtension_VerifyStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseFinalizeBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, v1beta2.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
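+ // [editorial note, illustrative] Enum fields decode as plain varints OR-ed into the typed value, + // as in m.Status |= ResponseVerifyVoteExtension_VerifyStatus(b&0x7F) << shift above; no range + // check is applied, so unknown enum numbers are preserved as received.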
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxResults = append(m.TxResults, &ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, v1beta1.ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &v1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
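+ // [editorial note, illustrative] Repeated message fields append a zero element and then + // unmarshal into it in place: pointer slices get &ExecTxResult{}, while non-nullable slices + // get value literals such as v1beta1.ValidatorUpdate{} and v1beta2.Event{}.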
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } + m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= v1beta11.BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedVoteInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedVoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) + if m.VoteExtension == nil { + m.VoteExtension = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) + if m.ExtensionSignature == nil { + m.ExtensionSignature = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } + m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= v1beta11.BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Votes = append(m.Votes, VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedCommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Votes = append(m.Votes, ExtendedVoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecTxResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecTxResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecTxResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, v1beta2.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/blocksync/message.go b/api/cometbft/blocksync/v1/message.go similarity index 85% rename from proto/tendermint/blocksync/message.go rename to api/cometbft/blocksync/v1/message.go index bce83de14ce..8b5bec3fcc9 100644 --- a/proto/tendermint/blocksync/message.go +++ b/api/cometbft/blocksync/v1/message.go @@ -1,19 +1,11 @@ -package blocksync +package v1 import ( "fmt" "github.com/cosmos/gogoproto/proto" - - "github.com/cometbft/cometbft/p2p" ) -var _ p2p.Wrapper = &StatusRequest{} -var _ p2p.Wrapper = &StatusResponse{} -var _ p2p.Wrapper = &NoBlockResponse{} -var _ p2p.Wrapper = &BlockResponse{} -var _ p2p.Wrapper = &BlockRequest{} - const ( BlockResponseMessagePrefixSize = 4 BlockResponseMessageFieldKeySize = 1 diff --git a/proto/tendermint/blocksync/types.pb.go b/api/cometbft/blocksync/v1/types.pb.go 
similarity index 89% rename from proto/tendermint/blocksync/types.pb.go rename to api/cometbft/blocksync/v1/types.pb.go index a9791970993..f1fde53df04 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/api/cometbft/blocksync/v1/types.pb.go @@ -1,11 +1,11 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/blocksync/types.proto +// source: cometbft/blocksync/v1/types.proto -package blocksync +package v1 import ( fmt "fmt" - types "github.com/cometbft/cometbft/proto/tendermint/types" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" proto "github.com/cosmos/gogoproto/proto" io "io" math "math" @@ -32,7 +32,7 @@ func (m *BlockRequest) Reset() { *m = BlockRequest{} } func (m *BlockRequest) String() string { return proto.CompactTextString(m) } func (*BlockRequest) ProtoMessage() {} func (*BlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_19b397c236e0fa07, []int{0} + return fileDescriptor_67182bd6cb30f2ef, []int{0} } func (m *BlockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ func (m *NoBlockResponse) Reset() { *m = NoBlockResponse{} } func (m *NoBlockResponse) String() string { return proto.CompactTextString(m) } func (*NoBlockResponse) ProtoMessage() {} func (*NoBlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_19b397c236e0fa07, []int{1} + return fileDescriptor_67182bd6cb30f2ef, []int{1} } func (m *NoBlockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -113,59 +113,6 @@ func (m *NoBlockResponse) GetHeight() int64 { return 0 } -// BlockResponse returns block to the requested -type BlockResponse struct { - Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - ExtCommit *types.ExtendedCommit `protobuf:"bytes,2,opt,name=ext_commit,json=extCommit,proto3" json:"ext_commit,omitempty"` -} - -func (m *BlockResponse) Reset() { *m = BlockResponse{} } -func (m *BlockResponse) String() string { return proto.CompactTextString(m) } -func (*BlockResponse) ProtoMessage() {} -func (*BlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_19b397c236e0fa07, []int{2} -} -func (m *BlockResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockResponse.Merge(m, src) -} -func (m *BlockResponse) XXX_Size() int { - return m.Size() -} -func (m *BlockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BlockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockResponse proto.InternalMessageInfo - -func (m *BlockResponse) GetBlock() *types.Block { - if m != nil { - return m.Block - } - return nil -} - -func (m *BlockResponse) GetExtCommit() *types.ExtendedCommit { - if m != nil { - return m.ExtCommit - } - return nil -} - // StatusRequest requests the status of a peer. 
type StatusRequest struct { } @@ -174,7 +121,7 @@ func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (m *StatusRequest) String() string { return proto.CompactTextString(m) } func (*StatusRequest) ProtoMessage() {} func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_19b397c236e0fa07, []int{3} + return fileDescriptor_67182bd6cb30f2ef, []int{2} } func (m *StatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -213,7 +160,7 @@ func (m *StatusResponse) Reset() { *m = StatusResponse{} } func (m *StatusResponse) String() string { return proto.CompactTextString(m) } func (*StatusResponse) ProtoMessage() {} func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_19b397c236e0fa07, []int{4} + return fileDescriptor_67182bd6cb30f2ef, []int{3} } func (m *StatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -256,7 +203,63 @@ func (m *StatusResponse) GetBase() int64 { return 0 } +// BlockResponse returns block to the requested +type BlockResponse struct { + Block *v1.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + ExtCommit *v1.ExtendedCommit `protobuf:"bytes,2,opt,name=ext_commit,json=extCommit,proto3" json:"ext_commit,omitempty"` +} + +func (m *BlockResponse) Reset() { *m = BlockResponse{} } +func (m *BlockResponse) String() string { return proto.CompactTextString(m) } +func (*BlockResponse) ProtoMessage() {} +func (*BlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_67182bd6cb30f2ef, []int{4} +} +func (m *BlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockResponse.Merge(m, src) +} +func (m *BlockResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockResponse proto.InternalMessageInfo + +func (m *BlockResponse) GetBlock() *v1.Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *BlockResponse) GetExtCommit() *v1.ExtendedCommit { + if m != nil { + return m.ExtCommit + } + return nil +} + +// Message is an abstract blocksync message. type Message struct { + // Sum of all possible messages. 
+ // // Types that are valid to be assigned to Sum: // *Message_BlockRequest // *Message_NoBlockResponse @@ -270,7 +273,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_19b397c236e0fa07, []int{5} + return fileDescriptor_67182bd6cb30f2ef, []int{5} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -381,44 +384,44 @@ func (*Message) XXX_OneofWrappers() []interface{} { } func init() { - proto.RegisterType((*BlockRequest)(nil), "tendermint.blocksync.BlockRequest") - proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blocksync.NoBlockResponse") - proto.RegisterType((*BlockResponse)(nil), "tendermint.blocksync.BlockResponse") - proto.RegisterType((*StatusRequest)(nil), "tendermint.blocksync.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "tendermint.blocksync.StatusResponse") - proto.RegisterType((*Message)(nil), "tendermint.blocksync.Message") -} - -func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) } - -var fileDescriptor_19b397c236e0fa07 = []byte{ - // 405 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xcd, 0x4a, 0xc3, 0x40, - 0x14, 0x85, 0x13, 0xd3, 0x56, 0xbc, 0x36, 0x0d, 0x06, 0xd1, 0x22, 0x12, 0x4a, 0xfc, 0x41, 0x17, - 0x26, 0xa0, 0x0b, 0x37, 0x82, 0x50, 0x11, 0x2a, 0xf8, 0x83, 0xe9, 0xce, 0x4d, 0xe9, 0xa4, 0x63, - 0x1b, 0x34, 0x99, 0xda, 0x99, 0x40, 0xbb, 0xf2, 0x15, 0x7c, 0x01, 0xdf, 0xc7, 0x65, 0x97, 0x2e, - 0xa5, 0x7d, 0x11, 0xe9, 0x4c, 0x9a, 0xa6, 0x31, 0x66, 0x37, 0xb9, 0x73, 0xee, 0x97, 0x73, 0xee, - 0x65, 0xa0, 0xc6, 0x70, 0xd0, 0xc1, 0x03, 0xdf, 0x0b, 0x98, 0x8d, 0x5e, 0x89, 0xfb, 0x42, 0x47, - 0x81, 0x6b, 0xb3, 0x51, 0x1f, 0x53, 0xab, 0x3f, 0x20, 0x8c, 0xe8, 0x9b, 0x0b, 0x85, 0x15, 0x2b, - 0x76, 0x76, 0x13, 0x7d, 0x5c, 0x2d, 0xba, 0x45, 0x4f, 0xc6, 0x6d, 0x82, 0x68, 0x1e, 0x42, 0xb9, - 0x3e, 0x13, 0x3b, 0xf8, 0x2d, 0xc4, 0x94, 0xe9, 0x5b, 0x50, 0xea, 0x61, 0xaf, 0xdb, 0x63, 0x55, - 0xb9, 0x26, 0x1f, 0x29, 0x4e, 0xf4, 0x65, 0x1e, 0x83, 0x76, 0x4f, 0x22, 0x25, 0xed, 0x93, 0x80, - 0xe2, 0x7f, 0xa5, 0xef, 0xa0, 0x2e, 0x0b, 0x4f, 0xa0, 0xc8, 0x0d, 0x71, 0xdd, 0xfa, 0xe9, 0xb6, - 0x95, 0x48, 0x21, 0xbc, 0x08, 0xbd, 0x50, 0xe9, 0x97, 0x00, 0x78, 0xc8, 0x5a, 0x2e, 0xf1, 0x7d, - 0x8f, 0x55, 0x57, 0x78, 0x4f, 0xed, 0x6f, 0xcf, 0xf5, 0x90, 0x97, 0x3a, 0x57, 0x5c, 0xe7, 0xac, - 0xe1, 0x21, 0x13, 0x47, 0x53, 0x03, 0xb5, 0xc9, 0xda, 0x2c, 0xa4, 0x51, 0x28, 0xf3, 0x02, 0x2a, - 0xf3, 0x42, 0xbe, 0x77, 0x5d, 0x87, 0x02, 0x6a, 0x53, 0xcc, 0xff, 0xaa, 0x38, 0xfc, 0x6c, 0x7e, - 0x2a, 0xb0, 0x7a, 0x87, 0x29, 0x6d, 0x77, 0xb1, 0x7e, 0x03, 0x2a, 0x37, 0xd9, 0x1a, 0x08, 0x74, - 0x14, 0xc9, 0xb4, 0xb2, 0x16, 0x63, 0x25, 0x27, 0xdb, 0x90, 0x9c, 0x32, 0x4a, 0x4e, 0xba, 0x09, - 0x1b, 0x01, 0x69, 0xcd, 0x69, 0xc2, 0x57, 0x94, 0xf6, 0x20, 0x1b, 0x97, 0x5a, 0x40, 0x43, 0x72, - 0xb4, 0x20, 0xb5, 0x93, 0x5b, 0xa8, 0xa4, 0x88, 0x0a, 0x27, 0xee, 0xe5, 0x1a, 0x8c, 0x79, 0x2a, - 0x4a, 0xd3, 0x28, 0x9f, 0x5b, 0x1c, 0xb7, 0x90, 0x47, 0x5b, 0x1a, 0xfa, 0x8c, 0x46, 0x93, 0x05, - 0xfd, 0x01, 0xb4, 0x98, 0x16, 0x99, 0x2b, 0x72, 0xdc, 0x7e, 0x3e, 0x2e, 0x76, 0x57, 0xa1, 0x4b, - 0x95, 0x7a, 0x11, 0x14, 0x1a, 0xfa, 0xf5, 0xc7, 0xaf, 0x89, 0x21, 0x8f, 0x27, 0x86, 0xfc, 0x33, - 0x31, 0xe4, 0x8f, 0xa9, 0x21, 0x8d, 0xa7, 0x86, 0xf4, 0x3d, 0x35, 0xa4, 0xa7, 0xf3, 0xae, 0xc7, - 0x7a, 0x21, 0xb2, 
0x5c, 0xe2, 0xdb, 0x2e, 0xf1, 0x31, 0x43, 0xcf, 0x6c, 0x71, 0xe0, 0x0f, 0xc0, - 0xce, 0x7a, 0x73, 0xa8, 0xc4, 0xef, 0xce, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x56, 0x8a, 0x71, - 0xcf, 0x92, 0x03, 0x00, 0x00, + proto.RegisterType((*BlockRequest)(nil), "cometbft.blocksync.v1.BlockRequest") + proto.RegisterType((*NoBlockResponse)(nil), "cometbft.blocksync.v1.NoBlockResponse") + proto.RegisterType((*StatusRequest)(nil), "cometbft.blocksync.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "cometbft.blocksync.v1.StatusResponse") + proto.RegisterType((*BlockResponse)(nil), "cometbft.blocksync.v1.BlockResponse") + proto.RegisterType((*Message)(nil), "cometbft.blocksync.v1.Message") +} + +func init() { proto.RegisterFile("cometbft/blocksync/v1/types.proto", fileDescriptor_67182bd6cb30f2ef) } + +var fileDescriptor_67182bd6cb30f2ef = []byte{ + // 416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x6e, 0xda, 0x40, + 0x10, 0xc6, 0xed, 0x1a, 0xa8, 0x3a, 0x60, 0xac, 0x5a, 0x6a, 0x85, 0x2a, 0xd5, 0x2a, 0x6e, 0x8b, + 0xda, 0xcb, 0x5a, 0x50, 0xa9, 0xa7, 0x1e, 0x2a, 0xa2, 0x48, 0x28, 0x12, 0x11, 0x72, 0x72, 0xca, + 0x05, 0xd9, 0x66, 0x03, 0x56, 0x62, 0xaf, 0xc3, 0xae, 0x11, 0x1c, 0xf3, 0x06, 0x79, 0x86, 0x3c, + 0x4d, 0x8e, 0x1c, 0x73, 0x8c, 0xe0, 0x45, 0x22, 0xaf, 0xcd, 0xca, 0x58, 0x86, 0xdc, 0xc6, 0xb3, + 0xdf, 0xf7, 0xf3, 0xfc, 0xd1, 0x40, 0xdb, 0x23, 0x01, 0x66, 0xee, 0x35, 0xb3, 0xdc, 0x5b, 0xe2, + 0xdd, 0xd0, 0x55, 0xe8, 0x59, 0x8b, 0xae, 0xc5, 0x56, 0x11, 0xa6, 0x28, 0x9a, 0x13, 0x46, 0xf4, + 0x4f, 0x3b, 0x09, 0x12, 0x12, 0xb4, 0xe8, 0x7e, 0xf9, 0x2a, 0x9c, 0x5c, 0x9c, 0xb8, 0xf8, 0x7b, + 0xea, 0x2a, 0x7b, 0xce, 0x41, 0xcd, 0x0e, 0x34, 0xfa, 0x89, 0xda, 0xc6, 0x77, 0x31, 0xa6, 0x4c, + 0xff, 0x0c, 0xb5, 0x19, 0xf6, 0xa7, 0x33, 0xd6, 0x92, 0xbf, 0xc9, 0xbf, 0x14, 0x3b, 0xfb, 0x32, + 0x7f, 0x83, 0x76, 0x4e, 0x32, 0x25, 0x8d, 0x48, 0x48, 0xf1, 0x41, 0xa9, 0x06, 0xea, 0x05, 0x73, + 0x58, 0x4c, 0x33, 0xa6, 0xf9, 0x0f, 0x9a, 0xbb, 0xc4, 0x71, 0xab, 0xae, 0x43, 0xc5, 0x75, 0x28, + 0x6e, 0xbd, 0xe3, 0x59, 0x1e, 0x9b, 0xf7, 0x32, 0xa8, 0xfb, 0x3f, 0x46, 0x50, 0xe5, 0x1d, 0x72, + 0x73, 0xbd, 0xd7, 0x42, 0x62, 0x30, 0x69, 0x67, 0x8b, 0x2e, 0x4a, 0x0d, 0xa9, 0x4c, 0xff, 0x0f, + 0x80, 0x97, 0x6c, 0xec, 0x91, 0x20, 0xf0, 0x19, 0x67, 0xd7, 0x7b, 0xed, 0x12, 0xd3, 0xe9, 0x92, + 0xe1, 0x70, 0x82, 0x27, 0x27, 0x5c, 0x68, 0x7f, 0xc0, 0x4b, 0x96, 0x86, 0xe6, 0xa3, 0x02, 0xef, + 0x87, 0x98, 0x52, 0x67, 0x8a, 0xf5, 0x33, 0x50, 0x39, 0x76, 0x3c, 0x4f, 0xdb, 0xcb, 0xaa, 0xf8, + 0x8e, 0x4a, 0xd7, 0x83, 0xf2, 0xd3, 0x1d, 0x48, 0x76, 0xc3, 0xcd, 0x4f, 0xfb, 0x12, 0x3e, 0x86, + 0x64, 0xbc, 0xc3, 0xa5, 0xed, 0x65, 0x05, 0x76, 0x0e, 0xf0, 0x0a, 0x5b, 0x18, 0x48, 0xb6, 0x16, + 0x16, 0x16, 0x33, 0x84, 0x66, 0x01, 0xa9, 0x70, 0xe4, 0x8f, 0xe3, 0x25, 0x0a, 0xa0, 0xea, 0x16, + 0x71, 0x94, 0xaf, 0x4f, 0x74, 0x5c, 0x39, 0x8a, 0xdb, 0x5b, 0x7e, 0x82, 0xa3, 0xf9, 0x84, 0x3e, + 0x02, 0x4d, 0xe0, 0xb2, 0xf2, 0xaa, 0x9c, 0xf7, 0xf3, 0x0d, 0x9e, 0xa8, 0xaf, 0x49, 0xf7, 0x32, + 0xfd, 0x2a, 0x28, 0x34, 0x0e, 0xfa, 0xa3, 0xa7, 0x8d, 0x21, 0xaf, 0x37, 0x86, 0xfc, 0xb2, 0x31, + 0xe4, 0x87, 0xad, 0x21, 0xad, 0xb7, 0x86, 0xf4, 0xbc, 0x35, 0xa4, 0xab, 0xbf, 0x53, 0x9f, 0xcd, + 0x62, 0x37, 0xe1, 0x5b, 0xe2, 0x1c, 0x44, 0xe0, 0x44, 0xbe, 0x55, 0x7a, 0x7d, 0x6e, 0x8d, 0xdf, + 0xc8, 0x9f, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xa3, 0xca, 0xb4, 0x9d, 0x03, 0x00, 0x00, } func (m *BlockRequest) Marshal() (dAtA []byte, err error) { @@ -477,7 +480,7 @@ func (m 
*NoBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *BlockResponse) Marshal() (dAtA []byte, err error) { +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -487,44 +490,20 @@ func (m *BlockResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ExtCommit != nil { - { - size, err := m.ExtCommit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Block != nil { - { - size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -534,20 +513,30 @@ func (m *StatusRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Base != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Base)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { +func (m *BlockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -557,25 +546,39 @@ func (m *StatusResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *BlockResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Base != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Base)) + if m.ExtCommit != nil { + { + size, err := m.ExtCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -752,43 +755,43 @@ func (m *NoBlockResponse) Size() (n int) { return n } -func (m 
*BlockResponse) Size() (n int) { +func (m *StatusRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Block != nil { - l = m.Block.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.ExtCommit != nil { - l = m.ExtCommit.Size() - n += 1 + l + sovTypes(uint64(l)) - } return n } -func (m *StatusRequest) Size() (n int) { +func (m *StatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Base != 0 { + n += 1 + sovTypes(uint64(m.Base)) + } return n } -func (m *StatusResponse) Size() (n int) { +func (m *BlockResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovTypes(uint64(l)) } - if m.Base != 0 { - n += 1 + sovTypes(uint64(m.Base)) + if m.ExtCommit != nil { + l = m.ExtCommit.Size() + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -1010,7 +1013,7 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockResponse) Unmarshal(dAtA []byte) error { +func (m *StatusRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1033,84 +1036,12 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Block == nil { - m.Block = &types.Block{} - } - if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtCommit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExtCommit == nil { - m.ExtCommit = &types.ExtendedCommit{} - } - if err := m.ExtCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -1132,7 +1063,7 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatusRequest) Unmarshal(dAtA []byte) error { +func (m *StatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1155,12 +1086,50 @@ func 
(m *StatusRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Base", wireType) + } + m.Base = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Base |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -1182,7 +1151,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatusResponse) Unmarshal(dAtA []byte) error { +func (m *BlockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1205,17 +1174,17 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: BlockResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) } - m.Height = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1225,16 +1194,33 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &v1.Block{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Base", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtCommit", wireType) } - m.Base = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1244,11 +1230,28 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Base |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtCommit == nil { + m.ExtCommit = &v1.ExtendedCommit{} + } + if err := m.ExtCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/api/cometbft/blocksync/v1beta1/message.go b/api/cometbft/blocksync/v1beta1/message.go new file mode 100644 index 00000000000..e43f971304e --- /dev/null +++ b/api/cometbft/blocksync/v1beta1/message.go @@ -0,0 +1,66 @@ +package v1beta1 + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" +) + +const ( + BlockResponseMessagePrefixSize = 4 + BlockResponseMessageFieldKeySize = 1 +) + +func (m *BlockRequest) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_BlockRequest{BlockRequest: m} + return bm +} + +func (m *BlockResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_BlockResponse{BlockResponse: m} + return bm +} + +func (m *NoBlockResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_NoBlockResponse{NoBlockResponse: m} + return bm +} + +func (m *StatusRequest) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_StatusRequest{StatusRequest: m} + return bm +} + +func (m *StatusResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_StatusResponse{StatusResponse: m} + return bm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped blockchain +// message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_BlockRequest: + return m.GetBlockRequest(), nil + + case *Message_BlockResponse: + return m.GetBlockResponse(), nil + + case *Message_NoBlockResponse: + return m.GetNoBlockResponse(), nil + + case *Message_StatusRequest: + return m.GetStatusRequest(), nil + + case *Message_StatusResponse: + return m.GetStatusResponse(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/api/cometbft/blocksync/v1beta1/types.pb.go b/api/cometbft/blocksync/v1beta1/types.pb.go new file mode 100644 index 00000000000..616c77efd35 --- /dev/null +++ b/api/cometbft/blocksync/v1beta1/types.pb.go @@ -0,0 +1,1524 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/blocksync/v1beta1/types.proto + +package v1beta1 + +import ( + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BlockRequest requests a block for a specific height +type BlockRequest struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *BlockRequest) Reset() { *m = BlockRequest{} } +func (m *BlockRequest) String() string { return proto.CompactTextString(m) } +func (*BlockRequest) ProtoMessage() {} +func (*BlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1cc1b1b3561d256d, []int{0} +} +func (m *BlockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockRequest.Merge(m, src) +} +func (m *BlockRequest) XXX_Size() int { + return m.Size() +} +func (m *BlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockRequest proto.InternalMessageInfo + +func (m *BlockRequest) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// NoBlockResponse informs the node that the peer does not have block at the requested height +type NoBlockResponse struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *NoBlockResponse) Reset() { *m = NoBlockResponse{} } +func (m *NoBlockResponse) String() string { return proto.CompactTextString(m) } +func (*NoBlockResponse) ProtoMessage() {} +func (*NoBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1cc1b1b3561d256d, []int{1} +} +func (m *NoBlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NoBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NoBlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NoBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NoBlockResponse.Merge(m, src) +} +func (m *NoBlockResponse) XXX_Size() int { + return m.Size() +} +func (m *NoBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NoBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NoBlockResponse proto.InternalMessageInfo + +func (m *NoBlockResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// BlockResponse returns block to the requested +type BlockResponse struct { + Block *v1beta1.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` +} + +func (m *BlockResponse) Reset() { *m = BlockResponse{} } +func (m *BlockResponse) String() string { return proto.CompactTextString(m) } +func (*BlockResponse) ProtoMessage() {} +func (*BlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1cc1b1b3561d256d, []int{2} +} +func (m *BlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
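(Illustrative aside, not part of the patch.) Every Unmarshal method in these generated files decodes integers with the same 7-bit varint loop: each byte contributes 7 payload bits, the high bit signals continuation, and shift is capped at 64 to guard against overflow — exactly what the recurring `b < 0x80` test and the ErrIntOverflowTypes / io.ErrUnexpectedEOF checks implement. A minimal stand-alone sketch of that scheme, with hypothetical function names:

package main

import (
	"errors"
	"fmt"
)

// putUvarint appends v in protobuf varint form: low 7 bits per byte,
// high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint mirrors the generated decode loops: accumulate 7 bits per byte,
// stop on a byte below 0x80, and fail once shift reaches 64.
func uvarint(buf []byte) (uint64, int, error) {
	var x uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflow") // cf. ErrIntOverflowTypes
		}
		i := int(shift / 7)
		if i >= len(buf) {
			return 0, 0, errors.New("unexpected EOF") // cf. io.ErrUnexpectedEOF
		}
		b := buf[i]
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, i + 1, nil
		}
	}
}

func main() {
	enc := putUvarint(nil, 300) // encodes as 0xAC 0x02
	v, n, _ := uvarint(enc)
	fmt.Printf("%x -> %d (%d bytes)\n", enc, v, n)
}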
+func (m *BlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockResponse.Merge(m, src) +} +func (m *BlockResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockResponse proto.InternalMessageInfo + +func (m *BlockResponse) GetBlock() *v1beta1.Block { + if m != nil { + return m.Block + } + return nil +} + +// StatusRequest requests the status of a peer. +type StatusRequest struct { +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1cc1b1b3561d256d, []int{3} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +// StatusResponse is a peer response to inform their status. +type StatusResponse struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1cc1b1b3561d256d, []int{4} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *StatusResponse) GetBase() int64 { + if m != nil { + return m.Base + } + return 0 +} + +// Message is an abstract blocksync message. +type Message struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Sum: + // *Message_BlockRequest + // *Message_NoBlockResponse + // *Message_BlockResponse + // *Message_StatusRequest + // *Message_StatusResponse + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_1cc1b1b3561d256d, []int{5} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_BlockRequest struct { + BlockRequest *BlockRequest `protobuf:"bytes,1,opt,name=block_request,json=blockRequest,proto3,oneof" json:"block_request,omitempty"` +} +type Message_NoBlockResponse struct { + NoBlockResponse *NoBlockResponse `protobuf:"bytes,2,opt,name=no_block_response,json=noBlockResponse,proto3,oneof" json:"no_block_response,omitempty"` +} +type Message_BlockResponse struct { + BlockResponse *BlockResponse `protobuf:"bytes,3,opt,name=block_response,json=blockResponse,proto3,oneof" json:"block_response,omitempty"` +} +type Message_StatusRequest struct { + StatusRequest *StatusRequest `protobuf:"bytes,4,opt,name=status_request,json=statusRequest,proto3,oneof" json:"status_request,omitempty"` +} +type Message_StatusResponse struct { + StatusResponse *StatusResponse `protobuf:"bytes,5,opt,name=status_response,json=statusResponse,proto3,oneof" json:"status_response,omitempty"` +} + +func (*Message_BlockRequest) isMessage_Sum() {} +func (*Message_NoBlockResponse) isMessage_Sum() {} +func (*Message_BlockResponse) isMessage_Sum() {} +func (*Message_StatusRequest) isMessage_Sum() {} +func (*Message_StatusResponse) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetBlockRequest() *BlockRequest { + if x, ok := m.GetSum().(*Message_BlockRequest); ok { + return x.BlockRequest + } + return nil +} + +func (m *Message) GetNoBlockResponse() *NoBlockResponse { + if x, ok := m.GetSum().(*Message_NoBlockResponse); ok { + return x.NoBlockResponse + } + return nil +} + +func (m *Message) GetBlockResponse() *BlockResponse { + if x, ok := m.GetSum().(*Message_BlockResponse); ok { + return x.BlockResponse + } + return nil +} + +func (m *Message) GetStatusRequest() *StatusRequest { + if x, ok := m.GetSum().(*Message_StatusRequest); ok { + return x.StatusRequest + } + return nil +} + +func (m *Message) GetStatusResponse() *StatusResponse { + if x, ok := m.GetSum().(*Message_StatusResponse); ok { + return x.StatusResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_BlockRequest)(nil), + (*Message_NoBlockResponse)(nil), + (*Message_BlockResponse)(nil), + (*Message_StatusRequest)(nil), + (*Message_StatusResponse)(nil), + } +} + +func init() { + proto.RegisterType((*BlockRequest)(nil), "cometbft.blocksync.v1beta1.BlockRequest") + proto.RegisterType((*NoBlockResponse)(nil), "cometbft.blocksync.v1beta1.NoBlockResponse") + proto.RegisterType((*BlockResponse)(nil), "cometbft.blocksync.v1beta1.BlockResponse") + proto.RegisterType((*StatusRequest)(nil), "cometbft.blocksync.v1beta1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "cometbft.blocksync.v1beta1.StatusResponse") + proto.RegisterType((*Message)(nil), "cometbft.blocksync.v1beta1.Message") +} + +func init() { + proto.RegisterFile("cometbft/blocksync/v1beta1/types.proto", fileDescriptor_1cc1b1b3561d256d) +} + +var fileDescriptor_1cc1b1b3561d256d = []byte{ + // 379 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6a, 0xea, 0x40, + 0x14, 0x86, 0x93, 0x1b, 0xf5, 0xc2, 0xd1, 0x18, 0x6e, 0x16, 0x17, 0x11, 0x6e, 0xb8, 0x64, 0x21, + 0xda, 0xc2, 0x04, 0x75, 0xd9, 0xae, 0xa4, 0x0b, 0x37, 0x6d, 0x21, 0xc5, 0x45, 0xbb, 0x91, 0x8c, + 0x4c, 0x55, 0x5a, 0x33, 0xa9, 0x67, 0x52, 0xf0, 0x2d, 0xfa, 0x3e, 0x7d, 0x81, 0x2e, 0x5d, 0x76, + 0x59, 0xf4, 0x45, 0x8a, 0x93, 0x71, 0xd0, 0x80, 0xd6, 0xdd, 0xcc, 0xe1, 0x3f, 0xdf, 0xf9, 0xf3, + 0x67, 0x0e, 0x34, 0x46, 0x7c, 0xc6, 0x04, 0x7d, 0x14, 0x01, 0x7d, 0xe6, 0xa3, 0x27, 0x5c, 0xc4, + 0xa3, 0xe0, 0xb5, 0x4d, 0x99, 0x88, 0xda, 0x81, 0x58, 0x24, 0x0c, 0x49, 0x32, 0xe7, 0x82, 0xbb, + 0xf5, 0xad, 0x8e, 0x68, 0x1d, 0x51, 0xba, 0xba, 0xaf, 0x19, 0xb2, 0x43, 0xf7, 0x4b, 0x65, 0xd6, + 0xef, 0x37, 0xa0, 0xd2, 0xdb, 0x5c, 0x43, 0xf6, 0x92, 0x32, 0x14, 0xee, 0x5f, 0x28, 0x4d, 0xd8, + 0x74, 0x3c, 0x11, 0x35, 0xf3, 0xbf, 0xd9, 0xb4, 0x42, 0x75, 0xf3, 0x5b, 0xe0, 0xdc, 0x70, 0xa5, + 0xc4, 0x84, 0xc7, 0xc8, 0x0e, 0x4a, 0xaf, 0xc0, 0xde, 0x17, 0x76, 0xa1, 0x28, 0x47, 0x4a, 0x5d, + 0xb9, 0xf3, 0x8f, 0x68, 0xcf, 0xd9, 0x97, 0x28, 0x5f, 0x24, 0xeb, 0xca, 0xb4, 0xbe, 0x03, 0xf6, + 0x9d, 0x88, 0x44, 0x8a, 0xca, 0x99, 0x7f, 0x09, 0xd5, 0x6d, 0xe1, 0xb8, 0x01, 0xd7, 0x85, 0x02, + 0x8d, 0x90, 0xd5, 0x7e, 0xc9, 0xaa, 0x3c, 0xfb, 0xef, 0x16, 0xfc, 0xbe, 0x66, 0x88, 0xd1, 0x98, + 0xb9, 0xb7, 0x60, 0xcb, 0x19, 0xc3, 0x79, 0x86, 0x56, 0xbe, 0x9a, 0xe4, 0x70, 0x96, 0x64, 0x37, + 0xa4, 0xbe, 0x11, 0x56, 0xe8, 0x6e, 0x68, 0xf7, 0xf0, 0x27, 0xe6, 0xc3, 0x2d, 0x33, 0x73, 0x27, + 0xa7, 0x97, 0x3b, 0xe7, 0xc7, 0xa0, 0xb9, 0x44, 0xfb, 0x46, 0xe8, 0xc4, 0xb9, 0x90, 0x43, 0xa8, + 0xe6, 0xb8, 0x96, 0xe4, 0xb6, 0x4e, 0x30, 0xab, 0xa9, 0x36, 0xcd, 0x33, 0x51, 0x26, 0xa9, 0x03, + 0x28, 0xfc, 0xcc, 0xdc, 0xfb, 0x19, 0x1b, 0x26, 0xee, 0x16, 0xdc, 0x01, 0x38, 0x9a, 0xa9, 0x8c, + 0x16, 0x25, 0xf4, 0xec, 0x14, 0xa8, 0x76, 0x5a, 0xc5, 0xbd, 0x4a, 0xaf, 0x08, 0x16, 0xa6, 0xb3, + 0xde, 0xe0, 0x63, 0xe5, 0x99, 0xcb, 0x95, 0x67, 0x7e, 0xad, 0x3c, 0xf3, 0x6d, 0xed, 0x19, 0xcb, + 0xb5, 0x67, 0x7c, 0xae, 0x3d, 0xe3, 0xe1, 0x62, 0x3c, 0x15, 0x93, 0x94, 0x6e, 0x86, 0x04, 0xfa, + 0xb9, 0xeb, 0x43, 0x94, 0x4c, 0x83, 0xc3, 0x8b, 0x44, 0x4b, 0x72, 0x07, 0xba, 0xdf, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x6b, 0xac, 0x9f, 0xc0, 0x6d, 0x03, 0x00, 0x00, +} + +func (m *BlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
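(Illustrative aside, not part of the patch.) The Marshal/MarshalTo/MarshalToSizedBuffer triples generated here all follow gogoproto's back-to-front convention: Size() is computed first, a single buffer is allocated, and fields are written from the end of the buffer toward the front, so a nested message's length is known before its length prefix is emitted. A compressed sketch of the same idea under hypothetical names, for one varint field at tag 1:

package main

import "fmt"

// sov counts the varint-encoded size of x (cf. sovTypes in the generated code).
func sov(x uint64) (n int) {
	n = 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

// encodeVarint writes v ending at offset and returns the new front index,
// matching the shape of the generated encodeVarintTypes helper.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

type demo struct{ Height int64 }

func (m *demo) size() (n int) {
	if m.Height != 0 {
		n += 1 + sov(uint64(m.Height)) // 1 key byte + varint payload
	}
	return n
}

// marshal fills the buffer from the back, like MarshalToSizedBuffer above.
func (m *demo) marshal() []byte {
	dAtA := make([]byte, m.size())
	i := len(dAtA)
	if m.Height != 0 {
		i = encodeVarint(dAtA, i, uint64(m.Height))
		i--
		dAtA[i] = 0x8 // field 1, wire type 0 (varint)
	}
	return dAtA[i:]
}

func main() {
	fmt.Printf("%x\n", (&demo{Height: 300}).marshal()) // 08ac02
}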
+func (m *BlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NoBlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NoBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NoBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Base != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Base)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} 
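(Illustrative aside, not part of the patch.) The Sum delegation just above is the wire half of the envelope pattern: the hand-written message.go added for this package boxes any concrete blocksync message into Message.Sum via Wrap(), and Unwrap() recovers the concrete type after decoding. A rough end-to-end usage sketch, assuming the import path this diff introduces:

package main

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	bcproto "github.com/cometbft/cometbft/api/cometbft/blocksync/v1beta1"
)

func main() {
	// Sender side: box a concrete message into the Message oneof envelope.
	env := (&bcproto.StatusRequest{}).Wrap().(*bcproto.Message)

	bz, err := proto.Marshal(env)
	if err != nil {
		panic(err)
	}

	// Receiver side: decode the envelope, then unwrap the concrete payload.
	var got bcproto.Message
	if err := proto.Unmarshal(bz, &got); err != nil {
		panic(err)
	}
	inner, err := got.Unwrap()
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %T\n", inner) // *v1beta1.StatusRequest
}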
+ +func (m *Message_BlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BlockRequest != nil { + { + size, err := m.BlockRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_NoBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_NoBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NoBlockResponse != nil { + { + size, err := m.NoBlockResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_BlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BlockResponse != nil { + { + size, err := m.BlockResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusRequest != nil { + { + size, err := m.StatusRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusResponse != nil { + { + size, err := m.StatusResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *NoBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *BlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Base != 0 { + n += 1 + 
sovTypes(uint64(m.Base)) + } + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_BlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockRequest != nil { + l = m.BlockRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_NoBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NoBlockResponse != nil { + l = m.NoBlockResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_BlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockResponse != nil { + l = m.BlockResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusRequest != nil { + l = m.StatusRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusResponse != nil { + l = m.StatusResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NoBlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NoBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &v1beta1.Block{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Base", wireType) + } + m.Base = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Base |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BlockRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_BlockRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoBlockResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + v := &NoBlockResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_NoBlockResponse{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BlockResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_BlockResponse{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StatusRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_StatusRequest{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StatusResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_StatusResponse{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/consensus/message.go b/api/cometbft/consensus/v1/message.go similarity index 84% rename from proto/tendermint/consensus/message.go rename to api/cometbft/consensus/v1/message.go index 440addcc711..7dbbdb5b215 100644 --- a/proto/tendermint/consensus/message.go +++ b/api/cometbft/consensus/v1/message.go @@ -1,29 +1,15 @@ -package consensus +package v1 import ( "fmt" "github.com/cosmos/gogoproto/proto" - - "github.com/cometbft/cometbft/p2p" ) -var _ p2p.Wrapper = &VoteSetBits{} -var _ p2p.Wrapper = &VoteSetMaj23{} -var _ p2p.Wrapper = &Vote{} -var _ p2p.Wrapper = &ProposalPOL{} -var _ p2p.Wrapper = &Proposal{} -var _ p2p.Wrapper = &NewValidBlock{} -var _ p2p.Wrapper = &NewRoundStep{} -var _ p2p.Wrapper = &HasVote{} -var _ p2p.Wrapper = &HasProposalBlockPart{} -var _ p2p.Wrapper = &BlockPart{} - func (m *VoteSetBits) Wrap() proto.Message { cm := &Message{} cm.Sum = &Message_VoteSetBits{VoteSetBits: m} return cm - } func (m *VoteSetMaj23) Wrap() proto.Message { @@ -38,12 +24,6 @@ func (m *HasVote) Wrap() proto.Message { return cm } -func (m *HasProposalBlockPart) Wrap() proto.Message { - cm := &Message{} - cm.Sum = &Message_HasProposalBlockPart{HasProposalBlockPart: m} - return cm -} - func (m *Vote) Wrap() proto.Message { cm := &Message{} cm.Sum = &Message_Vote{Vote: m} @@ -80,6 +60,12 @@ func (m *NewRoundStep) Wrap() proto.Message { return cm } +func (m *HasProposalBlockPart) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_HasProposalBlockPart{HasProposalBlockPart: m} + return cm +} + // Unwrap implements the p2p Wrapper interface and unwraps a wrapped consensus // proto message. func (m *Message) Unwrap() (proto.Message, error) { diff --git a/proto/tendermint/consensus/types.pb.go b/api/cometbft/consensus/v1/types.pb.go similarity index 87% rename from proto/tendermint/consensus/types.pb.go rename to api/cometbft/consensus/v1/types.pb.go index 51ec6659286..3c75d7851f6 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/api/cometbft/consensus/v1/types.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/consensus/types.proto +// source: cometbft/consensus/v1/types.proto -package consensus +package v1 import ( fmt "fmt" - bits "github.com/cometbft/cometbft/proto/tendermint/libs/bits" - types "github.com/cometbft/cometbft/proto/tendermint/types" + v11 "github.com/cometbft/cometbft/api/cometbft/libs/bits/v1" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" io "io" @@ -39,7 +39,7 @@ func (m *NewRoundStep) Reset() { *m = NewRoundStep{} } func (m *NewRoundStep) String() string { return proto.CompactTextString(m) } func (*NewRoundStep) ProtoMessage() {} func (*NewRoundStep) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{0} + return fileDescriptor_4179ae4c5322abef, []int{0} } func (m *NewRoundStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,18 +107,18 @@ func (m *NewRoundStep) GetLastCommitRound() int32 { // i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - BlockPartSetHeader types.PartSetHeader `protobuf:"bytes,3,opt,name=block_part_set_header,json=blockPartSetHeader,proto3" json:"block_part_set_header"` - BlockParts *bits.BitArray `protobuf:"bytes,4,opt,name=block_parts,json=blockParts,proto3" json:"block_parts,omitempty"` - IsCommit bool `protobuf:"varint,5,opt,name=is_commit,json=isCommit,proto3" json:"is_commit,omitempty"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockPartSetHeader v1.PartSetHeader `protobuf:"bytes,3,opt,name=block_part_set_header,json=blockPartSetHeader,proto3" json:"block_part_set_header"` + BlockParts *v11.BitArray `protobuf:"bytes,4,opt,name=block_parts,json=blockParts,proto3" json:"block_parts,omitempty"` + IsCommit bool `protobuf:"varint,5,opt,name=is_commit,json=isCommit,proto3" json:"is_commit,omitempty"` } func (m *NewValidBlock) Reset() { *m = NewValidBlock{} } func (m *NewValidBlock) String() string { return proto.CompactTextString(m) } func (*NewValidBlock) ProtoMessage() {} func (*NewValidBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{1} + return fileDescriptor_4179ae4c5322abef, []int{1} } func (m *NewValidBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,14 +161,14 @@ func (m *NewValidBlock) GetRound() int32 { return 0 } -func (m *NewValidBlock) GetBlockPartSetHeader() types.PartSetHeader { +func (m *NewValidBlock) GetBlockPartSetHeader() v1.PartSetHeader { if m != nil { return m.BlockPartSetHeader } - return types.PartSetHeader{} + return v1.PartSetHeader{} } -func (m *NewValidBlock) GetBlockParts() *bits.BitArray { +func (m *NewValidBlock) GetBlockParts() *v11.BitArray { if m != nil { return m.BlockParts } @@ -184,14 +184,14 @@ func (m *NewValidBlock) GetIsCommit() bool { // Proposal is sent when a new block is proposed. 
type Proposal struct { - Proposal types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` + Proposal v1.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` } func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{2} + return fileDescriptor_4179ae4c5322abef, []int{2} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,25 +220,25 @@ func (m *Proposal) XXX_DiscardUnknown() { var xxx_messageInfo_Proposal proto.InternalMessageInfo -func (m *Proposal) GetProposal() types.Proposal { +func (m *Proposal) GetProposal() v1.Proposal { if m != nil { return m.Proposal } - return types.Proposal{} + return v1.Proposal{} } // ProposalPOL is sent when a previous proposal is re-proposed. type ProposalPOL struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - ProposalPolRound int32 `protobuf:"varint,2,opt,name=proposal_pol_round,json=proposalPolRound,proto3" json:"proposal_pol_round,omitempty"` - ProposalPol bits.BitArray `protobuf:"bytes,3,opt,name=proposal_pol,json=proposalPol,proto3" json:"proposal_pol"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + ProposalPolRound int32 `protobuf:"varint,2,opt,name=proposal_pol_round,json=proposalPolRound,proto3" json:"proposal_pol_round,omitempty"` + ProposalPol v11.BitArray `protobuf:"bytes,3,opt,name=proposal_pol,json=proposalPol,proto3" json:"proposal_pol"` } func (m *ProposalPOL) Reset() { *m = ProposalPOL{} } func (m *ProposalPOL) String() string { return proto.CompactTextString(m) } func (*ProposalPOL) ProtoMessage() {} func (*ProposalPOL) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{3} + return fileDescriptor_4179ae4c5322abef, []int{3} } func (m *ProposalPOL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -281,25 +281,25 @@ func (m *ProposalPOL) GetProposalPolRound() int32 { return 0 } -func (m *ProposalPOL) GetProposalPol() bits.BitArray { +func (m *ProposalPOL) GetProposalPol() v11.BitArray { if m != nil { return m.ProposalPol } - return bits.BitArray{} + return v11.BitArray{} } // BlockPart is sent when gossipping a piece of the proposed block. 
type BlockPart struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - Part types.Part `protobuf:"bytes,3,opt,name=part,proto3" json:"part"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Part v1.Part `protobuf:"bytes,3,opt,name=part,proto3" json:"part"` } func (m *BlockPart) Reset() { *m = BlockPart{} } func (m *BlockPart) String() string { return proto.CompactTextString(m) } func (*BlockPart) ProtoMessage() {} func (*BlockPart) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{4} + return fileDescriptor_4179ae4c5322abef, []int{4} } func (m *BlockPart) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -342,23 +342,23 @@ func (m *BlockPart) GetRound() int32 { return 0 } -func (m *BlockPart) GetPart() types.Part { +func (m *BlockPart) GetPart() v1.Part { if m != nil { return m.Part } - return types.Part{} + return v1.Part{} } // Vote is sent when voting for a proposal (or lack thereof). type Vote struct { - Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + Vote *v1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` } func (m *Vote) Reset() { *m = Vote{} } func (m *Vote) String() string { return proto.CompactTextString(m) } func (*Vote) ProtoMessage() {} func (*Vote) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{5} + return fileDescriptor_4179ae4c5322abef, []int{5} } func (m *Vote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ func (m *Vote) XXX_DiscardUnknown() { var xxx_messageInfo_Vote proto.InternalMessageInfo -func (m *Vote) GetVote() *types.Vote { +func (m *Vote) GetVote() *v1.Vote { if m != nil { return m.Vote } @@ -396,17 +396,17 @@ func (m *Vote) GetVote() *types.Vote { // HasVote is sent to indicate that a particular vote has been received. 
type HasVote struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - Type types.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` - Index int32 `protobuf:"varint,4,opt,name=index,proto3" json:"index,omitempty"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type v1.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` + Index int32 `protobuf:"varint,4,opt,name=index,proto3" json:"index,omitempty"` } func (m *HasVote) Reset() { *m = HasVote{} } func (m *HasVote) String() string { return proto.CompactTextString(m) } func (*HasVote) ProtoMessage() {} func (*HasVote) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{6} + return fileDescriptor_4179ae4c5322abef, []int{6} } func (m *HasVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -449,11 +449,11 @@ func (m *HasVote) GetRound() int32 { return 0 } -func (m *HasVote) GetType() types.SignedMsgType { +func (m *HasVote) GetType() v1.SignedMsgType { if m != nil { return m.Type } - return types.UnknownType + return v1.UnknownType } func (m *HasVote) GetIndex() int32 { @@ -463,80 +463,19 @@ func (m *HasVote) GetIndex() int32 { return 0 } -// HasProposalBlockPart is sent to indicate that a particular proposal block part has been received. -type HasProposalBlockPart struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - Index int32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` -} - -func (m *HasProposalBlockPart) Reset() { *m = HasProposalBlockPart{} } -func (m *HasProposalBlockPart) String() string { return proto.CompactTextString(m) } -func (*HasProposalBlockPart) ProtoMessage() {} -func (*HasProposalBlockPart) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{7} -} -func (m *HasProposalBlockPart) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HasProposalBlockPart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HasProposalBlockPart.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HasProposalBlockPart) XXX_Merge(src proto.Message) { - xxx_messageInfo_HasProposalBlockPart.Merge(m, src) -} -func (m *HasProposalBlockPart) XXX_Size() int { - return m.Size() -} -func (m *HasProposalBlockPart) XXX_DiscardUnknown() { - xxx_messageInfo_HasProposalBlockPart.DiscardUnknown(m) -} - -var xxx_messageInfo_HasProposalBlockPart proto.InternalMessageInfo - -func (m *HasProposalBlockPart) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *HasProposalBlockPart) GetRound() int32 { - if m != nil { - return m.Round - } - return 0 -} - -func (m *HasProposalBlockPart) GetIndex() int32 { - if m != nil { - return m.Index - } - return 0 -} - // VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. 
type VoteSetMaj23 struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - Type types.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` - BlockID types.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type v1.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` + BlockID v1.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` } func (m *VoteSetMaj23) Reset() { *m = VoteSetMaj23{} } func (m *VoteSetMaj23) String() string { return proto.CompactTextString(m) } func (*VoteSetMaj23) ProtoMessage() {} func (*VoteSetMaj23) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{8} + return fileDescriptor_4179ae4c5322abef, []int{7} } func (m *VoteSetMaj23) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -579,34 +518,34 @@ func (m *VoteSetMaj23) GetRound() int32 { return 0 } -func (m *VoteSetMaj23) GetType() types.SignedMsgType { +func (m *VoteSetMaj23) GetType() v1.SignedMsgType { if m != nil { return m.Type } - return types.UnknownType + return v1.UnknownType } -func (m *VoteSetMaj23) GetBlockID() types.BlockID { +func (m *VoteSetMaj23) GetBlockID() v1.BlockID { if m != nil { return m.BlockID } - return types.BlockID{} + return v1.BlockID{} } // VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. type VoteSetBits struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` - Type types.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` - BlockID types.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` - Votes bits.BitArray `protobuf:"bytes,5,opt,name=votes,proto3" json:"votes"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type v1.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` + BlockID v1.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Votes v11.BitArray `protobuf:"bytes,5,opt,name=votes,proto3" json:"votes"` } func (m *VoteSetBits) Reset() { *m = VoteSetBits{} } func (m *VoteSetBits) String() string { return proto.CompactTextString(m) } func (*VoteSetBits) ProtoMessage() {} func (*VoteSetBits) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{9} + return fileDescriptor_4179ae4c5322abef, []int{8} } func (m *VoteSetBits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -649,28 +588,92 @@ func (m *VoteSetBits) GetRound() int32 { return 0 } -func (m *VoteSetBits) GetType() types.SignedMsgType { +func (m *VoteSetBits) GetType() v1.SignedMsgType { if m != nil { return m.Type } - return types.UnknownType + return v1.UnknownType } -func (m *VoteSetBits) GetBlockID() types.BlockID { +func (m *VoteSetBits) GetBlockID() v1.BlockID { if m != nil { return m.BlockID } - return types.BlockID{} + return v1.BlockID{} } 
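// For orientation (not part of the generated diff): a minimal sketch of the
// Wrap/Unwrap round trip that the consensus Message oneof supports, mirroring
// the Wrap methods kept in api/cometbft/consensus/v1/message.go above. The
// import alias, helper name, and field values are illustrative assumptions,
// not code from this change.
//
//	package main
//
//	import (
//		cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1"
//		"github.com/cosmos/gogoproto/proto"
//	)
//
//	// roundTrip wraps a concrete consensus message into the Message oneof,
//	// marshals it to wire bytes, and recovers the concrete message again.
//	func roundTrip(v *cmtcons.VoteSetMaj23) (proto.Message, error) {
//		wrapped := v.Wrap()               // Message{Sum: &Message_VoteSetMaj23{v}}
//		bz, err := proto.Marshal(wrapped) // bytes as exchanged between reactors
//		if err != nil {
//			return nil, err
//		}
//		var m cmtcons.Message
//		if err := m.Unmarshal(bz); err != nil {
//			return nil, err
//		}
//		return m.Unwrap() // yields the original *VoteSetMaj23
//	}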
-func (m *VoteSetBits) GetVotes() bits.BitArray { +func (m *VoteSetBits) GetVotes() v11.BitArray { if m != nil { return m.Votes } - return bits.BitArray{} + return v11.BitArray{} +} + +// HasProposalBlockPart is sent to indicate that a particular proposal block part has been received. +type HasProposalBlockPart struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Index int32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *HasProposalBlockPart) Reset() { *m = HasProposalBlockPart{} } +func (m *HasProposalBlockPart) String() string { return proto.CompactTextString(m) } +func (*HasProposalBlockPart) ProtoMessage() {} +func (*HasProposalBlockPart) Descriptor() ([]byte, []int) { + return fileDescriptor_4179ae4c5322abef, []int{9} +} +func (m *HasProposalBlockPart) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HasProposalBlockPart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HasProposalBlockPart.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HasProposalBlockPart) XXX_Merge(src proto.Message) { + xxx_messageInfo_HasProposalBlockPart.Merge(m, src) +} +func (m *HasProposalBlockPart) XXX_Size() int { + return m.Size() +} +func (m *HasProposalBlockPart) XXX_DiscardUnknown() { + xxx_messageInfo_HasProposalBlockPart.DiscardUnknown(m) +} + +var xxx_messageInfo_HasProposalBlockPart proto.InternalMessageInfo + +func (m *HasProposalBlockPart) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *HasProposalBlockPart) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *HasProposalBlockPart) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 } +// Message is an abstract consensus message. type Message struct { + // Sum of all possible messages. 
+ // // Types that are valid to be assigned to Sum: // *Message_NewRoundStep // *Message_NewValidBlock @@ -689,7 +692,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_81a22d2efc008981, []int{10} + return fileDescriptor_4179ae4c5322abef, []int{10} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -860,79 +863,80 @@ func (*Message) XXX_OneofWrappers() []interface{} { } func init() { - proto.RegisterType((*NewRoundStep)(nil), "tendermint.consensus.NewRoundStep") - proto.RegisterType((*NewValidBlock)(nil), "tendermint.consensus.NewValidBlock") - proto.RegisterType((*Proposal)(nil), "tendermint.consensus.Proposal") - proto.RegisterType((*ProposalPOL)(nil), "tendermint.consensus.ProposalPOL") - proto.RegisterType((*BlockPart)(nil), "tendermint.consensus.BlockPart") - proto.RegisterType((*Vote)(nil), "tendermint.consensus.Vote") - proto.RegisterType((*HasVote)(nil), "tendermint.consensus.HasVote") - proto.RegisterType((*HasProposalBlockPart)(nil), "tendermint.consensus.HasProposalBlockPart") - proto.RegisterType((*VoteSetMaj23)(nil), "tendermint.consensus.VoteSetMaj23") - proto.RegisterType((*VoteSetBits)(nil), "tendermint.consensus.VoteSetBits") - proto.RegisterType((*Message)(nil), "tendermint.consensus.Message") -} - -func init() { proto.RegisterFile("tendermint/consensus/types.proto", fileDescriptor_81a22d2efc008981) } - -var fileDescriptor_81a22d2efc008981 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x5f, 0x6f, 0xe3, 0x44, - 0x10, 0xb7, 0x69, 0xd2, 0xa4, 0x93, 0xfe, 0x81, 0x55, 0xee, 0x08, 0x05, 0xd2, 0x62, 0x5e, 0xaa, - 0x13, 0x4a, 0x50, 0xfa, 0x70, 0xd2, 0x09, 0x09, 0x08, 0x7f, 0xce, 0x77, 0xba, 0xde, 0x85, 0xcd, - 0xe9, 0x84, 0xee, 0xc5, 0x72, 0xec, 0x25, 0x59, 0x2e, 0xf6, 0x5a, 0xde, 0x6d, 0x4b, 0x5f, 0xf9, - 0x04, 0x7c, 0x00, 0xbe, 0x06, 0x12, 0x1f, 0xe1, 0x1e, 0xef, 0x11, 0x09, 0xa9, 0x42, 0xed, 0x47, - 0x40, 0xbc, 0xa3, 0x1d, 0x6f, 0x9c, 0x2d, 0x75, 0x0b, 0xe5, 0x01, 0x89, 0x37, 0xdb, 0x33, 0xf3, - 0xdb, 0x99, 0xdf, 0xcc, 0xfc, 0xd6, 0xb0, 0xab, 0x58, 0x1a, 0xb3, 0x3c, 0xe1, 0xa9, 0xea, 0x47, - 0x22, 0x95, 0x2c, 0x95, 0x87, 0xb2, 0xaf, 0x4e, 0x32, 0x26, 0x7b, 0x59, 0x2e, 0x94, 0x20, 0xed, - 0xa5, 0x47, 0xaf, 0xf4, 0xd8, 0x6e, 0x4f, 0xc5, 0x54, 0xa0, 0x43, 0x5f, 0x3f, 0x15, 0xbe, 0xdb, - 0xef, 0x58, 0x68, 0x88, 0x61, 0x23, 0x6d, 0xdb, 0x67, 0xcd, 0xf9, 0x44, 0xf6, 0x27, 0x5c, 0x5d, - 0xf0, 0xf0, 0x7e, 0x72, 0x61, 0xfd, 0x31, 0x3b, 0xa6, 0xe2, 0x30, 0x8d, 0xc7, 0x8a, 0x65, 0xe4, - 0x36, 0xac, 0xce, 0x18, 0x9f, 0xce, 0x54, 0xc7, 0xdd, 0x75, 0xf7, 0x56, 0xa8, 0x79, 0x23, 0x6d, - 0xa8, 0xe7, 0xda, 0xa9, 0xf3, 0xda, 0xae, 0xbb, 0x57, 0xa7, 0xc5, 0x0b, 0x21, 0x50, 0x93, 0x8a, - 0x65, 0x9d, 0x95, 0x5d, 0x77, 0x6f, 0x83, 0xe2, 0x33, 0xb9, 0x0b, 0x1d, 0xc9, 0x22, 0x91, 0xc6, - 0x32, 0x90, 0x3c, 0x8d, 0x58, 0x20, 0x55, 0x98, 0xab, 0x40, 0xf1, 0x84, 0x75, 0x6a, 0x88, 0x79, - 0xcb, 0xd8, 0xc7, 0xda, 0x3c, 0xd6, 0xd6, 0xa7, 0x3c, 0x61, 0xe4, 0x0e, 0xbc, 0x31, 0x0f, 0xa5, - 0x0a, 0x22, 0x91, 0x24, 0x5c, 0x05, 0xc5, 0x71, 0x75, 0x3c, 0x6e, 0x4b, 0x1b, 0x3e, 0xc3, 0xef, - 0x98, 0xaa, 0xf7, 0x87, 0x0b, 0x1b, 0x8f, 0xd9, 0xf1, 0xb3, 0x70, 0xce, 0xe3, 0xe1, 0x5c, 0x44, - 0x2f, 0x6e, 0x98, 0xf8, 0xd7, 0x70, 0x6b, 0xa2, 0xc3, 0x82, 0x4c, 0xe7, 0x26, 0x99, 0x0a, 0x66, - 0x2c, 0x8c, 0x59, 0x8e, 0x95, 0xb4, 0x06, 0x3b, 
0x3d, 0xab, 0x07, 0x05, 0x5f, 0xa3, 0x30, 0x57, - 0x63, 0xa6, 0x7c, 0x74, 0x1b, 0xd6, 0x5e, 0x9e, 0xee, 0x38, 0x94, 0x20, 0xc6, 0x05, 0x0b, 0xf9, - 0x18, 0x5a, 0x4b, 0x64, 0x89, 0x15, 0xb7, 0x06, 0x5d, 0x1b, 0x4f, 0x77, 0xa2, 0xa7, 0x3b, 0xd1, - 0x1b, 0x72, 0xf5, 0x69, 0x9e, 0x87, 0x27, 0x14, 0x4a, 0x20, 0x49, 0xde, 0x86, 0x35, 0x2e, 0x0d, - 0x09, 0x58, 0x7e, 0x93, 0x36, 0xb9, 0x2c, 0x8a, 0xf7, 0x7c, 0x68, 0x8e, 0x72, 0x91, 0x09, 0x19, - 0xce, 0xc9, 0x47, 0xd0, 0xcc, 0xcc, 0x33, 0xd6, 0xdc, 0x1a, 0x6c, 0x57, 0xa4, 0x6d, 0x3c, 0x4c, - 0xc6, 0x65, 0x84, 0xf7, 0xa3, 0x0b, 0xad, 0x85, 0x71, 0xf4, 0xe4, 0xd1, 0x95, 0xfc, 0x7d, 0x00, - 0x64, 0x11, 0x13, 0x64, 0x62, 0x1e, 0xd8, 0x64, 0xbe, 0xbe, 0xb0, 0x8c, 0xc4, 0x1c, 0xfb, 0x42, - 0xee, 0xc3, 0xba, 0xed, 0x6d, 0xe8, 0xfc, 0x9b, 0xf2, 0x4d, 0x6e, 0x2d, 0x0b, 0xcd, 0x7b, 0x01, - 0x6b, 0xc3, 0x05, 0x27, 0x37, 0xec, 0xed, 0x87, 0x50, 0xd3, 0xdc, 0x9b, 0xb3, 0x6f, 0x57, 0xb7, - 0xd2, 0x9c, 0x89, 0x9e, 0xde, 0x00, 0x6a, 0xcf, 0x84, 0xd2, 0x13, 0x58, 0x3b, 0x12, 0x8a, 0x19, - 0x36, 0x2b, 0x22, 0xb5, 0x17, 0x45, 0x1f, 0xef, 0x7b, 0x17, 0x1a, 0x7e, 0x28, 0x31, 0xee, 0x66, - 0xf9, 0xed, 0x43, 0x4d, 0xa3, 0x61, 0x7e, 0x9b, 0x55, 0xa3, 0x36, 0xe6, 0xd3, 0x94, 0xc5, 0x07, - 0x72, 0xfa, 0xf4, 0x24, 0x63, 0x14, 0x9d, 0x35, 0x14, 0x4f, 0x63, 0xf6, 0x1d, 0x0e, 0x54, 0x9d, - 0x16, 0x2f, 0xde, 0x73, 0x68, 0xfb, 0xa1, 0x2c, 0x7b, 0xfc, 0x2f, 0x09, 0x2b, 0xb1, 0x57, 0x6c, - 0xec, 0x9f, 0x5d, 0x58, 0xd7, 0xd5, 0x8d, 0x99, 0x3a, 0x08, 0xbf, 0x1d, 0xec, 0xff, 0x17, 0x55, - 0x7e, 0x01, 0xcd, 0x62, 0x79, 0x78, 0x6c, 0x36, 0xe7, 0xad, 0xcb, 0x81, 0x58, 0xe6, 0x83, 0xcf, - 0x87, 0x5b, 0xba, 0x83, 0x67, 0xa7, 0x3b, 0x0d, 0xf3, 0x81, 0x36, 0x30, 0xf6, 0x41, 0xec, 0xfd, - 0xee, 0x42, 0xcb, 0xa4, 0x3e, 0xe4, 0x4a, 0xfe, 0x7f, 0x32, 0x27, 0xf7, 0xa0, 0xae, 0xa7, 0x4b, - 0xe2, 0xe2, 0xff, 0xd3, 0xc5, 0x29, 0x42, 0xbc, 0x5f, 0xeb, 0xd0, 0x38, 0x60, 0x52, 0x86, 0x53, - 0x46, 0x1e, 0xc2, 0x66, 0xca, 0x8e, 0x8b, 0x65, 0x0d, 0x50, 0xa2, 0x8b, 0x99, 0xf6, 0x7a, 0x55, - 0x97, 0x4b, 0xcf, 0xbe, 0x02, 0x7c, 0x87, 0xae, 0xa7, 0xf6, 0x95, 0x70, 0x00, 0x5b, 0x1a, 0xeb, - 0x48, 0x6b, 0x6d, 0x80, 0x89, 0x22, 0x5f, 0xad, 0xc1, 0xfb, 0x57, 0x82, 0x2d, 0x75, 0xd9, 0x77, - 0xe8, 0x46, 0x7a, 0x41, 0xa8, 0x6d, 0xd9, 0xaa, 0x90, 0x87, 0x25, 0xce, 0x62, 0xac, 0x7d, 0x4b, - 0xb6, 0xc8, 0x97, 0x7f, 0x11, 0x98, 0x82, 0xeb, 0xf7, 0xae, 0x47, 0x18, 0x3d, 0x79, 0xe4, 0x5f, - 0xd4, 0x17, 0xf2, 0x09, 0xc0, 0x52, 0xa6, 0x0d, 0xdb, 0x3b, 0xd5, 0x28, 0xe5, 0x5a, 0xf9, 0x0e, - 0x5d, 0x2b, 0x85, 0x5a, 0xcb, 0x0c, 0x8a, 0xc5, 0xea, 0x65, 0xe9, 0x5d, 0xc6, 0xea, 0x29, 0xf4, - 0x9d, 0x42, 0x32, 0xc8, 0x3d, 0x68, 0xce, 0x42, 0x19, 0x60, 0x54, 0x03, 0xa3, 0xde, 0xad, 0x8e, - 0x32, 0xba, 0xe2, 0x3b, 0xb4, 0x31, 0x33, 0x12, 0xf3, 0x10, 0x36, 0x75, 0x1c, 0x5e, 0x55, 0x89, - 0x5e, 0xc7, 0x4e, 0xf3, 0xba, 0x86, 0xda, 0x8b, 0xab, 0x1b, 0x7a, 0x64, 0x2f, 0xf2, 0x7d, 0xd8, - 0x28, 0xb1, 0xf4, 0x3c, 0x75, 0xd6, 0xae, 0x23, 0xd1, 0x5a, 0x24, 0x4d, 0xe2, 0x91, 0xb5, 0x57, - 0x11, 0xbc, 0xa9, 0x0b, 0x2a, 0x1b, 0x62, 0x31, 0x0a, 0x08, 0x79, 0xe7, 0xca, 0xfa, 0x2e, 0x69, - 0x96, 0xef, 0xd0, 0xf6, 0xac, 0xe2, 0xfb, 0xb0, 0x0e, 0x2b, 0xf2, 0x30, 0x19, 0x7e, 0xf5, 0xf2, - 0xac, 0xeb, 0xbe, 0x3a, 0xeb, 0xba, 0xbf, 0x9d, 0x75, 0xdd, 0x1f, 0xce, 0xbb, 0xce, 0xab, 0xf3, - 0xae, 0xf3, 0xcb, 0x79, 0xd7, 0x79, 0x7e, 0x77, 0xca, 0xd5, 0xec, 0x70, 0xd2, 0x8b, 0x44, 0xd2, - 0x8f, 0x44, 0xc2, 0xd4, 0xe4, 0x1b, 0xb5, 0x7c, 0x28, 0x7e, 0x99, 0xaa, 0x7e, 0xba, 0x26, 0xab, - 0x68, 0xdb, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0xa6, 0xfa, 0x0b, 0x9d, 
0x93, 0x09, 0x00, 0x00, + proto.RegisterType((*NewRoundStep)(nil), "cometbft.consensus.v1.NewRoundStep") + proto.RegisterType((*NewValidBlock)(nil), "cometbft.consensus.v1.NewValidBlock") + proto.RegisterType((*Proposal)(nil), "cometbft.consensus.v1.Proposal") + proto.RegisterType((*ProposalPOL)(nil), "cometbft.consensus.v1.ProposalPOL") + proto.RegisterType((*BlockPart)(nil), "cometbft.consensus.v1.BlockPart") + proto.RegisterType((*Vote)(nil), "cometbft.consensus.v1.Vote") + proto.RegisterType((*HasVote)(nil), "cometbft.consensus.v1.HasVote") + proto.RegisterType((*VoteSetMaj23)(nil), "cometbft.consensus.v1.VoteSetMaj23") + proto.RegisterType((*VoteSetBits)(nil), "cometbft.consensus.v1.VoteSetBits") + proto.RegisterType((*HasProposalBlockPart)(nil), "cometbft.consensus.v1.HasProposalBlockPart") + proto.RegisterType((*Message)(nil), "cometbft.consensus.v1.Message") +} + +func init() { proto.RegisterFile("cometbft/consensus/v1/types.proto", fileDescriptor_4179ae4c5322abef) } + +var fileDescriptor_4179ae4c5322abef = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xcd, 0x6e, 0x23, 0x45, + 0x10, 0x9e, 0x21, 0x76, 0xec, 0x94, 0xf3, 0x03, 0xad, 0x84, 0xb5, 0xb2, 0xc2, 0x31, 0x03, 0x87, + 0x88, 0x45, 0xb6, 0xe2, 0x20, 0x38, 0xac, 0x90, 0xd8, 0x01, 0xc1, 0x44, 0xbb, 0xc9, 0x5a, 0xed, + 0xd5, 0x4a, 0xec, 0x65, 0x34, 0xf6, 0x34, 0x76, 0xc3, 0x78, 0x7a, 0x34, 0xdd, 0x71, 0xc8, 0x99, + 0x17, 0xe0, 0x05, 0x78, 0x0c, 0x2e, 0x3c, 0xc1, 0x1e, 0xf7, 0xc8, 0x69, 0x85, 0x12, 0xf1, 0x0a, + 0x70, 0x45, 0x5d, 0xd3, 0x1e, 0x8f, 0xbd, 0xf6, 0x86, 0x70, 0x40, 0xda, 0x5b, 0x4f, 0x57, 0xd5, + 0xd7, 0xd5, 0x5f, 0x55, 0x7d, 0x3d, 0xf0, 0xfe, 0x40, 0x8c, 0x99, 0xea, 0x7f, 0xa7, 0xda, 0x03, + 0x11, 0x4b, 0x16, 0xcb, 0x73, 0xd9, 0x9e, 0x1c, 0xb5, 0xd5, 0x65, 0xc2, 0x64, 0x2b, 0x49, 0x85, + 0x12, 0x64, 0x6f, 0xea, 0xd2, 0xca, 0x5d, 0x5a, 0x93, 0xa3, 0xfd, 0xdd, 0xa1, 0x18, 0x0a, 0xf4, + 0x68, 0xeb, 0x55, 0xe6, 0xbc, 0x3f, 0xc3, 0x8b, 0x78, 0x5f, 0xb6, 0xfb, 0x5c, 0x2d, 0xe2, 0xed, + 0xbf, 0x97, 0xbb, 0xe0, 0xee, 0x82, 0xd9, 0xf9, 0xd5, 0x86, 0xcd, 0x33, 0x76, 0x41, 0xc5, 0x79, + 0x1c, 0xf6, 0x14, 0x4b, 0xc8, 0xbb, 0xb0, 0x3e, 0x62, 0x7c, 0x38, 0x52, 0x75, 0xbb, 0x69, 0x1f, + 0xae, 0x51, 0xf3, 0x45, 0x76, 0xa1, 0x9c, 0x6a, 0xa7, 0xfa, 0x5b, 0x4d, 0xfb, 0xb0, 0x4c, 0xb3, + 0x0f, 0x42, 0xa0, 0x24, 0x15, 0x4b, 0xea, 0x6b, 0x4d, 0xfb, 0x70, 0x8b, 0xe2, 0x9a, 0x7c, 0x06, + 0x75, 0xc9, 0x06, 0x22, 0x0e, 0xa5, 0x2f, 0x79, 0x3c, 0x60, 0xbe, 0x54, 0x41, 0xaa, 0x7c, 0xc5, + 0xc7, 0xac, 0x5e, 0x42, 0xcc, 0x3d, 0x63, 0xef, 0x69, 0x73, 0x4f, 0x5b, 0x9f, 0xf0, 0x31, 0x23, + 0x1f, 0xc1, 0x3b, 0x51, 0x20, 0x95, 0x3f, 0x10, 0xe3, 0x31, 0x57, 0x7e, 0x76, 0x5c, 0x19, 0x8f, + 0xdb, 0xd1, 0x86, 0x2f, 0x71, 0x1f, 0x53, 0x75, 0xfe, 0xb6, 0x61, 0xeb, 0x8c, 0x5d, 0x3c, 0x0d, + 0x22, 0x1e, 0xba, 0x91, 0x18, 0xfc, 0x70, 0xcb, 0xc4, 0xbf, 0x85, 0xbd, 0xbe, 0x0e, 0xf3, 0x13, + 0x9d, 0x9b, 0x64, 0xca, 0x1f, 0xb1, 0x20, 0x64, 0x29, 0xde, 0xa4, 0xd6, 0x69, 0xb6, 0xf2, 0x32, + 0x64, 0x6c, 0x4d, 0x8e, 0x5a, 0xdd, 0x20, 0x55, 0x3d, 0xa6, 0x3c, 0xf4, 0x73, 0x4b, 0xcf, 0x5f, + 0x1e, 0x58, 0x94, 0x20, 0xc8, 0x9c, 0x85, 0x7c, 0x01, 0xb5, 0x19, 0xb4, 0xc4, 0x2b, 0xd7, 0x3a, + 0x07, 0x33, 0x40, 0x5d, 0xaa, 0x96, 0x2e, 0x95, 0x06, 0x75, 0xb9, 0x7a, 0x90, 0xa6, 0xc1, 0x25, + 0x85, 0x1c, 0x49, 0x92, 0xbb, 0xb0, 0xc1, 0xa5, 0xa1, 0x01, 0x09, 0xa8, 0xd2, 0x2a, 0x97, 0xd9, + 0xf5, 0x9d, 0x13, 0xa8, 0x76, 0x53, 0x91, 0x08, 0x19, 0x44, 0xe4, 0x73, 0xa8, 0x26, 0x66, 0x8d, + 0xb7, 0xae, 
0x75, 0xee, 0x2e, 0x4b, 0xdc, 0xb8, 0x98, 0x9c, 0xf3, 0x10, 0xe7, 0x17, 0x1b, 0x6a, + 0x53, 0x63, 0xf7, 0xf1, 0xa3, 0x95, 0x14, 0x7e, 0x0c, 0x64, 0x1a, 0xe3, 0x27, 0x22, 0xf2, 0x8b, + 0x7c, 0xbe, 0x3d, 0xb5, 0x74, 0x45, 0x84, 0xa5, 0x21, 0x1e, 0x6c, 0x16, 0xbd, 0x0d, 0xa3, 0x37, + 0x11, 0x60, 0x92, 0xab, 0x15, 0xe0, 0x9c, 0x08, 0x36, 0xdc, 0x29, 0x2b, 0xb7, 0xac, 0xef, 0x11, + 0x94, 0x34, 0xfd, 0xe6, 0xf0, 0x3b, 0x2b, 0xca, 0x69, 0x0e, 0x45, 0x57, 0xe7, 0x18, 0x4a, 0x4f, + 0x85, 0x62, 0xe4, 0x1e, 0x94, 0x26, 0x42, 0x31, 0x43, 0xe8, 0xb2, 0x50, 0xed, 0x46, 0xd1, 0xc9, + 0xf9, 0xc9, 0x86, 0x8a, 0x17, 0x48, 0x0c, 0xbc, 0x5d, 0x86, 0x9f, 0x40, 0x49, 0x03, 0x62, 0x86, + 0xdb, 0x4b, 0x1b, 0xae, 0xc7, 0x87, 0x31, 0x0b, 0x4f, 0xe5, 0xf0, 0xc9, 0x65, 0xc2, 0x28, 0x7a, + 0x6b, 0x2c, 0x1e, 0x87, 0xec, 0x47, 0x6c, 0xab, 0x32, 0xcd, 0x3e, 0x9c, 0xdf, 0x6c, 0xd8, 0xd4, + 0x29, 0xf4, 0x98, 0x3a, 0x0d, 0xbe, 0xef, 0x1c, 0xff, 0x2f, 0xa9, 0x7c, 0x0d, 0xd5, 0xac, 0xcf, + 0x79, 0x68, 0x9a, 0x7c, 0x7f, 0x49, 0x24, 0x16, 0xf0, 0xe4, 0x2b, 0x77, 0x47, 0x33, 0x7d, 0xf5, + 0xf2, 0xa0, 0x62, 0x36, 0x68, 0x05, 0x83, 0x4f, 0x42, 0xe7, 0x2f, 0x1b, 0x6a, 0x26, 0x79, 0x97, + 0x2b, 0xf9, 0x26, 0xe5, 0x4e, 0xee, 0x43, 0x59, 0xb7, 0x81, 0xc4, 0x29, 0xfd, 0xd7, 0x4d, 0x9e, + 0xc5, 0x38, 0xcf, 0x60, 0xd7, 0x0b, 0x64, 0x3e, 0x9d, 0xff, 0xb1, 0xd3, 0xf3, 0x8e, 0x58, 0x2b, + 0x76, 0xc4, 0x9f, 0x65, 0xa8, 0x9c, 0x32, 0x29, 0x83, 0x21, 0x23, 0x0f, 0x61, 0x3b, 0x66, 0x17, + 0xd9, 0xd4, 0xfa, 0x28, 0xd7, 0x59, 0x6b, 0x7f, 0xd0, 0x5a, 0xfa, 0xd6, 0xb4, 0x8a, 0xef, 0x81, + 0x67, 0xd1, 0xcd, 0xb8, 0xf8, 0x3e, 0x9c, 0xc1, 0x8e, 0x06, 0x9b, 0x68, 0xe1, 0xf5, 0x91, 0x06, + 0x4c, 0xa7, 0xd6, 0xf9, 0x70, 0x35, 0xda, 0x4c, 0xa5, 0x3d, 0x8b, 0x6e, 0xc5, 0x73, 0xb2, 0x5d, + 0x94, 0xb0, 0x57, 0x94, 0x62, 0x0e, 0x68, 0x4a, 0x94, 0x57, 0x90, 0x30, 0xf2, 0xcd, 0x82, 0xd8, + 0x64, 0xc5, 0x74, 0x6e, 0x80, 0xe8, 0x3e, 0x7e, 0xe4, 0xcd, 0x6b, 0x0d, 0x79, 0x00, 0x30, 0x53, + 0x6d, 0x53, 0xce, 0xe6, 0x0a, 0x98, 0xbc, 0x54, 0x9e, 0x45, 0x37, 0x72, 0xdd, 0xd6, 0x9a, 0x83, + 0xc2, 0xb1, 0xbe, 0xa8, 0xc4, 0x73, 0xc1, 0xba, 0xd5, 0x3d, 0x2b, 0x93, 0x0f, 0x72, 0x1f, 0xaa, + 0xa3, 0x40, 0xfa, 0x18, 0x56, 0xc1, 0xb0, 0xc6, 0x8a, 0x30, 0x23, 0x32, 0x9e, 0x45, 0x2b, 0x23, + 0xa3, 0x37, 0x0f, 0x61, 0x5b, 0x07, 0xe2, 0xeb, 0x35, 0xd6, 0x63, 0x5f, 0xaf, 0xbe, 0xb6, 0xae, + 0x45, 0x85, 0xd0, 0x75, 0x9d, 0x14, 0x15, 0xc3, 0x83, 0xad, 0x1c, 0x4c, 0xb7, 0x6d, 0x7d, 0xe3, + 0xb5, 0x4c, 0x16, 0x06, 0x56, 0x33, 0x39, 0x29, 0xcc, 0x6f, 0x08, 0x77, 0xf4, 0x9d, 0xf2, 0xb2, + 0x14, 0x68, 0x05, 0xc4, 0xbc, 0xb7, 0xfa, 0x8a, 0xaf, 0x0c, 0x83, 0x67, 0xd1, 0xdd, 0xd1, 0x92, + 0x7d, 0xb7, 0x0c, 0x6b, 0xf2, 0x7c, 0xec, 0x76, 0x9f, 0x5f, 0x35, 0xec, 0x17, 0x57, 0x0d, 0xfb, + 0x8f, 0xab, 0x86, 0xfd, 0xf3, 0x75, 0xc3, 0x7a, 0x71, 0xdd, 0xb0, 0x7e, 0xbf, 0x6e, 0x58, 0xcf, + 0x3e, 0x1d, 0x72, 0x35, 0x3a, 0xef, 0xeb, 0xb3, 0xda, 0x85, 0xdf, 0x2e, 0xb3, 0x08, 0x12, 0xde, + 0x5e, 0xfa, 0x33, 0xd6, 0x5f, 0xc7, 0x1f, 0xa3, 0xe3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, + 0x03, 0x11, 0x71, 0xac, 0x09, 0x00, 0x00, } func (m *NewRoundStep) Marshal() (dAtA []byte, err error) { @@ -1245,7 +1249,7 @@ func (m *HasVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HasProposalBlockPart) Marshal() (dAtA []byte, err error) { +func (m *VoteSetMaj23) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1255,18 +1259,28 @@ func (m *HasProposalBlockPart) Marshal() (dAtA []byte, err 
error) { return dAtA[:n], nil } -func (m *HasProposalBlockPart) MarshalTo(dAtA []byte) (int, error) { +func (m *VoteSetMaj23) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HasProposalBlockPart) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) i-- dAtA[i] = 0x18 } @@ -1283,7 +1297,7 @@ func (m *HasProposalBlockPart) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *VoteSetMaj23) Marshal() (dAtA []byte, err error) { +func (m *VoteSetBits) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1293,16 +1307,26 @@ func (m *VoteSetMaj23) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VoteSetMaj23) MarshalTo(dAtA []byte) (int, error) { +func (m *VoteSetBits) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *VoteSetBits) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Votes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a { size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1331,7 +1355,7 @@ func (m *VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *VoteSetBits) Marshal() (dAtA []byte, err error) { +func (m *HasProposalBlockPart) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1341,38 +1365,18 @@ func (m *VoteSetBits) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VoteSetBits) MarshalTo(dAtA []byte) (int, error) { +func (m *HasProposalBlockPart) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VoteSetBits) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *HasProposalBlockPart) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Votes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) i-- dAtA[i] = 0x18 } @@ -1769,7 +1773,7 @@ func (m *HasVote) Size() (n int) { return n } -func (m *HasProposalBlockPart) Size() (n int) { +func (m *VoteSetMaj23) Size() (n int) { if m == nil { return 0 } @@ -1781,13 +1785,15 @@ func (m *HasProposalBlockPart) Size() (n int) { if m.Round != 0 { n += 1 + sovTypes(uint64(m.Round)) } - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) + if 
m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) return n } -func (m *VoteSetMaj23) Size() (n int) { +func (m *VoteSetBits) Size() (n int) { if m == nil { return 0 } @@ -1804,10 +1810,12 @@ func (m *VoteSetMaj23) Size() (n int) { } l = m.BlockID.Size() n += 1 + l + sovTypes(uint64(l)) + l = m.Votes.Size() + n += 1 + l + sovTypes(uint64(l)) return n } -func (m *VoteSetBits) Size() (n int) { +func (m *HasProposalBlockPart) Size() (n int) { if m == nil { return 0 } @@ -1819,13 +1827,9 @@ func (m *VoteSetBits) Size() (n int) { if m.Round != 0 { n += 1 + sovTypes(uint64(m.Round)) } - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Votes.Size() - n += 1 + l + sovTypes(uint64(l)) return n } @@ -2243,7 +2247,7 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.BlockParts == nil { - m.BlockParts = &bits.BitArray{} + m.BlockParts = &v11.BitArray{} } if err := m.BlockParts.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2674,7 +2678,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Vote == nil { - m.Vote = &types.Vote{} + m.Vote = &v1.Vote{} } if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2782,7 +2786,7 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= types.SignedMsgType(b&0x7F) << shift + m.Type |= v1.SignedMsgType(b&0x7F) << shift if b < 0x80 { break } @@ -2827,7 +2831,7 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { } return nil } -func (m *HasProposalBlockPart) Unmarshal(dAtA []byte) error { +func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2850,10 +2854,10 @@ func (m *HasProposalBlockPart) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HasProposalBlockPart: wiretype end group for non-group") + return fmt.Errorf("proto: VoteSetMaj23: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HasProposalBlockPart: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VoteSetMaj23: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2896,9 +2900,9 @@ func (m *HasProposalBlockPart) Unmarshal(dAtA []byte) error { } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Index = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2908,11 +2912,44 @@ func (m *HasProposalBlockPart) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= int32(b&0x7F) << shift + m.Type |= v1.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -2934,7 +2971,7 @@ func (m *HasProposalBlockPart) Unmarshal(dAtA []byte) error { } return nil } -func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { +func (m *VoteSetBits) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2957,10 +2994,10 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VoteSetMaj23: wiretype end group for non-group") + return fmt.Errorf("proto: VoteSetBits: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VoteSetMaj23: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VoteSetBits: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -3015,7 +3052,7 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= types.SignedMsgType(b&0x7F) << shift + m.Type |= v1.SignedMsgType(b&0x7F) << shift if b < 0x80 { break } @@ -3053,6 +3090,39 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Votes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -3074,7 +3144,7 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { } return nil } -func (m *VoteSetBits) Unmarshal(dAtA []byte) error { +func (m *HasProposalBlockPart) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3097,10 +3167,10 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VoteSetBits: wiretype end group for non-group") + return fmt.Errorf("proto: HasProposalBlockPart: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VoteSetBits: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HasProposalBlockPart: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -3143,61 +3213,9 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= types.SignedMsgType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } - var msglen int + m.Index = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3207,25 +3225,11 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Index |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Votes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/api/cometbft/consensus/v1/wal.pb.go b/api/cometbft/consensus/v1/wal.pb.go new file mode 100644 index 00000000000..69b0c9ec34f --- /dev/null +++ b/api/cometbft/consensus/v1/wal.pb.go @@ -0,0 +1,1603 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/consensus/v1/wal.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "github.com/golang/protobuf/ptypes/duration" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgInfo are msgs from the reactor which may update the state +type MsgInfo struct { + Msg Message `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg"` + PeerID string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` + ReceiveTime *time.Time `protobuf:"bytes,3,opt,name=receive_time,json=receiveTime,proto3,stdtime" json:"receive_time,omitempty"` +} + +func (m *MsgInfo) Reset() { *m = MsgInfo{} } +func (m *MsgInfo) String() string { return proto.CompactTextString(m) } +func (*MsgInfo) ProtoMessage() {} +func (*MsgInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f013b385bb75d435, []int{0} +} +func (m *MsgInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgInfo.Merge(m, src) +} +func (m *MsgInfo) XXX_Size() int { + return m.Size() +} +func (m *MsgInfo) XXX_DiscardUnknown() { + xxx_messageInfo_MsgInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgInfo proto.InternalMessageInfo + +func (m *MsgInfo) GetMsg() Message { + if m != nil { + return m.Msg + } + return Message{} +} + +func (m *MsgInfo) GetPeerID() string { + if m != nil { + return m.PeerID + } + return "" +} + +func (m *MsgInfo) GetReceiveTime() *time.Time { + if m != nil { + return m.ReceiveTime + } + return nil +} + +// TimeoutInfo internally generated messages which may update the state +type TimeoutInfo struct { + Duration time.Duration `protobuf:"bytes,1,opt,name=duration,proto3,stdduration" json:"duration"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + Step uint32 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` +} + +func (m *TimeoutInfo) Reset() { *m = TimeoutInfo{} } +func (m *TimeoutInfo) String() string { return proto.CompactTextString(m) } +func (*TimeoutInfo) ProtoMessage() {} +func (*TimeoutInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f013b385bb75d435, []int{1} +} +func (m *TimeoutInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeoutInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeoutInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeoutInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeoutInfo.Merge(m, src) +} +func (m *TimeoutInfo) XXX_Size() int { + return m.Size() +} +func (m *TimeoutInfo) XXX_DiscardUnknown() { + xxx_messageInfo_TimeoutInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeoutInfo proto.InternalMessageInfo + +func (m *TimeoutInfo) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *TimeoutInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *TimeoutInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *TimeoutInfo) GetStep() uint32 { + if m != nil { + return m.Step + } + return 0 +} + +// EndHeight marks the end of 
the given height inside WAL. +// @internal used by scripts/wal2json util. +type EndHeight struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *EndHeight) Reset() { *m = EndHeight{} } +func (m *EndHeight) String() string { return proto.CompactTextString(m) } +func (*EndHeight) ProtoMessage() {} +func (*EndHeight) Descriptor() ([]byte, []int) { + return fileDescriptor_f013b385bb75d435, []int{2} +} +func (m *EndHeight) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndHeight) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EndHeight.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EndHeight) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndHeight.Merge(m, src) +} +func (m *EndHeight) XXX_Size() int { + return m.Size() +} +func (m *EndHeight) XXX_DiscardUnknown() { + xxx_messageInfo_EndHeight.DiscardUnknown(m) +} + +var xxx_messageInfo_EndHeight proto.InternalMessageInfo + +func (m *EndHeight) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// WALMessage describes a consensus WAL (Write Ahead Log) entry. +type WALMessage struct { + // Sum of all possible messages. + // + // Types that are valid to be assigned to Sum: + // *WALMessage_EventDataRoundState + // *WALMessage_MsgInfo + // *WALMessage_TimeoutInfo + // *WALMessage_EndHeight + Sum isWALMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *WALMessage) Reset() { *m = WALMessage{} } +func (m *WALMessage) String() string { return proto.CompactTextString(m) } +func (*WALMessage) ProtoMessage() {} +func (*WALMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f013b385bb75d435, []int{3} +} +func (m *WALMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WALMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WALMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WALMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WALMessage.Merge(m, src) +} +func (m *WALMessage) XXX_Size() int { + return m.Size() +} +func (m *WALMessage) XXX_DiscardUnknown() { + xxx_messageInfo_WALMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_WALMessage proto.InternalMessageInfo + +type isWALMessage_Sum interface { + isWALMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type WALMessage_EventDataRoundState struct { + EventDataRoundState *v1.EventDataRoundState `protobuf:"bytes,1,opt,name=event_data_round_state,json=eventDataRoundState,proto3,oneof" json:"event_data_round_state,omitempty"` +} +type WALMessage_MsgInfo struct { + MsgInfo *MsgInfo `protobuf:"bytes,2,opt,name=msg_info,json=msgInfo,proto3,oneof" json:"msg_info,omitempty"` +} +type WALMessage_TimeoutInfo struct { + TimeoutInfo *TimeoutInfo `protobuf:"bytes,3,opt,name=timeout_info,json=timeoutInfo,proto3,oneof" json:"timeout_info,omitempty"` +} +type WALMessage_EndHeight struct { + EndHeight *EndHeight `protobuf:"bytes,4,opt,name=end_height,json=endHeight,proto3,oneof" json:"end_height,omitempty"` +} + +func (*WALMessage_EventDataRoundState) isWALMessage_Sum() {} +func (*WALMessage_MsgInfo) isWALMessage_Sum() {} +func (*WALMessage_TimeoutInfo) isWALMessage_Sum() {} +func 
(*WALMessage_EndHeight) isWALMessage_Sum() {} + +func (m *WALMessage) GetSum() isWALMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *WALMessage) GetEventDataRoundState() *v1.EventDataRoundState { + if x, ok := m.GetSum().(*WALMessage_EventDataRoundState); ok { + return x.EventDataRoundState + } + return nil +} + +func (m *WALMessage) GetMsgInfo() *MsgInfo { + if x, ok := m.GetSum().(*WALMessage_MsgInfo); ok { + return x.MsgInfo + } + return nil +} + +func (m *WALMessage) GetTimeoutInfo() *TimeoutInfo { + if x, ok := m.GetSum().(*WALMessage_TimeoutInfo); ok { + return x.TimeoutInfo + } + return nil +} + +func (m *WALMessage) GetEndHeight() *EndHeight { + if x, ok := m.GetSum().(*WALMessage_EndHeight); ok { + return x.EndHeight + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*WALMessage) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*WALMessage_EventDataRoundState)(nil), + (*WALMessage_MsgInfo)(nil), + (*WALMessage_TimeoutInfo)(nil), + (*WALMessage_EndHeight)(nil), + } +} + +// TimedWALMessage wraps WALMessage and adds Time for debugging purposes. +type TimedWALMessage struct { + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + Msg *WALMessage `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *TimedWALMessage) Reset() { *m = TimedWALMessage{} } +func (m *TimedWALMessage) String() string { return proto.CompactTextString(m) } +func (*TimedWALMessage) ProtoMessage() {} +func (*TimedWALMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f013b385bb75d435, []int{4} +} +func (m *TimedWALMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimedWALMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimedWALMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimedWALMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimedWALMessage.Merge(m, src) +} +func (m *TimedWALMessage) XXX_Size() int { + return m.Size() +} +func (m *TimedWALMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TimedWALMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TimedWALMessage proto.InternalMessageInfo + +func (m *TimedWALMessage) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *TimedWALMessage) GetMsg() *WALMessage { + if m != nil { + return m.Msg + } + return nil +} + +func init() { + proto.RegisterType((*MsgInfo)(nil), "cometbft.consensus.v1.MsgInfo") + proto.RegisterType((*TimeoutInfo)(nil), "cometbft.consensus.v1.TimeoutInfo") + proto.RegisterType((*EndHeight)(nil), "cometbft.consensus.v1.EndHeight") + proto.RegisterType((*WALMessage)(nil), "cometbft.consensus.v1.WALMessage") + proto.RegisterType((*TimedWALMessage)(nil), "cometbft.consensus.v1.TimedWALMessage") +} + +func init() { proto.RegisterFile("cometbft/consensus/v1/wal.proto", fileDescriptor_f013b385bb75d435) } + +var fileDescriptor_f013b385bb75d435 = []byte{ + // 574 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xc1, 0x6a, 0xdb, 0x4c, + 0x10, 0xd6, 0xc6, 0x8e, 0x63, 0xaf, 0xf3, 0xf3, 0x83, 0x9a, 0x06, 0xd7, 0x07, 0xc9, 0x51, 0xa0, + 0xf8, 0x24, 0xe1, 0x06, 0x42, 0xa1, 0x87, 0x12, 0xe3, 0x10, 0x1b, 0x1a, 0x08, 0xdb, 0x42, 0xa1, + 0x50, 0x84, 0x6c, 0x8d, 0x65, 0x41, 0xa4, 
0x15, 0xda, 0x95, 0x4b, 0x6f, 0x7d, 0x04, 0x1f, 0xfb, + 0x16, 0x7d, 0x80, 0xbe, 0x80, 0x8f, 0x39, 0xf6, 0xe4, 0x16, 0xfb, 0x45, 0xca, 0xee, 0x4a, 0xb6, + 0x69, 0xec, 0xd2, 0xdb, 0x8c, 0x66, 0xbe, 0x6f, 0xbf, 0x99, 0xf9, 0x84, 0xcd, 0x11, 0x8d, 0x80, + 0x0f, 0xc7, 0xdc, 0x19, 0xd1, 0x98, 0x41, 0xcc, 0x32, 0xe6, 0x4c, 0x3b, 0xce, 0x27, 0xef, 0xde, + 0x4e, 0x52, 0xca, 0xa9, 0xfe, 0xb4, 0x68, 0xb0, 0xd7, 0x0d, 0xf6, 0xb4, 0xd3, 0x3c, 0xdb, 0x8d, + 0xe3, 0x9f, 0x13, 0x60, 0x0a, 0xd9, 0x34, 0xd6, 0x2d, 0xf2, 0xab, 0x28, 0xc3, 0x14, 0x62, 0x5e, + 0xd4, 0x4f, 0x02, 0x1a, 0x50, 0x19, 0x3a, 0x22, 0x2a, 0x50, 0x01, 0xa5, 0xc1, 0x3d, 0x38, 0x32, + 0x1b, 0x66, 0x63, 0xc7, 0xcf, 0x52, 0x8f, 0x87, 0x34, 0xce, 0xeb, 0xe6, 0x9f, 0x75, 0x1e, 0x46, + 0xc0, 0xb8, 0x17, 0x25, 0xaa, 0xc1, 0xfa, 0x86, 0xf0, 0xd1, 0x2d, 0x0b, 0x06, 0xf1, 0x98, 0xea, + 0x97, 0xb8, 0x14, 0xb1, 0xa0, 0x81, 0x5a, 0xa8, 0x5d, 0x7f, 0x61, 0xd8, 0x3b, 0x47, 0xb1, 0x6f, + 0x81, 0x31, 0x2f, 0x80, 0x6e, 0x79, 0xbe, 0x30, 0x35, 0x22, 0x00, 0xfa, 0x39, 0x3e, 0x4a, 0x00, + 0x52, 0x37, 0xf4, 0x1b, 0x07, 0x2d, 0xd4, 0xae, 0x75, 0xf1, 0x72, 0x61, 0x56, 0xee, 0x00, 0xd2, + 0x41, 0x8f, 0x54, 0x44, 0x69, 0xe0, 0xeb, 0x37, 0xf8, 0x38, 0x85, 0x11, 0x84, 0x53, 0x70, 0x85, + 0x86, 0x46, 0x49, 0xbe, 0xd2, 0xb4, 0x95, 0x40, 0xbb, 0x10, 0x68, 0xbf, 0x2b, 0x04, 0x76, 0xab, + 0xf3, 0x85, 0x89, 0x66, 0x3f, 0x4d, 0x44, 0xea, 0x39, 0x52, 0xd4, 0xac, 0x19, 0xc2, 0x75, 0x11, + 0xd0, 0x8c, 0x4b, 0xd5, 0xaf, 0x71, 0xb5, 0x18, 0x3a, 0x97, 0xfe, 0xec, 0x11, 0x69, 0x2f, 0x6f, + 0x90, 0x9c, 0xda, 0x57, 0xc1, 0xb9, 0x06, 0xe9, 0xa7, 0xb8, 0x32, 0x81, 0x30, 0x98, 0x70, 0xa9, + 0xbe, 0x44, 0xf2, 0x4c, 0x3f, 0xc1, 0x87, 0x29, 0xcd, 0x62, 0x5f, 0x4a, 0x3d, 0x24, 0x2a, 0xd1, + 0x75, 0x5c, 0x66, 0x1c, 0x92, 0x46, 0xb9, 0x85, 0xda, 0xff, 0x11, 0x19, 0x5b, 0xe7, 0xb8, 0x76, + 0x1d, 0xfb, 0x7d, 0x05, 0xdb, 0xd0, 0xa1, 0x6d, 0x3a, 0xeb, 0xfb, 0x01, 0xc6, 0xef, 0xaf, 0xde, + 0xe4, 0xfb, 0xd3, 0x3f, 0xe2, 0x53, 0x79, 0x5f, 0xd7, 0xf7, 0xb8, 0xe7, 0x4a, 0x6e, 0x97, 0x71, + 0x8f, 0x43, 0x3e, 0xc4, 0xf3, 0xcd, 0xfe, 0x95, 0x4d, 0xa6, 0x1d, 0xfb, 0x5a, 0x00, 0x7a, 0x1e, + 0xf7, 0x88, 0x68, 0x7f, 0x2b, 0xba, 0xfb, 0x1a, 0x79, 0x02, 0x8f, 0x3f, 0xeb, 0xaf, 0x70, 0x35, + 0x62, 0x81, 0x1b, 0xc6, 0x63, 0x2a, 0xc7, 0xfa, 0xcb, 0x41, 0xd5, 0xf5, 0xfb, 0x1a, 0x39, 0x8a, + 0x72, 0x23, 0xdc, 0xe0, 0x63, 0xae, 0x36, 0xac, 0x08, 0xd4, 0xad, 0xac, 0x3d, 0x04, 0x5b, 0xc7, + 0xe8, 0x6b, 0xa4, 0xce, 0xb7, 0x6e, 0x73, 0x85, 0x31, 0xc4, 0xbe, 0x9b, 0xef, 0xa3, 0x2c, 0x69, + 0x5a, 0x7b, 0x68, 0xd6, 0x1b, 0xec, 0x6b, 0xa4, 0x06, 0x45, 0xd2, 0x3d, 0xc4, 0x25, 0x96, 0x45, + 0xd6, 0x17, 0x84, 0xff, 0x17, 0x0f, 0xf9, 0x5b, 0x2b, 0x7c, 0x89, 0xcb, 0xd2, 0x4a, 0xe8, 0x9f, + 0xac, 0xa4, 0x49, 0x2b, 0x49, 0x84, 0x7e, 0xa1, 0x9c, 0xae, 0x16, 0x73, 0xb6, 0x47, 0xd0, 0xe6, + 0x25, 0x69, 0xf3, 0xee, 0xdd, 0x7c, 0x69, 0xa0, 0x87, 0xa5, 0x81, 0x7e, 0x2d, 0x0d, 0x34, 0x5b, + 0x19, 0xda, 0xc3, 0xca, 0xd0, 0x7e, 0xac, 0x0c, 0xed, 0xc3, 0x65, 0x10, 0xf2, 0x49, 0x36, 0x14, + 0x3c, 0xce, 0xd6, 0x9f, 0x9e, 0x07, 0x5e, 0x12, 0x3a, 0x3b, 0xff, 0xff, 0x61, 0x45, 0x4a, 0xbd, + 0xf8, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x20, 0xcb, 0xbf, 0x57, 0x04, 0x00, 0x00, +} + +func (m *MsgInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgInfo) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ReceiveTime != nil { + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(*m.ReceiveTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(*m.ReceiveTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintWal(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x1a + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarintWal(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Msg.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TimeoutInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeoutInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeoutInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Step != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x20 + } + if m.Round != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + n3, err3 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintWal(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndHeight) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndHeight) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WALMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WALMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *WALMessage_EventDataRoundState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_EventDataRoundState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EventDataRoundState != nil { + { + size, err := m.EventDataRoundState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} +func (m *WALMessage_MsgInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_MsgInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MsgInfo != nil { + { + size, err := m.MsgInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *WALMessage_TimeoutInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_TimeoutInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TimeoutInfo != nil { + { + size, err := m.TimeoutInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *WALMessage_EndHeight) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_EndHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndHeight != nil { + { + size, err := m.EndHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *TimedWALMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimedWALMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimedWALMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Msg != nil { + { + size, err := m.Msg.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + n9, err9 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintWal(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintWal(dAtA []byte, offset int, v uint64) int { + offset -= sovWal(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Msg.Size() + n += 1 + l + sovWal(uint64(l)) + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sovWal(uint64(l)) + } + if m.ReceiveTime != nil { + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(*m.ReceiveTime) + n += 1 + l + sovWal(uint64(l)) + } + return n +} + +func (m *TimeoutInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovWal(uint64(l)) + if m.Height != 0 { + n += 1 + sovWal(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovWal(uint64(m.Round)) + } + if m.Step != 0 { + n += 1 + sovWal(uint64(m.Step)) + } + return n +} + +func (m *EndHeight) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + 
sovWal(uint64(m.Height)) + } + return n +} + +func (m *WALMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *WALMessage_EventDataRoundState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EventDataRoundState != nil { + l = m.EventDataRoundState.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *WALMessage_MsgInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MsgInfo != nil { + l = m.MsgInfo.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *WALMessage_TimeoutInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutInfo != nil { + l = m.TimeoutInfo.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *WALMessage_EndHeight) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndHeight != nil { + l = m.EndHeight.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *TimedWALMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovWal(uint64(l)) + if m.Msg != nil { + l = m.Msg.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} + +func sovWal(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWal(x uint64) (n int) { + return sovWal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Msg.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ReceiveTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReceiveTime == nil { + m.ReceiveTime = new(time.Time) + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(m.ReceiveTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeoutInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeoutInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndHeight) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndHeight: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndHeight: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WALMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WALMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WALMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventDataRoundState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &v1.EventDataRoundState{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_EventDataRoundState{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MsgInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &MsgInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_MsgInfo{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TimeoutInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_TimeoutInfo{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EndHeight{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_EndHeight{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimedWALMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimedWALMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Msg == nil { + m.Msg = &WALMessage{} + } + if err := m.Msg.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWal + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWal + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWal + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWal = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWal = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/consensus/v1beta1/message.go b/api/cometbft/consensus/v1beta1/message.go new file mode 100644 index 00000000000..6bfc60eea25 --- /dev/null +++ b/api/cometbft/consensus/v1beta1/message.go @@ -0,0 +1,97 @@ +package v1beta1 + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" +) + +func (m *VoteSetBits) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_VoteSetBits{VoteSetBits: m} + return cm +} + +func (m *VoteSetMaj23) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_VoteSetMaj23{VoteSetMaj23: m} + return cm +} + +func (m *HasVote) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_HasVote{HasVote: m} + return cm +} + +func (m *Vote) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_Vote{Vote: m} + return cm +} + +func (m *BlockPart) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_BlockPart{BlockPart: m} + return cm +} + +func (m *ProposalPOL) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_ProposalPol{ProposalPol: m} + return cm +} + +func (m *Proposal) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_Proposal{Proposal: m} + 
return cm +} + +func (m *NewValidBlock) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_NewValidBlock{NewValidBlock: m} + return cm +} + +func (m *NewRoundStep) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_NewRoundStep{NewRoundStep: m} + return cm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped consensus +// proto message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_NewRoundStep: + return m.GetNewRoundStep(), nil + + case *Message_NewValidBlock: + return m.GetNewValidBlock(), nil + + case *Message_Proposal: + return m.GetProposal(), nil + + case *Message_ProposalPol: + return m.GetProposalPol(), nil + + case *Message_BlockPart: + return m.GetBlockPart(), nil + + case *Message_Vote: + return m.GetVote(), nil + + case *Message_HasVote: + return m.GetHasVote(), nil + + case *Message_VoteSetMaj23: + return m.GetVoteSetMaj23(), nil + + case *Message_VoteSetBits: + return m.GetVoteSetBits(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/api/cometbft/consensus/v1beta1/types.pb.go b/api/cometbft/consensus/v1beta1/types.pb.go new file mode 100644 index 00000000000..16943db6c3b --- /dev/null +++ b/api/cometbft/consensus/v1beta1/types.pb.go @@ -0,0 +1,3431 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/consensus/v1beta1/types.proto + +package v1beta1 + +import ( + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/libs/bits/v1" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// NewRoundStep is sent for every step taken in the ConsensusState. 
+// For every height/round/step transition +type NewRoundStep struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Step uint32 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"` + SecondsSinceStartTime int64 `protobuf:"varint,4,opt,name=seconds_since_start_time,json=secondsSinceStartTime,proto3" json:"seconds_since_start_time,omitempty"` + LastCommitRound int32 `protobuf:"varint,5,opt,name=last_commit_round,json=lastCommitRound,proto3" json:"last_commit_round,omitempty"` +} + +func (m *NewRoundStep) Reset() { *m = NewRoundStep{} } +func (m *NewRoundStep) String() string { return proto.CompactTextString(m) } +func (*NewRoundStep) ProtoMessage() {} +func (*NewRoundStep) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{0} +} +func (m *NewRoundStep) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NewRoundStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NewRoundStep.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NewRoundStep) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewRoundStep.Merge(m, src) +} +func (m *NewRoundStep) XXX_Size() int { + return m.Size() +} +func (m *NewRoundStep) XXX_DiscardUnknown() { + xxx_messageInfo_NewRoundStep.DiscardUnknown(m) +} + +var xxx_messageInfo_NewRoundStep proto.InternalMessageInfo + +func (m *NewRoundStep) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *NewRoundStep) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *NewRoundStep) GetStep() uint32 { + if m != nil { + return m.Step + } + return 0 +} + +func (m *NewRoundStep) GetSecondsSinceStartTime() int64 { + if m != nil { + return m.SecondsSinceStartTime + } + return 0 +} + +func (m *NewRoundStep) GetLastCommitRound() int32 { + if m != nil { + return m.LastCommitRound + } + return 0 +} + +// NewValidBlock is sent when a validator observes a valid block B in some round r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// In case the block is also committed, then IsCommit flag is set to true. 
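NewRoundStep, defined above, gets the standard generated codec later in this file: Size() computes the exact wire size, MarshalToSizedBuffer fills the buffer backwards from the end, and Unmarshal reverses it. A small round trip under the import path this PR introduces (a sketch, not test code from the PR):

package main

import (
	"fmt"

	cs "github.com/cometbft/cometbft/api/cometbft/consensus/v1beta1"
)

func main() {
	orig := &cs.NewRoundStep{Height: 10, Round: 1, Step: 3}

	bz, err := orig.Marshal() // allocates Size() bytes, fills from the end
	if err != nil {
		panic(err)
	}

	var decoded cs.NewRoundStep
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetHeight(), decoded.GetRound(), decoded.GetStep()) // 10 1 3
}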
+type NewValidBlock struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockPartSetHeader v1beta1.PartSetHeader `protobuf:"bytes,3,opt,name=block_part_set_header,json=blockPartSetHeader,proto3" json:"block_part_set_header"` + BlockParts *v1.BitArray `protobuf:"bytes,4,opt,name=block_parts,json=blockParts,proto3" json:"block_parts,omitempty"` + IsCommit bool `protobuf:"varint,5,opt,name=is_commit,json=isCommit,proto3" json:"is_commit,omitempty"` +} + +func (m *NewValidBlock) Reset() { *m = NewValidBlock{} } +func (m *NewValidBlock) String() string { return proto.CompactTextString(m) } +func (*NewValidBlock) ProtoMessage() {} +func (*NewValidBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{1} +} +func (m *NewValidBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NewValidBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NewValidBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NewValidBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewValidBlock.Merge(m, src) +} +func (m *NewValidBlock) XXX_Size() int { + return m.Size() +} +func (m *NewValidBlock) XXX_DiscardUnknown() { + xxx_messageInfo_NewValidBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_NewValidBlock proto.InternalMessageInfo + +func (m *NewValidBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *NewValidBlock) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *NewValidBlock) GetBlockPartSetHeader() v1beta1.PartSetHeader { + if m != nil { + return m.BlockPartSetHeader + } + return v1beta1.PartSetHeader{} +} + +func (m *NewValidBlock) GetBlockParts() *v1.BitArray { + if m != nil { + return m.BlockParts + } + return nil +} + +func (m *NewValidBlock) GetIsCommit() bool { + if m != nil { + return m.IsCommit + } + return false +} + +// Proposal is sent when a new block is proposed. +type Proposal struct { + Proposal v1beta1.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{2} +} +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return m.Size() +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetProposal() v1beta1.Proposal { + if m != nil { + return m.Proposal + } + return v1beta1.Proposal{} +} + +// ProposalPOL is sent when a previous proposal is re-proposed. 
+type ProposalPOL struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + ProposalPolRound int32 `protobuf:"varint,2,opt,name=proposal_pol_round,json=proposalPolRound,proto3" json:"proposal_pol_round,omitempty"` + ProposalPol v1.BitArray `protobuf:"bytes,3,opt,name=proposal_pol,json=proposalPol,proto3" json:"proposal_pol"` +} + +func (m *ProposalPOL) Reset() { *m = ProposalPOL{} } +func (m *ProposalPOL) String() string { return proto.CompactTextString(m) } +func (*ProposalPOL) ProtoMessage() {} +func (*ProposalPOL) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{3} +} +func (m *ProposalPOL) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProposalPOL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProposalPOL.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProposalPOL) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProposalPOL.Merge(m, src) +} +func (m *ProposalPOL) XXX_Size() int { + return m.Size() +} +func (m *ProposalPOL) XXX_DiscardUnknown() { + xxx_messageInfo_ProposalPOL.DiscardUnknown(m) +} + +var xxx_messageInfo_ProposalPOL proto.InternalMessageInfo + +func (m *ProposalPOL) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ProposalPOL) GetProposalPolRound() int32 { + if m != nil { + return m.ProposalPolRound + } + return 0 +} + +func (m *ProposalPOL) GetProposalPol() v1.BitArray { + if m != nil { + return m.ProposalPol + } + return v1.BitArray{} +} + +// BlockPart is sent when gossipping a piece of the proposed block. +type BlockPart struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Part v1beta1.Part `protobuf:"bytes,3,opt,name=part,proto3" json:"part"` +} + +func (m *BlockPart) Reset() { *m = BlockPart{} } +func (m *BlockPart) String() string { return proto.CompactTextString(m) } +func (*BlockPart) ProtoMessage() {} +func (*BlockPart) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{4} +} +func (m *BlockPart) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockPart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockPart.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockPart) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockPart.Merge(m, src) +} +func (m *BlockPart) XXX_Size() int { + return m.Size() +} +func (m *BlockPart) XXX_DiscardUnknown() { + xxx_messageInfo_BlockPart.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockPart proto.InternalMessageInfo + +func (m *BlockPart) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *BlockPart) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *BlockPart) GetPart() v1beta1.Part { + if m != nil { + return m.Part + } + return v1beta1.Part{} +} + +// Vote is sent when voting for a proposal (or lack thereof). 
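One property worth noting about all of these generated getters: each guards with m != nil, so calling one on a nil pointer receiver is legal Go and returns the zero value instead of panicking. A tiny illustration (sketch):

package main

import cs "github.com/cometbft/cometbft/api/cometbft/consensus/v1beta1"

func main() {
	var nrs *cs.NewRoundStep // nil pointer
	println(nrs.GetHeight()) // prints 0: the generated getter checks m != nil
}

This is what lets call sites chain getters through optional sub-messages without explicit nil checks.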
+type Vote struct { + Vote *v1beta1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` +} + +func (m *Vote) Reset() { *m = Vote{} } +func (m *Vote) String() string { return proto.CompactTextString(m) } +func (*Vote) ProtoMessage() {} +func (*Vote) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{5} +} +func (m *Vote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Vote) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vote.Merge(m, src) +} +func (m *Vote) XXX_Size() int { + return m.Size() +} +func (m *Vote) XXX_DiscardUnknown() { + xxx_messageInfo_Vote.DiscardUnknown(m) +} + +var xxx_messageInfo_Vote proto.InternalMessageInfo + +func (m *Vote) GetVote() *v1beta1.Vote { + if m != nil { + return m.Vote + } + return nil +} + +// HasVote is sent to indicate that a particular vote has been received. +type HasVote struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type v1beta1.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + Index int32 `protobuf:"varint,4,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *HasVote) Reset() { *m = HasVote{} } +func (m *HasVote) String() string { return proto.CompactTextString(m) } +func (*HasVote) ProtoMessage() {} +func (*HasVote) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{6} +} +func (m *HasVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HasVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HasVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HasVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_HasVote.Merge(m, src) +} +func (m *HasVote) XXX_Size() int { + return m.Size() +} +func (m *HasVote) XXX_DiscardUnknown() { + xxx_messageInfo_HasVote.DiscardUnknown(m) +} + +var xxx_messageInfo_HasVote proto.InternalMessageInfo + +func (m *HasVote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *HasVote) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *HasVote) GetType() v1beta1.SignedMsgType { + if m != nil { + return m.Type + } + return v1beta1.UnknownType +} + +func (m *HasVote) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. 
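The Wrap and Unwrap helpers added in message.go above exist so reactors can put any of these concrete messages on the wire inside the Message oneof and recover them on receipt. A minimal round trip using HasVote (a sketch; gogoproto's proto.Marshal accepts any generated message):

package main

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	cs "github.com/cometbft/cometbft/api/cometbft/consensus/v1beta1"
)

func main() {
	hv := &cs.HasVote{Height: 7, Round: 0, Index: 2}

	// Wrap embeds the concrete message in the Message oneof envelope.
	bz, err := proto.Marshal(hv.Wrap())
	if err != nil {
		panic(err)
	}

	// Receiving side: decode the envelope, then Unwrap the concrete variant.
	var msg cs.Message
	if err := msg.Unmarshal(bz); err != nil {
		panic(err)
	}
	inner, err := msg.Unwrap()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T height=%d\n", inner, inner.(*cs.HasVote).Height) // *v1beta1.HasVote height=7
}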
+type VoteSetMaj23 struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type v1beta1.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + BlockID v1beta1.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` +} + +func (m *VoteSetMaj23) Reset() { *m = VoteSetMaj23{} } +func (m *VoteSetMaj23) String() string { return proto.CompactTextString(m) } +func (*VoteSetMaj23) ProtoMessage() {} +func (*VoteSetMaj23) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{7} +} +func (m *VoteSetMaj23) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteSetMaj23) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteSetMaj23.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteSetMaj23) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteSetMaj23.Merge(m, src) +} +func (m *VoteSetMaj23) XXX_Size() int { + return m.Size() +} +func (m *VoteSetMaj23) XXX_DiscardUnknown() { + xxx_messageInfo_VoteSetMaj23.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteSetMaj23 proto.InternalMessageInfo + +func (m *VoteSetMaj23) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *VoteSetMaj23) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *VoteSetMaj23) GetType() v1beta1.SignedMsgType { + if m != nil { + return m.Type + } + return v1beta1.UnknownType +} + +func (m *VoteSetMaj23) GetBlockID() v1beta1.BlockID { + if m != nil { + return m.BlockID + } + return v1beta1.BlockID{} +} + +// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. 
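All of the Size() methods in this PR bottom out in the same varint-width helper (sovWal in wal.pb.go above, with equivalent helpers in each generated types file): (bits.Len64(x|1) + 6) / 7, i.e. ceil(bitlen/7), where x|1 forces zero to occupy one byte. A quick standalone check of that formula:

package main

import (
	"fmt"
	"math/bits"
)

// sov reproduces the generated varint-width helpers: a varint spends
// 7 payload bits per byte, and x|1 makes zero still take one byte.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(300)) // 1 1 2 2
}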
+type VoteSetBits struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type v1beta1.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + BlockID v1beta1.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Votes v1.BitArray `protobuf:"bytes,5,opt,name=votes,proto3" json:"votes"` +} + +func (m *VoteSetBits) Reset() { *m = VoteSetBits{} } +func (m *VoteSetBits) String() string { return proto.CompactTextString(m) } +func (*VoteSetBits) ProtoMessage() {} +func (*VoteSetBits) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{8} +} +func (m *VoteSetBits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteSetBits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteSetBits.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteSetBits) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteSetBits.Merge(m, src) +} +func (m *VoteSetBits) XXX_Size() int { + return m.Size() +} +func (m *VoteSetBits) XXX_DiscardUnknown() { + xxx_messageInfo_VoteSetBits.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteSetBits proto.InternalMessageInfo + +func (m *VoteSetBits) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *VoteSetBits) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *VoteSetBits) GetType() v1beta1.SignedMsgType { + if m != nil { + return m.Type + } + return v1beta1.UnknownType +} + +func (m *VoteSetBits) GetBlockID() v1beta1.BlockID { + if m != nil { + return m.BlockID + } + return v1beta1.BlockID{} +} + +func (m *VoteSetBits) GetVotes() v1.BitArray { + if m != nil { + return m.Votes + } + return v1.BitArray{} +} + +// Message is an abstract consensus message. +type Message struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Sum: + // *Message_NewRoundStep + // *Message_NewValidBlock + // *Message_Proposal + // *Message_ProposalPol + // *Message_BlockPart + // *Message_Vote + // *Message_HasVote + // *Message_VoteSetMaj23 + // *Message_VoteSetBits + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_68132c3a7139c33d, []int{9} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_NewRoundStep struct { + NewRoundStep *NewRoundStep `protobuf:"bytes,1,opt,name=new_round_step,json=newRoundStep,proto3,oneof" json:"new_round_step,omitempty"` +} +type Message_NewValidBlock struct { + NewValidBlock *NewValidBlock `protobuf:"bytes,2,opt,name=new_valid_block,json=newValidBlock,proto3,oneof" json:"new_valid_block,omitempty"` +} +type Message_Proposal struct { + Proposal *Proposal `protobuf:"bytes,3,opt,name=proposal,proto3,oneof" json:"proposal,omitempty"` +} +type Message_ProposalPol struct { + ProposalPol *ProposalPOL `protobuf:"bytes,4,opt,name=proposal_pol,json=proposalPol,proto3,oneof" json:"proposal_pol,omitempty"` +} +type Message_BlockPart struct { + BlockPart *BlockPart `protobuf:"bytes,5,opt,name=block_part,json=blockPart,proto3,oneof" json:"block_part,omitempty"` +} +type Message_Vote struct { + Vote *Vote `protobuf:"bytes,6,opt,name=vote,proto3,oneof" json:"vote,omitempty"` +} +type Message_HasVote struct { + HasVote *HasVote `protobuf:"bytes,7,opt,name=has_vote,json=hasVote,proto3,oneof" json:"has_vote,omitempty"` +} +type Message_VoteSetMaj23 struct { + VoteSetMaj23 *VoteSetMaj23 `protobuf:"bytes,8,opt,name=vote_set_maj23,json=voteSetMaj23,proto3,oneof" json:"vote_set_maj23,omitempty"` +} +type Message_VoteSetBits struct { + VoteSetBits *VoteSetBits `protobuf:"bytes,9,opt,name=vote_set_bits,json=voteSetBits,proto3,oneof" json:"vote_set_bits,omitempty"` +} + +func (*Message_NewRoundStep) isMessage_Sum() {} +func (*Message_NewValidBlock) isMessage_Sum() {} +func (*Message_Proposal) isMessage_Sum() {} +func (*Message_ProposalPol) isMessage_Sum() {} +func (*Message_BlockPart) isMessage_Sum() {} +func (*Message_Vote) isMessage_Sum() {} +func (*Message_HasVote) isMessage_Sum() {} +func (*Message_VoteSetMaj23) isMessage_Sum() {} +func (*Message_VoteSetBits) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetNewRoundStep() *NewRoundStep { + if x, ok := m.GetSum().(*Message_NewRoundStep); ok { + return x.NewRoundStep + } + return nil +} + +func (m *Message) GetNewValidBlock() *NewValidBlock { + if x, ok := m.GetSum().(*Message_NewValidBlock); 
ok { + return x.NewValidBlock + } + return nil +} + +func (m *Message) GetProposal() *Proposal { + if x, ok := m.GetSum().(*Message_Proposal); ok { + return x.Proposal + } + return nil +} + +func (m *Message) GetProposalPol() *ProposalPOL { + if x, ok := m.GetSum().(*Message_ProposalPol); ok { + return x.ProposalPol + } + return nil +} + +func (m *Message) GetBlockPart() *BlockPart { + if x, ok := m.GetSum().(*Message_BlockPart); ok { + return x.BlockPart + } + return nil +} + +func (m *Message) GetVote() *Vote { + if x, ok := m.GetSum().(*Message_Vote); ok { + return x.Vote + } + return nil +} + +func (m *Message) GetHasVote() *HasVote { + if x, ok := m.GetSum().(*Message_HasVote); ok { + return x.HasVote + } + return nil +} + +func (m *Message) GetVoteSetMaj23() *VoteSetMaj23 { + if x, ok := m.GetSum().(*Message_VoteSetMaj23); ok { + return x.VoteSetMaj23 + } + return nil +} + +func (m *Message) GetVoteSetBits() *VoteSetBits { + if x, ok := m.GetSum().(*Message_VoteSetBits); ok { + return x.VoteSetBits + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_NewRoundStep)(nil), + (*Message_NewValidBlock)(nil), + (*Message_Proposal)(nil), + (*Message_ProposalPol)(nil), + (*Message_BlockPart)(nil), + (*Message_Vote)(nil), + (*Message_HasVote)(nil), + (*Message_VoteSetMaj23)(nil), + (*Message_VoteSetBits)(nil), + } +} + +func init() { + proto.RegisterType((*NewRoundStep)(nil), "cometbft.consensus.v1beta1.NewRoundStep") + proto.RegisterType((*NewValidBlock)(nil), "cometbft.consensus.v1beta1.NewValidBlock") + proto.RegisterType((*Proposal)(nil), "cometbft.consensus.v1beta1.Proposal") + proto.RegisterType((*ProposalPOL)(nil), "cometbft.consensus.v1beta1.ProposalPOL") + proto.RegisterType((*BlockPart)(nil), "cometbft.consensus.v1beta1.BlockPart") + proto.RegisterType((*Vote)(nil), "cometbft.consensus.v1beta1.Vote") + proto.RegisterType((*HasVote)(nil), "cometbft.consensus.v1beta1.HasVote") + proto.RegisterType((*VoteSetMaj23)(nil), "cometbft.consensus.v1beta1.VoteSetMaj23") + proto.RegisterType((*VoteSetBits)(nil), "cometbft.consensus.v1beta1.VoteSetBits") + proto.RegisterType((*Message)(nil), "cometbft.consensus.v1beta1.Message") +} + +func init() { + proto.RegisterFile("cometbft/consensus/v1beta1/types.proto", fileDescriptor_68132c3a7139c33d) +} + +var fileDescriptor_68132c3a7139c33d = []byte{ + // 872 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0xb7, 0xb7, 0x49, 0x93, 0x3c, 0x27, 0x2d, 0x8c, 0x76, 0x51, 0x14, 0x50, 0x1a, 0x0c, 0x0b, + 0x01, 0x21, 0x87, 0x66, 0x25, 0xfe, 0x68, 0x2f, 0x8b, 0x41, 0xc8, 0x2b, 0x9a, 0x6e, 0xe4, 0x94, + 0x1e, 0x38, 0x60, 0x39, 0xf1, 0x90, 0x0c, 0x24, 0x1e, 0xe3, 0x99, 0xa4, 0xf4, 0xc6, 0x91, 0x23, + 0x5f, 0x80, 0x6f, 0xc0, 0x95, 0x6f, 0xc0, 0xa1, 0xc7, 0x1e, 0x39, 0x55, 0x28, 0xfd, 0x1a, 0x1c, + 0xd0, 0x8c, 0x27, 0x8e, 0xd3, 0x92, 0xa4, 0xbd, 0x20, 0xed, 0x6d, 0x26, 0xf3, 0x7b, 0xbf, 0x79, + 0xf3, 0x7b, 0xef, 0xfd, 0x62, 0x78, 0x67, 0x40, 0x27, 0x98, 0xf7, 0xbf, 0xe3, 0xad, 0x01, 0x0d, + 0x19, 0x0e, 0xd9, 0x94, 0xb5, 0x66, 0x87, 0x7d, 0xcc, 0xfd, 0xc3, 0x16, 0x3f, 0x8f, 0x30, 0xb3, + 0xa2, 0x98, 0x72, 0x8a, 0x6a, 0x0b, 0x9c, 0x95, 0xe2, 0x2c, 0x85, 0xab, 0x3d, 0x1c, 0xd2, 0x21, + 0x95, 0xb0, 0x96, 0x58, 0x25, 0x11, 0x35, 0x33, 0x65, 0x96, 0x3c, 0xff, 0xc5, 0x5a, 0x7b, 0x33, + 0xc5, 0x8c, 0x49, 0x9f, 0xb5, 0xfa, 0x84, 0x0b, 0x5c, 0x16, 0x62, 
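// A minimal usage sketch (illustrative, not part of the generated file; the
// remaining gzipped descriptor bytes continue below) showing how the Message
// oneof defined above is typically constructed, round-tripped through the
// wire format, and dispatched. The import path assumes the api/cometbft
// layout introduced by this diff.
package main

import (
	"fmt"

	cs "github.com/cometbft/cometbft/api/cometbft/consensus/v1beta1"
)

func main() {
	// Wrap a concrete variant in the Message sum type.
	msg := &cs.Message{
		Sum: &cs.Message_HasVote{
			HasVote: &cs.HasVote{Height: 10, Round: 0, Index: 3},
		},
	}

	// Round-trip through the generated Marshal/Unmarshal methods.
	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded cs.Message
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}

	// Dispatch on the concrete variant with a type switch.
	switch sum := decoded.Sum.(type) {
	case *cs.Message_HasVote:
		fmt.Println("has_vote at height", sum.HasVote.Height)
	default:
		fmt.Println("other message type")
	}
}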
0xfe, 0xa1, 0x43, 0xf9, 0x18, + 0x9f, 0xb9, 0x74, 0x1a, 0x06, 0x3d, 0x8e, 0x23, 0xf4, 0x1a, 0xec, 0x8e, 0x30, 0x19, 0x8e, 0x78, + 0x55, 0x6f, 0xe8, 0xcd, 0x1d, 0x57, 0xed, 0xd0, 0x43, 0xc8, 0xc7, 0x02, 0x54, 0x7d, 0xd0, 0xd0, + 0x9b, 0x79, 0x37, 0xd9, 0x20, 0x04, 0x39, 0xc6, 0x71, 0x54, 0xdd, 0x69, 0xe8, 0xcd, 0x8a, 0x2b, + 0xd7, 0xe8, 0x63, 0xa8, 0x32, 0x3c, 0xa0, 0x61, 0xc0, 0x3c, 0x46, 0xc2, 0x01, 0xf6, 0x18, 0xf7, + 0x63, 0xee, 0x71, 0x32, 0xc1, 0xd5, 0x9c, 0xe4, 0x7c, 0xa4, 0xce, 0x7b, 0xe2, 0xb8, 0x27, 0x4e, + 0x4f, 0xc8, 0x04, 0xa3, 0xf7, 0xe1, 0xd5, 0xb1, 0xcf, 0xb8, 0x37, 0xa0, 0x93, 0x09, 0xe1, 0x5e, + 0x72, 0x5d, 0x5e, 0x5e, 0xb7, 0x2f, 0x0e, 0x3e, 0x97, 0xbf, 0xcb, 0x54, 0xcd, 0x7f, 0x74, 0xa8, + 0x1c, 0xe3, 0xb3, 0x53, 0x7f, 0x4c, 0x02, 0x7b, 0x4c, 0x07, 0x3f, 0xdc, 0x33, 0xf1, 0x6f, 0xe1, + 0x51, 0x5f, 0x84, 0x79, 0x91, 0xc8, 0x8d, 0x61, 0xee, 0x8d, 0xb0, 0x1f, 0xe0, 0x58, 0xbe, 0xc4, + 0x68, 0x3f, 0xb6, 0xd2, 0x82, 0x24, 0x6a, 0x29, 0x79, 0xad, 0xae, 0x1f, 0xf3, 0x1e, 0xe6, 0x8e, + 0x04, 0xdb, 0xb9, 0x8b, 0xab, 0x03, 0xcd, 0x45, 0x92, 0x69, 0xe5, 0x04, 0x3d, 0x03, 0x63, 0xc9, + 0xcf, 0xe4, 0xbb, 0x8d, 0xf6, 0xc1, 0x92, 0x55, 0x14, 0xc4, 0x12, 0x05, 0xb1, 0x66, 0x87, 0x96, + 0x4d, 0xf8, 0x67, 0x71, 0xec, 0x9f, 0xbb, 0x90, 0x32, 0x31, 0xf4, 0x3a, 0x94, 0x08, 0x53, 0x5a, + 0x48, 0x15, 0x8a, 0x6e, 0x91, 0xb0, 0x44, 0x03, 0xf3, 0x18, 0x8a, 0xdd, 0x98, 0x46, 0x94, 0xf9, + 0x63, 0x64, 0x43, 0x31, 0x52, 0x6b, 0xf9, 0x74, 0xa3, 0xdd, 0x58, 0x9b, 0xbd, 0xc2, 0xa9, 0xc4, + 0xd3, 0x38, 0xf3, 0x37, 0x1d, 0x8c, 0xc5, 0x61, 0xf7, 0xc5, 0xd1, 0x5a, 0x31, 0x3f, 0x00, 0xb4, + 0x88, 0xf1, 0x22, 0x3a, 0xf6, 0xb2, 0xca, 0xbe, 0xb2, 0x38, 0xe9, 0xd2, 0xb1, 0x2c, 0x12, 0x72, + 0xa0, 0x9c, 0x45, 0x2b, 0x6d, 0xb7, 0xa9, 0xa0, 0x92, 0x33, 0x32, 0x74, 0xe6, 0x8f, 0x50, 0xb2, + 0x17, 0xd2, 0xdc, 0xb3, 0xd2, 0x1f, 0x41, 0x4e, 0xd4, 0x40, 0x5d, 0xfe, 0xc6, 0xa6, 0xc2, 0xaa, + 0x9b, 0x25, 0xde, 0xfc, 0x04, 0x72, 0xa7, 0x94, 0x63, 0xf4, 0x21, 0xe4, 0x66, 0x94, 0x63, 0x25, + 0xed, 0xda, 0x78, 0x81, 0x75, 0x25, 0xd2, 0xfc, 0x45, 0x87, 0x82, 0xe3, 0x33, 0x19, 0x7d, 0xbf, + 0x5c, 0x3f, 0x85, 0x9c, 0x60, 0x95, 0xb9, 0xee, 0xad, 0x6f, 0xc2, 0x1e, 0x19, 0x86, 0x38, 0xe8, + 0xb0, 0xe1, 0xc9, 0x79, 0x84, 0x5d, 0x19, 0x22, 0x08, 0x49, 0x18, 0xe0, 0x9f, 0x64, 0xab, 0xe5, + 0xdd, 0x64, 0x63, 0xfe, 0xa9, 0x43, 0x59, 0xe4, 0xd1, 0xc3, 0xbc, 0xe3, 0x7f, 0xdf, 0x7e, 0xf2, + 0xff, 0xe5, 0xf3, 0x15, 0x14, 0x93, 0x01, 0x20, 0xc1, 0xed, 0xee, 0x5f, 0x0d, 0x97, 0x95, 0x7d, + 0xfe, 0x85, 0xbd, 0x2f, 0xd4, 0x9f, 0x5f, 0x1d, 0x14, 0xd4, 0x0f, 0x6e, 0x41, 0x32, 0x3c, 0x0f, + 0xcc, 0x9f, 0x1f, 0x80, 0xa1, 0x9e, 0x61, 0x13, 0xce, 0x5e, 0xce, 0x57, 0xa0, 0xa7, 0x90, 0x17, + 0xfd, 0xc1, 0xe4, 0x34, 0xdf, 0x79, 0x0e, 0x92, 0x18, 0xf3, 0xf7, 0x3c, 0x14, 0x3a, 0x98, 0x31, + 0x7f, 0x88, 0x51, 0x17, 0xf6, 0x42, 0x7c, 0x96, 0x0c, 0x9f, 0x27, 0xfd, 0x37, 0x69, 0xce, 0xa6, + 0xb5, 0xfe, 0x6f, 0xc4, 0xca, 0xba, 0xbc, 0xa3, 0xb9, 0xe5, 0x30, 0xeb, 0xfa, 0x3d, 0xd8, 0x17, + 0x8c, 0x33, 0x61, 0xa7, 0x9e, 0xcc, 0x57, 0x4a, 0x68, 0xb4, 0xdf, 0xdb, 0x42, 0xb9, 0x34, 0x60, + 0x47, 0x73, 0x2b, 0xe1, 0x8a, 0x23, 0x67, 0x8d, 0x29, 0x99, 0xbe, 0xb7, 0x37, 0xb1, 0x2d, 0xfc, + 0xc7, 0xc9, 0x18, 0x13, 0x3a, 0xba, 0x61, 0x21, 0x49, 0x11, 0xde, 0xbd, 0x0b, 0x4f, 0xf7, 0xc5, + 0x91, 0xb3, 0x6a, 0x23, 0xe8, 0x4b, 0x80, 0xa5, 0x2b, 0xab, 0x32, 0x3c, 0xde, 0xc4, 0x95, 0x9a, + 0x8e, 0xa3, 0xb9, 0xa5, 0xd4, 0x9c, 0x85, 0xa7, 0x48, 0x4f, 0xd8, 0xbd, 0x69, 0xb7, 0xb7, 0x19, + 0x44, 0xdb, 0x3a, 0x5a, 0xe2, 0x0c, 0xe8, 0x19, 0x14, 0x47, 0x3e, 0xf3, 0x64, 0x6c, 0x41, 
0xc6, + 0xbe, 0xb5, 0x29, 0x56, 0x99, 0x88, 0xa3, 0xb9, 0x85, 0x91, 0xf2, 0x93, 0x2e, 0xec, 0x89, 0x68, + 0xf9, 0x8f, 0x35, 0x11, 0x13, 0x5d, 0x2d, 0x6e, 0x2f, 0x7d, 0xd6, 0x01, 0x44, 0xe9, 0x67, 0x59, + 0x47, 0xe8, 0x40, 0x25, 0x65, 0x14, 0x2d, 0x58, 0x2d, 0x6d, 0x97, 0x38, 0x33, 0x8b, 0x42, 0xe2, + 0xd9, 0x72, 0x6b, 0xe7, 0x61, 0x87, 0x4d, 0x27, 0xf6, 0xd7, 0x17, 0xf3, 0xba, 0x7e, 0x39, 0xaf, + 0xeb, 0x7f, 0xcf, 0xeb, 0xfa, 0xaf, 0xd7, 0x75, 0xed, 0xf2, 0xba, 0xae, 0xfd, 0x75, 0x5d, 0xd7, + 0xbe, 0x79, 0x3a, 0x24, 0x7c, 0x34, 0xed, 0x0b, 0xfa, 0x56, 0xe6, 0xeb, 0x48, 0x2d, 0xfc, 0x88, + 0xb4, 0xd6, 0x7f, 0x33, 0xf5, 0x77, 0xe5, 0x57, 0xcb, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, + 0xff, 0xea, 0x7b, 0x56, 0x58, 0x09, 0x00, 0x00, +} + +func (m *NewRoundStep) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NewRoundStep) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NewRoundStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastCommitRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastCommitRound)) + i-- + dAtA[i] = 0x28 + } + if m.SecondsSinceStartTime != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SecondsSinceStartTime)) + i-- + dAtA[i] = 0x20 + } + if m.Step != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NewValidBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NewValidBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NewValidBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsCommit { + i-- + if m.IsCommit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.BlockParts != nil { + { + size, err := m.BlockParts.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.BlockPartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Proposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProposalPOL) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProposalPOL) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProposalPOL) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProposalPol.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.ProposalPolRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ProposalPolRound)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlockPart) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockPart) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockPart) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Part.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Vote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HasVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HasVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HasVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x20 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VoteSetMaj23) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteSetMaj23) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VoteSetBits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteSetBits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteSetBits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Votes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_NewRoundStep) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_NewRoundStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NewRoundStep != nil { + { + size, err := m.NewRoundStep.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_NewValidBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_NewValidBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NewValidBlock != nil { + { + size, err := m.NewValidBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Proposal != nil { + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_ProposalPol) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ProposalPol) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProposalPol != nil { + { + size, err := m.ProposalPol.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_BlockPart) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockPart) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BlockPart != nil { + { + size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Message_Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Message_HasVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_HasVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HasVote != nil { + { + size, err := m.HasVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Message_VoteSetMaj23) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VoteSetMaj23 != nil { + { + size, err := m.VoteSetMaj23.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Message_VoteSetBits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_VoteSetBits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VoteSetBits != nil { + { + size, err := m.VoteSetBits.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset 
-= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *NewRoundStep) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Step != 0 { + n += 1 + sovTypes(uint64(m.Step)) + } + if m.SecondsSinceStartTime != 0 { + n += 1 + sovTypes(uint64(m.SecondsSinceStartTime)) + } + if m.LastCommitRound != 0 { + n += 1 + sovTypes(uint64(m.LastCommitRound)) + } + return n +} + +func (m *NewValidBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockPartSetHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockParts != nil { + l = m.BlockParts.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.IsCommit { + n += 2 + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *ProposalPOL) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.ProposalPolRound != 0 { + n += 1 + sovTypes(uint64(m.ProposalPolRound)) + } + l = m.ProposalPol.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *BlockPart) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.Part.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *HasVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *VoteSetMaj23) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *VoteSetBits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.Votes.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_NewRoundStep) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewRoundStep != nil { + l = m.NewRoundStep.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_NewValidBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.NewValidBlock != nil { + l = m.NewValidBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proposal != nil { + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_ProposalPol) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProposalPol != nil { + l = m.ProposalPol.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_BlockPart) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockPart != nil { + l = m.BlockPart.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_HasVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HasVote != nil { + l = m.HasVote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_VoteSetMaj23) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteSetMaj23 != nil { + l = m.VoteSetMaj23.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_VoteSetBits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteSetBits != nil { + l = m.VoteSetBits.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *NewRoundStep) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NewRoundStep: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NewRoundStep: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SecondsSinceStartTime", wireType) + } + m.SecondsSinceStartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsSinceStartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitRound", wireType) + } + m.LastCommitRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastCommitRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NewValidBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NewValidBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NewValidBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockPartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockParts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockParts == nil { + m.BlockParts = &v1.BitArray{} + } + if err := m.BlockParts.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsCommit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsCommit = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProposalPOL) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProposalPOL: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProposalPOL: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposalPolRound", wireType) + } + m.ProposalPolRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProposalPolRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposalPol", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProposalPol.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockPart) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockPart: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockPart: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Part", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Part.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &v1beta1.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HasVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HasVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HasVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } 
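// A minimal, self-contained sketch (illustrative, not part of the generated
// file) of the varint decoding pattern the Unmarshal methods here repeat:
// accumulate 7 bits per byte until a byte with the high bit clear, then split
// a field key into field number and wire type via `wire >> 3` and `wire & 0x7`.
package main

import "fmt"

// decodeVarint reads one protobuf varint, returning the value and the number
// of bytes consumed (0, 0 on truncated or oversized input).
func decodeVarint(data []byte) (v uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0 // truncated input
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n
		}
	}
	return 0, 0 // varint longer than 10 bytes: overflow
}

func main() {
	// 0x20 is the key (4<<3)|0 — field 4, wire type 0 (varint) — the same tag
	// byte HasVote's marshaller writes for its Index field above.
	// 0x96 0x01 is the varint encoding of 150.
	data := []byte{0x20, 0x96, 0x01}

	key, n := decodeVarint(data)
	fieldNum, wireType := key>>3, key&0x7
	val, _ := decodeVarint(data[n:])
	fmt.Printf("field %d, wire type %d, value %d\n", fieldNum, wireType, val)
	// Output: field 4, wire type 0, value 150
}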
+ } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= v1beta1.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteSetMaj23: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteSetMaj23: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= v1beta1.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteSetBits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteSetBits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteSetBits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= v1beta1.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Votes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewRoundStep", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NewRoundStep{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_NewRoundStep{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewValidBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NewValidBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_NewValidBlock{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Proposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_Proposal{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposalPol", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + v := &ProposalPOL{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ProposalPol{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BlockPart{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_BlockPart{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Vote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_Vote{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HasVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &HasVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_HasVote{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteSetMaj23", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &VoteSetMaj23{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_VoteSetMaj23{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteSetBits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + v := &VoteSetBits{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_VoteSetBits{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/consensus/wal.pb.go b/api/cometbft/consensus/v1beta1/wal.pb.go similarity index 87% rename from proto/tendermint/consensus/wal.pb.go rename to api/cometbft/consensus/v1beta1/wal.pb.go index f870835ef8b..e0c3fb0d8f1 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/api/cometbft/consensus/v1beta1/wal.pb.go @@ -1,11 +1,11 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
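// The generated Unmarshal and skipTypes loops above all decode protobuf
// base-128 varints: each byte carries 7 payload bits, and a clear high bit
// (b < 0x80) marks the final byte. A minimal standalone sketch of that loop
// (illustrative only, not part of this change; decodeVarint is a hypothetical
// helper name):
//
//	package main
//
//	import (
//		"fmt"
//		"io"
//	)
//
//	func decodeVarint(buf []byte) (x uint64, n int, err error) {
//		for shift := uint(0); ; shift += 7 {
//			if shift >= 64 {
//				// Same overflow condition the generated code reports
//				// as ErrIntOverflowTypes.
//				return 0, 0, fmt.Errorf("proto: integer overflow")
//			}
//			if n >= len(buf) {
//				return 0, 0, io.ErrUnexpectedEOF
//			}
//			b := buf[n]
//			n++
//			x |= uint64(b&0x7F) << shift
//			if b < 0x80 { // high bit clear: last byte of the varint
//				return x, n, nil
//			}
//		}
//	}
//
//	func main() {
//		v, n, err := decodeVarint([]byte{0xAC, 0x02})
//		fmt.Println(v, n, err) // 300 2 <nil>
//	}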
-// source: tendermint/consensus/wal.proto +// source: cometbft/consensus/v1beta1/wal.proto -package consensus +package v1beta1 import ( fmt "fmt" - types "github.com/cometbft/cometbft/proto/tendermint/types" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" _ "github.com/cosmos/gogoproto/types" @@ -39,7 +39,7 @@ func (m *MsgInfo) Reset() { *m = MsgInfo{} } func (m *MsgInfo) String() string { return proto.CompactTextString(m) } func (*MsgInfo) ProtoMessage() {} func (*MsgInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ed0b60c2d348ab09, []int{0} + return fileDescriptor_2778d8be83f1e994, []int{0} } func (m *MsgInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -94,7 +94,7 @@ func (m *TimeoutInfo) Reset() { *m = TimeoutInfo{} } func (m *TimeoutInfo) String() string { return proto.CompactTextString(m) } func (*TimeoutInfo) ProtoMessage() {} func (*TimeoutInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ed0b60c2d348ab09, []int{1} + return fileDescriptor_2778d8be83f1e994, []int{1} } func (m *TimeoutInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ func (m *EndHeight) Reset() { *m = EndHeight{} } func (m *EndHeight) String() string { return proto.CompactTextString(m) } func (*EndHeight) ProtoMessage() {} func (*EndHeight) Descriptor() ([]byte, []int) { - return fileDescriptor_ed0b60c2d348ab09, []int{2} + return fileDescriptor_2778d8be83f1e994, []int{2} } func (m *EndHeight) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -197,7 +197,10 @@ func (m *EndHeight) GetHeight() int64 { return 0 } +// WALMessage describes a consensus WAL (Write Ahead Log) entry. type WALMessage struct { + // Sum of all possible messages. 
+ // // Types that are valid to be assigned to Sum: // *WALMessage_EventDataRoundState // *WALMessage_MsgInfo @@ -210,7 +213,7 @@ func (m *WALMessage) Reset() { *m = WALMessage{} } func (m *WALMessage) String() string { return proto.CompactTextString(m) } func (*WALMessage) ProtoMessage() {} func (*WALMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_ed0b60c2d348ab09, []int{3} + return fileDescriptor_2778d8be83f1e994, []int{3} } func (m *WALMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +249,7 @@ type isWALMessage_Sum interface { } type WALMessage_EventDataRoundState struct { - EventDataRoundState *types.EventDataRoundState `protobuf:"bytes,1,opt,name=event_data_round_state,json=eventDataRoundState,proto3,oneof" json:"event_data_round_state,omitempty"` + EventDataRoundState *v1beta1.EventDataRoundState `protobuf:"bytes,1,opt,name=event_data_round_state,json=eventDataRoundState,proto3,oneof" json:"event_data_round_state,omitempty"` } type WALMessage_MsgInfo struct { MsgInfo *MsgInfo `protobuf:"bytes,2,opt,name=msg_info,json=msgInfo,proto3,oneof" json:"msg_info,omitempty"` @@ -270,7 +273,7 @@ func (m *WALMessage) GetSum() isWALMessage_Sum { return nil } -func (m *WALMessage) GetEventDataRoundState() *types.EventDataRoundState { +func (m *WALMessage) GetEventDataRoundState() *v1beta1.EventDataRoundState { if x, ok := m.GetSum().(*WALMessage_EventDataRoundState); ok { return x.EventDataRoundState } @@ -318,7 +321,7 @@ func (m *TimedWALMessage) Reset() { *m = TimedWALMessage{} } func (m *TimedWALMessage) String() string { return proto.CompactTextString(m) } func (*TimedWALMessage) ProtoMessage() {} func (*TimedWALMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_ed0b60c2d348ab09, []int{4} + return fileDescriptor_2778d8be83f1e994, []int{4} } func (m *TimedWALMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -362,51 +365,54 @@ func (m *TimedWALMessage) GetMsg() *WALMessage { } func init() { - proto.RegisterType((*MsgInfo)(nil), "tendermint.consensus.MsgInfo") - proto.RegisterType((*TimeoutInfo)(nil), "tendermint.consensus.TimeoutInfo") - proto.RegisterType((*EndHeight)(nil), "tendermint.consensus.EndHeight") - proto.RegisterType((*WALMessage)(nil), "tendermint.consensus.WALMessage") - proto.RegisterType((*TimedWALMessage)(nil), "tendermint.consensus.TimedWALMessage") -} - -func init() { proto.RegisterFile("tendermint/consensus/wal.proto", fileDescriptor_ed0b60c2d348ab09) } - -var fileDescriptor_ed0b60c2d348ab09 = []byte{ - // 543 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x53, 0xdf, 0x8a, 0xd3, 0x4e, - 0x14, 0xce, 0x6c, 0xff, 0x9f, 0xfe, 0x7e, 0x08, 0xb1, 0x2c, 0xb5, 0xb0, 0x69, 0xec, 0x22, 0xf4, - 0x2a, 0x81, 0x15, 0x51, 0xbc, 0x51, 0x4b, 0x57, 0x5a, 0x70, 0x41, 0xc7, 0x05, 0x41, 0x84, 0x90, - 0x36, 0xa7, 0x69, 0x60, 0x33, 0x53, 0x32, 0x13, 0xc5, 0x2b, 0x5f, 0xa1, 0x97, 0xbe, 0x89, 0xaf, - 0xb0, 0x97, 0x7b, 0xe9, 0xd5, 0x2a, 0xed, 0x8b, 0x48, 0x66, 0xd2, 0x36, 0xb8, 0xf1, 0x6e, 0xce, - 0x9c, 0xef, 0x9c, 0xef, 0x9c, 0xef, 0x9b, 0x01, 0x4b, 0x22, 0x0b, 0x30, 0x89, 0x23, 0x26, 0xdd, - 0x39, 0x67, 0x02, 0x99, 0x48, 0x85, 0xfb, 0xc5, 0xbf, 0x72, 0x56, 0x09, 0x97, 0xdc, 0xec, 0x1c, - 0xf2, 0xce, 0x3e, 0xdf, 0xeb, 0x84, 0x3c, 0xe4, 0x0a, 0xe0, 0x66, 0x27, 0x8d, 0xed, 0xd9, 0xa5, - 0xbd, 0xe4, 0xd7, 0x15, 0x8a, 0x1c, 0x71, 0x52, 0x40, 0xa8, 0x7b, 0x17, 0x3f, 0x23, 0x93, 0xbb, - 0xb4, 0x15, 0x72, 0x1e, 0x5e, 0xa1, 0xab, 0xa2, 0x59, 0xba, 0x70, 0x83, 0x34, 0xf1, 0x65, 
0xc4, - 0x59, 0x9e, 0xef, 0xff, 0x9d, 0x97, 0x51, 0x8c, 0x42, 0xfa, 0xf1, 0x4a, 0x03, 0x06, 0x08, 0x8d, - 0x0b, 0x11, 0x4e, 0xd9, 0x82, 0x9b, 0x4f, 0xa0, 0x12, 0x8b, 0xb0, 0x4b, 0x6c, 0x32, 0x6c, 0x9f, - 0x9d, 0x38, 0x65, 0x6b, 0x38, 0x17, 0x28, 0x84, 0x1f, 0xe2, 0xa8, 0x7a, 0x7d, 0xdb, 0x37, 0x68, - 0x86, 0x37, 0x4f, 0xa1, 0xb1, 0x42, 0x4c, 0xbc, 0x28, 0xe8, 0x1e, 0xd9, 0x64, 0xd8, 0x1a, 0xc1, - 0xe6, 0xb6, 0x5f, 0x7f, 0x8b, 0x98, 0x4c, 0xc7, 0xb4, 0x9e, 0xa5, 0xa6, 0xc1, 0x60, 0x4d, 0xa0, - 0x7d, 0x19, 0xc5, 0xc8, 0x53, 0xa9, 0xb8, 0x5e, 0x40, 0x73, 0x37, 0x69, 0x4e, 0xf8, 0xc0, 0xd1, - 0xa3, 0x3a, 0xbb, 0x51, 0x9d, 0x71, 0x0e, 0x18, 0x35, 0x33, 0xb2, 0xef, 0xbf, 0xfa, 0x84, 0xee, - 0x8b, 0xcc, 0x63, 0xa8, 0x2f, 0x31, 0x0a, 0x97, 0x52, 0x91, 0x56, 0x68, 0x1e, 0x99, 0x1d, 0xa8, - 0x25, 0x3c, 0x65, 0x41, 0xb7, 0x62, 0x93, 0x61, 0x8d, 0xea, 0xc0, 0x34, 0xa1, 0x2a, 0x24, 0xae, - 0xba, 0x55, 0x9b, 0x0c, 0xff, 0xa7, 0xea, 0x3c, 0x38, 0x85, 0xd6, 0x39, 0x0b, 0x26, 0xba, 0xec, - 0xd0, 0x8e, 0x14, 0xdb, 0x0d, 0x7e, 0x1c, 0x01, 0x7c, 0x78, 0xf5, 0x26, 0x5f, 0xdb, 0xfc, 0x04, - 0xc7, 0x4a, 0x7e, 0x2f, 0xf0, 0xa5, 0xef, 0xa9, 0xde, 0x9e, 0x90, 0xbe, 0xc4, 0x7c, 0x89, 0x47, - 0x45, 0xd5, 0xb4, 0x8d, 0xe7, 0x19, 0x7e, 0xec, 0x4b, 0x9f, 0x66, 0xe8, 0xf7, 0x19, 0x78, 0x62, - 0xd0, 0xfb, 0x78, 0xf7, 0xda, 0x7c, 0x0e, 0xcd, 0x58, 0x84, 0x5e, 0xc4, 0x16, 0x5c, 0x6d, 0xf5, - 0x6f, 0x17, 0xb4, 0x63, 0x13, 0x83, 0x36, 0xe2, 0xdc, 0xbc, 0xd7, 0xf0, 0x9f, 0xd4, 0xfa, 0xea, - 0xfa, 0x8a, 0xaa, 0x7f, 0x58, 0x5e, 0x5f, 0x70, 0x62, 0x62, 0xd0, 0xb6, 0x2c, 0x18, 0xf3, 0x12, - 0x00, 0x59, 0xe0, 0xe5, 0x62, 0x54, 0x55, 0x97, 0x7e, 0x79, 0x97, 0xbd, 0x7a, 0x13, 0x83, 0xb6, - 0x70, 0x17, 0x8c, 0x6a, 0x50, 0x11, 0x69, 0x3c, 0xf8, 0x06, 0xf7, 0x32, 0x9a, 0xa0, 0xa0, 0xde, - 0x33, 0xa8, 0x66, 0x54, 0xb9, 0x56, 0xbd, 0x3b, 0x86, 0x5f, 0xee, 0xde, 0xa6, 0x76, 0x7c, 0x9d, - 0x39, 0xae, 0x2a, 0xcc, 0x33, 0xfd, 0x34, 0xb5, 0x28, 0x76, 0xf9, 0x38, 0x07, 0x22, 0xf5, 0x2e, - 0x47, 0xef, 0xae, 0x37, 0x16, 0xb9, 0xd9, 0x58, 0xe4, 0xf7, 0xc6, 0x22, 0xeb, 0xad, 0x65, 0xdc, - 0x6c, 0x2d, 0xe3, 0xe7, 0xd6, 0x32, 0x3e, 0x3e, 0x0d, 0x23, 0xb9, 0x4c, 0x67, 0xce, 0x9c, 0xc7, - 0xee, 0x9c, 0xc7, 0x28, 0x67, 0x0b, 0x79, 0x38, 0xe8, 0x4f, 0x5a, 0xf6, 0x31, 0x67, 0x75, 0x95, - 0x7b, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x81, 0x69, 0x90, 0x03, 0x04, 0x00, 0x00, + proto.RegisterType((*MsgInfo)(nil), "cometbft.consensus.v1beta1.MsgInfo") + proto.RegisterType((*TimeoutInfo)(nil), "cometbft.consensus.v1beta1.TimeoutInfo") + proto.RegisterType((*EndHeight)(nil), "cometbft.consensus.v1beta1.EndHeight") + proto.RegisterType((*WALMessage)(nil), "cometbft.consensus.v1beta1.WALMessage") + proto.RegisterType((*TimedWALMessage)(nil), "cometbft.consensus.v1beta1.TimedWALMessage") +} + +func init() { + proto.RegisterFile("cometbft/consensus/v1beta1/wal.proto", fileDescriptor_2778d8be83f1e994) +} + +var fileDescriptor_2778d8be83f1e994 = []byte{ + // 554 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xde, 0x69, 0xd2, 0xfc, 0x98, 0x28, 0xc2, 0x5a, 0x4a, 0xcc, 0x61, 0x13, 0x12, 0xad, 0x01, + 0x61, 0x97, 0xea, 0xa5, 0xd0, 0x83, 0x1a, 0x52, 0x49, 0xa0, 0x05, 0x19, 0x2b, 0x82, 0x97, 0x65, + 0xb6, 0xfb, 0xb2, 0x59, 0xec, 0xee, 0x2c, 0x99, 0xd9, 0x8a, 0x7f, 0x80, 0xf7, 0x1c, 0xfd, 0x93, + 0x7a, 0xb3, 0x47, 0x4f, 0x55, 0x92, 0x7f, 0x44, 0xe6, 0xc7, 0x26, 0xc1, 0xd2, 0x78, 0x9b, 0x37, + 0xf3, 0x7d, 0xdf, 0x7b, 0xef, 0x7b, 0x6f, 0xf0, 0xd3, 0x0b, 0x96, 0x80, 0x08, 
0x26, 0xc2, 0xbb, + 0x60, 0x29, 0x87, 0x94, 0xe7, 0xdc, 0xbb, 0x3a, 0x0c, 0x40, 0xd0, 0x43, 0xef, 0x2b, 0xbd, 0x74, + 0xb3, 0x19, 0x13, 0xcc, 0x6e, 0x15, 0x28, 0x77, 0x85, 0x72, 0x0d, 0xaa, 0xb5, 0x17, 0xb1, 0x88, + 0x29, 0x98, 0x27, 0x4f, 0x9a, 0xd1, 0x3a, 0xd8, 0xa2, 0x2b, 0xbe, 0x65, 0xc0, 0x0d, 0xae, 0xb7, + 0xc2, 0xa9, 0xdb, 0x15, 0x06, 0xae, 0x20, 0x15, 0x05, 0xc8, 0x89, 0x18, 0x8b, 0x2e, 0xc1, 0x53, + 0x51, 0x90, 0x4f, 0xbc, 0x30, 0x9f, 0x51, 0x11, 0xb3, 0xd4, 0xbc, 0xb7, 0xff, 0x7d, 0x17, 0x71, + 0x02, 0x5c, 0xd0, 0x24, 0xd3, 0x80, 0xee, 0x17, 0x5c, 0x3d, 0xe3, 0xd1, 0x38, 0x9d, 0x30, 0xfb, + 0x18, 0x97, 0x12, 0x1e, 0x35, 0x51, 0x07, 0xf5, 0x1b, 0x2f, 0x7b, 0xee, 0xfd, 0x8d, 0xb9, 0x67, + 0xc0, 0x39, 0x8d, 0x60, 0x50, 0xbe, 0xbe, 0x6d, 0x5b, 0x44, 0xb2, 0xec, 0x1e, 0xae, 0x66, 0x00, + 0x33, 0x3f, 0x0e, 0x9b, 0x3b, 0x1d, 0xd4, 0xaf, 0x0f, 0xf0, 0xe2, 0xb6, 0x5d, 0x79, 0x0f, 0x30, + 0x1b, 0x0f, 0x49, 0x45, 0x3e, 0x8d, 0xc3, 0xee, 0x1c, 0xe1, 0xc6, 0x79, 0x9c, 0x00, 0xcb, 0x85, + 0xca, 0xf8, 0x1a, 0xd7, 0x8a, 0x7a, 0x4d, 0xda, 0x27, 0xae, 0x2e, 0xd8, 0x2d, 0x0a, 0x76, 0x87, + 0x06, 0x30, 0xa8, 0xc9, 0x64, 0x3f, 0x7e, 0xb7, 0x11, 0x59, 0x91, 0xec, 0x7d, 0x5c, 0x99, 0x42, + 0x1c, 0x4d, 0x85, 0x4a, 0x5a, 0x22, 0x26, 0xb2, 0xf7, 0xf0, 0xee, 0x8c, 0xe5, 0x69, 0xd8, 0x2c, + 0x75, 0x50, 0x7f, 0x97, 0xe8, 0xc0, 0xb6, 0x71, 0x99, 0x0b, 0xc8, 0x9a, 0xe5, 0x0e, 0xea, 0x3f, + 0x24, 0xea, 0xdc, 0xed, 0xe1, 0xfa, 0x49, 0x1a, 0x8e, 0x34, 0x6d, 0x2d, 0x87, 0x36, 0xe5, 0xba, + 0x3f, 0x77, 0x30, 0xfe, 0xf4, 0xf6, 0xd4, 0xb4, 0x6d, 0x07, 0x78, 0x5f, 0x0d, 0xc1, 0x0f, 0xa9, + 0xa0, 0xbe, 0xd2, 0xf6, 0xb9, 0xa0, 0x02, 0x4c, 0x13, 0x2f, 0xd6, 0xde, 0xe9, 0x81, 0x16, 0xbe, + 0x9d, 0x48, 0xd6, 0x90, 0x0a, 0x4a, 0x24, 0xe7, 0x83, 0xa4, 0x8c, 0x2c, 0xf2, 0x18, 0xee, 0x5e, + 0xdb, 0x6f, 0x70, 0x2d, 0xe1, 0x91, 0x1f, 0xa7, 0x13, 0xa6, 0x7a, 0xfb, 0xdf, 0x44, 0xf4, 0x0c, + 0x47, 0x16, 0xa9, 0x26, 0x66, 0x9c, 0xa7, 0xf8, 0x81, 0xd0, 0x5e, 0x6b, 0x95, 0x92, 0x52, 0x79, + 0xbe, 0x4d, 0x65, 0x63, 0x36, 0x23, 0x8b, 0x34, 0xc4, 0xc6, 0xa8, 0xde, 0x61, 0x0c, 0x69, 0xe8, + 0x1b, 0x7b, 0xca, 0x4a, 0xeb, 0xd9, 0x36, 0xad, 0x95, 0xab, 0x23, 0x8b, 0xd4, 0xa1, 0x08, 0x06, + 0xbb, 0xb8, 0xc4, 0xf3, 0xa4, 0xfb, 0x1d, 0xe1, 0x47, 0x32, 0x5b, 0xb8, 0x61, 0xeb, 0x11, 0x2e, + 0xcb, 0x8c, 0xc6, 0xc4, 0xd6, 0x9d, 0x4d, 0x38, 0x2f, 0x56, 0x57, 0xaf, 0xc2, 0x5c, 0xae, 0x82, + 0x62, 0xd8, 0x47, 0x7a, 0x73, 0xb5, 0x4f, 0x07, 0xdb, 0xaa, 0x5a, 0xa7, 0x53, 0x6b, 0x3b, 0xf8, + 0x78, 0xbd, 0x70, 0xd0, 0xcd, 0xc2, 0x41, 0x7f, 0x16, 0x0e, 0x9a, 0x2f, 0x1d, 0xeb, 0x66, 0xe9, + 0x58, 0xbf, 0x96, 0x8e, 0xf5, 0xf9, 0x38, 0x8a, 0xc5, 0x34, 0x0f, 0xa4, 0x98, 0xb7, 0xf1, 0x63, + 0xcd, 0x81, 0x66, 0xb1, 0x77, 0xff, 0x3f, 0x0e, 0x2a, 0xaa, 0xe8, 0x57, 0x7f, 0x03, 0x00, 0x00, + 0xff, 0xff, 0xe5, 0x41, 0xcc, 0x52, 0x44, 0x04, 0x00, 0x00, } func (m *MsgInfo) Marshal() (dAtA []byte, err error) { @@ -1203,7 +1209,7 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &types.EventDataRoundState{} + v := &v1beta1.EventDataRoundState{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/proto/tendermint/crypto/keys.pb.go b/api/cometbft/crypto/v1/keys.pb.go similarity index 74% rename from proto/tendermint/crypto/keys.pb.go rename to api/cometbft/crypto/v1/keys.pb.go index 0edb2269f53..2564ca993ef 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/api/cometbft/crypto/v1/keys.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/crypto/keys.proto +// source: cometbft/crypto/v1/keys.proto -package crypto +package v1 import ( bytes "bytes" @@ -24,12 +24,15 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// PublicKey defines the keys available for use with Validators +// PublicKey is an ED25519, secp256k1, or BLS12-381 public key. type PublicKey struct { + // The type of key. + // // Types that are valid to be assigned to Sum: // // *PublicKey_Ed25519 // *PublicKey_Secp256K1 + // *PublicKey_Bls12381 Sum isPublicKey_Sum `protobuf_oneof:"sum"` } @@ -37,7 +40,7 @@ func (m *PublicKey) Reset() { *m = PublicKey{} } func (m *PublicKey) String() string { return proto.CompactTextString(m) } func (*PublicKey) ProtoMessage() {} func (*PublicKey) Descriptor() ([]byte, []int) { - return fileDescriptor_cb048658b234868c, []int{0} + return fileDescriptor_25c5fd298152e170, []int{0} } func (m *PublicKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,9 +83,13 @@ type PublicKey_Ed25519 struct { type PublicKey_Secp256K1 struct { Secp256K1 []byte `protobuf:"bytes,2,opt,name=secp256k1,proto3,oneof" json:"secp256k1,omitempty"` } +type PublicKey_Bls12381 struct { + Bls12381 []byte `protobuf:"bytes,3,opt,name=bls12381,proto3,oneof" json:"bls12381,omitempty"` +} func (*PublicKey_Ed25519) isPublicKey_Sum() {} func (*PublicKey_Secp256K1) isPublicKey_Sum() {} +func (*PublicKey_Bls12381) isPublicKey_Sum() {} func (m *PublicKey) GetSum() isPublicKey_Sum { if m != nil { @@ -105,35 +112,44 @@ func (m *PublicKey) GetSecp256K1() []byte { return nil } +func (m *PublicKey) GetBls12381() []byte { + if x, ok := m.GetSum().(*PublicKey_Bls12381); ok { + return x.Bls12381 + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package.
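// The wrapper types above (PublicKey_Ed25519, PublicKey_Secp256K1, and the
// newly added PublicKey_Bls12381) are how callers select and inspect the
// oneof variant. A usage sketch, outside the generated code (assumes this
// package is imported as v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1";
// the keyBytes helper is hypothetical):
//
//	func keyBytes(pk *v1.PublicKey) []byte {
//		switch sum := pk.GetSum().(type) {
//		case *v1.PublicKey_Ed25519:
//			return sum.Ed25519
//		case *v1.PublicKey_Secp256K1:
//			return sum.Secp256K1
//		case *v1.PublicKey_Bls12381: // variant added in this change
//			return sum.Bls12381
//		default:
//			return nil
//		}
//	}
//
// Construction goes through the same wrappers, e.g.
// pk := &v1.PublicKey{Sum: &v1.PublicKey_Bls12381{Bls12381: raw}}.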
func (*PublicKey) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PublicKey_Ed25519)(nil), (*PublicKey_Secp256K1)(nil), + (*PublicKey_Bls12381)(nil), } } func init() { - proto.RegisterType((*PublicKey)(nil), "tendermint.crypto.PublicKey") + proto.RegisterType((*PublicKey)(nil), "cometbft.crypto.v1.PublicKey") } -func init() { proto.RegisterFile("tendermint/crypto/keys.proto", fileDescriptor_cb048658b234868c) } +func init() { proto.RegisterFile("cometbft/crypto/v1/keys.proto", fileDescriptor_25c5fd298152e170) } -var fileDescriptor_cb048658b234868c = []byte{ - // 204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 0xd7, 0xcf, 0x4e, - 0xad, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x44, 0xc8, 0xea, 0x41, 0x64, 0xa5, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x52, 0x04, 0x17, 0x67, - 0x40, 0x69, 0x52, 0x4e, 0x66, 0xb2, 0x77, 0x6a, 0xa5, 0x90, 0x14, 0x17, 0x7b, 0x6a, 0x8a, 0x91, - 0xa9, 0xa9, 0xa1, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x8f, 0x07, 0x43, 0x10, 0x4c, 0x40, 0x48, - 0x8e, 0x8b, 0xb3, 0x38, 0x35, 0xb9, 0xc0, 0xc8, 0xd4, 0x2c, 0xdb, 0x50, 0x82, 0x09, 0x2a, 0x8b, - 0x10, 0xb2, 0xe2, 0x78, 0xb1, 0x40, 0x9e, 0xf1, 0xc5, 0x42, 0x79, 0x46, 0x27, 0x56, 0x2e, 0xe6, - 0xe2, 0xd2, 0x5c, 0x27, 0xbf, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, - 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x32, - 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x4d, 0x2d, - 0x49, 0x4a, 0x2b, 0x41, 0x30, 0x20, 0x4e, 0xc4, 0xf0, 0x5d, 0x12, 0x1b, 0x58, 0xc2, 0x18, 0x10, - 0x00, 0x00, 0xff, 0xff, 0xa3, 0xfb, 0xf7, 0x98, 0xf9, 0x00, 0x00, 0x00, +var fileDescriptor_25c5fd298152e170 = []byte{ + // 222 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x4f, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0xcf, + 0x4e, 0xad, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0x49, 0xeb, 0x41, 0xa4, + 0xf5, 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, + 0x52, 0x19, 0x17, 0x67, 0x40, 0x69, 0x52, 0x4e, 0x66, 0xb2, 0x77, 0x6a, 0xa5, 0x90, 0x14, 0x17, + 0x7b, 0x6a, 0x8a, 0x91, 0xa9, 0xa9, 0xa1, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x8f, 0x07, 0x43, + 0x10, 0x4c, 0x40, 0x48, 0x8e, 0x8b, 0xb3, 0x38, 0x35, 0xb9, 0xc0, 0xc8, 0xd4, 0x2c, 0xdb, 0x50, + 0x82, 0x09, 0x2a, 0x8b, 0x10, 0x12, 0x92, 0xe1, 0xe2, 0x48, 0xca, 0x29, 0x36, 0x34, 0x32, 0xb6, + 0x30, 0x94, 0x60, 0x86, 0x4a, 0xc3, 0x45, 0xac, 0x38, 0x5e, 0x2c, 0x90, 0x67, 0x7c, 0xb1, 0x50, + 0x9e, 0xd1, 0x89, 0x95, 0x8b, 0xb9, 0xb8, 0x34, 0xd7, 0xc9, 0xf7, 0xc4, 0x23, 0x39, 0xc6, 0x0b, + 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, + 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x8c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, + 0xf5, 0x11, 0xbe, 0x84, 0x31, 0x12, 0x0b, 0x32, 0xf5, 0x31, 0xfd, 0x9e, 0xc4, 0x06, 0xf6, 0x8d, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x82, 0x23, 0x0d, 0x18, 0x01, 0x00, 0x00, } func (this *PublicKey) Compare(that interface{}) int { @@ -174,6 +190,8 @@ func (this *PublicKey) Compare(that interface{}) int { thisType = 0 case *PublicKey_Secp256K1: thisType = 1 + case *PublicKey_Bls12381: + thisType = 
2 default: panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Sum)) } @@ -183,6 +201,8 @@ func (this *PublicKey) Compare(that interface{}) int { that1Type = 0 case *PublicKey_Secp256K1: that1Type = 1 + case *PublicKey_Bls12381: + that1Type = 2 default: panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Sum)) } @@ -258,6 +278,36 @@ func (this *PublicKey_Secp256K1) Compare(that interface{}) int { } return 0 } +func (this *PublicKey_Bls12381) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PublicKey_Bls12381) + if !ok { + that2, ok := that.(PublicKey_Bls12381) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Bls12381, that1.Bls12381); c != 0 { + return c + } + return 0 +} func (this *PublicKey) Equal(that interface{}) bool { if that == nil { return this == nil @@ -336,6 +386,30 @@ func (this *PublicKey_Secp256K1) Equal(that interface{}) bool { } return true } +func (this *PublicKey_Bls12381) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PublicKey_Bls12381) + if !ok { + that2, ok := that.(PublicKey_Bls12381) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Bls12381, that1.Bls12381) { + return false + } + return true +} func (m *PublicKey) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -400,6 +474,22 @@ func (m *PublicKey_Secp256K1) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *PublicKey_Bls12381) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey_Bls12381) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Bls12381 != nil { + i -= len(m.Bls12381) + copy(dAtA[i:], m.Bls12381) + i = encodeVarintKeys(dAtA, i, uint64(len(m.Bls12381))) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} func encodeVarintKeys(dAtA []byte, offset int, v uint64) int { offset -= sovKeys(v) base := offset @@ -447,6 +537,18 @@ func (m *PublicKey_Secp256K1) Size() (n int) { } return n } +func (m *PublicKey_Bls12381) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Bls12381 != nil { + l = len(m.Bls12381) + n += 1 + l + sovKeys(uint64(l)) + } + return n +} func sovKeys(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 @@ -549,6 +651,39 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { copy(v, dAtA[iNdEx:postIndex]) m.Sum = &PublicKey_Secp256K1{v} iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bls12381", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKeys + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKeys + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKeys + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Sum = &PublicKey_Bls12381{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipKeys(dAtA[iNdEx:]) diff --git a/proto/tendermint/crypto/proof.pb.go b/api/cometbft/crypto/v1/proof.pb.go similarity index 90% rename from proto/tendermint/crypto/proof.pb.go rename to api/cometbft/crypto/v1/proof.pb.go index d468cce41e3..9437efcbbae 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/api/cometbft/crypto/v1/proof.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/crypto/proof.proto +// source: cometbft/crypto/v1/proof.proto -package crypto +package v1 import ( fmt "fmt" @@ -23,6 +23,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Proof is a Merkle proof. type Proof struct { Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` Index int64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` @@ -34,7 +35,7 @@ func (m *Proof) Reset() { *m = Proof{} } func (m *Proof) String() string { return proto.CompactTextString(m) } func (*Proof) ProtoMessage() {} func (*Proof) Descriptor() ([]byte, []int) { - return fileDescriptor_6b60b6ba2ab5b856, []int{0} + return fileDescriptor_d6fc6c2b7bed957e, []int{0} } func (m *Proof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -91,6 +92,7 @@ func (m *Proof) GetAunts() [][]byte { return nil } +// ValueOp is a Merkle proof for a single key. type ValueOp struct { // Encoded in ProofOp.Key. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -102,7 +104,7 @@ func (m *ValueOp) Reset() { *m = ValueOp{} } func (m *ValueOp) String() string { return proto.CompactTextString(m) } func (*ValueOp) ProtoMessage() {} func (*ValueOp) Descriptor() ([]byte, []int) { - return fileDescriptor_6b60b6ba2ab5b856, []int{1} + return fileDescriptor_d6fc6c2b7bed957e, []int{1} } func (m *ValueOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -145,6 +147,7 @@ func (m *ValueOp) GetProof() *Proof { return nil } +// DominoOp always returns the given output. 
type DominoOp struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Input string `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` @@ -155,7 +158,7 @@ func (m *DominoOp) Reset() { *m = DominoOp{} } func (m *DominoOp) String() string { return proto.CompactTextString(m) } func (*DominoOp) ProtoMessage() {} func (*DominoOp) Descriptor() ([]byte, []int) { - return fileDescriptor_6b60b6ba2ab5b856, []int{2} + return fileDescriptor_d6fc6c2b7bed957e, []int{2} } func (m *DominoOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +221,7 @@ func (m *ProofOp) Reset() { *m = ProofOp{} } func (m *ProofOp) String() string { return proto.CompactTextString(m) } func (*ProofOp) ProtoMessage() {} func (*ProofOp) Descriptor() ([]byte, []int) { - return fileDescriptor_6b60b6ba2ab5b856, []int{3} + return fileDescriptor_d6fc6c2b7bed957e, []int{3} } func (m *ProofOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -277,7 +280,7 @@ func (m *ProofOps) Reset() { *m = ProofOps{} } func (m *ProofOps) String() string { return proto.CompactTextString(m) } func (*ProofOps) ProtoMessage() {} func (*ProofOps) Descriptor() ([]byte, []int) { - return fileDescriptor_6b60b6ba2ab5b856, []int{4} + return fileDescriptor_d6fc6c2b7bed957e, []int{4} } func (m *ProofOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -314,40 +317,40 @@ func (m *ProofOps) GetOps() []ProofOp { } func init() { - proto.RegisterType((*Proof)(nil), "tendermint.crypto.Proof") - proto.RegisterType((*ValueOp)(nil), "tendermint.crypto.ValueOp") - proto.RegisterType((*DominoOp)(nil), "tendermint.crypto.DominoOp") - proto.RegisterType((*ProofOp)(nil), "tendermint.crypto.ProofOp") - proto.RegisterType((*ProofOps)(nil), "tendermint.crypto.ProofOps") + proto.RegisterType((*Proof)(nil), "cometbft.crypto.v1.Proof") + proto.RegisterType((*ValueOp)(nil), "cometbft.crypto.v1.ValueOp") + proto.RegisterType((*DominoOp)(nil), "cometbft.crypto.v1.DominoOp") + proto.RegisterType((*ProofOp)(nil), "cometbft.crypto.v1.ProofOp") + proto.RegisterType((*ProofOps)(nil), "cometbft.crypto.v1.ProofOps") } -func init() { proto.RegisterFile("tendermint/crypto/proof.proto", fileDescriptor_6b60b6ba2ab5b856) } +func init() { proto.RegisterFile("cometbft/crypto/v1/proof.proto", fileDescriptor_d6fc6c2b7bed957e) } -var fileDescriptor_6b60b6ba2ab5b856 = []byte{ - // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xbd, 0x6a, 0xe3, 0x40, - 0x10, 0x96, 0x2c, 0xf9, 0x6f, 0xed, 0xe2, 0x6e, 0x31, 0x87, 0xf0, 0x71, 0x3a, 0xa1, 0x4a, 0x95, - 0x04, 0x4e, 0xea, 0x14, 0x4e, 0x8a, 0x90, 0x40, 0x1c, 0x54, 0xa4, 0x48, 0x13, 0xd6, 0xf6, 0xca, - 0x12, 0xb1, 0x34, 0x8b, 0x34, 0x82, 0xf8, 0x2d, 0xf2, 0x58, 0x2e, 0x5d, 0xa6, 0x0a, 0xc1, 0x7e, - 0x91, 0xb0, 0xbb, 0x0a, 0x26, 0x98, 0x74, 0xdf, 0xcf, 0xec, 0x37, 0xdf, 0x20, 0x91, 0x7f, 0xc8, - 0x8b, 0x25, 0x2f, 0xf3, 0xac, 0xc0, 0x68, 0x51, 0x6e, 0x04, 0x42, 0x24, 0x4a, 0x80, 0x24, 0x14, - 0x25, 0x20, 0xd0, 0xdf, 0x47, 0x3b, 0xd4, 0xf6, 0x78, 0xb4, 0x82, 0x15, 0x28, 0x37, 0x92, 0x48, - 0x0f, 0xfa, 0x09, 0x69, 0xdf, 0xcb, 0x77, 0x74, 0x44, 0xda, 0x08, 0xc8, 0xd6, 0x8e, 0xe9, 0x99, - 0x81, 0x15, 0x6b, 0x22, 0xd5, 0xac, 0x58, 0xf2, 0x17, 0xa7, 0xa5, 0x55, 0x45, 0xe8, 0x5f, 0xd2, - 0x5f, 0x73, 0x96, 0x3c, 0xa5, 0xac, 0x4a, 0x1d, 0xcb, 0x33, 0x83, 0x61, 0xdc, 0x93, 0xc2, 0x35, - 0xab, 0x52, 0xf9, 0x84, 0xd5, 0x05, 0x56, 0x8e, 0xed, 0x59, 0xc1, 0x30, 0xd6, 0xc4, 0xbf, 0x25, - 0xdd, 0x07, 0xb6, 0xae, 0xf9, 0x4c, 0xd0, 0x5f, 
0xc4, 0x7a, 0xe6, 0x1b, 0xb5, 0x67, 0x18, 0x4b, - 0x48, 0x43, 0xd2, 0x56, 0xe5, 0xd5, 0x96, 0xc1, 0xc4, 0x09, 0x4f, 0xda, 0x87, 0xaa, 0x64, 0xac, - 0xc7, 0xfc, 0x1b, 0xd2, 0xbb, 0x82, 0x3c, 0x2b, 0xe0, 0x7b, 0x5a, 0x5f, 0xa7, 0xa9, 0xce, 0xa2, - 0x46, 0x95, 0xd6, 0x8f, 0x35, 0xa1, 0x7f, 0x48, 0x07, 0x6a, 0x94, 0xb2, 0xa5, 0xe4, 0x86, 0xf9, - 0x97, 0xa4, 0xab, 0xb2, 0x67, 0x82, 0x52, 0x62, 0xe3, 0x46, 0xf0, 0x26, 0x4b, 0xe1, 0xaf, 0xf8, - 0xd6, 0xb1, 0x2c, 0x25, 0xf6, 0x92, 0x21, 0x6b, 0xee, 0x56, 0xd8, 0xbf, 0x20, 0xbd, 0x26, 0xa4, - 0xa2, 0x13, 0x62, 0x81, 0xa8, 0x1c, 0xd3, 0xb3, 0x82, 0xc1, 0x64, 0xfc, 0xd3, 0x29, 0x33, 0x31, - 0xb5, 0xb7, 0xef, 0xff, 0x8d, 0x58, 0x0e, 0x4f, 0xef, 0xb6, 0x7b, 0xd7, 0xdc, 0xed, 0x5d, 0xf3, - 0x63, 0xef, 0x9a, 0xaf, 0x07, 0xd7, 0xd8, 0x1d, 0x5c, 0xe3, 0xed, 0xe0, 0x1a, 0x8f, 0xe7, 0xab, - 0x0c, 0xd3, 0x7a, 0x1e, 0x2e, 0x20, 0x8f, 0x16, 0x90, 0x73, 0x9c, 0x27, 0x78, 0x04, 0xfa, 0x73, - 0x9e, 0xfc, 0x0a, 0xf3, 0x8e, 0x32, 0xce, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x5a, 0xb3, - 0xb6, 0x26, 0x02, 0x00, 0x00, +var fileDescriptor_d6fc6c2b7bed957e = []byte{ + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xcd, 0x6a, 0xf2, 0x40, + 0x14, 0x4d, 0x4c, 0xfc, 0xbb, 0xba, 0xf8, 0x18, 0xe4, 0x23, 0xad, 0x30, 0x0d, 0x59, 0x65, 0x95, + 0x41, 0x7d, 0x80, 0x82, 0xed, 0xa2, 0x94, 0x16, 0xcb, 0x2c, 0xba, 0xe8, 0xa6, 0x8c, 0x9a, 0x98, + 0x50, 0x75, 0x06, 0x33, 0x91, 0xfa, 0x16, 0x7d, 0x2c, 0x97, 0x2e, 0xbb, 0x2a, 0x45, 0x5f, 0xa4, + 0xcc, 0x4c, 0x44, 0x8a, 0x74, 0x77, 0xce, 0xb9, 0x77, 0xce, 0x3d, 0x97, 0x3b, 0x80, 0x27, 0x7c, + 0x11, 0xcb, 0x71, 0x22, 0xc9, 0x64, 0xb5, 0x11, 0x92, 0x93, 0x75, 0x8f, 0x88, 0x15, 0xe7, 0x49, + 0x24, 0x56, 0x5c, 0x72, 0x84, 0x8e, 0xf5, 0xc8, 0xd4, 0xa3, 0x75, 0xef, 0xb2, 0x33, 0xe3, 0x33, + 0xae, 0xcb, 0x44, 0x21, 0xd3, 0x19, 0x24, 0x50, 0x7d, 0x52, 0x0f, 0x51, 0x07, 0xaa, 0x92, 0x4b, + 0x36, 0xf7, 0x6c, 0xdf, 0x0e, 0x1d, 0x6a, 0x88, 0x52, 0xb3, 0xe5, 0x34, 0x7e, 0xf7, 0x2a, 0x46, + 0xd5, 0x04, 0x75, 0xa1, 0x39, 0x8f, 0x59, 0xf2, 0x9a, 0xb2, 0x3c, 0xf5, 0x1c, 0xdf, 0x0e, 0xdb, + 0xb4, 0xa1, 0x84, 0x3b, 0x96, 0xa7, 0xea, 0x09, 0x2b, 0x96, 0x32, 0xf7, 0x5c, 0xdf, 0x09, 0xdb, + 0xd4, 0x90, 0xe0, 0x01, 0xea, 0xcf, 0x6c, 0x5e, 0xc4, 0x23, 0x81, 0xfe, 0x81, 0xf3, 0x16, 0x6f, + 0xf4, 0x9c, 0x36, 0x55, 0x10, 0x11, 0xa8, 0xea, 0xf4, 0x7a, 0x4a, 0xab, 0x7f, 0x11, 0x9d, 0xc7, + 0x8f, 0x74, 0x4a, 0x6a, 0xfa, 0x82, 0x7b, 0x68, 0xdc, 0xf2, 0x45, 0xb6, 0xe4, 0xbf, 0xed, 0x9a, + 0xc6, 0x4e, 0x87, 0x16, 0x85, 0xd4, 0x76, 0x4d, 0x6a, 0x08, 0xfa, 0x0f, 0x35, 0x5e, 0x48, 0x25, + 0x3b, 0x5a, 0x2e, 0x59, 0x70, 0x03, 0x75, 0xed, 0x3d, 0x12, 0x08, 0x81, 0x2b, 0x37, 0x22, 0x2e, + 0xbd, 0x34, 0x3e, 0xda, 0x57, 0x4e, 0x69, 0x11, 0xb8, 0x53, 0x26, 0x59, 0xb9, 0xb8, 0xc6, 0xc1, + 0x35, 0x34, 0x4a, 0x93, 0x1c, 0x0d, 0xc0, 0xe1, 0x22, 0xf7, 0x6c, 0xdf, 0x09, 0x5b, 0xfd, 0xee, + 0x9f, 0xbb, 0x8c, 0xc4, 0xd0, 0xdd, 0x7e, 0x5d, 0x59, 0x54, 0x75, 0x0f, 0x1f, 0xb7, 0x7b, 0x6c, + 0xef, 0xf6, 0xd8, 0xfe, 0xde, 0x63, 0xfb, 0xe3, 0x80, 0xad, 0xdd, 0x01, 0x5b, 0x9f, 0x07, 0x6c, + 0xbd, 0x0c, 0x66, 0x99, 0x4c, 0x8b, 0xb1, 0xf2, 0x21, 0xa7, 0xb3, 0x1f, 0x01, 0x13, 0x19, 0x39, + 0xff, 0x0c, 0xe3, 0x9a, 0xbe, 0xee, 0xe0, 0x27, 0x00, 0x00, 0xff, 0xff, 0x69, 0xea, 0x58, 0xaf, + 0x29, 0x02, 0x00, 0x00, } func (m *Proof) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/libs/bits/types.pb.go b/api/cometbft/libs/bits/v1/types.pb.go similarity index 86% rename from 
proto/tendermint/libs/bits/types.pb.go rename to api/cometbft/libs/bits/v1/types.pb.go index 9dc37733632..2817d0afb26 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/api/cometbft/libs/bits/v1/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/libs/bits/types.proto +// source: cometbft/libs/bits/v1/types.proto -package bits +package v1 import ( fmt "fmt" @@ -22,6 +22,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// BitArray is an array of bits. type BitArray struct { Bits int64 `protobuf:"varint,1,opt,name=bits,proto3" json:"bits,omitempty"` Elems []uint64 `protobuf:"varint,2,rep,packed,name=elems,proto3" json:"elems,omitempty"` @@ -31,7 +32,7 @@ func (m *BitArray) Reset() { *m = BitArray{} } func (m *BitArray) String() string { return proto.CompactTextString(m) } func (*BitArray) ProtoMessage() {} func (*BitArray) Descriptor() ([]byte, []int) { - return fileDescriptor_e91ab2672920d7d4, []int{0} + return fileDescriptor_14f91284a011ac96, []int{0} } func (m *BitArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,24 +76,24 @@ func (m *BitArray) GetElems() []uint64 { } func init() { - proto.RegisterType((*BitArray)(nil), "tendermint.libs.bits.BitArray") + proto.RegisterType((*BitArray)(nil), "cometbft.libs.bits.v1.BitArray") } -func init() { proto.RegisterFile("tendermint/libs/bits/types.proto", fileDescriptor_e91ab2672920d7d4) } +func init() { proto.RegisterFile("cometbft/libs/bits/v1/types.proto", fileDescriptor_14f91284a011ac96) } -var fileDescriptor_e91ab2672920d7d4 = []byte{ +var fileDescriptor_14f91284a011ac96 = []byte{ // 173 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0xcf, 0xc9, 0x4c, 0x2a, 0xd6, 0x4f, 0xca, 0x2c, 0x29, - 0xd6, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x41, 0xa8, - 0xd0, 0x03, 0xa9, 0xd0, 0x03, 0xa9, 0x50, 0x32, 0xe1, 0xe2, 0x70, 0xca, 0x2c, 0x71, 0x2c, 0x2a, - 0x4a, 0xac, 0x14, 0x12, 0xe2, 0x62, 0x01, 0x89, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x81, - 0xd9, 0x42, 0x22, 0x5c, 0xac, 0xa9, 0x39, 0xa9, 0xb9, 0xc5, 0x12, 0x4c, 0x0a, 0xcc, 0x1a, 0x2c, - 0x41, 0x10, 0x8e, 0x53, 0xe0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, - 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x99, - 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0xe7, 0xa6, 0x96, - 0x24, 0xa5, 0x95, 0x20, 0x18, 0x60, 0x97, 0xe8, 0x63, 0x73, 0x6a, 0x12, 0x1b, 0x58, 0xce, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0x09, 0xfd, 0x78, 0xed, 0xc9, 0x00, 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0xcf, 0xc9, 0x4c, 0x2a, 0xd6, 0x4f, 0xca, 0x2c, 0x29, 0xd6, 0x2f, + 0x33, 0xd4, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x85, + 0x29, 0xd1, 0x03, 0x29, 0xd1, 0x03, 0x29, 0xd1, 0x2b, 0x33, 0x54, 0x32, 0xe1, 0xe2, 0x70, 0xca, + 0x2c, 0x71, 0x2c, 0x2a, 0x4a, 0xac, 0x14, 0x12, 0xe2, 0x62, 0x01, 0x09, 0x4b, 0x30, 0x2a, 0x30, + 0x6a, 0x30, 0x07, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0xa9, 0x39, 0xa9, 0xb9, 0xc5, 0x12, 0x4c, + 0x0a, 0xcc, 0x1a, 0x2c, 0x41, 0x10, 0x8e, 0x53, 0xc0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, + 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 
0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, + 0xcb, 0x31, 0x44, 0x99, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xc3, + 0x1d, 0x05, 0x67, 0x24, 0x16, 0x64, 0xea, 0x63, 0x75, 0x6a, 0x12, 0x1b, 0xd8, 0x95, 0xc6, 0x80, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xd4, 0xb6, 0x64, 0xca, 0x00, 0x00, 0x00, } func (m *BitArray) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/mempool/message.go b/api/cometbft/mempool/v1/message.go similarity index 81% rename from proto/tendermint/mempool/message.go rename to api/cometbft/mempool/v1/message.go index 270a744faec..a2093ee8682 100644 --- a/proto/tendermint/mempool/message.go +++ b/api/cometbft/mempool/v1/message.go @@ -1,16 +1,11 @@ -package mempool +package v1 import ( "fmt" "github.com/cosmos/gogoproto/proto" - - "github.com/cometbft/cometbft/p2p" ) -var _ p2p.Wrapper = &Txs{} -var _ p2p.Unwrapper = &Message{} - // Wrap implements the p2p Wrapper interface and wraps a mempool message. func (m *Txs) Wrap() proto.Message { mm := &Message{} diff --git a/proto/tendermint/mempool/types.pb.go b/api/cometbft/mempool/v1/types.pb.go similarity index 87% rename from proto/tendermint/mempool/types.pb.go rename to api/cometbft/mempool/v1/types.pb.go index 4a6a40ef341..dac12ec1685 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/api/cometbft/mempool/v1/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/mempool/types.proto +// source: cometbft/mempool/v1/types.proto -package mempool +package v1 import ( fmt "fmt" @@ -22,6 +22,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Txs contains a list of transactions from the mempool. type Txs struct { Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` } @@ -30,7 +31,7 @@ func (m *Txs) Reset() { *m = Txs{} } func (m *Txs) String() string { return proto.CompactTextString(m) } func (*Txs) ProtoMessage() {} func (*Txs) Descriptor() ([]byte, []int) { - return fileDescriptor_2af51926fdbcbc05, []int{0} + return fileDescriptor_d8bb39f484575b79, []int{0} } func (m *Txs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -66,7 +67,10 @@ func (m *Txs) GetTxs() [][]byte { return nil } +// Message is an abstract mempool message. type Message struct { + // Sum of all possible messages.
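// As the message.go hunk above shows, Txs still implements the p2p Wrapper
// interface via Wrap(), which boxes a Txs into this abstract Message. A
// hedged usage sketch (assumes this package is imported as
// v1 "github.com/cometbft/cometbft/api/cometbft/mempool/v1" together with
// gogoproto's proto package; the tx bytes are placeholders):
//
//	txs := &v1.Txs{Txs: [][]byte{[]byte("tx1"), []byte("tx2")}}
//	msg := txs.Wrap()             // proto.Message wrapping the Txs
//	bz, err := proto.Marshal(msg) // wire bytes suitable for a peer
//	_, _ = bz, err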
+ // // Types that are valid to be assigned to Sum: // // *Message_Txs @@ -77,7 +81,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_2af51926fdbcbc05, []int{1} + return fileDescriptor_d8bb39f484575b79, []int{1} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -140,26 +144,26 @@ func (*Message) XXX_OneofWrappers() []interface{} { } func init() { - proto.RegisterType((*Txs)(nil), "tendermint.mempool.Txs") - proto.RegisterType((*Message)(nil), "tendermint.mempool.Message") -} - -func init() { proto.RegisterFile("tendermint/mempool/types.proto", fileDescriptor_2af51926fdbcbc05) } - -var fileDescriptor_2af51926fdbcbc05 = []byte{ - // 184 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0xcf, 0x4d, 0xcd, 0x2d, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, - 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, - 0xe5, 0x95, 0xc4, 0xb9, 0x98, 0x43, 0x2a, 0x8a, 0x85, 0x04, 0xb8, 0x98, 0x4b, 0x2a, 0x8a, 0x25, - 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x25, 0x5b, 0x2e, 0x76, 0xdf, 0xd4, 0xe2, 0xe2, - 0xc4, 0xf4, 0x54, 0x21, 0x6d, 0x98, 0x24, 0xa3, 0x06, 0xb7, 0x91, 0xb8, 0x1e, 0xa6, 0x29, 0x7a, - 0x21, 0x15, 0xc5, 0x1e, 0x0c, 0x60, 0x7d, 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xfe, - 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, - 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, 0x9e, 0x59, 0x92, 0x51, - 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9c, 0x9f, 0x9b, 0x5a, 0x92, 0x94, 0x56, 0x82, 0x60, - 0x80, 0x5d, 0xaa, 0x8f, 0xe9, 0x91, 0x24, 0x36, 0xb0, 0x8c, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x53, 0xc3, 0xc4, 0x0a, 0xe5, 0x00, 0x00, 0x00, + proto.RegisterType((*Txs)(nil), "cometbft.mempool.v1.Txs") + proto.RegisterType((*Message)(nil), "cometbft.mempool.v1.Message") +} + +func init() { proto.RegisterFile("cometbft/mempool/v1/types.proto", fileDescriptor_d8bb39f484575b79) } + +var fileDescriptor_d8bb39f484575b79 = []byte{ + // 182 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0xcf, 0x4d, 0xcd, 0x2d, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, 0x33, 0xd4, + 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, 0x29, 0xd0, + 0x83, 0x2a, 0xd0, 0x2b, 0x33, 0x54, 0x12, 0xe7, 0x62, 0x0e, 0xa9, 0x28, 0x16, 0x12, 0xe0, 0x62, + 0x2e, 0xa9, 0x28, 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x09, 0x02, 0x31, 0x95, 0xec, 0xb8, 0xd8, + 0x7d, 0x53, 0x8b, 0x8b, 0x13, 0xd3, 0x53, 0x85, 0x74, 0x60, 0x92, 0x8c, 0x1a, 0xdc, 0x46, 0x12, + 0x7a, 0x58, 0x8c, 0xd1, 0x0b, 0xa9, 0x28, 0xf6, 0x60, 0x00, 0x6b, 0x74, 0x62, 0xe5, 0x62, 0x2e, + 0x2e, 0xcd, 0x75, 0xf2, 0x3b, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x93, + 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0x90, 0x39, 0xfa, 0x70, 0x37, 0xc3, 0x19, 0x89, 0x05, 0x99, + 0xfa, 0x58, 0x7c, 0x92, 0xc4, 0x06, 0xf6, 0x84, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x09, + 0x89, 0xce, 0xe7, 0x00, 0x00, 0x00, } func (m *Txs) Marshal() (dAtA []byte, err error) { diff --git 
a/proto/tendermint/p2p/conn.pb.go b/api/cometbft/p2p/v1/conn.pb.go similarity index 86% rename from proto/tendermint/p2p/conn.pb.go rename to api/cometbft/p2p/v1/conn.pb.go index 4a0f8256428..fb2a0b5bae9 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/api/cometbft/p2p/v1/conn.pb.go @@ -1,11 +1,11 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/p2p/conn.proto +// source: cometbft/p2p/v1/conn.proto -package p2p +package v1 import ( fmt "fmt" - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" io "io" @@ -24,6 +24,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// PacketPing is a request to confirm that the connection is alive. type PacketPing struct { } @@ -31,7 +32,7 @@ func (m *PacketPing) Reset() { *m = PacketPing{} } func (m *PacketPing) String() string { return proto.CompactTextString(m) } func (*PacketPing) ProtoMessage() {} func (*PacketPing) Descriptor() ([]byte, []int) { - return fileDescriptor_22474b5527c8fa9f, []int{0} + return fileDescriptor_3ad66b5863681764, []int{0} } func (m *PacketPing) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -60,6 +61,7 @@ func (m *PacketPing) XXX_DiscardUnknown() { var xxx_messageInfo_PacketPing proto.InternalMessageInfo +// PacketPong is a response to confirm that the connection is alive. type PacketPong struct { } @@ -67,7 +69,7 @@ func (m *PacketPong) Reset() { *m = PacketPong{} } func (m *PacketPong) String() string { return proto.CompactTextString(m) } func (*PacketPong) ProtoMessage() {} func (*PacketPong) Descriptor() ([]byte, []int) { - return fileDescriptor_22474b5527c8fa9f, []int{1} + return fileDescriptor_3ad66b5863681764, []int{1} } func (m *PacketPong) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -96,6 +98,8 @@ func (m *PacketPong) XXX_DiscardUnknown() { var xxx_messageInfo_PacketPong proto.InternalMessageInfo +// PacketMsg contains data for the specified channel ID. EOF means the message +// is fully received. type PacketMsg struct { ChannelID int32 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` EOF bool `protobuf:"varint,2,opt,name=eof,proto3" json:"eof,omitempty"` @@ -106,7 +110,7 @@ func (m *PacketMsg) Reset() { *m = PacketMsg{} } func (m *PacketMsg) String() string { return proto.CompactTextString(m) } func (*PacketMsg) ProtoMessage() {} func (*PacketMsg) Descriptor() ([]byte, []int) { - return fileDescriptor_22474b5527c8fa9f, []int{2} + return fileDescriptor_3ad66b5863681764, []int{2} } func (m *PacketMsg) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -156,7 +160,10 @@ func (m *PacketMsg) GetData() []byte { return nil } +// Packet is an abstract p2p message. type Packet struct { + // Sum of all possible messages. 
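// Per the PacketMsg comment above, a sender splits one logical message into
// channel-tagged chunks and sets EOF on the last one. A sketch of that
// sender-side loop (illustrative only; assumes this package is imported as
// v1 "github.com/cometbft/cometbft/api/cometbft/p2p/v1", maxChunk is an
// arbitrary example size, and send is a placeholder for the connection's
// write path):
//
//	const maxChunk = 1024
//	for len(payload) > 0 {
//		n := len(payload)
//		if n > maxChunk {
//			n = maxChunk
//		}
//		pkt := &v1.PacketMsg{
//			ChannelID: chID,
//			EOF:       n == len(payload), // true only for the final chunk
//			Data:      payload[:n],
//		}
//		payload = payload[n:]
//		send(pkt)
//	}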
+ // // Types that are valid to be assigned to Sum: // // *Packet_PacketPing @@ -169,7 +176,7 @@ func (m *Packet) Reset() { *m = Packet{} } func (m *Packet) String() string { return proto.CompactTextString(m) } func (*Packet) ProtoMessage() {} func (*Packet) Descriptor() ([]byte, []int) { - return fileDescriptor_22474b5527c8fa9f, []int{3} + return fileDescriptor_3ad66b5863681764, []int{3} } func (m *Packet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -255,16 +262,18 @@ func (*Packet) XXX_OneofWrappers() []interface{} { } } +// AuthSigMessage is sent during the authentication and contains our/remote's +// signature along with the public key. type AuthSigMessage struct { - PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` - Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"` + PubKey v1.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"` } func (m *AuthSigMessage) Reset() { *m = AuthSigMessage{} } func (m *AuthSigMessage) String() string { return proto.CompactTextString(m) } func (*AuthSigMessage) ProtoMessage() {} func (*AuthSigMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_22474b5527c8fa9f, []int{4} + return fileDescriptor_3ad66b5863681764, []int{4} } func (m *AuthSigMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -293,11 +302,11 @@ func (m *AuthSigMessage) XXX_DiscardUnknown() { var xxx_messageInfo_AuthSigMessage proto.InternalMessageInfo -func (m *AuthSigMessage) GetPubKey() crypto.PublicKey { +func (m *AuthSigMessage) GetPubKey() v1.PublicKey { if m != nil { return m.PubKey } - return crypto.PublicKey{} + return v1.PublicKey{} } func (m *AuthSigMessage) GetSig() []byte { @@ -308,42 +317,43 @@ func (m *AuthSigMessage) GetSig() []byte { } func init() { - proto.RegisterType((*PacketPing)(nil), "tendermint.p2p.PacketPing") - proto.RegisterType((*PacketPong)(nil), "tendermint.p2p.PacketPong") - proto.RegisterType((*PacketMsg)(nil), "tendermint.p2p.PacketMsg") - proto.RegisterType((*Packet)(nil), "tendermint.p2p.Packet") - proto.RegisterType((*AuthSigMessage)(nil), "tendermint.p2p.AuthSigMessage") -} - -func init() { proto.RegisterFile("tendermint/p2p/conn.proto", fileDescriptor_22474b5527c8fa9f) } - -var fileDescriptor_22474b5527c8fa9f = []byte{ - // 397 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0x4d, 0x8f, 0xd3, 0x30, - 0x10, 0x8d, 0xc9, 0x6e, 0x97, 0x4e, 0xcb, 0x0a, 0x59, 0x1c, 0xda, 0x6a, 0x95, 0x56, 0x3d, 0xf5, - 0x80, 0x12, 0x11, 0x6e, 0x20, 0x0e, 0x84, 0x0f, 0xb1, 0xaa, 0x2a, 0xaa, 0x70, 0xe3, 0x12, 0xe5, - 0xc3, 0xeb, 0x58, 0xdd, 0xd8, 0x56, 0xed, 0x1c, 0xf2, 0x2f, 0xf8, 0x59, 0xcb, 0xad, 0x47, 0x4e, - 0x15, 0x4a, 0xff, 0x08, 0x4a, 0x5c, 0x68, 0x2a, 0xb1, 0xb7, 0xf7, 0x66, 0xfc, 0x66, 0xde, 0x93, - 0x07, 0xc6, 0x9a, 0xf0, 0x8c, 0x6c, 0x0b, 0xc6, 0xb5, 0x27, 0x7d, 0xe9, 0xa5, 0x82, 0x73, 0x57, - 0x6e, 0x85, 0x16, 0xf8, 0xfa, 0xd4, 0x72, 0xa5, 0x2f, 0x27, 0x2f, 0xa8, 0xa0, 0xa2, 0x6d, 0x79, - 0x0d, 0x32, 0xaf, 0x26, 0x37, 0x9d, 0x01, 0xe9, 0xb6, 0x92, 0x5a, 0x78, 0x1b, 0x52, 0x29, 0xd3, - 0x9d, 0x0f, 0x01, 0xd6, 0x71, 0xba, 0x21, 0x7a, 0xcd, 0x38, 0xed, 0x30, 0xc1, 0xe9, 0x3c, 0x87, - 0xbe, 0x61, 0x2b, 0x45, 0xf1, 0x4b, 0x80, 0x34, 0x8f, 0x39, 0x27, 0xf7, 0x11, 0xcb, 0x46, 0x68, - 0x86, 0x16, 0x97, 0xc1, 0xb3, 0x7a, 0x3f, 0xed, 0x7f, 0x30, 0xd5, 0xdb, 0x8f, 0x61, 0xff, 0xf8, - 0xe0, 0x36, 0xc3, 0x63, 
0xb0, 0x89, 0xb8, 0x1b, 0x3d, 0x99, 0xa1, 0xc5, 0xd3, 0xe0, 0xaa, 0xde, - 0x4f, 0xed, 0x4f, 0x5f, 0x3f, 0x87, 0x4d, 0x0d, 0x63, 0xb8, 0xc8, 0x62, 0x1d, 0x8f, 0xec, 0x19, - 0x5a, 0x0c, 0xc3, 0x16, 0xcf, 0x7f, 0x22, 0xe8, 0x99, 0x55, 0xf8, 0x1d, 0x0c, 0x64, 0x8b, 0x22, - 0xc9, 0x38, 0x6d, 0x17, 0x0d, 0xfc, 0x89, 0x7b, 0x1e, 0xd5, 0x3d, 0x79, 0xfe, 0x62, 0x85, 0x20, - 0xff, 0xb1, 0xae, 0x5c, 0x70, 0xda, 0x1a, 0x78, 0x5c, 0x2e, 0xce, 0xe4, 0x82, 0x53, 0xfc, 0x06, - 0x8e, 0x2c, 0x2a, 0x14, 0x6d, 0x2d, 0x0e, 0xfc, 0xf1, 0xff, 0xd5, 0x2b, 0xd5, 0x88, 0xfb, 0xf2, - 0x2f, 0x09, 0x2e, 0xc1, 0x56, 0x65, 0x31, 0x8f, 0xe0, 0xfa, 0x7d, 0xa9, 0xf3, 0x6f, 0x8c, 0xae, - 0x88, 0x52, 0x31, 0x25, 0xf8, 0x2d, 0x5c, 0xc9, 0x32, 0x89, 0x36, 0xa4, 0x3a, 0xc6, 0xb9, 0xe9, - 0x4e, 0x34, 0x7f, 0xe2, 0xae, 0xcb, 0xe4, 0x9e, 0xa5, 0x4b, 0x52, 0x05, 0x17, 0x0f, 0xfb, 0xa9, - 0x15, 0xf6, 0x64, 0x99, 0x2c, 0x49, 0x85, 0x9f, 0x83, 0xad, 0x98, 0x09, 0x32, 0x0c, 0x1b, 0x18, - 0x2c, 0x1f, 0x6a, 0x07, 0xed, 0x6a, 0x07, 0xfd, 0xae, 0x1d, 0xf4, 0xe3, 0xe0, 0x58, 0xbb, 0x83, - 0x63, 0xfd, 0x3a, 0x38, 0xd6, 0xf7, 0x57, 0x94, 0xe9, 0xbc, 0x4c, 0xdc, 0x54, 0x14, 0x5e, 0x2a, - 0x0a, 0xa2, 0x93, 0x3b, 0x7d, 0x02, 0xe6, 0x32, 0xce, 0xcf, 0x29, 0xe9, 0xb5, 0xd5, 0xd7, 0x7f, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xf9, 0x75, 0xae, 0x67, 0x02, 0x00, 0x00, + proto.RegisterType((*PacketPing)(nil), "cometbft.p2p.v1.PacketPing") + proto.RegisterType((*PacketPong)(nil), "cometbft.p2p.v1.PacketPong") + proto.RegisterType((*PacketMsg)(nil), "cometbft.p2p.v1.PacketMsg") + proto.RegisterType((*Packet)(nil), "cometbft.p2p.v1.Packet") + proto.RegisterType((*AuthSigMessage)(nil), "cometbft.p2p.v1.AuthSigMessage") +} + +func init() { proto.RegisterFile("cometbft/p2p/v1/conn.proto", fileDescriptor_3ad66b5863681764) } + +var fileDescriptor_3ad66b5863681764 = []byte{ + // 402 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xf5, 0xe2, 0x36, 0xc5, 0x93, 0xf0, 0xa1, 0x15, 0x87, 0x60, 0x54, 0x27, 0xf2, 0x29, 0x07, + 0x64, 0x53, 0x73, 0x04, 0x21, 0x61, 0x3e, 0x44, 0xa9, 0x22, 0x2a, 0x73, 0xe3, 0x62, 0x6c, 0x67, + 0xbb, 0x5e, 0xa5, 0xd9, 0x5d, 0x75, 0xd7, 0x95, 0xfc, 0x2f, 0xf8, 0x59, 0x3d, 0x70, 0xe8, 0x91, + 0x53, 0x84, 0x9c, 0x3f, 0x82, 0xbc, 0x4e, 0x9a, 0x10, 0x09, 0x6e, 0xef, 0xcd, 0xcc, 0x7b, 0x33, + 0xa3, 0x19, 0x70, 0x0b, 0xb1, 0x20, 0x3a, 0xbf, 0xd0, 0xa1, 0x8c, 0x64, 0x78, 0x7d, 0x12, 0x16, + 0x82, 0xf3, 0x40, 0x5e, 0x09, 0x2d, 0xf0, 0xa3, 0x4d, 0x2e, 0x90, 0x91, 0x0c, 0xae, 0x4f, 0xdc, + 0x27, 0x54, 0x50, 0x61, 0x72, 0x61, 0x8b, 0xba, 0x32, 0xf7, 0xf8, 0xce, 0xa2, 0xb8, 0xaa, 0xa5, + 0x16, 0xad, 0xcb, 0x9c, 0xd4, 0xaa, 0x4b, 0xfb, 0x03, 0x80, 0xf3, 0xac, 0x98, 0x13, 0x7d, 0xce, + 0x38, 0xdd, 0x61, 0x82, 0x53, 0xbf, 0x04, 0xa7, 0x63, 0x53, 0x45, 0xf1, 0x73, 0x80, 0xa2, 0xcc, + 0x38, 0x27, 0x97, 0x29, 0x9b, 0x0d, 0xd1, 0x18, 0x4d, 0x0e, 0xe3, 0x07, 0xcd, 0x72, 0xe4, 0xbc, + 0xeb, 0xa2, 0xa7, 0xef, 0x13, 0x67, 0x5d, 0x70, 0x3a, 0xc3, 0x4f, 0xc1, 0x26, 0xe2, 0x62, 0x78, + 0x6f, 0x8c, 0x26, 0xf7, 0xe3, 0xa3, 0x66, 0x39, 0xb2, 0x3f, 0x7c, 0xf9, 0x98, 0xb4, 0x31, 0x8c, + 0xe1, 0x60, 0x96, 0xe9, 0x6c, 0x68, 0x8f, 0xd1, 0x64, 0x90, 0x18, 0xec, 0xff, 0x44, 0xd0, 0xeb, + 0x5a, 0xe1, 0x37, 0xd0, 0x97, 0x06, 0xa5, 0x92, 0x71, 0x6a, 0x1a, 0xf5, 0xa3, 0x67, 0xc1, 0xde, + 0xb2, 0xc1, 0x76, 0xe8, 0x4f, 0x56, 0x02, 0xf2, 0x8e, 0xed, 0xea, 0x05, 0xa7, 0x66, 0x82, 0xff, + 0xe8, 0xc5, 0x5f, 0x7a, 0xc1, 0x29, 0x7e, 0x05, 0x6b, 0x96, 0x2e, 0x14, 0x35, 0x43, 0xf6, 0x23, + 
0xf7, 0x1f, 0xf2, 0xa9, 0x6a, 0xd5, 0x8e, 0xdc, 0x90, 0xf8, 0x10, 0x6c, 0x55, 0x2d, 0xfc, 0xef, + 0xf0, 0xf0, 0x6d, 0xa5, 0xcb, 0xaf, 0x8c, 0x4e, 0x89, 0x52, 0x19, 0x25, 0xf8, 0x35, 0x1c, 0xc9, + 0x2a, 0x4f, 0xe7, 0xa4, 0x5e, 0x6f, 0x74, 0xbc, 0xb5, 0xec, 0xee, 0x62, 0x5c, 0xab, 0xfc, 0x92, + 0x15, 0x67, 0xa4, 0x8e, 0x0f, 0x6e, 0x96, 0x23, 0x2b, 0xe9, 0xc9, 0x2a, 0x3f, 0x23, 0x35, 0x7e, + 0x0c, 0xb6, 0x62, 0xdd, 0x2e, 0x83, 0xa4, 0x85, 0xf1, 0xe7, 0x9b, 0xc6, 0x43, 0xb7, 0x8d, 0x87, + 0x7e, 0x37, 0x1e, 0xfa, 0xb1, 0xf2, 0xac, 0xdb, 0x95, 0x67, 0xfd, 0x5a, 0x79, 0xd6, 0xb7, 0x17, + 0x94, 0xe9, 0xb2, 0xca, 0x5b, 0xfb, 0x70, 0x7b, 0xfa, 0x0d, 0xc8, 0x24, 0x0b, 0xf7, 0x7e, 0x2a, + 0xef, 0x99, 0x4f, 0x78, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x7c, 0x1d, 0x4e, 0x6d, 0x02, + 0x00, 0x00, } func (m *PacketPing) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/p2p/pex.go b/api/cometbft/p2p/v1/pex.go similarity index 98% rename from proto/tendermint/p2p/pex.go rename to api/cometbft/p2p/v1/pex.go index 6d369d4da72..be80aaa0312 100644 --- a/proto/tendermint/p2p/pex.go +++ b/api/cometbft/p2p/v1/pex.go @@ -1,4 +1,4 @@ -package p2p +package v1 import ( "fmt" diff --git a/proto/tendermint/p2p/pex.pb.go b/api/cometbft/p2p/v1/pex.pb.go similarity index 87% rename from proto/tendermint/p2p/pex.pb.go rename to api/cometbft/p2p/v1/pex.pb.go index d8dcb94add8..61ab210825c 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/api/cometbft/p2p/v1/pex.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/p2p/pex.proto +// source: cometbft/p2p/v1/pex.proto -package p2p +package v1 import ( fmt "fmt" @@ -23,6 +23,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// PexRequest is a request for peer addresses. type PexRequest struct { } @@ -30,7 +31,7 @@ func (m *PexRequest) Reset() { *m = PexRequest{} } func (m *PexRequest) String() string { return proto.CompactTextString(m) } func (*PexRequest) ProtoMessage() {} func (*PexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{0} + return fileDescriptor_3aad92aea372f558, []int{0} } func (m *PexRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -59,6 +60,7 @@ func (m *PexRequest) XXX_DiscardUnknown() { var xxx_messageInfo_PexRequest proto.InternalMessageInfo +// PexAddrs is a response with peer addresses. type PexAddrs struct { Addrs []NetAddress `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs"` } @@ -67,7 +69,7 @@ func (m *PexAddrs) Reset() { *m = PexAddrs{} } func (m *PexAddrs) String() string { return proto.CompactTextString(m) } func (*PexAddrs) ProtoMessage() {} func (*PexAddrs) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{1} + return fileDescriptor_3aad92aea372f558, []int{1} } func (m *PexAddrs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +105,10 @@ func (m *PexAddrs) GetAddrs() []NetAddress { return nil } +// Message is an abstract PEX message. type Message struct { + // Sum of all possible messages. 
+ // // Types that are valid to be assigned to Sum: // *Message_PexRequest // *Message_PexAddrs @@ -114,7 +119,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{2} + return fileDescriptor_3aad92aea372f558, []int{2} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,33 +194,32 @@ func (*Message) XXX_OneofWrappers() []interface{} { } func init() { - proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") - proto.RegisterType((*PexAddrs)(nil), "tendermint.p2p.PexAddrs") - proto.RegisterType((*Message)(nil), "tendermint.p2p.Message") -} - -func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } - -var fileDescriptor_81c2f011fd13be57 = []byte{ - // 273 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, - 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x49, 0xa1, 0xa9, - 0x2c, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0xa8, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, - 0x41, 0x2c, 0x88, 0xa8, 0x12, 0x0f, 0x17, 0x57, 0x40, 0x6a, 0x45, 0x50, 0x6a, 0x61, 0x69, 0x6a, - 0x71, 0x89, 0x92, 0x13, 0x17, 0x47, 0x40, 0x6a, 0x85, 0x63, 0x4a, 0x4a, 0x51, 0xb1, 0x90, 0x19, - 0x17, 0x6b, 0x22, 0x88, 0x21, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0x24, 0xa5, 0x87, 0x6a, 0x97, - 0x9e, 0x5f, 0x6a, 0x09, 0x48, 0x61, 0x6a, 0x71, 0xb1, 0x13, 0xcb, 0x89, 0x7b, 0xf2, 0x0c, 0x41, - 0x10, 0xe5, 0x4a, 0x1d, 0x8c, 0x5c, 0xec, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x42, 0xb6, - 0x5c, 0xdc, 0x05, 0xa9, 0x15, 0xf1, 0x45, 0x10, 0xe3, 0x25, 0x18, 0x15, 0x18, 0xb1, 0x99, 0x84, - 0x70, 0x80, 0x07, 0x43, 0x10, 0x57, 0x01, 0x9c, 0x27, 0x64, 0xce, 0xc5, 0x09, 0xd2, 0x0e, 0x71, - 0x06, 0x13, 0x58, 0xb3, 0x04, 0x16, 0xcd, 0x60, 0xf7, 0x7a, 0x30, 0x04, 0x71, 0x14, 0x40, 0xd9, - 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xde, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, - 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, - 0x2c, 0xc7, 0x10, 0x65, 0x98, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, - 0x9c, 0x9f, 0x9b, 0x5a, 0x92, 0x94, 0x56, 0x82, 0x60, 0x40, 0x42, 0x09, 0x35, 0x2c, 0x93, 0xd8, - 0xc0, 0xa2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x02, 0xad, 0x52, 0xe1, 0x8e, 0x01, 0x00, - 0x00, + proto.RegisterType((*PexRequest)(nil), "cometbft.p2p.v1.PexRequest") + proto.RegisterType((*PexAddrs)(nil), "cometbft.p2p.v1.PexAddrs") + proto.RegisterType((*Message)(nil), "cometbft.p2p.v1.Message") +} + +func init() { proto.RegisterFile("cometbft/p2p/v1/pex.proto", fileDescriptor_3aad92aea372f558) } + +var fileDescriptor_3aad92aea372f558 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x33, 0xd4, 0x2f, 0x48, 0xad, 0xd0, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x87, 0x49, 0xe9, 0x15, 0x18, 0x15, 0xe8, 0x95, 0x19, + 0x4a, 0x49, 0xa3, 0xab, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0xa8, 0x96, 0x12, 0x49, 0xcf, 0x4f, + 0xcf, 0x07, 0x33, 0xf5, 0x41, 0x2c, 0x88, 0xa8, 0x12, 0x0f, 0x17, 0x57, 0x40, 0x6a, 0x45, 0x50, + 0x6a, 
0x61, 0x69, 0x6a, 0x71, 0x89, 0x92, 0x33, 0x17, 0x47, 0x40, 0x6a, 0x85, 0x63, 0x4a, 0x4a, + 0x51, 0xb1, 0x90, 0x39, 0x17, 0x6b, 0x22, 0x88, 0x21, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0x24, + 0xad, 0x87, 0x66, 0x9b, 0x9e, 0x5f, 0x6a, 0x09, 0x48, 0x65, 0x6a, 0x71, 0xb1, 0x13, 0xcb, 0x89, + 0x7b, 0xf2, 0x0c, 0x41, 0x10, 0xf5, 0x4a, 0x5d, 0x8c, 0x5c, 0xec, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, + 0xe9, 0xa9, 0x42, 0x76, 0x5c, 0xdc, 0x05, 0xa9, 0x15, 0xf1, 0x45, 0x10, 0xf3, 0x25, 0x18, 0x15, + 0x18, 0xb1, 0x1a, 0x85, 0x70, 0x82, 0x07, 0x43, 0x10, 0x57, 0x01, 0x9c, 0x27, 0x64, 0xc1, 0xc5, + 0x09, 0xd2, 0x0f, 0x71, 0x08, 0x13, 0x58, 0xb7, 0x24, 0x36, 0xdd, 0x60, 0x27, 0x7b, 0x30, 0x04, + 0x71, 0x14, 0x40, 0xd9, 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0x5e, 0x27, 0x1e, 0xc9, + 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, + 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x90, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0x04, 0x32, + 0x4d, 0x1f, 0x1e, 0x6e, 0x70, 0x46, 0x62, 0x41, 0xa6, 0x3e, 0x5a, 0x68, 0x26, 0xb1, 0x81, 0x83, + 0xcc, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x22, 0x9c, 0xde, 0x93, 0x01, 0x00, 0x00, } func (m *PexRequest) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/p2p/types.pb.go b/api/cometbft/p2p/v1/types.pb.go similarity index 88% rename from proto/tendermint/p2p/types.pb.go rename to api/cometbft/p2p/v1/types.pb.go index 9de144b3490..8e2cdd50658 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/api/cometbft/p2p/v1/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/p2p/types.proto +// source: cometbft/p2p/v1/types.proto -package p2p +package v1 import ( fmt "fmt" @@ -23,6 +23,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// NetAddress represents a peer's network address. type NetAddress struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` @@ -33,7 +34,7 @@ func (m *NetAddress) Reset() { *m = NetAddress{} } func (m *NetAddress) String() string { return proto.CompactTextString(m) } func (*NetAddress) ProtoMessage() {} func (*NetAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{0} + return fileDescriptor_b87302e2cbe06eca, []int{0} } func (m *NetAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -83,6 +84,7 @@ func (m *NetAddress) GetPort() uint32 { return 0 } +// ProtocolVersion represents the current p2p protocol version. type ProtocolVersion struct { P2P uint64 `protobuf:"varint,1,opt,name=p2p,proto3" json:"p2p,omitempty"` Block uint64 `protobuf:"varint,2,opt,name=block,proto3" json:"block,omitempty"` @@ -93,7 +95,7 @@ func (m *ProtocolVersion) Reset() { *m = ProtocolVersion{} } func (m *ProtocolVersion) String() string { return proto.CompactTextString(m) } func (*ProtocolVersion) ProtoMessage() {} func (*ProtocolVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{1} + return fileDescriptor_b87302e2cbe06eca, []int{1} } func (m *ProtocolVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -143,6 +145,8 @@ func (m *ProtocolVersion) GetApp() uint64 { return 0 } +// DefaultNodeInfo is a basic node's information sent to other peers during the +// p2p handshake. 
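+// Editorial usage sketch (not part of the generated code): constructing this
+// message by hand looks roughly like the following; the field values are
+// placeholders, not CometBFT defaults.
+//
+//	info := DefaultNodeInfo{
+//		ProtocolVersion: ProtocolVersion{P2P: 8, Block: 11, App: 0},
+//		DefaultNodeID:   "<hex-encoded node id>",
+//	}
+//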
type DefaultNodeInfo struct { ProtocolVersion ProtocolVersion `protobuf:"bytes,1,opt,name=protocol_version,json=protocolVersion,proto3" json:"protocol_version"` DefaultNodeID string `protobuf:"bytes,2,opt,name=default_node_id,json=defaultNodeId,proto3" json:"default_node_id,omitempty"` @@ -158,7 +162,7 @@ func (m *DefaultNodeInfo) Reset() { *m = DefaultNodeInfo{} } func (m *DefaultNodeInfo) String() string { return proto.CompactTextString(m) } func (*DefaultNodeInfo) ProtoMessage() {} func (*DefaultNodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{2} + return fileDescriptor_b87302e2cbe06eca, []int{2} } func (m *DefaultNodeInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,6 +247,7 @@ func (m *DefaultNodeInfo) GetOther() DefaultNodeInfoOther { return DefaultNodeInfoOther{} } +// DefaultNodeInfoOther is the misc. application specific data. type DefaultNodeInfoOther struct { TxIndex string `protobuf:"bytes,1,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"` RPCAddress string `protobuf:"bytes,2,opt,name=rpc_address,json=rpcAddress,proto3" json:"rpc_address,omitempty"` @@ -252,7 +257,7 @@ func (m *DefaultNodeInfoOther) Reset() { *m = DefaultNodeInfoOther{} } func (m *DefaultNodeInfoOther) String() string { return proto.CompactTextString(m) } func (*DefaultNodeInfoOther) ProtoMessage() {} func (*DefaultNodeInfoOther) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{3} + return fileDescriptor_b87302e2cbe06eca, []int{3} } func (m *DefaultNodeInfoOther) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -296,47 +301,47 @@ func (m *DefaultNodeInfoOther) GetRPCAddress() string { } func init() { - proto.RegisterType((*NetAddress)(nil), "tendermint.p2p.NetAddress") - proto.RegisterType((*ProtocolVersion)(nil), "tendermint.p2p.ProtocolVersion") - proto.RegisterType((*DefaultNodeInfo)(nil), "tendermint.p2p.DefaultNodeInfo") - proto.RegisterType((*DefaultNodeInfoOther)(nil), "tendermint.p2p.DefaultNodeInfoOther") + proto.RegisterType((*NetAddress)(nil), "cometbft.p2p.v1.NetAddress") + proto.RegisterType((*ProtocolVersion)(nil), "cometbft.p2p.v1.ProtocolVersion") + proto.RegisterType((*DefaultNodeInfo)(nil), "cometbft.p2p.v1.DefaultNodeInfo") + proto.RegisterType((*DefaultNodeInfoOther)(nil), "cometbft.p2p.v1.DefaultNodeInfoOther") } -func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } +func init() { proto.RegisterFile("cometbft/p2p/v1/types.proto", fileDescriptor_b87302e2cbe06eca) } -var fileDescriptor_c8a29e659aeca578 = []byte{ - // 483 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x3d, 0x8f, 0xda, 0x40, - 0x10, 0xc5, 0xc6, 0x7c, 0xdc, 0x10, 0x8e, 0xcb, 0x0a, 0x45, 0x3e, 0x0a, 0x1b, 0xa1, 0x14, 0x54, - 0xa0, 0x90, 0x2a, 0x5d, 0x42, 0x68, 0x50, 0xa4, 0x8b, 0xb5, 0x8a, 0x52, 0xa4, 0x41, 0xe0, 0x5d, - 0x60, 0x85, 0xd9, 0x5d, 0xad, 0xf7, 0x12, 0xf2, 0x2f, 0xf2, 0xb3, 0xae, 0xbc, 0x32, 0x95, 0x15, - 0x99, 0x32, 0x7f, 0x22, 0xf2, 0xae, 0x2f, 0xc7, 0xa1, 0xeb, 0xe6, 0xcd, 0x9b, 0x99, 0x37, 0xf3, - 0x34, 0xd0, 0xd3, 0x94, 0x13, 0xaa, 0xf6, 0x8c, 0xeb, 0xb1, 0x9c, 0xc8, 0xb1, 0xfe, 0x29, 0x69, - 0x3a, 0x92, 0x4a, 0x68, 0x81, 0x2e, 0x1f, 0xb9, 0x91, 0x9c, 0xc8, 0x5e, 0x77, 0x23, 0x36, 0xc2, - 0x50, 0xe3, 0x22, 0xb2, 0x55, 0x83, 0x08, 0xe0, 0x86, 0xea, 0x0f, 0x84, 0x28, 0x9a, 0xa6, 0xe8, - 0x15, 0xb8, 0x8c, 0xf8, 0x4e, 0xdf, 0x19, 0x5e, 0x4c, 0xeb, 0x79, 0x16, 0xba, 0xf3, 0x19, 0x76, - 0x19, 0x31, 
0x79, 0xe9, 0xbb, 0x27, 0xf9, 0x08, 0xbb, 0x4c, 0x22, 0x04, 0x9e, 0x14, 0x4a, 0xfb, - 0xd5, 0xbe, 0x33, 0x6c, 0x63, 0x13, 0x0f, 0xbe, 0x40, 0x27, 0x2a, 0x46, 0xc7, 0x22, 0xf9, 0x4a, - 0x55, 0xca, 0x04, 0x47, 0xd7, 0x50, 0x95, 0x13, 0x69, 0xe6, 0x7a, 0xd3, 0x46, 0x9e, 0x85, 0xd5, - 0x68, 0x12, 0xe1, 0x22, 0x87, 0xba, 0x50, 0x5b, 0x25, 0x22, 0xde, 0x99, 0xe1, 0x1e, 0xb6, 0x00, - 0x5d, 0x41, 0x75, 0x29, 0xa5, 0x19, 0xeb, 0xe1, 0x22, 0x1c, 0xfc, 0x75, 0xa1, 0x33, 0xa3, 0xeb, - 0xe5, 0x6d, 0xa2, 0x6f, 0x04, 0xa1, 0x73, 0xbe, 0x16, 0x28, 0x82, 0x2b, 0x59, 0x2a, 0x2d, 0xbe, - 0x5b, 0x29, 0xa3, 0xd1, 0x9a, 0x84, 0xa3, 0xa7, 0xc7, 0x8f, 0xce, 0x36, 0x9a, 0x7a, 0x77, 0x59, - 0x58, 0xc1, 0x1d, 0x79, 0xb6, 0xe8, 0x3b, 0xe8, 0x10, 0x2b, 0xb2, 0xe0, 0x82, 0xd0, 0x05, 0x23, - 0xe5, 0xd1, 0x2f, 0xf3, 0x2c, 0x6c, 0x9f, 0xea, 0xcf, 0x70, 0x9b, 0x9c, 0x40, 0x82, 0x42, 0x68, - 0x25, 0x2c, 0xd5, 0x94, 0x2f, 0x96, 0x84, 0x28, 0xb3, 0xfa, 0x05, 0x06, 0x9b, 0x2a, 0xec, 0x45, - 0x3e, 0x34, 0x38, 0xd5, 0x3f, 0x84, 0xda, 0xf9, 0x9e, 0x21, 0x1f, 0x60, 0xc1, 0x3c, 0xac, 0x5f, - 0xb3, 0x4c, 0x09, 0x51, 0x0f, 0x9a, 0xf1, 0x76, 0xc9, 0x39, 0x4d, 0x52, 0xbf, 0xde, 0x77, 0x86, - 0x2f, 0xf0, 0x7f, 0x5c, 0x74, 0xed, 0x05, 0x67, 0x3b, 0xaa, 0xfc, 0x86, 0xed, 0x2a, 0x21, 0x7a, - 0x0f, 0x35, 0xa1, 0xb7, 0x54, 0xf9, 0x4d, 0x63, 0xc6, 0xeb, 0x73, 0x33, 0xce, 0x7c, 0xfc, 0x5c, - 0xd4, 0x96, 0x8e, 0xd8, 0xc6, 0xc1, 0x0a, 0xba, 0xcf, 0x15, 0xa1, 0x6b, 0x68, 0xea, 0xc3, 0x82, - 0x71, 0x42, 0x0f, 0xf6, 0x4b, 0x70, 0x43, 0x1f, 0xe6, 0x05, 0x44, 0x63, 0x68, 0x29, 0x19, 0x9b, - 0xe3, 0x69, 0x9a, 0x96, 0xb6, 0x5d, 0xe6, 0x59, 0x08, 0x38, 0xfa, 0x58, 0xfe, 0x17, 0x06, 0x25, - 0xe3, 0x32, 0x9e, 0x7e, 0xba, 0xcb, 0x03, 0xe7, 0x3e, 0x0f, 0x9c, 0x3f, 0x79, 0xe0, 0xfc, 0x3a, - 0x06, 0x95, 0xfb, 0x63, 0x50, 0xf9, 0x7d, 0x0c, 0x2a, 0xdf, 0xde, 0x6c, 0x98, 0xde, 0xde, 0xae, - 0x46, 0xb1, 0xd8, 0x8f, 0x63, 0xb1, 0xa7, 0x7a, 0xb5, 0xd6, 0x8f, 0x81, 0x7d, 0xe1, 0xa7, 0x8f, - 0xbf, 0xaa, 0x9b, 0xec, 0xdb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xae, 0xdb, 0x56, 0x6d, 0x11, - 0x03, 0x00, 0x00, +var fileDescriptor_b87302e2cbe06eca = []byte{ + // 485 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4d, 0x8f, 0xda, 0x30, + 0x10, 0x25, 0x21, 0x7c, 0xec, 0x50, 0xca, 0xd6, 0x42, 0x55, 0x76, 0x2b, 0x25, 0x08, 0xa9, 0x12, + 0x27, 0xd2, 0xa5, 0xa7, 0x1e, 0x97, 0x72, 0xa1, 0x87, 0x6d, 0x6a, 0x55, 0x3d, 0xf4, 0x82, 0x20, + 0x36, 0x60, 0xc1, 0xc6, 0x96, 0xe3, 0xa5, 0xf4, 0x5f, 0xf4, 0x67, 0xed, 0x71, 0x8f, 0x3d, 0x45, + 0x55, 0x38, 0xf7, 0x3f, 0x54, 0x76, 0x02, 0x42, 0xe9, 0xde, 0xe6, 0xcd, 0xf3, 0xcc, 0xbc, 0x79, + 0x1e, 0x78, 0x13, 0xf1, 0x7b, 0xaa, 0x16, 0x4b, 0x15, 0x88, 0x91, 0x08, 0x76, 0x37, 0x81, 0xfa, + 0x29, 0x68, 0x32, 0x14, 0x92, 0x2b, 0x8e, 0x3a, 0x47, 0x72, 0x28, 0x46, 0x62, 0xb8, 0xbb, 0xb9, + 0xee, 0xae, 0xf8, 0x8a, 0x1b, 0x2e, 0xd0, 0x51, 0xfe, 0xac, 0x1f, 0x02, 0xdc, 0x51, 0x75, 0x4b, + 0x88, 0xa4, 0x49, 0x82, 0x5e, 0x83, 0xcd, 0x88, 0x6b, 0xf5, 0xac, 0xc1, 0xc5, 0xb8, 0x9e, 0xa5, + 0xbe, 0x3d, 0x9d, 0x60, 0x9b, 0x11, 0x93, 0x17, 0xae, 0x7d, 0x96, 0x0f, 0xb1, 0xcd, 0x04, 0x42, + 0xe0, 0x08, 0x2e, 0x95, 0x5b, 0xed, 0x59, 0x83, 0x36, 0x36, 0x71, 0xff, 0x2b, 0x74, 0x42, 0xdd, + 0x3a, 0xe2, 0xdb, 0x6f, 0x54, 0x26, 0x8c, 0xc7, 0xe8, 0x0a, 0xaa, 0x62, 0x24, 0x4c, 0x5f, 0x67, + 0xdc, 0xc8, 0x52, 0xbf, 0x1a, 0x8e, 0x42, 0xac, 0x73, 0xa8, 0x0b, 0xb5, 0xc5, 0x96, 0x47, 0x1b, + 0xd3, 0xdc, 0xc1, 0x39, 0x40, 0x97, 0x50, 0x9d, 0x0b, 0x61, 0xda, 0x3a, 0x58, 0x87, 0xfd, 0xbf, + 0x36, 0x74, 0x26, 
0x74, 0x39, 0x7f, 0xd8, 0xaa, 0x3b, 0x4e, 0xe8, 0x34, 0x5e, 0x72, 0xf4, 0x05, + 0x2e, 0x45, 0x31, 0x69, 0xb6, 0xcb, 0x47, 0x99, 0x19, 0xad, 0x51, 0x6f, 0x58, 0xda, 0x7e, 0x58, + 0x92, 0x34, 0x76, 0x1e, 0x53, 0xbf, 0x82, 0x3b, 0xa2, 0xa4, 0xf4, 0x03, 0x74, 0x48, 0x3e, 0x65, + 0x16, 0x73, 0x42, 0x67, 0x8c, 0x14, 0x5b, 0xbf, 0xca, 0x52, 0xbf, 0x7d, 0x2e, 0x60, 0x82, 0xdb, + 0xe4, 0x0c, 0x12, 0xe4, 0x43, 0x6b, 0xcb, 0x12, 0x45, 0xe3, 0xd9, 0x9c, 0x10, 0x69, 0xb4, 0x5f, + 0x60, 0xc8, 0x53, 0xda, 0x5f, 0xe4, 0x42, 0x23, 0xa6, 0xea, 0x07, 0x97, 0x1b, 0xd7, 0x31, 0xe4, + 0x11, 0x6a, 0xe6, 0xa8, 0xbf, 0x96, 0x33, 0x05, 0x44, 0xd7, 0xd0, 0x8c, 0xd6, 0xf3, 0x38, 0xa6, + 0xdb, 0xc4, 0xad, 0xf7, 0xac, 0xc1, 0x0b, 0x7c, 0xc2, 0xba, 0xea, 0x9e, 0xc7, 0x6c, 0x43, 0xa5, + 0xdb, 0xc8, 0xab, 0x0a, 0x88, 0x6e, 0xa1, 0xc6, 0xd5, 0x9a, 0x4a, 0xb7, 0x69, 0xdc, 0x78, 0xfb, + 0x9f, 0x1b, 0x25, 0x27, 0x3f, 0xeb, 0xc7, 0x85, 0x25, 0x79, 0x65, 0x7f, 0x01, 0xdd, 0xe7, 0x1e, + 0xa1, 0x2b, 0x68, 0xaa, 0xfd, 0x8c, 0xc5, 0x84, 0xee, 0xf3, 0x3b, 0xc1, 0x0d, 0xb5, 0x9f, 0x6a, + 0x88, 0x02, 0x68, 0x49, 0x11, 0x99, 0xed, 0x69, 0x92, 0x14, 0xbe, 0xbd, 0xcc, 0x52, 0x1f, 0x70, + 0xf8, 0xb1, 0xb8, 0x30, 0x0c, 0x52, 0x44, 0x45, 0x3c, 0xfe, 0xf4, 0x98, 0x79, 0xd6, 0x53, 0xe6, + 0x59, 0x7f, 0x32, 0xcf, 0xfa, 0x75, 0xf0, 0x2a, 0x4f, 0x07, 0xaf, 0xf2, 0xfb, 0xe0, 0x55, 0xbe, + 0xbf, 0x5b, 0x31, 0xb5, 0x7e, 0x58, 0x68, 0xdd, 0xc1, 0xe9, 0xc8, 0x4f, 0xc1, 0x5c, 0xb0, 0xa0, + 0x74, 0xfa, 0x8b, 0xba, 0xf9, 0xc9, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x50, 0x06, 0xfb, + 0xcd, 0x14, 0x03, 0x00, 0x00, } func (m *NetAddress) Marshal() (dAtA []byte, err error) { diff --git a/api/cometbft/privval/v1/types.pb.go b/api/cometbft/privval/v1/types.pb.go new file mode 100644 index 00000000000..2d42df876c9 --- /dev/null +++ b/api/cometbft/privval/v1/types.pb.go @@ -0,0 +1,3429 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/privval/v1/types.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// remotesignererror is returned when the remote signer fails. 
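+//
+// Editorial sketch (not generated code): the signing *Response messages below
+// carry an optional RemoteSignerError, and callers conventionally treat a
+// non-nil value as a failed request, roughly:
+//
+//	if resp.Error != nil {
+//		return fmt.Errorf("remote signer: code %d: %s",
+//			resp.Error.Code, resp.Error.Description)
+//	}
+//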
+type RemoteSignerError struct { + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *RemoteSignerError) Reset() { *m = RemoteSignerError{} } +func (m *RemoteSignerError) String() string { return proto.CompactTextString(m) } +func (*RemoteSignerError) ProtoMessage() {} +func (*RemoteSignerError) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{0} +} +func (m *RemoteSignerError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoteSignerError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoteSignerError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoteSignerError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSignerError.Merge(m, src) +} +func (m *RemoteSignerError) XXX_Size() int { + return m.Size() +} +func (m *RemoteSignerError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSignerError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSignerError proto.InternalMessageInfo + +func (m *RemoteSignerError) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *RemoteSignerError) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// PubKeyRequest requests the consensus public key from the remote signer. +type PubKeyRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *PubKeyRequest) Reset() { *m = PubKeyRequest{} } +func (m *PubKeyRequest) String() string { return proto.CompactTextString(m) } +func (*PubKeyRequest) ProtoMessage() {} +func (*PubKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{1} +} +func (m *PubKeyRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PubKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PubKeyRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PubKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKeyRequest.Merge(m, src) +} +func (m *PubKeyRequest) XXX_Size() int { + return m.Size() +} +func (m *PubKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PubKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PubKeyRequest proto.InternalMessageInfo + +func (m *PubKeyRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// PubKeyResponse is a response message containing the public key. 
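+//
+// Hedged usage sketch: PubKeyBytes is the raw key and PubKeyType names its
+// codec, so the two fields are decoded together. The helper shown is assumed
+// to be the one provided by CometBFT's crypto/encoding package:
+//
+//	pk, err := encoding.PubKeyFromTypeAndBytes(resp.PubKeyType, resp.PubKeyBytes)
+//	if err != nil {
+//		return err // unknown or malformed key type
+//	}
+//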
+type PubKeyResponse struct { + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + PubKeyBytes []byte `protobuf:"bytes,3,opt,name=pub_key_bytes,json=pubKeyBytes,proto3" json:"pub_key_bytes,omitempty"` + PubKeyType string `protobuf:"bytes,4,opt,name=pub_key_type,json=pubKeyType,proto3" json:"pub_key_type,omitempty"` +} + +func (m *PubKeyResponse) Reset() { *m = PubKeyResponse{} } +func (m *PubKeyResponse) String() string { return proto.CompactTextString(m) } +func (*PubKeyResponse) ProtoMessage() {} +func (*PubKeyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{2} +} +func (m *PubKeyResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PubKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PubKeyResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PubKeyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKeyResponse.Merge(m, src) +} +func (m *PubKeyResponse) XXX_Size() int { + return m.Size() +} +func (m *PubKeyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PubKeyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PubKeyResponse proto.InternalMessageInfo + +func (m *PubKeyResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +func (m *PubKeyResponse) GetPubKeyBytes() []byte { + if m != nil { + return m.PubKeyBytes + } + return nil +} + +func (m *PubKeyResponse) GetPubKeyType() string { + if m != nil { + return m.PubKeyType + } + return "" +} + +// SignVoteRequest is a request to sign a vote +type SignVoteRequest struct { + Vote *v1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + SkipExtensionSigning bool `protobuf:"varint,3,opt,name=skip_extension_signing,json=skipExtensionSigning,proto3" json:"skip_extension_signing,omitempty"` +} + +func (m *SignVoteRequest) Reset() { *m = SignVoteRequest{} } +func (m *SignVoteRequest) String() string { return proto.CompactTextString(m) } +func (*SignVoteRequest) ProtoMessage() {} +func (*SignVoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{3} +} +func (m *SignVoteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignVoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignVoteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignVoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignVoteRequest.Merge(m, src) +} +func (m *SignVoteRequest) XXX_Size() int { + return m.Size() +} +func (m *SignVoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignVoteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignVoteRequest proto.InternalMessageInfo + +func (m *SignVoteRequest) GetVote() *v1.Vote { + if m != nil { + return m.Vote + } + return nil +} + +func (m *SignVoteRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *SignVoteRequest) GetSkipExtensionSigning() bool { + if m != nil { + return m.SkipExtensionSigning + } + return false +} + +// SignedVoteResponse is a 
response containing a signed vote or an error +type SignedVoteResponse struct { + Vote v1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignedVoteResponse) Reset() { *m = SignedVoteResponse{} } +func (m *SignedVoteResponse) String() string { return proto.CompactTextString(m) } +func (*SignedVoteResponse) ProtoMessage() {} +func (*SignedVoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{4} +} +func (m *SignedVoteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedVoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedVoteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedVoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedVoteResponse.Merge(m, src) +} +func (m *SignedVoteResponse) XXX_Size() int { + return m.Size() +} +func (m *SignedVoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignedVoteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedVoteResponse proto.InternalMessageInfo + +func (m *SignedVoteResponse) GetVote() v1.Vote { + if m != nil { + return m.Vote + } + return v1.Vote{} +} + +func (m *SignedVoteResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// SignProposalRequest is a request to sign a proposal +type SignProposalRequest struct { + Proposal *v1.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *SignProposalRequest) Reset() { *m = SignProposalRequest{} } +func (m *SignProposalRequest) String() string { return proto.CompactTextString(m) } +func (*SignProposalRequest) ProtoMessage() {} +func (*SignProposalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{5} +} +func (m *SignProposalRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignProposalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignProposalRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignProposalRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignProposalRequest.Merge(m, src) +} +func (m *SignProposalRequest) XXX_Size() int { + return m.Size() +} +func (m *SignProposalRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignProposalRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignProposalRequest proto.InternalMessageInfo + +func (m *SignProposalRequest) GetProposal() *v1.Proposal { + if m != nil { + return m.Proposal + } + return nil +} + +func (m *SignProposalRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// SignedProposalResponse is a response containing a signed proposal or an error +type SignedProposalResponse struct { + Proposal v1.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignedProposalResponse) Reset() { *m = SignedProposalResponse{} } +func 
(m *SignedProposalResponse) String() string { return proto.CompactTextString(m) } +func (*SignedProposalResponse) ProtoMessage() {} +func (*SignedProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{6} +} +func (m *SignedProposalResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedProposalResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedProposalResponse.Merge(m, src) +} +func (m *SignedProposalResponse) XXX_Size() int { + return m.Size() +} +func (m *SignedProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignedProposalResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedProposalResponse proto.InternalMessageInfo + +func (m *SignedProposalResponse) GetProposal() v1.Proposal { + if m != nil { + return m.Proposal + } + return v1.Proposal{} +} + +func (m *SignedProposalResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// SignBytesRequest is a request to sign arbitrary bytes +type SignBytesRequest struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *SignBytesRequest) Reset() { *m = SignBytesRequest{} } +func (m *SignBytesRequest) String() string { return proto.CompactTextString(m) } +func (*SignBytesRequest) ProtoMessage() {} +func (*SignBytesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{7} +} +func (m *SignBytesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignBytesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignBytesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignBytesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignBytesRequest.Merge(m, src) +} +func (m *SignBytesRequest) XXX_Size() int { + return m.Size() +} +func (m *SignBytesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignBytesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignBytesRequest proto.InternalMessageInfo + +func (m *SignBytesRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// SignBytesResponse is a response containing a signature or an error +type SignBytesResponse struct { + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignBytesResponse) Reset() { *m = SignBytesResponse{} } +func (m *SignBytesResponse) String() string { return proto.CompactTextString(m) } +func (*SignBytesResponse) ProtoMessage() {} +func (*SignBytesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{8} +} +func (m *SignBytesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignBytesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignBytesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignBytesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignBytesResponse.Merge(m, src) +} +func (m *SignBytesResponse) XXX_Size() int { + return m.Size() +} +func (m *SignBytesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignBytesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignBytesResponse proto.InternalMessageInfo + +func (m *SignBytesResponse) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func (m *SignBytesResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// PingRequest is a request to confirm that the connection is alive. +type PingRequest struct { +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{9} +} +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(m, src) +} +func (m *PingRequest) XXX_Size() int { + return m.Size() +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +// PingResponse is a response to confirm that the connection is alive. +type PingResponse struct { +} + +func (m *PingResponse) Reset() { *m = PingResponse{} } +func (m *PingResponse) String() string { return proto.CompactTextString(m) } +func (*PingResponse) ProtoMessage() {} +func (*PingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{10} +} +func (m *PingResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingResponse.Merge(m, src) +} +func (m *PingResponse) XXX_Size() int { + return m.Size() +} +func (m *PingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PingResponse proto.InternalMessageInfo + +// Message is an abstract message to/from the remote signer. +type Message struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Sum: + // *Message_PubKeyRequest + // *Message_PubKeyResponse + // *Message_SignVoteRequest + // *Message_SignedVoteResponse + // *Message_SignProposalRequest + // *Message_SignedProposalResponse + // *Message_PingRequest + // *Message_PingResponse + // *Message_SignBytesRequest + // *Message_SignBytesResponse + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_00b969dcac92905e, []int{11} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_PubKeyRequest struct { + PubKeyRequest *PubKeyRequest `protobuf:"bytes,1,opt,name=pub_key_request,json=pubKeyRequest,proto3,oneof" json:"pub_key_request,omitempty"` +} +type Message_PubKeyResponse struct { + PubKeyResponse *PubKeyResponse `protobuf:"bytes,2,opt,name=pub_key_response,json=pubKeyResponse,proto3,oneof" json:"pub_key_response,omitempty"` +} +type Message_SignVoteRequest struct { + SignVoteRequest *SignVoteRequest `protobuf:"bytes,3,opt,name=sign_vote_request,json=signVoteRequest,proto3,oneof" json:"sign_vote_request,omitempty"` +} +type Message_SignedVoteResponse struct { + SignedVoteResponse *SignedVoteResponse `protobuf:"bytes,4,opt,name=signed_vote_response,json=signedVoteResponse,proto3,oneof" json:"signed_vote_response,omitempty"` +} +type Message_SignProposalRequest struct { + SignProposalRequest *SignProposalRequest `protobuf:"bytes,5,opt,name=sign_proposal_request,json=signProposalRequest,proto3,oneof" json:"sign_proposal_request,omitempty"` +} +type Message_SignedProposalResponse struct { + SignedProposalResponse *SignedProposalResponse `protobuf:"bytes,6,opt,name=signed_proposal_response,json=signedProposalResponse,proto3,oneof" json:"signed_proposal_response,omitempty"` +} +type Message_PingRequest struct { + PingRequest *PingRequest `protobuf:"bytes,7,opt,name=ping_request,json=pingRequest,proto3,oneof" json:"ping_request,omitempty"` +} +type Message_PingResponse struct { + PingResponse *PingResponse `protobuf:"bytes,8,opt,name=ping_response,json=pingResponse,proto3,oneof" json:"ping_response,omitempty"` +} +type Message_SignBytesRequest struct { + SignBytesRequest *SignBytesRequest `protobuf:"bytes,9,opt,name=sign_bytes_request,json=signBytesRequest,proto3,oneof" json:"sign_bytes_request,omitempty"` +} +type Message_SignBytesResponse struct { + SignBytesResponse *SignBytesResponse `protobuf:"bytes,10,opt,name=sign_bytes_response,json=signBytesResponse,proto3,oneof" json:"sign_bytes_response,omitempty"` +} + +func (*Message_PubKeyRequest) isMessage_Sum() {} +func (*Message_PubKeyResponse) isMessage_Sum() {} +func (*Message_SignVoteRequest) 
isMessage_Sum() {} +func (*Message_SignedVoteResponse) isMessage_Sum() {} +func (*Message_SignProposalRequest) isMessage_Sum() {} +func (*Message_SignedProposalResponse) isMessage_Sum() {} +func (*Message_PingRequest) isMessage_Sum() {} +func (*Message_PingResponse) isMessage_Sum() {} +func (*Message_SignBytesRequest) isMessage_Sum() {} +func (*Message_SignBytesResponse) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetPubKeyRequest() *PubKeyRequest { + if x, ok := m.GetSum().(*Message_PubKeyRequest); ok { + return x.PubKeyRequest + } + return nil +} + +func (m *Message) GetPubKeyResponse() *PubKeyResponse { + if x, ok := m.GetSum().(*Message_PubKeyResponse); ok { + return x.PubKeyResponse + } + return nil +} + +func (m *Message) GetSignVoteRequest() *SignVoteRequest { + if x, ok := m.GetSum().(*Message_SignVoteRequest); ok { + return x.SignVoteRequest + } + return nil +} + +func (m *Message) GetSignedVoteResponse() *SignedVoteResponse { + if x, ok := m.GetSum().(*Message_SignedVoteResponse); ok { + return x.SignedVoteResponse + } + return nil +} + +func (m *Message) GetSignProposalRequest() *SignProposalRequest { + if x, ok := m.GetSum().(*Message_SignProposalRequest); ok { + return x.SignProposalRequest + } + return nil +} + +func (m *Message) GetSignedProposalResponse() *SignedProposalResponse { + if x, ok := m.GetSum().(*Message_SignedProposalResponse); ok { + return x.SignedProposalResponse + } + return nil +} + +func (m *Message) GetPingRequest() *PingRequest { + if x, ok := m.GetSum().(*Message_PingRequest); ok { + return x.PingRequest + } + return nil +} + +func (m *Message) GetPingResponse() *PingResponse { + if x, ok := m.GetSum().(*Message_PingResponse); ok { + return x.PingResponse + } + return nil +} + +func (m *Message) GetSignBytesRequest() *SignBytesRequest { + if x, ok := m.GetSum().(*Message_SignBytesRequest); ok { + return x.SignBytesRequest + } + return nil +} + +func (m *Message) GetSignBytesResponse() *SignBytesResponse { + if x, ok := m.GetSum().(*Message_SignBytesResponse); ok { + return x.SignBytesResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
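+//
+// Editorial sketch (hypothetical handler, not generated code): a remote
+// signer service typically type-switches on the wrapped Sum value to dispatch
+// incoming requests and build the matching response:
+//
+//	switch msg.GetSum().(type) {
+//	case *Message_PingRequest:
+//		reply.Sum = &Message_PingResponse{PingResponse: &PingResponse{}}
+//	case *Message_SignVoteRequest:
+//		// extract via msg.GetSignVoteRequest(), sign the vote for the
+//		// given ChainId, and wrap it in a Message_SignedVoteResponse
+//	}
+//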
+func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_PubKeyRequest)(nil), + (*Message_PubKeyResponse)(nil), + (*Message_SignVoteRequest)(nil), + (*Message_SignedVoteResponse)(nil), + (*Message_SignProposalRequest)(nil), + (*Message_SignedProposalResponse)(nil), + (*Message_PingRequest)(nil), + (*Message_PingResponse)(nil), + (*Message_SignBytesRequest)(nil), + (*Message_SignBytesResponse)(nil), + } +} + +func init() { + proto.RegisterType((*RemoteSignerError)(nil), "cometbft.privval.v1.RemoteSignerError") + proto.RegisterType((*PubKeyRequest)(nil), "cometbft.privval.v1.PubKeyRequest") + proto.RegisterType((*PubKeyResponse)(nil), "cometbft.privval.v1.PubKeyResponse") + proto.RegisterType((*SignVoteRequest)(nil), "cometbft.privval.v1.SignVoteRequest") + proto.RegisterType((*SignedVoteResponse)(nil), "cometbft.privval.v1.SignedVoteResponse") + proto.RegisterType((*SignProposalRequest)(nil), "cometbft.privval.v1.SignProposalRequest") + proto.RegisterType((*SignedProposalResponse)(nil), "cometbft.privval.v1.SignedProposalResponse") + proto.RegisterType((*SignBytesRequest)(nil), "cometbft.privval.v1.SignBytesRequest") + proto.RegisterType((*SignBytesResponse)(nil), "cometbft.privval.v1.SignBytesResponse") + proto.RegisterType((*PingRequest)(nil), "cometbft.privval.v1.PingRequest") + proto.RegisterType((*PingResponse)(nil), "cometbft.privval.v1.PingResponse") + proto.RegisterType((*Message)(nil), "cometbft.privval.v1.Message") +} + +func init() { proto.RegisterFile("cometbft/privval/v1/types.proto", fileDescriptor_00b969dcac92905e) } + +var fileDescriptor_00b969dcac92905e = []byte{ + // 778 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0xd3, 0x4a, + 0x18, 0xb5, 0xdb, 0xa4, 0x4d, 0xbe, 0x24, 0x6d, 0x32, 0xc9, 0xed, 0xcd, 0xed, 0xbd, 0x37, 0x0d, + 0xe6, 0x15, 0x51, 0x29, 0x51, 0x4a, 0x25, 0x36, 0xb0, 0x89, 0x54, 0xc9, 0xe5, 0x59, 0x4d, 0x01, + 0x21, 0x90, 0x88, 0xf2, 0x18, 0xdc, 0x51, 0x5b, 0xcf, 0xe0, 0x71, 0x22, 0xf2, 0x03, 0x58, 0xc3, + 0x86, 0x0d, 0xbf, 0xa8, 0xcb, 0x2e, 0x59, 0x21, 0xd4, 0x2e, 0xf9, 0x13, 0xc8, 0x33, 0x63, 0xe7, + 0x51, 0x37, 0x02, 0x75, 0x37, 0xf3, 0xf9, 0xcc, 0xf9, 0xce, 0xf9, 0x3c, 0xc7, 0x86, 0x8d, 0x1e, + 0x3b, 0x26, 0x7e, 0xf7, 0x9d, 0xdf, 0xe0, 0x1e, 0x1d, 0x0e, 0x3b, 0x47, 0x8d, 0x61, 0xb3, 0xe1, + 0x8f, 0x38, 0x11, 0x75, 0xee, 0x31, 0x9f, 0xa1, 0x62, 0x08, 0xa8, 0x6b, 0x40, 0x7d, 0xd8, 0x5c, + 0xff, 0x3f, 0x3a, 0x25, 0xa1, 0x33, 0x67, 0xd6, 0x4b, 0x0e, 0x73, 0x98, 0x5c, 0x36, 0x82, 0x95, + 0xaa, 0x5a, 0xbb, 0x50, 0xc0, 0xe4, 0x98, 0xf9, 0x64, 0x9f, 0x3a, 0x2e, 0xf1, 0x76, 0x3c, 0x8f, + 0x79, 0x08, 0x41, 0xa2, 0xc7, 0xfa, 0xa4, 0x6c, 0x56, 0xcd, 0x5a, 0x12, 0xcb, 0x35, 0xaa, 0x42, + 0xa6, 0x4f, 0x44, 0xcf, 0xa3, 0xdc, 0xa7, 0xcc, 0x2d, 0x2f, 0x54, 0xcd, 0x5a, 0x1a, 0x4f, 0x96, + 0xac, 0x3b, 0x90, 0xdb, 0x1b, 0x74, 0x1f, 0x91, 0x11, 0x26, 0xef, 0x07, 0x44, 0xf8, 0xe8, 0x1f, + 0x48, 0xf5, 0x0e, 0x3a, 0xd4, 0x6d, 0xd3, 0xbe, 0xa4, 0x4a, 0xe3, 0x65, 0xb9, 0xdf, 0xed, 0x5b, + 0x5f, 0x4d, 0x58, 0x09, 0xc1, 0x82, 0x33, 0x57, 0x10, 0x74, 0x1f, 0x92, 0x24, 0xe8, 0x2e, 0xa9, + 0x33, 0x5b, 0xb7, 0xea, 0x31, 0x1e, 0xeb, 0x17, 0xb4, 0x62, 0x75, 0x08, 0x59, 0x90, 0xe3, 0x83, + 0x6e, 0xfb, 0x90, 0x8c, 0xda, 0xdd, 0x91, 0x4f, 0x44, 0x79, 0xb1, 0x6a, 0xd6, 0xb2, 0x38, 0xc3, + 0x65, 0x93, 0x56, 0x50, 0x42, 0x55, 0xc8, 0x86, 0x98, 0x60, 0x30, 0xe5, 0x84, 0xd4, 0x04, 0x0a, + 0xf2, 0x7c, 0xc4, 0xc9, 0xc3, 0x44, 0xca, 0xcc, 0x2f, 0x58, 0x9f, 0x4c, 0x58, 0x0d, 0x5a, 0xbc, + 
0x64, 0x3e, 0x09, 0xbd, 0x6c, 0x42, 0x62, 0xc8, 0x7c, 0x35, 0x92, 0xcc, 0xd6, 0xdf, 0x63, 0x71, + 0x6a, 0xc4, 0xc3, 0x66, 0x5d, 0xa2, 0x25, 0x68, 0xca, 0xf8, 0xc2, 0x94, 0x71, 0xb4, 0x0d, 0x6b, + 0xe2, 0x90, 0xf2, 0x36, 0xf9, 0xe0, 0x13, 0x57, 0x50, 0xe6, 0xb6, 0x05, 0x75, 0x5c, 0xea, 0x3a, + 0x52, 0x70, 0x0a, 0x97, 0x82, 0xa7, 0x3b, 0xe1, 0xc3, 0x7d, 0xf5, 0xcc, 0xfa, 0x68, 0x02, 0x92, + 0xa6, 0xfb, 0x4a, 0x93, 0x1e, 0x59, 0xf3, 0xb7, 0x44, 0xb5, 0x12, 0x27, 0xdf, 0x37, 0x0c, 0x2d, + 0xed, 0x4a, 0x53, 0xb6, 0x28, 0x14, 0x83, 0xea, 0x9e, 0xc7, 0x38, 0x13, 0x9d, 0xa3, 0x70, 0x38, + 0xf7, 0x20, 0xc5, 0x75, 0x49, 0x6b, 0xf9, 0x37, 0x46, 0x4b, 0x74, 0x2a, 0x02, 0xcf, 0x19, 0x94, + 0xf5, 0xc5, 0x84, 0x35, 0x65, 0x79, 0xdc, 0x4d, 0xdb, 0x7e, 0xf0, 0x47, 0xed, 0xb4, 0xfd, 0x71, + 0xd3, 0xab, 0x8d, 0xa0, 0x06, 0xf9, 0xa0, 0x2a, 0x6f, 0x54, 0xe8, 0xbf, 0x04, 0xc9, 0x61, 0xe7, + 0x68, 0xa0, 0x5e, 0x44, 0x16, 0xab, 0x8d, 0xc5, 0xa0, 0x30, 0x81, 0xd4, 0xda, 0xff, 0x83, 0x74, + 0xf0, 0xc2, 0x3b, 0xfe, 0xc0, 0x0b, 0xe1, 0xe3, 0xc2, 0x15, 0xa5, 0xe5, 0x20, 0xb3, 0x47, 0x5d, + 0x47, 0xab, 0xb2, 0x56, 0x20, 0xab, 0xb6, 0xaa, 0xb5, 0xf5, 0x73, 0x09, 0x96, 0x9f, 0x10, 0x21, + 0x3a, 0x0e, 0x41, 0x8f, 0x61, 0x35, 0x8c, 0x82, 0xa7, 0xe0, 0x7a, 0x92, 0x56, 0x6c, 0xcb, 0xa9, + 0x5c, 0xdb, 0x06, 0xce, 0xf1, 0xa9, 0xa0, 0x3f, 0x83, 0xfc, 0x98, 0x4d, 0x75, 0xd3, 0x0e, 0xae, + 0xcf, 0xa5, 0x53, 0x50, 0xdb, 0xc0, 0x2b, 0x7c, 0xfa, 0x5b, 0x80, 0xa1, 0x10, 0x0c, 0xa5, 0x1d, + 0x5c, 0xd9, 0x48, 0xe0, 0xa2, 0x64, 0xbc, 0x11, 0xcb, 0x38, 0x13, 0x57, 0xdb, 0xc0, 0xab, 0x62, + 0x26, 0xc1, 0x6f, 0xa0, 0x24, 0xe4, 0x7d, 0x0a, 0x59, 0xb5, 0xd0, 0x84, 0xa4, 0xbd, 0x7d, 0x29, + 0xed, 0x74, 0xe6, 0x6c, 0x03, 0x23, 0x71, 0x31, 0x89, 0x6f, 0xe1, 0x2f, 0x29, 0x38, 0xbc, 0x64, + 0x91, 0xe8, 0xa4, 0x64, 0xaf, 0x5d, 0xca, 0x3e, 0x13, 0x25, 0xdb, 0xc0, 0x45, 0x11, 0x93, 0x30, + 0x07, 0xca, 0x5a, 0xfc, 0x44, 0x07, 0x6d, 0x60, 0x49, 0xb6, 0xd8, 0x9c, 0x63, 0x60, 0x36, 0x41, + 0xb6, 0x81, 0xd7, 0x44, 0x7c, 0xb6, 0x76, 0x20, 0xcb, 0xa9, 0xeb, 0x44, 0xfa, 0x97, 0x25, 0x79, + 0x35, 0xfe, 0x35, 0x8e, 0x2f, 0x9b, 0x6d, 0xe0, 0x0c, 0x1f, 0x6f, 0x91, 0x0d, 0x39, 0x4d, 0xa3, + 0x45, 0xa6, 0x24, 0xcf, 0xb5, 0x39, 0x3c, 0x91, 0xb4, 0x2c, 0x9f, 0xd8, 0xa3, 0x17, 0x20, 0xe7, + 0xad, 0xbe, 0xea, 0x91, 0xac, 0xb4, 0xa4, 0xbb, 0x79, 0xa9, 0xe7, 0xc9, 0x78, 0xda, 0x06, 0xce, + 0x8b, 0xd9, 0xc8, 0xbe, 0x82, 0xe2, 0x14, 0xad, 0x96, 0x09, 0x73, 0x72, 0x77, 0x21, 0xcc, 0xb6, + 0x81, 0x0b, 0x62, 0xb6, 0xd8, 0x4a, 0xc2, 0xa2, 0x18, 0x1c, 0xb7, 0x9e, 0x9e, 0x9c, 0x55, 0xcc, + 0xd3, 0xb3, 0x8a, 0xf9, 0xe3, 0xac, 0x62, 0x7e, 0x3e, 0xaf, 0x18, 0xa7, 0xe7, 0x15, 0xe3, 0xdb, + 0x79, 0xc5, 0x78, 0xbd, 0xed, 0x50, 0xff, 0x60, 0xd0, 0x0d, 0x7a, 0x34, 0xa2, 0x5f, 0x76, 0xb4, + 0xe8, 0x70, 0xda, 0x88, 0xf9, 0xfd, 0x77, 0x97, 0xe4, 0xff, 0xfa, 0xee, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x18, 0x30, 0xda, 0x89, 0x1c, 0x08, 0x00, 0x00, +} + +func (m *RemoteSignerError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoteSignerError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoteSignerError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Description))) + i-- + 
dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PubKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PubKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PubKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PubKeyType) > 0 { + i -= len(m.PubKeyType) + copy(dAtA[i:], m.PubKeyType) + i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKeyType))) + i-- + dAtA[i] = 0x22 + } + if len(m.PubKeyBytes) > 0 { + i -= len(m.PubKeyBytes) + copy(dAtA[i:], m.PubKeyBytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKeyBytes))) + i-- + dAtA[i] = 0x1a + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *SignVoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SkipExtensionSigning { + i-- + if m.SkipExtensionSigning { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedVoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignProposalRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignProposalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.Proposal != nil { + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedProposalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignBytesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignBytesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignBytesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignBytesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignBytesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignBytesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = 
encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_PubKeyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PubKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PubKeyRequest != nil { + { + size, err := m.PubKeyRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_PubKeyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PubKeyResponse != nil { + { + size, err := m.PubKeyResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_SignVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignVoteRequest != nil { + { + size, err := m.SignVoteRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_SignedVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignedVoteResponse != nil { + { + size, err := 
m.SignedVoteResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_SignProposalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignProposalRequest != nil { + { + size, err := m.SignProposalRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Message_SignedProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignedProposalResponse != nil { + { + size, err := m.SignedProposalResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Message_PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PingRequest != nil { + { + size, err := m.PingRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Message_PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PingResponse != nil { + { + size, err := m.PingResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Message_SignBytesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignBytesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignBytesRequest != nil { + { + size, err := m.SignBytesRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Message_SignBytesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignBytesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignBytesResponse != nil { + { + size, err := m.SignBytesResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RemoteSignerError) Size() (n int) { + if m == nil { + return 0 
+ } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PubKeyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PubKeyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.PubKeyBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.PubKeyType) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.SkipExtensionSigning { + n += 2 + } + return n +} + +func (m *SignedVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignProposalRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proposal != nil { + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignBytesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignBytesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_PubKeyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKeyRequest != nil { + l = m.PubKeyRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PubKeyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKeyResponse != nil { + l = m.PubKeyResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignVoteRequest != nil { + l = m.SignVoteRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignedVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedVoteResponse != nil { + l = m.SignedVoteResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignProposalRequest) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + if m.SignProposalRequest != nil { + l = m.SignProposalRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignedProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedProposalResponse != nil { + l = m.SignedProposalResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingRequest != nil { + l = m.PingRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingResponse != nil { + l = m.PingResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignBytesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignBytesRequest != nil { + l = m.SignBytesRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignBytesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignBytesResponse != nil { + l = m.SignBytesResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoteSignerError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoteSignerError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyRequest) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + 
return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyBytes = append(m.PubKeyBytes[:0], dAtA[iNdEx:postIndex]...) + if m.PubKeyBytes == nil { + m.PubKeyBytes = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignVoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignVoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &v1.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipExtensionSigning", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipExtensionSigning = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedVoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedVoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignProposalRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: SignProposalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proposal == nil { + m.Proposal = &v1.Proposal{} + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedProposalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignBytesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignBytesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignBytesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
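+ // Note: the append into m.Value[:0] above copies the payload out of dAtA rather than aliasing the input buffer; the nil check below then normalizes a present-but-empty bytes field to an empty, non-nil slice.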
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignBytesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignBytesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignBytesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = 
preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PubKeyRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PubKeyRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PubKeyResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PubKeyResponse{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignVoteRequest", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignVoteRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignVoteRequest{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedVoteResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignedVoteResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignedVoteResponse{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignProposalRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignProposalRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignProposalRequest{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedProposalResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignedProposalResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignedProposalResponse{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PingRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PingRequest{v} + 
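+ // Each decoded variant is stored behind its oneof wrapper type (here Message_PingRequest), so m.Sum records which member of the Message union was set.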
iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PingResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PingResponse{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignBytesRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignBytesRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignBytesRequest{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignBytesResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignBytesResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignBytesResponse{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + 
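+ // Wire types 3 and 4 are the legacy start-group/end-group markers; skipTypes tracks their nesting depth so an entire group is skipped as a single unit.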
case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/privval/v1beta1/types.pb.go b/api/cometbft/privval/v1beta1/types.pb.go new file mode 100644 index 00000000000..69adae65550 --- /dev/null +++ b/api/cometbft/privval/v1beta1/types.pb.go @@ -0,0 +1,2817 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/privval/v1beta1/types.proto + +package v1beta1 + +import ( + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Errors is a list of error codes that can be returned by the remote signer. +type Errors int32 + +const ( + // Unknown error + Errors_ERRORS_UNKNOWN Errors = 0 + // Unexpected response + Errors_ERRORS_UNEXPECTED_RESPONSE Errors = 1 + // Connection lost + Errors_ERRORS_NO_CONNECTION Errors = 2 + // Connection timeout + Errors_ERRORS_CONNECTION_TIMEOUT Errors = 3 + // Read timeout + Errors_ERRORS_READ_TIMEOUT Errors = 4 + // Write timeout + Errors_ERRORS_WRITE_TIMEOUT Errors = 5 +) + +var Errors_name = map[int32]string{ + 0: "ERRORS_UNKNOWN", + 1: "ERRORS_UNEXPECTED_RESPONSE", + 2: "ERRORS_NO_CONNECTION", + 3: "ERRORS_CONNECTION_TIMEOUT", + 4: "ERRORS_READ_TIMEOUT", + 5: "ERRORS_WRITE_TIMEOUT", +} + +var Errors_value = map[string]int32{ + "ERRORS_UNKNOWN": 0, + "ERRORS_UNEXPECTED_RESPONSE": 1, + "ERRORS_NO_CONNECTION": 2, + "ERRORS_CONNECTION_TIMEOUT": 3, + "ERRORS_READ_TIMEOUT": 4, + "ERRORS_WRITE_TIMEOUT": 5, +} + +func (x Errors) String() string { + return proto.EnumName(Errors_name, int32(x)) +} + +func (Errors) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{0} +} + +// RemoteSignerError is an error returned by the remote signer. 
+type RemoteSignerError struct { + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *RemoteSignerError) Reset() { *m = RemoteSignerError{} } +func (m *RemoteSignerError) String() string { return proto.CompactTextString(m) } +func (*RemoteSignerError) ProtoMessage() {} +func (*RemoteSignerError) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{0} +} +func (m *RemoteSignerError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoteSignerError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoteSignerError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoteSignerError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSignerError.Merge(m, src) +} +func (m *RemoteSignerError) XXX_Size() int { + return m.Size() +} +func (m *RemoteSignerError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSignerError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSignerError proto.InternalMessageInfo + +func (m *RemoteSignerError) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *RemoteSignerError) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// PubKeyRequest requests the consensus public key from the remote signer. +type PubKeyRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *PubKeyRequest) Reset() { *m = PubKeyRequest{} } +func (m *PubKeyRequest) String() string { return proto.CompactTextString(m) } +func (*PubKeyRequest) ProtoMessage() {} +func (*PubKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{1} +} +func (m *PubKeyRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PubKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PubKeyRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PubKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKeyRequest.Merge(m, src) +} +func (m *PubKeyRequest) XXX_Size() int { + return m.Size() +} +func (m *PubKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PubKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PubKeyRequest proto.InternalMessageInfo + +func (m *PubKeyRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// PubKeyResponse is a response message containing the public key. 
+type PubKeyResponse struct { + PubKey v1.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *PubKeyResponse) Reset() { *m = PubKeyResponse{} } +func (m *PubKeyResponse) String() string { return proto.CompactTextString(m) } +func (*PubKeyResponse) ProtoMessage() {} +func (*PubKeyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{2} +} +func (m *PubKeyResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PubKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PubKeyResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PubKeyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKeyResponse.Merge(m, src) +} +func (m *PubKeyResponse) XXX_Size() int { + return m.Size() +} +func (m *PubKeyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PubKeyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PubKeyResponse proto.InternalMessageInfo + +func (m *PubKeyResponse) GetPubKey() v1.PublicKey { + if m != nil { + return m.PubKey + } + return v1.PublicKey{} +} + +func (m *PubKeyResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// SignVoteRequest is a request to sign a vote +type SignVoteRequest struct { + Vote *v1beta1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *SignVoteRequest) Reset() { *m = SignVoteRequest{} } +func (m *SignVoteRequest) String() string { return proto.CompactTextString(m) } +func (*SignVoteRequest) ProtoMessage() {} +func (*SignVoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{3} +} +func (m *SignVoteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignVoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignVoteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignVoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignVoteRequest.Merge(m, src) +} +func (m *SignVoteRequest) XXX_Size() int { + return m.Size() +} +func (m *SignVoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignVoteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignVoteRequest proto.InternalMessageInfo + +func (m *SignVoteRequest) GetVote() *v1beta1.Vote { + if m != nil { + return m.Vote + } + return nil +} + +func (m *SignVoteRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// SignedVoteResponse is a response containing a signed vote or an error +type SignedVoteResponse struct { + Vote v1beta1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignedVoteResponse) Reset() { *m = SignedVoteResponse{} } +func (m *SignedVoteResponse) String() string { return proto.CompactTextString(m) } +func (*SignedVoteResponse) ProtoMessage() {} +func (*SignedVoteResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2e82066b29171f6d, []int{4} +} +func (m *SignedVoteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedVoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedVoteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedVoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedVoteResponse.Merge(m, src) +} +func (m *SignedVoteResponse) XXX_Size() int { + return m.Size() +} +func (m *SignedVoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignedVoteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedVoteResponse proto.InternalMessageInfo + +func (m *SignedVoteResponse) GetVote() v1beta1.Vote { + if m != nil { + return m.Vote + } + return v1beta1.Vote{} +} + +func (m *SignedVoteResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// SignProposalRequest is a request to sign a proposal +type SignProposalRequest struct { + Proposal *v1beta1.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *SignProposalRequest) Reset() { *m = SignProposalRequest{} } +func (m *SignProposalRequest) String() string { return proto.CompactTextString(m) } +func (*SignProposalRequest) ProtoMessage() {} +func (*SignProposalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{5} +} +func (m *SignProposalRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignProposalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignProposalRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignProposalRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignProposalRequest.Merge(m, src) +} +func (m *SignProposalRequest) XXX_Size() int { + return m.Size() +} +func (m *SignProposalRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignProposalRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignProposalRequest proto.InternalMessageInfo + +func (m *SignProposalRequest) GetProposal() *v1beta1.Proposal { + if m != nil { + return m.Proposal + } + return nil +} + +func (m *SignProposalRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// SignedProposalResponse is a response containing a signed proposal or an error +type SignedProposalResponse struct { + Proposal v1beta1.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignedProposalResponse) Reset() { *m = SignedProposalResponse{} } +func (m *SignedProposalResponse) String() string { return proto.CompactTextString(m) } +func (*SignedProposalResponse) ProtoMessage() {} +func (*SignedProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{6} +} +func (m *SignedProposalResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_SignedProposalResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedProposalResponse.Merge(m, src) +} +func (m *SignedProposalResponse) XXX_Size() int { + return m.Size() +} +func (m *SignedProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignedProposalResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedProposalResponse proto.InternalMessageInfo + +func (m *SignedProposalResponse) GetProposal() v1beta1.Proposal { + if m != nil { + return m.Proposal + } + return v1beta1.Proposal{} +} + +func (m *SignedProposalResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// PingRequest is a request to confirm that the connection is alive. +type PingRequest struct { +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{7} +} +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(m, src) +} +func (m *PingRequest) XXX_Size() int { + return m.Size() +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +// PingResponse is a response to confirm that the connection is alive. +type PingResponse struct { +} + +func (m *PingResponse) Reset() { *m = PingResponse{} } +func (m *PingResponse) String() string { return proto.CompactTextString(m) } +func (*PingResponse) ProtoMessage() {} +func (*PingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{8} +} +func (m *PingResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingResponse.Merge(m, src) +} +func (m *PingResponse) XXX_Size() int { + return m.Size() +} +func (m *PingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PingResponse proto.InternalMessageInfo + +// Message is an abstract message to/from the remote signer. +type Message struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Sum: + // *Message_PubKeyRequest + // *Message_PubKeyResponse + // *Message_SignVoteRequest + // *Message_SignedVoteResponse + // *Message_SignProposalRequest + // *Message_SignedProposalResponse + // *Message_PingRequest + // *Message_PingResponse + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_2e82066b29171f6d, []int{9} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_PubKeyRequest struct { + PubKeyRequest *PubKeyRequest `protobuf:"bytes,1,opt,name=pub_key_request,json=pubKeyRequest,proto3,oneof" json:"pub_key_request,omitempty"` +} +type Message_PubKeyResponse struct { + PubKeyResponse *PubKeyResponse `protobuf:"bytes,2,opt,name=pub_key_response,json=pubKeyResponse,proto3,oneof" json:"pub_key_response,omitempty"` +} +type Message_SignVoteRequest struct { + SignVoteRequest *SignVoteRequest `protobuf:"bytes,3,opt,name=sign_vote_request,json=signVoteRequest,proto3,oneof" json:"sign_vote_request,omitempty"` +} +type Message_SignedVoteResponse struct { + SignedVoteResponse *SignedVoteResponse `protobuf:"bytes,4,opt,name=signed_vote_response,json=signedVoteResponse,proto3,oneof" json:"signed_vote_response,omitempty"` +} +type Message_SignProposalRequest struct { + SignProposalRequest *SignProposalRequest `protobuf:"bytes,5,opt,name=sign_proposal_request,json=signProposalRequest,proto3,oneof" json:"sign_proposal_request,omitempty"` +} +type Message_SignedProposalResponse struct { + SignedProposalResponse *SignedProposalResponse `protobuf:"bytes,6,opt,name=signed_proposal_response,json=signedProposalResponse,proto3,oneof" json:"signed_proposal_response,omitempty"` +} +type Message_PingRequest struct { + PingRequest *PingRequest `protobuf:"bytes,7,opt,name=ping_request,json=pingRequest,proto3,oneof" json:"ping_request,omitempty"` +} +type Message_PingResponse struct { + PingResponse *PingResponse `protobuf:"bytes,8,opt,name=ping_response,json=pingResponse,proto3,oneof" json:"ping_response,omitempty"` +} + +func (*Message_PubKeyRequest) isMessage_Sum() {} +func (*Message_PubKeyResponse) isMessage_Sum() {} +func (*Message_SignVoteRequest) isMessage_Sum() {} +func (*Message_SignedVoteResponse) isMessage_Sum() {} +func (*Message_SignProposalRequest) isMessage_Sum() {} +func (*Message_SignedProposalResponse) isMessage_Sum() {} +func (*Message_PingRequest) isMessage_Sum() {} +func (*Message_PingResponse) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetPubKeyRequest() *PubKeyRequest { + if x, ok := 
m.GetSum().(*Message_PubKeyRequest); ok { + return x.PubKeyRequest + } + return nil +} + +func (m *Message) GetPubKeyResponse() *PubKeyResponse { + if x, ok := m.GetSum().(*Message_PubKeyResponse); ok { + return x.PubKeyResponse + } + return nil +} + +func (m *Message) GetSignVoteRequest() *SignVoteRequest { + if x, ok := m.GetSum().(*Message_SignVoteRequest); ok { + return x.SignVoteRequest + } + return nil +} + +func (m *Message) GetSignedVoteResponse() *SignedVoteResponse { + if x, ok := m.GetSum().(*Message_SignedVoteResponse); ok { + return x.SignedVoteResponse + } + return nil +} + +func (m *Message) GetSignProposalRequest() *SignProposalRequest { + if x, ok := m.GetSum().(*Message_SignProposalRequest); ok { + return x.SignProposalRequest + } + return nil +} + +func (m *Message) GetSignedProposalResponse() *SignedProposalResponse { + if x, ok := m.GetSum().(*Message_SignedProposalResponse); ok { + return x.SignedProposalResponse + } + return nil +} + +func (m *Message) GetPingRequest() *PingRequest { + if x, ok := m.GetSum().(*Message_PingRequest); ok { + return x.PingRequest + } + return nil +} + +func (m *Message) GetPingResponse() *PingResponse { + if x, ok := m.GetSum().(*Message_PingResponse); ok { + return x.PingResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_PubKeyRequest)(nil), + (*Message_PubKeyResponse)(nil), + (*Message_SignVoteRequest)(nil), + (*Message_SignedVoteResponse)(nil), + (*Message_SignProposalRequest)(nil), + (*Message_SignedProposalResponse)(nil), + (*Message_PingRequest)(nil), + (*Message_PingResponse)(nil), + } +} + +func init() { + proto.RegisterEnum("cometbft.privval.v1beta1.Errors", Errors_name, Errors_value) + proto.RegisterType((*RemoteSignerError)(nil), "cometbft.privval.v1beta1.RemoteSignerError") + proto.RegisterType((*PubKeyRequest)(nil), "cometbft.privval.v1beta1.PubKeyRequest") + proto.RegisterType((*PubKeyResponse)(nil), "cometbft.privval.v1beta1.PubKeyResponse") + proto.RegisterType((*SignVoteRequest)(nil), "cometbft.privval.v1beta1.SignVoteRequest") + proto.RegisterType((*SignedVoteResponse)(nil), "cometbft.privval.v1beta1.SignedVoteResponse") + proto.RegisterType((*SignProposalRequest)(nil), "cometbft.privval.v1beta1.SignProposalRequest") + proto.RegisterType((*SignedProposalResponse)(nil), "cometbft.privval.v1beta1.SignedProposalResponse") + proto.RegisterType((*PingRequest)(nil), "cometbft.privval.v1beta1.PingRequest") + proto.RegisterType((*PingResponse)(nil), "cometbft.privval.v1beta1.PingResponse") + proto.RegisterType((*Message)(nil), "cometbft.privval.v1beta1.Message") +} + +func init() { + proto.RegisterFile("cometbft/privval/v1beta1/types.proto", fileDescriptor_2e82066b29171f6d) +} + +var fileDescriptor_2e82066b29171f6d = []byte{ + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x4e, 0xeb, 0x46, + 0x14, 0xb6, 0x21, 0x3f, 0x70, 0x42, 0x42, 0x18, 0x28, 0x0d, 0x51, 0x49, 0xa3, 0xa8, 0x3f, 0x94, + 0xb6, 0x36, 0xa1, 0x52, 0xa5, 0x4a, 0x6c, 0x08, 0x58, 0x4a, 0x8a, 0x70, 0xd2, 0x49, 0x28, 0x55, + 0x17, 0x75, 0xf3, 0x33, 0x35, 0x16, 0xc1, 0x9e, 0x7a, 0x9c, 0x48, 0x79, 0x8a, 0xb6, 0x4f, 0xd0, + 0x6d, 0x1f, 0x85, 0x25, 0xcb, 0xae, 0xae, 0xae, 0xc2, 0x8b, 0x5c, 0x65, 0x3c, 0xb6, 0x93, 0x40, + 0xc2, 0xbd, 0x12, 0xbb, 0x99, 0x33, 0xe7, 0x7c, 0x3f, 0xc7, 0xe7, 0xc8, 0xf0, 0x59, 0xd7, 0xb9, + 0x23, 0x5e, 0xe7, 0x0f, 0x4f, 0xa5, 0xae, 
0x35, 0x1c, 0xb6, 0xfb, 0xea, 0xb0, 0xdc, 0x21, 0x5e, + 0xbb, 0xac, 0x7a, 0x23, 0x4a, 0x98, 0x42, 0x5d, 0xc7, 0x73, 0x50, 0x2e, 0xc8, 0x52, 0x44, 0x96, + 0x22, 0xb2, 0xf2, 0xfb, 0x61, 0x7d, 0xd7, 0x1d, 0x51, 0xcf, 0x51, 0x87, 0x65, 0xf5, 0x96, 0x8c, + 0x44, 0x61, 0xbe, 0x14, 0x3e, 0x73, 0xb8, 0xe7, 0xc0, 0xf3, 0x3b, 0xa6, 0x63, 0x3a, 0xfc, 0xa8, + 0x4e, 0x4e, 0x7e, 0xb4, 0x54, 0x83, 0x2d, 0x4c, 0xee, 0x1c, 0x8f, 0x34, 0x2d, 0xd3, 0x26, 0xae, + 0xe6, 0xba, 0x8e, 0x8b, 0x10, 0xc4, 0xba, 0x4e, 0x8f, 0xe4, 0xe4, 0xa2, 0x7c, 0x10, 0xc7, 0xfc, + 0x8c, 0x8a, 0x90, 0xea, 0x11, 0xd6, 0x75, 0x2d, 0xea, 0x59, 0x8e, 0x9d, 0x5b, 0x29, 0xca, 0x07, + 0xeb, 0x78, 0x3a, 0x54, 0x3a, 0x84, 0x74, 0x63, 0xd0, 0xb9, 0x20, 0x23, 0x4c, 0xfe, 0x1c, 0x10, + 0xe6, 0xa1, 0x3d, 0x58, 0xeb, 0xde, 0xb4, 0x2d, 0xdb, 0xb0, 0x7a, 0x1c, 0x6a, 0x1d, 0x27, 0xf9, + 0xbd, 0xd6, 0x2b, 0xfd, 0x23, 0x43, 0x26, 0x48, 0x66, 0xd4, 0xb1, 0x19, 0x41, 0x27, 0x90, 0xa4, + 0x83, 0x8e, 0x71, 0x4b, 0x46, 0x3c, 0x39, 0x75, 0xbc, 0xaf, 0x84, 0xed, 0xf0, 0x4d, 0x2b, 0xc3, + 0xb2, 0xd2, 0x18, 0x74, 0xfa, 0x56, 0xf7, 0x82, 0x8c, 0x2a, 0xb1, 0xfb, 0x37, 0x9f, 0x4a, 0x38, + 0x41, 0x39, 0x0a, 0x3a, 0x85, 0x38, 0x99, 0x68, 0xe7, 0xc2, 0x52, 0xc7, 0x5f, 0x2b, 0x8b, 0x5a, + 0xa9, 0x3c, 0xb1, 0x8b, 0xfd, 0xca, 0xd2, 0x6f, 0xb0, 0x39, 0x89, 0xfe, 0xec, 0x78, 0x24, 0x70, + 0x70, 0x04, 0xb1, 0xa1, 0xe3, 0x11, 0x21, 0xe8, 0x93, 0x08, 0xd4, 0x6f, 0x6c, 0x00, 0xc9, 0x4b, + 0x78, 0xe6, 0x8c, 0xe7, 0x95, 0x59, 0xcf, 0x7f, 0xc9, 0x80, 0x38, 0x6d, 0xcf, 0xa7, 0x10, 0xbe, + 0xbf, 0x7f, 0x7f, 0x0e, 0xe1, 0xd9, 0x67, 0x7a, 0x05, 0xc7, 0x36, 0x6c, 0x4f, 0xa2, 0x0d, 0xd7, + 0xa1, 0x0e, 0x6b, 0xf7, 0x03, 0xd7, 0x27, 0xb0, 0x46, 0x45, 0x48, 0xa8, 0x2a, 0x2e, 0x52, 0x15, + 0x96, 0x86, 0x15, 0xcb, 0x3a, 0xf0, 0xaf, 0x0c, 0xbb, 0x7e, 0x07, 0x22, 0x4a, 0xd1, 0x85, 0xca, + 0x87, 0x73, 0x8a, 0x6e, 0x44, 0xcc, 0xaf, 0xd0, 0x91, 0x34, 0xa4, 0x1a, 0x96, 0x6d, 0x8a, 0x4e, + 0x94, 0x32, 0xb0, 0xe1, 0x5f, 0x7d, 0x95, 0xa5, 0x71, 0x1c, 0x92, 0x97, 0x84, 0xb1, 0xb6, 0x49, + 0xd0, 0x4f, 0xb0, 0x29, 0xe6, 0xd5, 0x70, 0xfd, 0x74, 0x21, 0xfc, 0xcb, 0xc5, 0xbc, 0x33, 0xfb, + 0x51, 0x95, 0x70, 0x9a, 0xce, 0x2c, 0x4c, 0x0b, 0xb2, 0x11, 0xa4, 0x4f, 0x29, 0xbc, 0x1c, 0xbc, + 0x8c, 0xe9, 0xe7, 0x57, 0x25, 0x9c, 0xa1, 0xb3, 0x8b, 0x75, 0x0d, 0x5b, 0xcc, 0x32, 0x6d, 0x63, + 0x32, 0x35, 0xa1, 0xd4, 0x55, 0x0e, 0xfb, 0xd5, 0x62, 0xd8, 0xb9, 0x55, 0xa8, 0x4a, 0x78, 0x93, + 0xcd, 0x6d, 0xc7, 0xef, 0xb0, 0xc3, 0xf8, 0xd7, 0x0c, 0xa0, 0x85, 0xe4, 0x18, 0xc7, 0xfe, 0x66, + 0x39, 0xf6, 0xec, 0x16, 0x54, 0x25, 0x8c, 0xd8, 0xd3, 0xdd, 0xe8, 0xc2, 0x47, 0x5c, 0x7a, 0xf0, + 0x89, 0x43, 0xf9, 0x71, 0x4e, 0xf1, 0xed, 0x72, 0x8a, 0xb9, 0xb9, 0xae, 0x4a, 0x78, 0x9b, 0x3d, + 0x33, 0xee, 0x7d, 0xc8, 0x09, 0x1b, 0x53, 0x34, 0xc2, 0x4a, 0x82, 0xf3, 0x1c, 0xbd, 0x64, 0x65, + 0x7e, 0x9c, 0xab, 0x12, 0xde, 0x65, 0xcf, 0x0f, 0xfa, 0x8f, 0xb0, 0x41, 0x2d, 0xdb, 0x0c, 0x9d, + 0x24, 0x39, 0xc3, 0xe7, 0x4b, 0xbe, 0x6f, 0x34, 0x8f, 0x55, 0x09, 0xa7, 0x68, 0x74, 0x45, 0x97, + 0x90, 0x16, 0x58, 0x42, 0xee, 0x1a, 0x07, 0xfb, 0xe2, 0x25, 0xb0, 0x50, 0xe4, 0x06, 0x9d, 0xba, + 0x57, 0xe2, 0xb0, 0xca, 0x06, 0x77, 0x87, 0xff, 0xc9, 0x90, 0xe0, 0x4b, 0xc1, 0x10, 0x82, 0x8c, + 0x86, 0x71, 0x1d, 0x37, 0x8d, 0x2b, 0xfd, 0x42, 0xaf, 0x5f, 0xeb, 0x59, 0x09, 0x15, 0x20, 0x1f, + 0xc6, 0xb4, 0x5f, 0x1a, 0xda, 0x59, 0x4b, 0x3b, 0x37, 0xb0, 0xd6, 0x6c, 0xd4, 0xf5, 0xa6, 0x96, + 0x95, 0x51, 0x0e, 0x76, 0xc4, 0xbb, 0x5e, 0x37, 0xce, 0xea, 0xba, 0xae, 0x9d, 0xb5, 0x6a, 0x75, + 0x3d, 0xbb, 0x82, 0xf6, 0x61, 0x4f, 0xbc, 0x44, 0x61, 0xa3, 0x55, 
0xbb, 0xd4, 0xea, 0x57, 0xad, + 0xec, 0x2a, 0xfa, 0x18, 0xb6, 0xc5, 0x33, 0xd6, 0x4e, 0xcf, 0xc3, 0x87, 0xd8, 0x14, 0xe2, 0x35, + 0xae, 0xb5, 0xb4, 0xf0, 0x25, 0x5e, 0x69, 0xde, 0x8f, 0x0b, 0xf2, 0xc3, 0xb8, 0x20, 0xbf, 0x1d, + 0x17, 0xe4, 0xbf, 0x1f, 0x0b, 0xd2, 0xc3, 0x63, 0x41, 0xfa, 0xff, 0xb1, 0x20, 0xfd, 0xfa, 0x83, + 0x69, 0x79, 0x37, 0x83, 0xce, 0xa4, 0x13, 0x6a, 0xf4, 0xef, 0x0c, 0x0e, 0x6d, 0x6a, 0xa9, 0x8b, + 0xfe, 0xc8, 0x9d, 0x04, 0xff, 0x33, 0x7e, 0xf7, 0x2e, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x94, 0x16, + 0x90, 0xb4, 0x07, 0x00, 0x00, +} + +func (m *RemoteSignerError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoteSignerError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoteSignerError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PubKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PubKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PubKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignVoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 
0x12 + } + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedVoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignProposalRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignProposalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.Proposal != nil { + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedProposalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + 
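+	// As with every generated Marshal in this file: allocate exactly Size()
+	// bytes, then let MarshalToSizedBuffer fill the buffer from the end
+	// backwards, so the returned slice needs no extra copying.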
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_PubKeyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PubKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PubKeyRequest != nil { + { + size, err := m.PubKeyRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_PubKeyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PubKeyResponse != nil { + { + size, err := m.PubKeyResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_SignVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignVoteRequest != nil { + { + size, err := m.SignVoteRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_SignedVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignedVoteResponse != nil { + { + size, err := m.SignedVoteResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_SignProposalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignProposalRequest != nil { + { + size, err := m.SignProposalRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Message_SignedProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignedProposalResponse != nil { + { + size, err := m.SignedProposalResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Message_PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PingRequest != nil { + { + size, err := m.PingRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Message_PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PingResponse != nil { + { + size, err := m.PingResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RemoteSignerError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PubKeyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PubKeyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignProposalRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proposal != nil { + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PingRequest) Size() (n int) 
{ + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_PubKeyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKeyRequest != nil { + l = m.PubKeyRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PubKeyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKeyResponse != nil { + l = m.PubKeyResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignVoteRequest != nil { + l = m.SignVoteRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignedVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedVoteResponse != nil { + l = m.SignedVoteResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignProposalRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignProposalRequest != nil { + l = m.SignProposalRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignedProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedProposalResponse != nil { + l = m.SignedProposalResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingRequest != nil { + l = m.PingRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingResponse != nil { + l = m.PingResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoteSignerError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoteSignerError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + 
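+			// Varint decoding: each byte contributes its low seven bits (b&0x7F)
+			// at the current shift, and a clear high bit (b < 0x80) marks the
+			// final byte; e.g. the bytes 0xAC 0x02 decode to 0x2C | 0x02<<7 = 300.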
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
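+				// A negative msglen means the length varint overflowed int, i.e.
+				// the input is corrupt rather than merely truncated.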
+ return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignVoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignVoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &v1beta1.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedVoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedVoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignProposalRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignProposalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proposal == nil { + m.Proposal = &v1beta1.Proposal{} + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedProposalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := 
m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PubKeyRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PubKeyRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PubKeyResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PubKeyResponse{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignVoteRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignVoteRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignVoteRequest{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedVoteResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignedVoteResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignedVoteResponse{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignProposalRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignProposalRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignProposalRequest{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedProposalResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignedProposalResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignedProposalResponse{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PingRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PingRequest{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PingResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PingResponse{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( 
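+	// Sentinel errors shared by the Unmarshal methods and skipTypes above;
+	// callers can match them with errors.Is or direct comparison.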
+ ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/privval/types.pb.go b/api/cometbft/privval/v1beta2/types.pb.go similarity index 88% rename from proto/tendermint/privval/types.pb.go rename to api/cometbft/privval/v1beta2/types.pb.go index cafa0e9278c..342a4d54bf4 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/api/cometbft/privval/v1beta2/types.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/privval/types.proto +// source: cometbft/privval/v1beta2/types.proto -package privval +package v1beta2 import ( fmt "fmt" - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - types "github.com/cometbft/cometbft/proto/tendermint/types" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + v11 "github.com/cometbft/cometbft/api/cometbft/types/v1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" io "io" @@ -25,15 +25,22 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Errors is a list of error codes that can be returned by the remote signer. type Errors int32 const ( - Errors_ERRORS_UNKNOWN Errors = 0 + // Unknown error + Errors_ERRORS_UNKNOWN Errors = 0 + // Unexpected response Errors_ERRORS_UNEXPECTED_RESPONSE Errors = 1 - Errors_ERRORS_NO_CONNECTION Errors = 2 - Errors_ERRORS_CONNECTION_TIMEOUT Errors = 3 - Errors_ERRORS_READ_TIMEOUT Errors = 4 - Errors_ERRORS_WRITE_TIMEOUT Errors = 5 + // Connection lost + Errors_ERRORS_NO_CONNECTION Errors = 2 + // Connection timeout + Errors_ERRORS_CONNECTION_TIMEOUT Errors = 3 + // Read timeout + Errors_ERRORS_READ_TIMEOUT Errors = 4 + // Write timeout + Errors_ERRORS_WRITE_TIMEOUT Errors = 5 ) var Errors_name = map[int32]string{ @@ -59,9 +66,10 @@ func (x Errors) String() string { } func (Errors) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{0} + return fileDescriptor_7eb66e8513d3d538, []int{0} } +// RemoteSignerError is returned when the remote signer fails. type RemoteSignerError struct { Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` @@ -71,7 +79,7 @@ func (m *RemoteSignerError) Reset() { *m = RemoteSignerError{} } func (m *RemoteSignerError) String() string { return proto.CompactTextString(m) } func (*RemoteSignerError) ProtoMessage() {} func (*RemoteSignerError) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{0} + return fileDescriptor_7eb66e8513d3d538, []int{0} } func (m *RemoteSignerError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -123,7 +131,7 @@ func (m *PubKeyRequest) Reset() { *m = PubKeyRequest{} } func (m *PubKeyRequest) String() string { return proto.CompactTextString(m) } func (*PubKeyRequest) ProtoMessage() {} func (*PubKeyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{1} + return fileDescriptor_7eb66e8513d3d538, []int{1} } func (m *PubKeyRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +169,7 @@ func (m *PubKeyRequest) GetChainId() string { // PubKeyResponse is a response message containing the public key.
type PubKeyResponse struct { - PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + PubKey v1.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -169,7 +177,7 @@ func (m *PubKeyResponse) Reset() { *m = PubKeyResponse{} } func (m *PubKeyResponse) String() string { return proto.CompactTextString(m) } func (*PubKeyResponse) ProtoMessage() {} func (*PubKeyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{2} + return fileDescriptor_7eb66e8513d3d538, []int{2} } func (m *PubKeyResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -198,11 +206,11 @@ func (m *PubKeyResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PubKeyResponse proto.InternalMessageInfo -func (m *PubKeyResponse) GetPubKey() crypto.PublicKey { +func (m *PubKeyResponse) GetPubKey() v1.PublicKey { if m != nil { return m.PubKey } - return crypto.PublicKey{} + return v1.PublicKey{} } func (m *PubKeyResponse) GetError() *RemoteSignerError { @@ -214,15 +222,15 @@ func (m *PubKeyResponse) GetError() *RemoteSignerError { // SignVoteRequest is a request to sign a vote type SignVoteRequest struct { - Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Vote *v11.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` } func (m *SignVoteRequest) Reset() { *m = SignVoteRequest{} } func (m *SignVoteRequest) String() string { return proto.CompactTextString(m) } func (*SignVoteRequest) ProtoMessage() {} func (*SignVoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{3} + return fileDescriptor_7eb66e8513d3d538, []int{3} } func (m *SignVoteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -251,7 +259,7 @@ func (m *SignVoteRequest) XXX_DiscardUnknown() { var xxx_messageInfo_SignVoteRequest proto.InternalMessageInfo -func (m *SignVoteRequest) GetVote() *types.Vote { +func (m *SignVoteRequest) GetVote() *v11.Vote { if m != nil { return m.Vote } @@ -267,7 +275,7 @@ func (m *SignVoteRequest) GetChainId() string { // SignedVoteResponse is a response containing a signed vote or an error type SignedVoteResponse struct { - Vote types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote"` + Vote v11.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote"` Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -275,7 +283,7 @@ func (m *SignedVoteResponse) Reset() { *m = SignedVoteResponse{} } func (m *SignedVoteResponse) String() string { return proto.CompactTextString(m) } func (*SignedVoteResponse) ProtoMessage() {} func (*SignedVoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{4} + return fileDescriptor_7eb66e8513d3d538, []int{4} } func (m *SignedVoteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,11 +312,11 @@ func (m *SignedVoteResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SignedVoteResponse proto.InternalMessageInfo -func (m *SignedVoteResponse) GetVote() types.Vote { +func (m *SignedVoteResponse) GetVote() v11.Vote { if m != nil { return m.Vote } - return types.Vote{} + return v11.Vote{} } func (m 
*SignedVoteResponse) GetError() *RemoteSignerError { @@ -320,15 +328,15 @@ func (m *SignedVoteResponse) GetError() *RemoteSignerError { // SignProposalRequest is a request to sign a proposal type SignProposalRequest struct { - Proposal *types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Proposal *v11.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` } func (m *SignProposalRequest) Reset() { *m = SignProposalRequest{} } func (m *SignProposalRequest) String() string { return proto.CompactTextString(m) } func (*SignProposalRequest) ProtoMessage() {} func (*SignProposalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{5} + return fileDescriptor_7eb66e8513d3d538, []int{5} } func (m *SignProposalRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +365,7 @@ func (m *SignProposalRequest) XXX_DiscardUnknown() { var xxx_messageInfo_SignProposalRequest proto.InternalMessageInfo -func (m *SignProposalRequest) GetProposal() *types.Proposal { +func (m *SignProposalRequest) GetProposal() *v11.Proposal { if m != nil { return m.Proposal } @@ -373,7 +381,7 @@ func (m *SignProposalRequest) GetChainId() string { // SignedProposalResponse is response containing a signed proposal or an error type SignedProposalResponse struct { - Proposal types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` + Proposal v11.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -381,7 +389,7 @@ func (m *SignedProposalResponse) Reset() { *m = SignedProposalResponse{} func (m *SignedProposalResponse) String() string { return proto.CompactTextString(m) } func (*SignedProposalResponse) ProtoMessage() {} func (*SignedProposalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{6} + return fileDescriptor_7eb66e8513d3d538, []int{6} } func (m *SignedProposalResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,11 +418,11 @@ func (m *SignedProposalResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SignedProposalResponse proto.InternalMessageInfo -func (m *SignedProposalResponse) GetProposal() types.Proposal { +func (m *SignedProposalResponse) GetProposal() v11.Proposal { if m != nil { return m.Proposal } - return types.Proposal{} + return v11.Proposal{} } func (m *SignedProposalResponse) GetError() *RemoteSignerError { @@ -432,7 +440,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} } func (m *PingRequest) String() string { return proto.CompactTextString(m) } func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{7} + return fileDescriptor_7eb66e8513d3d538, []int{7} } func (m *PingRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +477,7 @@ func (m *PingResponse) Reset() { *m = PingResponse{} } func (m *PingResponse) String() string { return proto.CompactTextString(m) } func (*PingResponse) ProtoMessage() {} func (*PingResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{8} + return fileDescriptor_7eb66e8513d3d538, []int{8} } func (m *PingResponse) XXX_Unmarshal(b []byte) error { 
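	// XXX_Unmarshal delegates to this type's generated Unmarshal.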
return m.Unmarshal(b) @@ -498,7 +506,10 @@ func (m *PingResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PingResponse proto.InternalMessageInfo +// Message is an abstract message to/from the remote signer. type Message struct { + // Sum of all possible messages. + // // Types that are valid to be assigned to Sum: // *Message_PubKeyRequest // *Message_PubKeyResponse @@ -515,7 +526,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_cb4e437a5328cf9c, []int{9} + return fileDescriptor_7eb66e8513d3d538, []int{9} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,71 +673,74 @@ func (*Message) XXX_OneofWrappers() []interface{} { } func init() { - proto.RegisterEnum("tendermint.privval.Errors", Errors_name, Errors_value) - proto.RegisterType((*RemoteSignerError)(nil), "tendermint.privval.RemoteSignerError") - proto.RegisterType((*PubKeyRequest)(nil), "tendermint.privval.PubKeyRequest") - proto.RegisterType((*PubKeyResponse)(nil), "tendermint.privval.PubKeyResponse") - proto.RegisterType((*SignVoteRequest)(nil), "tendermint.privval.SignVoteRequest") - proto.RegisterType((*SignedVoteResponse)(nil), "tendermint.privval.SignedVoteResponse") - proto.RegisterType((*SignProposalRequest)(nil), "tendermint.privval.SignProposalRequest") - proto.RegisterType((*SignedProposalResponse)(nil), "tendermint.privval.SignedProposalResponse") - proto.RegisterType((*PingRequest)(nil), "tendermint.privval.PingRequest") - proto.RegisterType((*PingResponse)(nil), "tendermint.privval.PingResponse") - proto.RegisterType((*Message)(nil), "tendermint.privval.Message") -} - -func init() { proto.RegisterFile("tendermint/privval/types.proto", fileDescriptor_cb4e437a5328cf9c) } - -var fileDescriptor_cb4e437a5328cf9c = []byte{ - // 756 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4d, 0x4f, 0xe3, 0x46, - 0x18, 0xb6, 0x21, 0x1f, 0xf0, 0x86, 0x84, 0x30, 0x50, 0x1a, 0x22, 0x6a, 0xd2, 0x54, 0x6d, 0x51, - 0x0e, 0x49, 0x45, 0xd5, 0x5e, 0xe8, 0xa5, 0x80, 0xd5, 0x44, 0x11, 0x76, 0x3a, 0x09, 0x05, 0x21, - 0x55, 0x56, 0x3e, 0x06, 0x63, 0x41, 0x3c, 0x5e, 0x8f, 0x83, 0x94, 0xf3, 0xde, 0xf6, 0xb4, 0xd2, - 0xfe, 0x89, 0x3d, 0xef, 0xaf, 0xe0, 0xc8, 0x71, 0x4f, 0xab, 0x15, 0xfc, 0x91, 0x55, 0xc6, 0x13, - 0xdb, 0xf9, 0x42, 0xbb, 0xe2, 0x36, 0xf3, 0xbe, 0xef, 0x3c, 0x1f, 0x33, 0x8f, 0x65, 0x50, 0x3c, - 0x62, 0xf7, 0x88, 0xdb, 0xb7, 0x6c, 0xaf, 0xe2, 0xb8, 0xd6, 0xdd, 0x5d, 0xfb, 0xb6, 0xe2, 0x0d, - 0x1d, 0xc2, 0xca, 0x8e, 0x4b, 0x3d, 0x8a, 0x50, 0xd8, 0x2f, 0x8b, 0x7e, 0x7e, 0x37, 0x72, 0xa6, - 0xeb, 0x0e, 0x1d, 0x8f, 0x56, 0x6e, 0xc8, 0x50, 0x9c, 0x98, 0xe8, 0x72, 0xa4, 0x28, 0x5e, 0x7e, - 0xcb, 0xa4, 0x26, 0xe5, 0xcb, 0xca, 0x68, 0xe5, 0x57, 0x8b, 0x35, 0xd8, 0xc0, 0xa4, 0x4f, 0x3d, - 0xd2, 0xb4, 0x4c, 0x9b, 0xb8, 0xaa, 0xeb, 0x52, 0x17, 0x21, 0x88, 0x75, 0x69, 0x8f, 0xe4, 0xe4, - 0x82, 0xbc, 0x1f, 0xc7, 0x7c, 0x8d, 0x0a, 0x90, 0xea, 0x11, 0xd6, 0x75, 0x2d, 0xc7, 0xb3, 0xa8, - 0x9d, 0x5b, 0x2a, 0xc8, 0xfb, 0xab, 0x38, 0x5a, 0x2a, 0x96, 0x20, 0xdd, 0x18, 0x74, 0xea, 0x64, - 0x88, 0xc9, 0xab, 0x01, 0x61, 0x1e, 0xda, 0x81, 0x95, 0xee, 0x75, 0xdb, 0xb2, 0x0d, 0xab, 0xc7, - 0xa1, 0x56, 0x71, 0x92, 0xef, 0x6b, 0xbd, 0xe2, 0x1b, 0x19, 0x32, 0xe3, 0x61, 0xe6, 0x50, 0x9b, - 0x11, 0x74, 0x08, 0x49, 0x67, 0xd0, 0x31, 0x6e, 0xc8, 0x90, 0x0f, 0xa7, 0x0e, 0x76, 0xcb, 0x91, - 0x1b, 
0xf0, 0xdd, 0x96, 0x1b, 0x83, 0xce, 0xad, 0xd5, 0xad, 0x93, 0xe1, 0x51, 0xec, 0xfe, 0xd3, - 0x9e, 0x84, 0x13, 0x0e, 0x07, 0x41, 0x87, 0x10, 0x27, 0x23, 0xe9, 0x5c, 0x57, 0xea, 0xe0, 0xe7, - 0xf2, 0xec, 0xe5, 0x95, 0x67, 0x7c, 0x62, 0xff, 0x4c, 0xf1, 0x02, 0xd6, 0x47, 0xd5, 0xff, 0xa8, - 0x47, 0xc6, 0xd2, 0x4b, 0x10, 0xbb, 0xa3, 0x1e, 0x11, 0x4a, 0xb6, 0xa3, 0x70, 0xfe, 0x9d, 0xf2, - 0x61, 0x3e, 0x33, 0x61, 0x73, 0x69, 0xd2, 0xe6, 0x6b, 0x19, 0x10, 0x27, 0xec, 0xf9, 0xe0, 0xc2, - 0xea, 0x6f, 0x5f, 0x83, 0x2e, 0x1c, 0xfa, 0x1c, 0x2f, 0xf2, 0x77, 0x0d, 0x9b, 0xa3, 0x6a, 0xc3, - 0xa5, 0x0e, 0x65, 0xed, 0xdb, 0xb1, 0xc7, 0x3f, 0x61, 0xc5, 0x11, 0x25, 0xa1, 0x24, 0x3f, 0xab, - 0x24, 0x38, 0x14, 0xcc, 0x3e, 0xe7, 0xf7, 0x9d, 0x0c, 0xdb, 0xbe, 0xdf, 0x90, 0x4c, 0x78, 0xfe, - 0xeb, 0x5b, 0xd8, 0x84, 0xf7, 0x90, 0xf3, 0x45, 0xfe, 0xd3, 0x90, 0x6a, 0x58, 0xb6, 0x29, 0x7c, - 0x17, 0x33, 0xb0, 0xe6, 0x6f, 0x7d, 0x65, 0xc5, 0x0f, 0x71, 0x48, 0x9e, 0x12, 0xc6, 0xda, 0x26, - 0x41, 0x75, 0x58, 0x17, 0x21, 0x34, 0x5c, 0x7f, 0x5c, 0x88, 0xfd, 0x71, 0x1e, 0xe3, 0x44, 0xdc, - 0xab, 0x12, 0x4e, 0x3b, 0x13, 0xf9, 0xd7, 0x20, 0x1b, 0x82, 0xf9, 0x64, 0x42, 0x7f, 0xf1, 0x39, - 0x34, 0x7f, 0xb2, 0x2a, 0xe1, 0x8c, 0x33, 0xf9, 0x85, 0xfc, 0x0b, 0x1b, 0xcc, 0x32, 0x6d, 0x63, - 0x94, 0x88, 0x40, 0xde, 0x32, 0x07, 0xfc, 0x69, 0x1e, 0xe0, 0x54, 0xa8, 0xab, 0x12, 0x5e, 0x67, - 0x53, 0x39, 0xbf, 0x84, 0x2d, 0xc6, 0xdf, 0x6b, 0x0c, 0x2a, 0x64, 0xc6, 0x38, 0xea, 0x2f, 0x8b, - 0x50, 0x27, 0xf3, 0x5c, 0x95, 0x30, 0x62, 0xb3, 0x29, 0xff, 0x1f, 0xbe, 0xe3, 0x72, 0xc7, 0x8f, - 0x18, 0x48, 0x8e, 0x73, 0xf0, 0x5f, 0x17, 0x81, 0x4f, 0xe5, 0xb4, 0x2a, 0xe1, 0x4d, 0x36, 0x27, - 0xbe, 0x57, 0x90, 0x13, 0xd2, 0x23, 0x04, 0x42, 0x7e, 0x82, 0x33, 0x94, 0x16, 0xcb, 0x9f, 0x8e, - 0x67, 0x55, 0xc2, 0xdb, 0x6c, 0x7e, 0x70, 0x4f, 0x60, 0xcd, 0xb1, 0x6c, 0x33, 0x50, 0x9f, 0xe4, - 0xd8, 0x7b, 0x73, 0x5f, 0x30, 0x4c, 0x59, 0x55, 0xc2, 0x29, 0x27, 0xdc, 0xa2, 0x7f, 0x20, 0x2d, - 0x50, 0x84, 0xc4, 0x15, 0x0e, 0x53, 0x58, 0x0c, 0x13, 0x08, 0x5b, 0x73, 0x22, 0xfb, 0xa3, 0x38, - 0x2c, 0xb3, 0x41, 0xbf, 0xf4, 0x5e, 0x86, 0x04, 0x0f, 0x39, 0x43, 0x08, 0x32, 0x2a, 0xc6, 0x3a, - 0x6e, 0x1a, 0x67, 0x5a, 0x5d, 0xd3, 0xcf, 0xb5, 0xac, 0x84, 0x14, 0xc8, 0x07, 0x35, 0xf5, 0xa2, - 0xa1, 0x1e, 0xb7, 0xd4, 0x13, 0x03, 0xab, 0xcd, 0x86, 0xae, 0x35, 0xd5, 0xac, 0x8c, 0x72, 0xb0, - 0x25, 0xfa, 0x9a, 0x6e, 0x1c, 0xeb, 0x9a, 0xa6, 0x1e, 0xb7, 0x6a, 0xba, 0x96, 0x5d, 0x42, 0x3f, - 0xc0, 0x8e, 0xe8, 0x84, 0x65, 0xa3, 0x55, 0x3b, 0x55, 0xf5, 0xb3, 0x56, 0x76, 0x19, 0x7d, 0x0f, - 0x9b, 0xa2, 0x8d, 0xd5, 0xbf, 0x4f, 0x82, 0x46, 0x2c, 0x82, 0x78, 0x8e, 0x6b, 0x2d, 0x35, 0xe8, - 0xc4, 0x8f, 0xf4, 0xfb, 0x47, 0x45, 0x7e, 0x78, 0x54, 0xe4, 0xcf, 0x8f, 0x8a, 0xfc, 0xf6, 0x49, - 0x91, 0x1e, 0x9e, 0x14, 0xe9, 0xe3, 0x93, 0x22, 0x5d, 0xfe, 0x61, 0x5a, 0xde, 0xf5, 0xa0, 0x53, - 0xee, 0xd2, 0x7e, 0xa5, 0x4b, 0xfb, 0xc4, 0xeb, 0x5c, 0x79, 0xe1, 0xc2, 0xff, 0x57, 0xcd, 0xfe, - 0x25, 0x3b, 0x09, 0xde, 0xf9, 0xfd, 0x4b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0x9f, 0x99, 0x3e, - 0x42, 0x07, 0x00, 0x00, + proto.RegisterEnum("cometbft.privval.v1beta2.Errors", Errors_name, Errors_value) + proto.RegisterType((*RemoteSignerError)(nil), "cometbft.privval.v1beta2.RemoteSignerError") + proto.RegisterType((*PubKeyRequest)(nil), "cometbft.privval.v1beta2.PubKeyRequest") + proto.RegisterType((*PubKeyResponse)(nil), "cometbft.privval.v1beta2.PubKeyResponse") + proto.RegisterType((*SignVoteRequest)(nil), "cometbft.privval.v1beta2.SignVoteRequest") + proto.RegisterType((*SignedVoteResponse)(nil), 
"cometbft.privval.v1beta2.SignedVoteResponse") + proto.RegisterType((*SignProposalRequest)(nil), "cometbft.privval.v1beta2.SignProposalRequest") + proto.RegisterType((*SignedProposalResponse)(nil), "cometbft.privval.v1beta2.SignedProposalResponse") + proto.RegisterType((*PingRequest)(nil), "cometbft.privval.v1beta2.PingRequest") + proto.RegisterType((*PingResponse)(nil), "cometbft.privval.v1beta2.PingResponse") + proto.RegisterType((*Message)(nil), "cometbft.privval.v1beta2.Message") +} + +func init() { + proto.RegisterFile("cometbft/privval/v1beta2/types.proto", fileDescriptor_7eb66e8513d3d538) +} + +var fileDescriptor_7eb66e8513d3d538 = []byte{ + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcb, 0x4e, 0xdb, 0x4c, + 0x14, 0xb6, 0x21, 0x17, 0x38, 0x21, 0x21, 0x0c, 0xfc, 0x10, 0xf8, 0x45, 0x7e, 0x14, 0xfd, 0x6d, + 0x29, 0xb4, 0x76, 0x43, 0x17, 0x55, 0xa5, 0x76, 0xc1, 0xc5, 0x52, 0x52, 0x84, 0x93, 0x4e, 0x42, + 0x69, 0xbb, 0x71, 0x73, 0x99, 0x1a, 0x8b, 0x10, 0x4f, 0x3d, 0x4e, 0xa4, 0xbc, 0x42, 0x57, 0xed, + 0xb6, 0x4f, 0xd0, 0x47, 0x61, 0xc9, 0xb2, 0xab, 0xaa, 0x0a, 0x2f, 0x52, 0x65, 0x3c, 0xb1, 0x93, + 0x40, 0x42, 0x2b, 0xb1, 0x9b, 0x39, 0xe7, 0xcc, 0x77, 0x39, 0x3e, 0x47, 0x86, 0xff, 0x6b, 0xf6, + 0x39, 0x71, 0xab, 0x1f, 0x5d, 0x95, 0x3a, 0x56, 0xbb, 0x5d, 0x69, 0xa8, 0xed, 0x6c, 0x95, 0xb8, + 0x95, 0x1d, 0xd5, 0xed, 0x50, 0xc2, 0x14, 0xea, 0xd8, 0xae, 0x8d, 0x52, 0xfd, 0x2a, 0x45, 0x54, + 0x29, 0xa2, 0x6a, 0x6d, 0xdd, 0x7f, 0x5f, 0x73, 0x3a, 0xd4, 0xb5, 0xd5, 0x76, 0x56, 0x3d, 0x23, + 0x1d, 0xf1, 0x70, 0x20, 0xcd, 0xe1, 0x7a, 0xd9, 0x01, 0xdc, 0xb5, 0x25, 0xd3, 0x36, 0x6d, 0x7e, + 0x54, 0x7b, 0x27, 0x2f, 0x9a, 0xc9, 0xc3, 0x02, 0x26, 0xe7, 0xb6, 0x4b, 0x4a, 0x96, 0xd9, 0x24, + 0x8e, 0xe6, 0x38, 0xb6, 0x83, 0x10, 0x84, 0x6a, 0x76, 0x9d, 0xa4, 0xe4, 0x0d, 0x79, 0x33, 0x8c, + 0xf9, 0x19, 0x6d, 0x40, 0xac, 0x4e, 0x58, 0xcd, 0xb1, 0xa8, 0x6b, 0xd9, 0xcd, 0xd4, 0xd4, 0x86, + 0xbc, 0x39, 0x8b, 0x07, 0x43, 0x99, 0x2d, 0x88, 0x17, 0x5b, 0xd5, 0x43, 0xd2, 0xc1, 0xe4, 0x53, + 0x8b, 0x30, 0x17, 0xad, 0xc2, 0x4c, 0xed, 0xb4, 0x62, 0x35, 0x0d, 0xab, 0xce, 0xa1, 0x66, 0x71, + 0x94, 0xdf, 0xf3, 0xf5, 0xcc, 0x57, 0x19, 0x12, 0xfd, 0x62, 0x46, 0xed, 0x26, 0x23, 0xe8, 0x05, + 0x44, 0x69, 0xab, 0x6a, 0x9c, 0x91, 0x0e, 0x2f, 0x8e, 0xed, 0xac, 0x2b, 0x7e, 0x27, 0x3c, 0xbf, + 0x4a, 0x3b, 0xab, 0x14, 0x5b, 0xd5, 0x86, 0x55, 0x3b, 0x24, 0x9d, 0xbd, 0xd0, 0xc5, 0xcf, 0xff, + 0x24, 0x1c, 0xa1, 0x1c, 0x05, 0xed, 0x42, 0x98, 0xf4, 0xb4, 0x73, 0x61, 0xb1, 0x9d, 0x6d, 0x65, + 0x5c, 0x17, 0x95, 0x6b, 0x76, 0xb1, 0xf7, 0x32, 0xf3, 0x0e, 0xe6, 0x7b, 0xd1, 0x37, 0xb6, 0x4b, + 0xfa, 0x0e, 0xb6, 0x21, 0xd4, 0xb6, 0x5d, 0x22, 0x04, 0xad, 0x04, 0xa0, 0x5e, 0x63, 0xdb, 0x59, + 0x85, 0x57, 0xf3, 0xa2, 0x21, 0xbb, 0x53, 0xc3, 0x76, 0x3f, 0xcb, 0x80, 0x38, 0x63, 0xdd, 0x43, + 0x17, 0x96, 0xb3, 0x7f, 0x04, 0x2f, 0x9c, 0x7a, 0x24, 0x77, 0xe0, 0xd3, 0x82, 0xc5, 0x5e, 0xb4, + 0xe8, 0xd8, 0xd4, 0x66, 0x95, 0x46, 0xdf, 0xeb, 0x33, 0x98, 0xa1, 0x22, 0x24, 0x04, 0xfd, 0x7b, + 0x83, 0x20, 0xff, 0x95, 0x5f, 0x3c, 0xc9, 0xf7, 0x37, 0x19, 0x96, 0x3d, 0xdf, 0x01, 0x9b, 0xf0, + 0xfe, 0xf2, 0xaf, 0xe8, 0x44, 0x0f, 0x02, 0xd2, 0x3b, 0xe8, 0x43, 0x1c, 0x62, 0x45, 0xab, 0x69, + 0x0a, 0xff, 0x99, 0x04, 0xcc, 0x79, 0x57, 0x4f, 0x60, 0xa6, 0x1b, 0x86, 0xe8, 0x11, 0x61, 0xac, + 0x62, 0x12, 0xf4, 0x1a, 0xe6, 0xc5, 0x6c, 0x1a, 0x8e, 0x57, 0x2e, 0x34, 0x3f, 0x18, 0xcf, 0x3b, + 0xb4, 0x0b, 0x39, 0x09, 0xc7, 0xe9, 0xd0, 0x72, 0x94, 0x21, 0x19, 0x40, 0x7a, 0x94, 0xc2, 0xcb, + 0xe6, 
0xed, 0x98, 0x5e, 0x7d, 0x4e, 0xc2, 0x09, 0x3a, 0xbc, 0x44, 0x27, 0xb0, 0xc0, 0x2c, 0xb3, + 0x69, 0xf4, 0x66, 0xc5, 0x97, 0x3a, 0xcd, 0x61, 0x1f, 0x8e, 0x87, 0x1d, 0x19, 0xfb, 0x9c, 0x84, + 0xe7, 0xd9, 0xc8, 0x26, 0x7c, 0x80, 0x25, 0xc6, 0x3f, 0x64, 0x1f, 0x5a, 0x48, 0x0e, 0x71, 0xec, + 0x47, 0x93, 0xb1, 0x87, 0xc7, 0x3e, 0x27, 0x61, 0xc4, 0xae, 0x2f, 0x43, 0x0d, 0xfe, 0xe1, 0xd2, + 0xfb, 0x9f, 0xd8, 0x97, 0x1f, 0xe6, 0x14, 0x8f, 0x27, 0x53, 0x8c, 0x4c, 0x73, 0x4e, 0xc2, 0x8b, + 0xec, 0x86, 0x21, 0x6f, 0x40, 0x4a, 0xd8, 0x18, 0xa0, 0x11, 0x56, 0x22, 0x9c, 0xe7, 0xc9, 0x6d, + 0x56, 0x46, 0x27, 0x39, 0x27, 0xe1, 0x65, 0x76, 0xf3, 0x8c, 0xbf, 0x82, 0x39, 0x6a, 0x35, 0x4d, + 0xdf, 0x49, 0x94, 0x33, 0xdc, 0x9b, 0xf0, 0x7d, 0x83, 0x79, 0xcc, 0x49, 0x38, 0x46, 0x83, 0x2b, + 0x3a, 0x82, 0xb8, 0xc0, 0x12, 0x72, 0x67, 0x38, 0xd8, 0xfd, 0xdb, 0xc0, 0x7c, 0x91, 0x73, 0x74, + 0xe0, 0xbe, 0x17, 0x86, 0x69, 0xd6, 0x3a, 0xdf, 0xfa, 0x2e, 0x43, 0x84, 0x2f, 0x05, 0x43, 0x08, + 0x12, 0x1a, 0xc6, 0x05, 0x5c, 0x32, 0x8e, 0xf5, 0x43, 0xbd, 0x70, 0xa2, 0x27, 0x25, 0x94, 0x86, + 0x35, 0x3f, 0xa6, 0xbd, 0x2d, 0x6a, 0xfb, 0x65, 0xed, 0xc0, 0xc0, 0x5a, 0xa9, 0x58, 0xd0, 0x4b, + 0x5a, 0x52, 0x46, 0x29, 0x58, 0x12, 0x79, 0xbd, 0x60, 0xec, 0x17, 0x74, 0x5d, 0xdb, 0x2f, 0xe7, + 0x0b, 0x7a, 0x72, 0x0a, 0xad, 0xc3, 0xaa, 0xc8, 0x04, 0x61, 0xa3, 0x9c, 0x3f, 0xd2, 0x0a, 0xc7, + 0xe5, 0xe4, 0x34, 0x5a, 0x81, 0x45, 0x91, 0xc6, 0xda, 0xee, 0x81, 0x9f, 0x08, 0x0d, 0x20, 0x9e, + 0xe0, 0x7c, 0x59, 0xf3, 0x33, 0xe1, 0xbd, 0xd2, 0x45, 0x37, 0x2d, 0x5f, 0x76, 0xd3, 0xf2, 0xaf, + 0x6e, 0x5a, 0xfe, 0x72, 0x95, 0x96, 0x2e, 0xaf, 0xd2, 0xd2, 0x8f, 0xab, 0xb4, 0xf4, 0xfe, 0xb9, + 0x69, 0xb9, 0xa7, 0xad, 0x6a, 0xaf, 0x13, 0x6a, 0xf0, 0x8b, 0xec, 0x1f, 0x2a, 0xd4, 0x52, 0xc7, + 0xfd, 0x78, 0xab, 0x11, 0xfe, 0x17, 0x7c, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x2e, 0x08, + 0x1c, 0x9b, 0x07, 0x00, 0x00, } func (m *RemoteSignerError) Marshal() (dAtA []byte, err error) { @@ -1872,7 +1886,7 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Vote == nil { - m.Vote = &types.Vote{} + m.Vote = &v11.Vote{} } if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2109,7 +2123,7 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Proposal == nil { - m.Proposal = &types.Proposal{} + m.Proposal = &v11.Proposal{} } if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/api/cometbft/rpc/grpc/v1beta1/types.pb.go b/api/cometbft/rpc/grpc/v1beta1/types.pb.go new file mode 100644 index 00000000000..8de2081c7c0 --- /dev/null +++ b/api/cometbft/rpc/grpc/v1beta1/types.pb.go @@ -0,0 +1,937 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/rpc/grpc/v1beta1/types.proto + +package v1beta1 + +import ( + context "context" + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// RequestPing is a request to confirm that the connection is alive. +type RequestPing struct { +} + +func (m *RequestPing) Reset() { *m = RequestPing{} } +func (m *RequestPing) String() string { return proto.CompactTextString(m) } +func (*RequestPing) ProtoMessage() {} +func (*RequestPing) Descriptor() ([]byte, []int) { + return fileDescriptor_2bd787770c28cdca, []int{0} +} +func (m *RequestPing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestPing) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPing.Merge(m, src) +} +func (m *RequestPing) XXX_Size() int { + return m.Size() +} +func (m *RequestPing) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPing.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPing proto.InternalMessageInfo + +// RequestBroadcastTx is a request to broadcast the transaction. +type RequestBroadcastTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } +func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*RequestBroadcastTx) ProtoMessage() {} +func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_2bd787770c28cdca, []int{1} +} +func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBroadcastTx.Merge(m, src) +} +func (m *RequestBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *RequestBroadcastTx) XXX_DiscardUnknown() { + xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo + +func (m *RequestBroadcastTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +// ResponsePing is a response to confirm that the connection is alive. 
+type ResponsePing struct { +} + +func (m *ResponsePing) Reset() { *m = ResponsePing{} } +func (m *ResponsePing) String() string { return proto.CompactTextString(m) } +func (*ResponsePing) ProtoMessage() {} +func (*ResponsePing) Descriptor() ([]byte, []int) { + return fileDescriptor_2bd787770c28cdca, []int{2} +} +func (m *ResponsePing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponsePing) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePing.Merge(m, src) +} +func (m *ResponsePing) XXX_Size() int { + return m.Size() +} +func (m *ResponsePing) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePing.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponsePing proto.InternalMessageInfo + +// ResponseBroadcastTx is a response of broadcasting the transaction. +type ResponseBroadcastTx struct { + CheckTx *v1beta1.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` + DeliverTx *v1beta1.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` +} + +func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } +func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*ResponseBroadcastTx) ProtoMessage() {} +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_2bd787770c28cdca, []int{3} +} +func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) +} +func (m *ResponseBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo + +func (m *ResponseBroadcastTx) GetCheckTx() *v1beta1.ResponseCheckTx { + if m != nil { + return m.CheckTx + } + return nil +} + +func (m *ResponseBroadcastTx) GetDeliverTx() *v1beta1.ResponseDeliverTx { + if m != nil { + return m.DeliverTx + } + return nil +} + +func init() { + proto.RegisterType((*RequestPing)(nil), "cometbft.rpc.grpc.v1beta1.RequestPing") + proto.RegisterType((*RequestBroadcastTx)(nil), "cometbft.rpc.grpc.v1beta1.RequestBroadcastTx") + proto.RegisterType((*ResponsePing)(nil), "cometbft.rpc.grpc.v1beta1.ResponsePing") + proto.RegisterType((*ResponseBroadcastTx)(nil), "cometbft.rpc.grpc.v1beta1.ResponseBroadcastTx") +} + +func init() { + proto.RegisterFile("cometbft/rpc/grpc/v1beta1/types.proto", fileDescriptor_2bd787770c28cdca) +} + +var fileDescriptor_2bd787770c28cdca = []byte{ + // 325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x65, 0x86, 0x49, 0xa9, + 0x25, 0x89, 
0x86, 0xfa, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, + 0x92, 0x30, 0x65, 0x7a, 0x45, 0x05, 0xc9, 0x7a, 0x20, 0x65, 0x7a, 0x50, 0x65, 0x52, 0x8a, 0x70, + 0x13, 0x12, 0x93, 0x92, 0x33, 0xb1, 0xe9, 0x56, 0xe2, 0xe5, 0xe2, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, + 0x2d, 0x2e, 0x09, 0xc8, 0xcc, 0x4b, 0x57, 0x52, 0xe1, 0x12, 0x82, 0x72, 0x9d, 0x8a, 0xf2, 0x13, + 0x53, 0x92, 0x13, 0x8b, 0x4b, 0x42, 0x2a, 0x84, 0xf8, 0xb8, 0x98, 0x4a, 0x2a, 0x24, 0x18, 0x15, + 0x18, 0x35, 0x78, 0x82, 0x98, 0x4a, 0x2a, 0x94, 0xf8, 0xb8, 0x78, 0x82, 0x52, 0x8b, 0x0b, 0xf2, + 0xf3, 0x8a, 0x53, 0xc1, 0xba, 0x16, 0x32, 0x72, 0x09, 0xc3, 0x04, 0x90, 0xf5, 0x39, 0x72, 0x71, + 0x24, 0x67, 0xa4, 0x26, 0x67, 0xc7, 0x43, 0x75, 0x73, 0x1b, 0xa9, 0xe9, 0xc1, 0x5d, 0x0b, 0x72, + 0x12, 0xcc, 0xa5, 0x7a, 0x30, 0xdd, 0xce, 0x20, 0xe5, 0x21, 0x15, 0x41, 0xec, 0xc9, 0x10, 0x86, + 0x90, 0x3b, 0x17, 0x57, 0x4a, 0x6a, 0x4e, 0x66, 0x59, 0x6a, 0x11, 0xc8, 0x10, 0x26, 0xb0, 0x21, + 0x1a, 0x04, 0x0c, 0x71, 0x81, 0x68, 0x08, 0xa9, 0x08, 0xe2, 0x4c, 0x81, 0x31, 0x8d, 0xae, 0x32, + 0x72, 0xf1, 0xc0, 0xdd, 0xe6, 0x18, 0xe0, 0x29, 0x14, 0xce, 0xc5, 0x02, 0x72, 0xbc, 0x10, 0x92, + 0x93, 0xd0, 0x03, 0x50, 0x0f, 0x29, 0x68, 0xa4, 0xd4, 0xf1, 0xaa, 0x43, 0x84, 0x86, 0x50, 0x0e, + 0x17, 0x37, 0x72, 0x20, 0xe8, 0x12, 0x36, 0x1f, 0x49, 0xb9, 0x94, 0x1e, 0x11, 0xd6, 0x20, 0xa9, + 0x77, 0x0a, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, + 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xab, 0xf4, 0xcc, + 0x92, 0x8c, 0xd2, 0x24, 0x90, 0x79, 0xfa, 0xf0, 0x84, 0x80, 0x48, 0x11, 0x05, 0x99, 0xfa, 0x38, + 0x13, 0x58, 0x12, 0x1b, 0x38, 0x75, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x56, 0x2e, + 0x4a, 0x84, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BroadcastAPIClient interface { + // Ping the connection. + Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) + // BroadcastTx broadcasts the transaction. + BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +} + +type broadcastAPIClient struct { + cc grpc1.ClientConn +} + +func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { + return &broadcastAPIClient{cc} +} + +func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { + out := new(ResponsePing) + err := c.cc.Invoke(ctx, "/cometbft.rpc.grpc.v1beta1.BroadcastAPI/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { + out := new(ResponseBroadcastTx) + err := c.cc.Invoke(ctx, "/cometbft.rpc.grpc.v1beta1.BroadcastAPI/BroadcastTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BroadcastAPIServer is the server API for BroadcastAPI service. 
+type BroadcastAPIServer interface { + // Ping the connection. + Ping(context.Context, *RequestPing) (*ResponsePing, error) + // BroadcastTx broadcasts the transaction. + BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) +} + +// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. +type UnimplementedBroadcastAPIServer struct { +} + +func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +} + +func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { + s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +} + +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.rpc.grpc.v1beta1.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + +func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBroadcastTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.rpc.grpc.v1beta1.BroadcastAPI/BroadcastTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) + } + return interceptor(ctx, in, info, handler) +} + +var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc +var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.rpc.grpc.v1beta1.BroadcastAPI", + HandlerType: (*BroadcastAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, + { + MethodName: "BroadcastTx", + Handler: _BroadcastAPI_BroadcastTx_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/rpc/grpc/v1beta1/types.proto", +} + +func (m *RequestPing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponsePing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RequestPing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponsePing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ResponseBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestPing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponsePing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckTx == nil { + m.CheckTx = &v1beta1.ResponseCheckTx{} + } + if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeliverTx == nil { + m.DeliverTx = &v1beta1.ResponseDeliverTx{} + } + if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer 
overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/rpc/grpc/v1beta2/types.pb.go b/api/cometbft/rpc/grpc/v1beta2/types.pb.go new file mode 100644 index 00000000000..2b969b56669 --- /dev/null +++ b/api/cometbft/rpc/grpc/v1beta2/types.pb.go @@ -0,0 +1,523 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/rpc/grpc/v1beta2/types.proto + +package v1beta2 + +import ( + context "context" + fmt "fmt" + v1beta2 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta2" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/rpc/grpc/v1beta1" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ResponseBroadcastTx is a response of broadcasting the transaction. +type ResponseBroadcastTx struct { + CheckTx *v1beta2.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` + DeliverTx *v1beta2.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` +} + +func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } +func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*ResponseBroadcastTx) ProtoMessage() {} +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_cc9b252f8c29b3ef, []int{0} +} +func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) +} +func (m *ResponseBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo + +func (m *ResponseBroadcastTx) GetCheckTx() *v1beta2.ResponseCheckTx { + if m != nil { + return m.CheckTx + } + return nil +} + +func (m *ResponseBroadcastTx) GetDeliverTx() *v1beta2.ResponseDeliverTx { + if m != nil { + return m.DeliverTx + } + return nil +} + +func init() { + proto.RegisterType((*ResponseBroadcastTx)(nil), "cometbft.rpc.grpc.v1beta2.ResponseBroadcastTx") +} + +func init() { + proto.RegisterFile("cometbft/rpc/grpc/v1beta2/types.proto", fileDescriptor_cc9b252f8c29b3ef) +} + +var fileDescriptor_cc9b252f8c29b3ef = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 
0x07, 0x11, 0x65, 0x86, 0x49, 0xa9, + 0x25, 0x89, 0x46, 0xfa, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, + 0x92, 0x30, 0x65, 0x7a, 0x45, 0x05, 0xc9, 0x7a, 0x20, 0x65, 0x7a, 0x50, 0x65, 0x52, 0x38, 0x4d, + 0x30, 0x44, 0x36, 0x41, 0x4a, 0x11, 0xae, 0x2c, 0x31, 0x29, 0x39, 0x13, 0x9b, 0x25, 0x4a, 0x0b, + 0x19, 0xb9, 0x84, 0x83, 0x52, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x9d, 0x8a, 0xf2, 0x13, 0x53, + 0x92, 0x13, 0x8b, 0x4b, 0x42, 0x2a, 0x84, 0x1c, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, + 0x4b, 0x2a, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xd4, 0xf4, 0xe0, 0xee, 0x01, 0x99, 0x06, + 0x73, 0x8b, 0x1e, 0x4c, 0xb7, 0x33, 0x48, 0x79, 0x48, 0x45, 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, + 0xce, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, 0x32, 0x84, 0x09, 0x6c, 0x88, 0x06, + 0x01, 0x43, 0x5c, 0x20, 0x1a, 0x42, 0x2a, 0x82, 0x38, 0x53, 0x60, 0x4c, 0xa3, 0xab, 0x8c, 0x5c, + 0x3c, 0x70, 0xb7, 0x39, 0x06, 0x78, 0x0a, 0x85, 0x73, 0xb1, 0x04, 0x64, 0xe6, 0xa5, 0x0b, 0x21, + 0x39, 0x09, 0x2d, 0x88, 0x0c, 0xf5, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x40, 0xea, 0xa4, + 0xd4, 0xf1, 0xaa, 0x83, 0xd8, 0x0c, 0x36, 0x30, 0x87, 0x8b, 0x1b, 0x39, 0x10, 0x74, 0x09, 0x9b, + 0x8f, 0xa4, 0x5c, 0x4a, 0x0f, 0xa7, 0x72, 0x84, 0x07, 0x91, 0xd4, 0x3b, 0x85, 0x9c, 0x78, 0x24, + 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, + 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x55, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x12, 0xc8, + 0x3c, 0x7d, 0x78, 0x1c, 0x22, 0x22, 0xb3, 0x20, 0x53, 0x1f, 0x67, 0x12, 0x4a, 0x62, 0x03, 0x47, + 0xac, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x30, 0x2b, 0x71, 0x66, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BroadcastAPIClient interface { + // Ping the connection. + Ping(ctx context.Context, in *v1beta1.RequestPing, opts ...grpc.CallOption) (*v1beta1.ResponsePing, error) + // BroadcastTx broadcasts the transaction. + BroadcastTx(ctx context.Context, in *v1beta1.RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +} + +type broadcastAPIClient struct { + cc grpc1.ClientConn +} + +func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { + return &broadcastAPIClient{cc} +} + +func (c *broadcastAPIClient) Ping(ctx context.Context, in *v1beta1.RequestPing, opts ...grpc.CallOption) (*v1beta1.ResponsePing, error) { + out := new(v1beta1.ResponsePing) + err := c.cc.Invoke(ctx, "/cometbft.rpc.grpc.v1beta2.BroadcastAPI/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *v1beta1.RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { + out := new(ResponseBroadcastTx) + err := c.cc.Invoke(ctx, "/cometbft.rpc.grpc.v1beta2.BroadcastAPI/BroadcastTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BroadcastAPIServer is the server API for BroadcastAPI service. +type BroadcastAPIServer interface { + // Ping the connection. 
+ Ping(context.Context, *v1beta1.RequestPing) (*v1beta1.ResponsePing, error) + // BroadcastTx broadcasts the transaction. + BroadcastTx(context.Context, *v1beta1.RequestBroadcastTx) (*ResponseBroadcastTx, error) +} + +// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. +type UnimplementedBroadcastAPIServer struct { +} + +func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *v1beta1.RequestPing) (*v1beta1.ResponsePing, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *v1beta1.RequestBroadcastTx) (*ResponseBroadcastTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +} + +func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { + s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +} + +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.rpc.grpc.v1beta2.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*v1beta1.RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + +func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestBroadcastTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.rpc.grpc.v1beta2.BroadcastAPI/BroadcastTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*v1beta1.RequestBroadcastTx)) + } + return interceptor(ctx, in, info, handler) +} + +var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc +var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.rpc.grpc.v1beta2.BroadcastAPI", + HandlerType: (*BroadcastAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, + { + MethodName: "BroadcastTx", + Handler: _BroadcastAPI_BroadcastTx_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/rpc/grpc/v1beta2/types.proto", +} + +func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CheckTx != nil { + { + size, err := 
m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResponseBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckTx == nil { + m.CheckTx = &v1beta2.ResponseCheckTx{} + } + if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeliverTx == nil { + m.DeliverTx = &v1beta2.ResponseDeliverTx{} + } + if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/rpc/grpc/v1beta3/types.pb.go b/api/cometbft/rpc/grpc/v1beta3/types.pb.go new file mode 100644 index 00000000000..353d371b1fd --- /dev/null +++ b/api/cometbft/rpc/grpc/v1beta3/types.pb.go @@ -0,0 +1,524 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/rpc/grpc/v1beta3/types.proto + +package v1beta3 + +import ( + context "context" + fmt "fmt" + v1beta3 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta3" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/rpc/grpc/v1beta1" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ResponseBroadcastTx is a response of broadcasting the transaction. 
+type ResponseBroadcastTx struct { + CheckTx *v1beta3.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` + TxResult *v1beta3.ExecTxResult `protobuf:"bytes,2,opt,name=tx_result,json=txResult,proto3" json:"tx_result,omitempty"` +} + +func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } +func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*ResponseBroadcastTx) ProtoMessage() {} +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_e521bcdb5edbf680, []int{0} +} +func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) +} +func (m *ResponseBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo + +func (m *ResponseBroadcastTx) GetCheckTx() *v1beta3.ResponseCheckTx { + if m != nil { + return m.CheckTx + } + return nil +} + +func (m *ResponseBroadcastTx) GetTxResult() *v1beta3.ExecTxResult { + if m != nil { + return m.TxResult + } + return nil +} + +func init() { + proto.RegisterType((*ResponseBroadcastTx)(nil), "cometbft.rpc.grpc.v1beta3.ResponseBroadcastTx") +} + +func init() { + proto.RegisterFile("cometbft/rpc/grpc/v1beta3/types.proto", fileDescriptor_e521bcdb5edbf680) +} + +var fileDescriptor_e521bcdb5edbf680 = []byte{ + // 308 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x65, 0x86, 0x49, 0xa9, + 0x25, 0x89, 0xc6, 0xfa, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, + 0x92, 0x30, 0x65, 0x7a, 0x45, 0x05, 0xc9, 0x7a, 0x20, 0x65, 0x7a, 0x50, 0x65, 0x52, 0x38, 0x4d, + 0x30, 0x44, 0x36, 0x41, 0x4a, 0x11, 0xae, 0x2c, 0x31, 0x29, 0x39, 0x13, 0x9b, 0x25, 0x4a, 0xb3, + 0x18, 0xb9, 0x84, 0x83, 0x52, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x9d, 0x8a, 0xf2, 0x13, 0x53, + 0x92, 0x13, 0x8b, 0x4b, 0x42, 0x2a, 0x84, 0x1c, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, + 0x4b, 0x2a, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xd4, 0xf4, 0xe0, 0xee, 0x01, 0x99, 0x06, + 0x73, 0x8b, 0x1e, 0x4c, 0xb7, 0x33, 0x48, 0x79, 0x48, 0x45, 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, + 0xc0, 0xc5, 0x59, 0x52, 0x11, 0x5f, 0x94, 0x5a, 0x5c, 0x9a, 0x53, 0x22, 0xc1, 0x04, 0x36, 0x43, + 0x19, 0x87, 0x19, 0xae, 0x15, 0xa9, 0xc9, 0x21, 0x15, 0x41, 0x60, 0xa5, 0x41, 0x1c, 0x25, 0x50, + 0x96, 0xd1, 0x55, 0x46, 0x2e, 0x1e, 0xb8, 0xa3, 0x1c, 0x03, 0x3c, 0x85, 0xc2, 0xb9, 0x58, 0x02, + 0x32, 0xf3, 0xd2, 0x85, 0x90, 0xdc, 0x82, 0x16, 0x36, 0x86, 0x7a, 0x41, 0xa9, 0x85, 0xa5, 0xa9, + 0xc5, 0x25, 0x20, 0x75, 0x52, 0xea, 0x78, 0xd5, 0x41, 0xdc, 0x0d, 0x36, 0x30, 0x87, 0x8b, 0x1b, + 0xd9, 0xf7, 0xba, 0x84, 0xcd, 0x47, 0x52, 0x2e, 0xa5, 0x87, 0x53, 0x39, 0x22, 0x78, 0x90, 0xd4, + 0x3b, 0x85, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, + 0x1e, 0xcb, 0x31, 0x5c, 
0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x55, 0x7a, 0x66, + 0x49, 0x46, 0x69, 0x12, 0xc8, 0x3c, 0x7d, 0x78, 0xe4, 0x21, 0x62, 0xb1, 0x20, 0x53, 0x1f, 0x67, + 0xda, 0x49, 0x62, 0x03, 0xc7, 0xa8, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc2, 0xcf, 0xef, 0x1f, + 0x5f, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BroadcastAPIClient interface { + // Ping the connection. + Ping(ctx context.Context, in *v1beta1.RequestPing, opts ...grpc.CallOption) (*v1beta1.ResponsePing, error) + // BroadcastTx broadcasts a transaction. + BroadcastTx(ctx context.Context, in *v1beta1.RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +} + +type broadcastAPIClient struct { + cc grpc1.ClientConn +} + +func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { + return &broadcastAPIClient{cc} +} + +func (c *broadcastAPIClient) Ping(ctx context.Context, in *v1beta1.RequestPing, opts ...grpc.CallOption) (*v1beta1.ResponsePing, error) { + out := new(v1beta1.ResponsePing) + err := c.cc.Invoke(ctx, "/cometbft.rpc.grpc.v1beta3.BroadcastAPI/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *v1beta1.RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { + out := new(ResponseBroadcastTx) + err := c.cc.Invoke(ctx, "/cometbft.rpc.grpc.v1beta3.BroadcastAPI/BroadcastTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BroadcastAPIServer is the server API for BroadcastAPI service. +type BroadcastAPIServer interface { + // Ping the connection. + Ping(context.Context, *v1beta1.RequestPing) (*v1beta1.ResponsePing, error) + // BroadcastTx broadcasts a transaction. + BroadcastTx(context.Context, *v1beta1.RequestBroadcastTx) (*ResponseBroadcastTx, error) +} + +// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
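+//
+// A minimal embedding sketch (the concrete server type and the Ping
+// override below are illustrative assumptions, not part of the generated
+// code):
+//
+//	type myBroadcastServer struct {
+//		UnimplementedBroadcastAPIServer
+//	}
+//
+//	// BroadcastTx is not overridden, so calls to it fall through to the
+//	// embedded stub and return codes.Unimplemented.
+//	func (s *myBroadcastServer) Ping(ctx context.Context, req *v1beta1.RequestPing) (*v1beta1.ResponsePing, error) {
+//		return &v1beta1.ResponsePing{}, nil
+//	}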
+type UnimplementedBroadcastAPIServer struct { +} + +func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *v1beta1.RequestPing) (*v1beta1.ResponsePing, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *v1beta1.RequestBroadcastTx) (*ResponseBroadcastTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +} + +func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { + s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +} + +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.rpc.grpc.v1beta3.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*v1beta1.RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + +func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1beta1.RequestBroadcastTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.rpc.grpc.v1beta3.BroadcastAPI/BroadcastTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*v1beta1.RequestBroadcastTx)) + } + return interceptor(ctx, in, info, handler) +} + +var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc +var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.rpc.grpc.v1beta3.BroadcastAPI", + HandlerType: (*BroadcastAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, + { + MethodName: "BroadcastTx", + Handler: _BroadcastAPI_BroadcastTx_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/rpc/grpc/v1beta3/types.proto", +} + +func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TxResult != nil { + { + size, err := m.TxResult.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = 
uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResponseBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TxResult != nil { + l = m.TxResult.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckTx == nil { + m.CheckTx = &v1beta3.ResponseCheckTx{} + } + if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResult", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxResult == nil { + m.TxResult = &v1beta3.ExecTxResult{} + } + if err := m.TxResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/services/block/v1/block.pb.go b/api/cometbft/services/block/v1/block.pb.go similarity index 61% rename from proto/tendermint/services/block/v1/block.pb.go rename to api/cometbft/services/block/v1/block.pb.go index 3868540f921..a1cfd20bbe8 100644 --- a/proto/tendermint/services/block/v1/block.pb.go +++ b/api/cometbft/services/block/v1/block.pb.go @@ -1,11 +1,11 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/services/block/v1/block.proto +// source: cometbft/services/block/v1/block.proto package v1 import ( fmt "fmt" - types "github.com/cometbft/cometbft/proto/tendermint/types" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" proto "github.com/cosmos/gogoproto/proto" io "io" math "math" @@ -23,8 +23,9 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// GetByHeightRequest is a request for a block at the specified height. type GetByHeightRequest struct { - // The height of the block requested. If set to 0, the latest height will be returned. + // The height of the block requested. Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -32,7 +33,7 @@ func (m *GetByHeightRequest) Reset() { *m = GetByHeightRequest{} } func (m *GetByHeightRequest) String() string { return proto.CompactTextString(m) } func (*GetByHeightRequest) ProtoMessage() {} func (*GetByHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d48acf20d1015667, []int{0} + return fileDescriptor_a30eb8f0c11b1783, []int{0} } func (m *GetByHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -68,16 +69,17 @@ func (m *GetByHeightRequest) GetHeight() int64 { return 0 } +// GetByHeightResponse contains the block ID and the block at the specified height. 
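+//
+// The generated getters are nil-safe on the receiver, so a caller can read
+// fields without checking the response first (a usage sketch; the client
+// value and height are assumptions):
+//
+//	resp, err := client.GetByHeight(ctx, &GetByHeightRequest{Height: 1})
+//	if err != nil {
+//		return err
+//	}
+//	blockID := resp.GetBlockId() // returns nil rather than panicking if resp is nil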
type GetByHeightResponse struct { - BlockId *types.BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` - Block *types.Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + BlockId *v1.BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` + Block *v1.Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` } func (m *GetByHeightResponse) Reset() { *m = GetByHeightResponse{} } func (m *GetByHeightResponse) String() string { return proto.CompactTextString(m) } func (*GetByHeightResponse) ProtoMessage() {} func (*GetByHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d48acf20d1015667, []int{1} + return fileDescriptor_a30eb8f0c11b1783, []int{1} } func (m *GetByHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,102 +108,14 @@ func (m *GetByHeightResponse) XXX_DiscardUnknown() { var xxx_messageInfo_GetByHeightResponse proto.InternalMessageInfo -func (m *GetByHeightResponse) GetBlockId() *types.BlockID { +func (m *GetByHeightResponse) GetBlockId() *v1.BlockID { if m != nil { return m.BlockId } return nil } -func (m *GetByHeightResponse) GetBlock() *types.Block { - if m != nil { - return m.Block - } - return nil -} - -type GetLatestRequest struct { -} - -func (m *GetLatestRequest) Reset() { *m = GetLatestRequest{} } -func (m *GetLatestRequest) String() string { return proto.CompactTextString(m) } -func (*GetLatestRequest) ProtoMessage() {} -func (*GetLatestRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d48acf20d1015667, []int{2} -} -func (m *GetLatestRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLatestRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLatestRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetLatestRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLatestRequest.Merge(m, src) -} -func (m *GetLatestRequest) XXX_Size() int { - return m.Size() -} -func (m *GetLatestRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetLatestRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetLatestRequest proto.InternalMessageInfo - -type GetLatestResponse struct { - BlockId *types.BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` - Block *types.Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` -} - -func (m *GetLatestResponse) Reset() { *m = GetLatestResponse{} } -func (m *GetLatestResponse) String() string { return proto.CompactTextString(m) } -func (*GetLatestResponse) ProtoMessage() {} -func (*GetLatestResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d48acf20d1015667, []int{3} -} -func (m *GetLatestResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLatestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLatestResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetLatestResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLatestResponse.Merge(m, src) -} -func (m *GetLatestResponse) XXX_Size() int { - return m.Size() -} -func (m *GetLatestResponse) XXX_DiscardUnknown() { 
- xxx_messageInfo_GetLatestResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetLatestResponse proto.InternalMessageInfo - -func (m *GetLatestResponse) GetBlockId() *types.BlockID { - if m != nil { - return m.BlockId - } - return nil -} - -func (m *GetLatestResponse) GetBlock() *types.Block { +func (m *GetByHeightResponse) GetBlock() *v1.Block { if m != nil { return m.Block } @@ -216,7 +130,7 @@ func (m *GetLatestHeightRequest) Reset() { *m = GetLatestHeightRequest{} func (m *GetLatestHeightRequest) String() string { return proto.CompactTextString(m) } func (*GetLatestHeightRequest) ProtoMessage() {} func (*GetLatestHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d48acf20d1015667, []int{4} + return fileDescriptor_a30eb8f0c11b1783, []int{2} } func (m *GetLatestHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -256,7 +170,7 @@ func (m *GetLatestHeightResponse) Reset() { *m = GetLatestHeightResponse func (m *GetLatestHeightResponse) String() string { return proto.CompactTextString(m) } func (*GetLatestHeightResponse) ProtoMessage() {} func (*GetLatestHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d48acf20d1015667, []int{5} + return fileDescriptor_a30eb8f0c11b1783, []int{3} } func (m *GetLatestHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -293,39 +207,35 @@ func (m *GetLatestHeightResponse) GetHeight() int64 { } func init() { - proto.RegisterType((*GetByHeightRequest)(nil), "tendermint.services.block.v1.GetByHeightRequest") - proto.RegisterType((*GetByHeightResponse)(nil), "tendermint.services.block.v1.GetByHeightResponse") - proto.RegisterType((*GetLatestRequest)(nil), "tendermint.services.block.v1.GetLatestRequest") - proto.RegisterType((*GetLatestResponse)(nil), "tendermint.services.block.v1.GetLatestResponse") - proto.RegisterType((*GetLatestHeightRequest)(nil), "tendermint.services.block.v1.GetLatestHeightRequest") - proto.RegisterType((*GetLatestHeightResponse)(nil), "tendermint.services.block.v1.GetLatestHeightResponse") + proto.RegisterType((*GetByHeightRequest)(nil), "cometbft.services.block.v1.GetByHeightRequest") + proto.RegisterType((*GetByHeightResponse)(nil), "cometbft.services.block.v1.GetByHeightResponse") + proto.RegisterType((*GetLatestHeightRequest)(nil), "cometbft.services.block.v1.GetLatestHeightRequest") + proto.RegisterType((*GetLatestHeightResponse)(nil), "cometbft.services.block.v1.GetLatestHeightResponse") } func init() { - proto.RegisterFile("tendermint/services/block/v1/block.proto", fileDescriptor_d48acf20d1015667) -} - -var fileDescriptor_d48acf20d1015667 = []byte{ - // 290 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, - 0x4f, 0xca, 0xc9, 0x4f, 0xce, 0xd6, 0x2f, 0x33, 0x84, 0x30, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, - 0x85, 0x64, 0x10, 0x2a, 0xf5, 0x60, 0x2a, 0xf5, 0x20, 0x0a, 0xca, 0x0c, 0xa5, 0x90, 0x64, 0xf5, - 0x4b, 0x2a, 0x0b, 0x60, 0x86, 0x40, 0xf4, 0x62, 0x91, 0x05, 0x93, 0x10, 0x59, 0x25, 0x1d, 0x2e, - 0x21, 0xf7, 0xd4, 0x12, 0xa7, 0x4a, 0x8f, 0xd4, 0xcc, 0xf4, 0x8c, 0x92, 0xa0, 0xd4, 0xc2, 0xd2, - 0xd4, 0xe2, 0x12, 0x21, 0x31, 0x2e, 0xb6, 0x0c, 0xb0, 0x80, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x73, - 0x10, 0x94, 0xa7, 0x54, 0xc5, 0x25, 0x8c, 0xa2, 0xba, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, - 0x84, 0x8b, 0x03, 0x6c, 0x63, 0x7c, 0x66, 0x0a, 0x58, 0x03, 0xb7, 0x91, 0xa4, 0x1e, 
0x92, 0x8b, - 0x21, 0xf6, 0x39, 0x81, 0x54, 0x78, 0xba, 0x04, 0xb1, 0x83, 0x95, 0x7a, 0xa6, 0x08, 0xe9, 0x72, - 0xb1, 0x82, 0x99, 0x12, 0x4c, 0x60, 0x2d, 0xe2, 0x38, 0xb4, 0x04, 0x41, 0x54, 0x29, 0x09, 0x71, - 0x09, 0xb8, 0xa7, 0x96, 0xf8, 0x24, 0x96, 0xa4, 0x16, 0xc3, 0xdc, 0xa9, 0x54, 0xc1, 0x25, 0x88, - 0x24, 0x46, 0x4f, 0xd7, 0x48, 0x70, 0x89, 0xc1, 0x6d, 0x46, 0x09, 0x3b, 0x25, 0x43, 0x2e, 0x71, - 0x0c, 0x19, 0xa8, 0xcb, 0x70, 0x04, 0xab, 0x53, 0xe4, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, - 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, - 0xcb, 0x31, 0x44, 0xd9, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, - 0xe7, 0xe7, 0xa6, 0x96, 0x24, 0xa5, 0x95, 0x20, 0x18, 0xe0, 0x28, 0xd4, 0xc7, 0x97, 0x8a, 0x92, - 0xd8, 0xc0, 0x6a, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x20, 0x07, 0x95, 0x47, 0x6c, 0x02, - 0x00, 0x00, + proto.RegisterFile("cometbft/services/block/v1/block.proto", fileDescriptor_a30eb8f0c11b1783) +} + +var fileDescriptor_a30eb8f0c11b1783 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x4f, 0xca, + 0xc9, 0x4f, 0xce, 0xd6, 0x2f, 0x33, 0x84, 0x30, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xa4, + 0x60, 0xea, 0xf4, 0x60, 0xea, 0xf4, 0x20, 0xd2, 0x65, 0x86, 0x52, 0xb2, 0x70, 0x33, 0x4a, 0x2a, + 0x0b, 0x52, 0x8b, 0x41, 0x5a, 0xc1, 0x0c, 0x88, 0x56, 0x6c, 0xd2, 0x48, 0x26, 0x2b, 0xe9, 0x70, + 0x09, 0xb9, 0xa7, 0x96, 0x38, 0x55, 0x7a, 0xa4, 0x66, 0xa6, 0x67, 0x94, 0x04, 0xa5, 0x16, 0x96, + 0xa6, 0x16, 0x97, 0x08, 0x89, 0x71, 0xb1, 0x65, 0x80, 0x05, 0x24, 0x18, 0x15, 0x18, 0x35, 0x98, + 0x83, 0xa0, 0x3c, 0xa5, 0x1a, 0x2e, 0x61, 0x14, 0xd5, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, + 0xa6, 0x5c, 0x1c, 0x60, 0x33, 0xe3, 0x33, 0x53, 0xc0, 0x1a, 0xb8, 0x8d, 0xa4, 0xf4, 0xe0, 0x2e, + 0x86, 0x38, 0xa6, 0xcc, 0x50, 0xcf, 0x09, 0xa4, 0xc4, 0xd3, 0x25, 0x88, 0x1d, 0xac, 0xd6, 0x33, + 0x45, 0x48, 0x8f, 0x8b, 0x15, 0xcc, 0x94, 0x60, 0x02, 0xeb, 0x91, 0xc0, 0xa5, 0x27, 0x08, 0xa2, + 0x4c, 0x49, 0x82, 0x4b, 0xcc, 0x3d, 0xb5, 0xc4, 0x27, 0xb1, 0x24, 0xb5, 0xb8, 0x04, 0xc5, 0xbd, + 0x4a, 0x86, 0x5c, 0xe2, 0x18, 0x32, 0x50, 0xb7, 0xe1, 0xf0, 0x8a, 0x53, 0xe8, 0x89, 0x47, 0x72, + 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, + 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x59, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0x81, 0x5c, + 0xa3, 0x0f, 0x0f, 0x3c, 0x38, 0x23, 0xb1, 0x20, 0x53, 0x1f, 0x77, 0xac, 0x25, 0xb1, 0x81, 0x83, + 0xd5, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x65, 0xb5, 0xa5, 0x74, 0xda, 0x01, 0x00, 0x00, } func (m *GetByHeightRequest) Marshal() (dAtA []byte, err error) { @@ -403,76 +313,6 @@ func (m *GetByHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *GetLatestRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetLatestRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetLatestRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *GetLatestResponse) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetLatestResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetLatestResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Block != nil { - { - size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBlock(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.BlockId != nil { - { - size, err := m.BlockId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBlock(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *GetLatestHeightRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -564,32 +404,6 @@ func (m *GetByHeightResponse) Size() (n int) { return n } -func (m *GetLatestRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *GetLatestResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockId != nil { - l = m.BlockId.Size() - n += 1 + l + sovBlock(uint64(l)) - } - if m.Block != nil { - l = m.Block.Size() - n += 1 + l + sovBlock(uint64(l)) - } - return n -} - func (m *GetLatestHeightRequest) Size() (n int) { if m == nil { return 0 @@ -745,179 +559,7 @@ func (m *GetByHeightResponse) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.BlockId == nil { - m.BlockId = &types.BlockID{} - } - if err := m.BlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBlock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBlock - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBlock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Block == nil { - m.Block = &types.Block{} - } - if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBlock(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBlock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLatestRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBlock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLatestRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLatestRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - 
iNdEx = preIndex - skippy, err := skipBlock(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBlock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLatestResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBlock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLatestResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLatestResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBlock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBlock - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBlock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BlockId == nil { - m.BlockId = &types.BlockID{} + m.BlockId = &v1.BlockID{} } if err := m.BlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -953,7 +595,7 @@ func (m *GetLatestResponse) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Block == nil { - m.Block = &types.Block{} + m.Block = &v1.Block{} } if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/proto/tendermint/services/block/v1/block_service.pb.go b/api/cometbft/services/block/v1/block_service.pb.go similarity index 63% rename from proto/tendermint/services/block/v1/block_service.pb.go rename to api/cometbft/services/block/v1/block_service.pb.go index bac266961ea..4eefdb51ffd 100644 --- a/proto/tendermint/services/block/v1/block_service.pb.go +++ b/api/cometbft/services/block/v1/block_service.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/services/block/v1/block_service.proto +// source: cometbft/services/block/v1/block_service.proto package v1 @@ -26,27 +26,25 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("tendermint/services/block/v1/block_service.proto", fileDescriptor_1488dadaa3ae41e3) -} - -var fileDescriptor_1488dadaa3ae41e3 = []byte{ - // 246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, - 0x4f, 0xca, 0xc9, 0x4f, 0xce, 0xd6, 0x2f, 0x33, 0x84, 0x30, 0xe2, 0xa1, 0xe2, 0x7a, 0x05, 0x45, - 0xf9, 0x25, 0xf9, 0x42, 0x32, 0x08, 0x1d, 0x7a, 0x30, 0x1d, 0x7a, 0x60, 0x85, 0x7a, 0x65, 0x86, - 0x52, 0x1a, 0x84, 0xcd, 0x83, 0x98, 0x63, 0xf4, 0x99, 0x89, 0x8b, 0xc7, 0x09, 0xc4, 0x0f, 0x86, - 0x28, 0x13, 0x2a, 0xe2, 0xe2, 0x76, 0x4f, 0x2d, 0x71, 0xaa, 0xf4, 0x48, 0xcd, 0x4c, 0xcf, 0x28, - 0x11, 0x32, 0xd0, 0xc3, 0x67, 0x91, 0x1e, 0x92, 0xd2, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, - 0x29, 0x43, 0x12, 0x74, 0x14, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0xe5, 0x70, 0x71, 0xba, 0xa7, - 0x96, 0xf8, 0x24, 0x96, 0xa4, 0x16, 0x97, 0x08, 0xe9, 0x11, 0xd4, 0x0f, 0x51, 0x08, 0xb3, 0x4f, - 0x9f, 0x68, 0xf5, 0x50, 0xdb, 0x1a, 0x18, 0xb9, 0xf8, 0xe1, 0xa2, 0x50, 0x6f, 0x9a, 0x10, 0x69, - 0x08, 0xaa, 0x57, 0x4d, 0x49, 0xd4, 0x05, 0x71, 0x80, 0x01, 0xa3, 0x53, 0xe4, 0x89, 0x47, 0x72, - 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, - 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xd9, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, - 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0xe7, 0xa6, 0x96, 0x24, 0xa5, 0x95, 0x20, 0x18, 0xe0, 0x38, 0xd3, - 0xc7, 0x17, 0xb9, 0x49, 0x6c, 0x60, 0x35, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xf6, - 0xc7, 0x53, 0x53, 0x02, 0x00, 0x00, + proto.RegisterFile("cometbft/services/block/v1/block_service.proto", fileDescriptor_5768ae424af71eff) +} + +var fileDescriptor_5768ae424af71eff = []byte{ + // 219 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4b, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x4f, 0xca, + 0xc9, 0x4f, 0xce, 0xd6, 0x2f, 0x33, 0x84, 0x30, 0xe2, 0xa1, 0xe2, 0x7a, 0x05, 0x45, 0xf9, 0x25, + 0xf9, 0x42, 0x52, 0x30, 0xf5, 0x7a, 0x30, 0xf5, 0x7a, 0x60, 0x65, 0x7a, 0x65, 0x86, 0x52, 0x6a, + 0x84, 0xcc, 0x82, 0x98, 0x61, 0xf4, 0x87, 0x91, 0x8b, 0xc7, 0x09, 0xc4, 0x0f, 0x86, 0x28, 0x13, + 0xca, 0xe3, 0xe2, 0x76, 0x4f, 0x2d, 0x71, 0xaa, 0xf4, 0x48, 0xcd, 0x4c, 0xcf, 0x28, 0x11, 0xd2, + 0xd3, 0xc3, 0x6d, 0x89, 0x1e, 0x92, 0xc2, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x29, 0x7d, + 0xa2, 0xd5, 0x17, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0xd5, 0x70, 0xf1, 0xbb, 0xa7, 0x96, 0xf8, + 0x24, 0x96, 0xa4, 0x16, 0x97, 0x40, 0xed, 0x34, 0x22, 0x60, 0x06, 0xb2, 0x62, 0x98, 0xbd, 0xc6, + 0x24, 0xe9, 0x81, 0xd8, 0x6d, 0xc0, 0xe8, 0x14, 0x7a, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, + 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, + 0x72, 0x0c, 0x51, 0xd6, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x20, 0x63, 0xf5, 0xe1, 0x61, 0x09, + 0x67, 0x24, 0x16, 0x64, 0xea, 0xe3, 0x0e, 0xe1, 0x24, 0x36, 0x70, 0xe0, 0x1a, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xab, 0x27, 0xc3, 0x98, 0xd2, 0x01, 0x00, 0x00, } // Reference imports to 
suppress errors if they are not otherwise used. @@ -63,8 +61,6 @@ const _ = grpc.SupportPackageIsVersion4 type BlockServiceClient interface { // GetBlock retrieves the block information at a particular height. GetByHeight(ctx context.Context, in *GetByHeightRequest, opts ...grpc.CallOption) (*GetByHeightResponse, error) - // GetLatest retrieves the latest block. - GetLatest(ctx context.Context, in *GetLatestRequest, opts ...grpc.CallOption) (*GetLatestResponse, error) // GetLatestHeight returns a stream of the latest block heights committed by // the network. This is a long-lived stream that is only terminated by the // server if an error occurs. The caller is expected to handle such @@ -82,16 +78,7 @@ func NewBlockServiceClient(cc grpc1.ClientConn) BlockServiceClient { func (c *blockServiceClient) GetByHeight(ctx context.Context, in *GetByHeightRequest, opts ...grpc.CallOption) (*GetByHeightResponse, error) { out := new(GetByHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.block.v1.BlockService/GetByHeight", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blockServiceClient) GetLatest(ctx context.Context, in *GetLatestRequest, opts ...grpc.CallOption) (*GetLatestResponse, error) { - out := new(GetLatestResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.block.v1.BlockService/GetLatest", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.block.v1.BlockService/GetByHeight", in, out, opts...) if err != nil { return nil, err } @@ -99,7 +86,7 @@ func (c *blockServiceClient) GetLatest(ctx context.Context, in *GetLatestRequest } func (c *blockServiceClient) GetLatestHeight(ctx context.Context, in *GetLatestHeightRequest, opts ...grpc.CallOption) (BlockService_GetLatestHeightClient, error) { - stream, err := c.cc.NewStream(ctx, &_BlockService_serviceDesc.Streams[0], "/tendermint.services.block.v1.BlockService/GetLatestHeight", opts...) + stream, err := c.cc.NewStream(ctx, &_BlockService_serviceDesc.Streams[0], "/cometbft.services.block.v1.BlockService/GetLatestHeight", opts...) if err != nil { return nil, err } @@ -134,8 +121,6 @@ func (x *blockServiceGetLatestHeightClient) Recv() (*GetLatestHeightResponse, er type BlockServiceServer interface { // GetBlock retrieves the block information at a particular height. GetByHeight(context.Context, *GetByHeightRequest) (*GetByHeightResponse, error) - // GetLatest retrieves the latest block. - GetLatest(context.Context, *GetLatestRequest) (*GetLatestResponse, error) // GetLatestHeight returns a stream of the latest block heights committed by // the network. This is a long-lived stream that is only terminated by the // server if an error occurs. 
The caller is expected to handle such @@ -150,9 +135,6 @@ type UnimplementedBlockServiceServer struct { func (*UnimplementedBlockServiceServer) GetByHeight(ctx context.Context, req *GetByHeightRequest) (*GetByHeightResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetByHeight not implemented") } -func (*UnimplementedBlockServiceServer) GetLatest(ctx context.Context, req *GetLatestRequest) (*GetLatestResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetLatest not implemented") -} func (*UnimplementedBlockServiceServer) GetLatestHeight(req *GetLatestHeightRequest, srv BlockService_GetLatestHeightServer) error { return status.Errorf(codes.Unimplemented, "method GetLatestHeight not implemented") } @@ -171,7 +153,7 @@ func _BlockService_GetByHeight_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.block.v1.BlockService/GetByHeight", + FullMethod: "/cometbft.services.block.v1.BlockService/GetByHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlockServiceServer).GetByHeight(ctx, req.(*GetByHeightRequest)) @@ -179,24 +161,6 @@ func _BlockService_GetByHeight_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _BlockService_GetLatest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetLatestRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlockServiceServer).GetLatest(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.services.block.v1.BlockService/GetLatest", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlockServiceServer).GetLatest(ctx, req.(*GetLatestRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _BlockService_GetLatestHeight_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(GetLatestHeightRequest) if err := stream.RecvMsg(m); err != nil { @@ -218,18 +182,15 @@ func (x *blockServiceGetLatestHeightServer) Send(m *GetLatestHeightResponse) err return x.ServerStream.SendMsg(m) } +var BlockService_serviceDesc = _BlockService_serviceDesc var _BlockService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.services.block.v1.BlockService", + ServiceName: "cometbft.services.block.v1.BlockService", HandlerType: (*BlockServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetByHeight", Handler: _BlockService_GetByHeight_Handler, }, - { - MethodName: "GetLatest", - Handler: _BlockService_GetLatest_Handler, - }, }, Streams: []grpc.StreamDesc{ { @@ -238,5 +199,5 @@ var _BlockService_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "tendermint/services/block/v1/block_service.proto", + Metadata: "cometbft/services/block/v1/block_service.proto", } diff --git a/proto/tendermint/services/block_results/v1/block_results.pb.go b/api/cometbft/services/block_results/v1/block_results.pb.go similarity index 66% rename from proto/tendermint/services/block_results/v1/block_results.pb.go rename to api/cometbft/services/block_results/v1/block_results.pb.go index 856d64f9490..ea87eebbde3 100644 --- a/proto/tendermint/services/block_results/v1/block_results.pb.go +++ b/api/cometbft/services/block_results/v1/block_results.pb.go @@ -1,12 +1,12 @@ // Code generated by 
protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/services/block_results/v1/block_results.proto +// source: cometbft/services/block_results/v1/block_results.proto package v1 import ( fmt "fmt" - types "github.com/cometbft/cometbft/abci/types" - types1 "github.com/cometbft/cometbft/proto/tendermint/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" + v11 "github.com/cometbft/cometbft/api/cometbft/types/v1" proto "github.com/cosmos/gogoproto/proto" io "io" math "math" @@ -24,6 +24,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// GetBlockResults is a request for the BlockResults of a given height. type GetBlockResultsRequest struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -32,7 +33,7 @@ func (m *GetBlockResultsRequest) Reset() { *m = GetBlockResultsRequest{} func (m *GetBlockResultsRequest) String() string { return proto.CompactTextString(m) } func (*GetBlockResultsRequest) ProtoMessage() {} func (*GetBlockResultsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b54f1890172d3d7d, []int{0} + return fileDescriptor_3fd862496bf20f1b, []int{0} } func (m *GetBlockResultsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -68,56 +69,21 @@ func (m *GetBlockResultsRequest) GetHeight() int64 { return 0 } -type GetLatestBlockResultsRequest struct { -} - -func (m *GetLatestBlockResultsRequest) Reset() { *m = GetLatestBlockResultsRequest{} } -func (m *GetLatestBlockResultsRequest) String() string { return proto.CompactTextString(m) } -func (*GetLatestBlockResultsRequest) ProtoMessage() {} -func (*GetLatestBlockResultsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b54f1890172d3d7d, []int{1} -} -func (m *GetLatestBlockResultsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLatestBlockResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLatestBlockResultsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetLatestBlockResultsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLatestBlockResultsRequest.Merge(m, src) -} -func (m *GetLatestBlockResultsRequest) XXX_Size() int { - return m.Size() -} -func (m *GetLatestBlockResultsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetLatestBlockResultsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetLatestBlockResultsRequest proto.InternalMessageInfo - +// GetBlockResultsResponse contains the block results for the given height. 
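+//
+// A brief usage sketch (the resp value is assumed to come from a
+// BlockResultsService client; ExecTxResult's Code and Log fields come from
+// the ABCI types):
+//
+//	for i, txr := range resp.GetTxResults() {
+//		if txr.Code != 0 {
+//			fmt.Printf("tx %d failed: %s\n", i, txr.Log)
+//		}
+//	}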
type GetBlockResultsResponse struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - TxsResults []*types.ExecTxResult `protobuf:"bytes,2,rep,name=txs_results,json=txsResults,proto3" json:"txs_results,omitempty"` - FinalizeBlockEvents []*types.Event `protobuf:"bytes,3,rep,name=finalize_block_events,json=finalizeBlockEvents,proto3" json:"finalize_block_events,omitempty"` - ValidatorUpdates []*types.ValidatorUpdate `protobuf:"bytes,4,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,5,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - AppHash []byte `protobuf:"bytes,6,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + TxResults []*v1.ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + FinalizeBlockEvents []*v1.Event `protobuf:"bytes,3,rep,name=finalize_block_events,json=finalizeBlockEvents,proto3" json:"finalize_block_events,omitempty"` + ValidatorUpdates []*v1.ValidatorUpdate `protobuf:"bytes,4,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` + ConsensusParamUpdates *v11.ConsensusParams `protobuf:"bytes,5,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + AppHash []byte `protobuf:"bytes,6,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` } func (m *GetBlockResultsResponse) Reset() { *m = GetBlockResultsResponse{} } func (m *GetBlockResultsResponse) String() string { return proto.CompactTextString(m) } func (*GetBlockResultsResponse) ProtoMessage() {} func (*GetBlockResultsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b54f1890172d3d7d, []int{2} + return fileDescriptor_3fd862496bf20f1b, []int{1} } func (m *GetBlockResultsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -153,28 +119,28 @@ func (m *GetBlockResultsResponse) GetHeight() int64 { return 0 } -func (m *GetBlockResultsResponse) GetTxsResults() []*types.ExecTxResult { +func (m *GetBlockResultsResponse) GetTxResults() []*v1.ExecTxResult { if m != nil { - return m.TxsResults + return m.TxResults } return nil } -func (m *GetBlockResultsResponse) GetFinalizeBlockEvents() []*types.Event { +func (m *GetBlockResultsResponse) GetFinalizeBlockEvents() []*v1.Event { if m != nil { return m.FinalizeBlockEvents } return nil } -func (m *GetBlockResultsResponse) GetValidatorUpdates() []*types.ValidatorUpdate { +func (m *GetBlockResultsResponse) GetValidatorUpdates() []*v1.ValidatorUpdate { if m != nil { return m.ValidatorUpdates } return nil } -func (m *GetBlockResultsResponse) GetConsensusParamUpdates() *types1.ConsensusParams { +func (m *GetBlockResultsResponse) GetConsensusParamUpdates() *v11.ConsensusParams { if m != nil { return m.ConsensusParamUpdates } @@ -189,43 +155,41 @@ func (m *GetBlockResultsResponse) GetAppHash() []byte { } func init() { - proto.RegisterType((*GetBlockResultsRequest)(nil), "tendermint.services.block_results.v1.GetBlockResultsRequest") - proto.RegisterType((*GetLatestBlockResultsRequest)(nil), "tendermint.services.block_results.v1.GetLatestBlockResultsRequest") - proto.RegisterType((*GetBlockResultsResponse)(nil), "tendermint.services.block_results.v1.GetBlockResultsResponse") + 
proto.RegisterType((*GetBlockResultsRequest)(nil), "cometbft.services.block_results.v1.GetBlockResultsRequest") + proto.RegisterType((*GetBlockResultsResponse)(nil), "cometbft.services.block_results.v1.GetBlockResultsResponse") } func init() { - proto.RegisterFile("tendermint/services/block_results/v1/block_results.proto", fileDescriptor_b54f1890172d3d7d) -} - -var fileDescriptor_b54f1890172d3d7d = []byte{ - // 411 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8f, 0x93, 0x40, - 0x14, 0xc6, 0x8b, 0xd5, 0x6a, 0xa6, 0x1e, 0x14, 0xb3, 0x5d, 0x5c, 0x5d, 0x82, 0x8d, 0x87, 0x9e, - 0xc0, 0x5d, 0x2f, 0x9e, 0x3c, 0xac, 0x31, 0x35, 0x46, 0x13, 0x33, 0x51, 0x13, 0xbd, 0x90, 0x61, - 0xfa, 0x76, 0x99, 0x58, 0x60, 0xe4, 0x3d, 0x08, 0x7a, 0xf4, 0x2f, 0xf0, 0xcf, 0xf2, 0xb8, 0x47, - 0x8f, 0xa6, 0xfd, 0x47, 0x0c, 0x03, 0x58, 0x48, 0x35, 0xf1, 0x06, 0xdf, 0xfb, 0xbe, 0x5f, 0xe6, - 0x9b, 0x79, 0xec, 0x09, 0x41, 0xba, 0x82, 0x3c, 0x51, 0x29, 0x05, 0x08, 0x79, 0xa9, 0x24, 0x60, - 0x10, 0xad, 0x33, 0xf9, 0x29, 0xcc, 0x01, 0x8b, 0x35, 0x61, 0x50, 0x9e, 0x0c, 0x05, 0x5f, 0xe7, - 0x19, 0x65, 0xf6, 0xc3, 0x5d, 0xd2, 0xef, 0x92, 0xfe, 0xd0, 0x58, 0x9e, 0x1c, 0xdd, 0xeb, 0xf1, - 0x45, 0x24, 0x55, 0x40, 0x5f, 0x34, 0xb4, 0x88, 0xa3, 0xe3, 0xde, 0xd0, 0xe8, 0x81, 0x16, 0xb9, - 0x48, 0xda, 0xf1, 0xfc, 0x11, 0x9b, 0x2d, 0x81, 0xce, 0x6a, 0x24, 0x6f, 0x88, 0x1c, 0x3e, 0x17, - 0x80, 0x64, 0xcf, 0xd8, 0x24, 0x06, 0x75, 0x11, 0x93, 0x63, 0x79, 0xd6, 0x62, 0xcc, 0xdb, 0xbf, - 0xb9, 0xcb, 0xee, 0x2f, 0x81, 0x5e, 0x09, 0x02, 0xfc, 0x5b, 0x6e, 0xfe, 0x6d, 0xcc, 0x0e, 0xf7, - 0x90, 0xa8, 0xb3, 0x14, 0xe1, 0x5f, 0x4c, 0xfb, 0x29, 0x9b, 0x52, 0x85, 0x5d, 0x27, 0xe7, 0x8a, - 0x37, 0x5e, 0x4c, 0x4f, 0x8f, 0xfd, 0x5e, 0xfb, 0xba, 0x97, 0xff, 0xbc, 0x02, 0xf9, 0xb6, 0x6a, - 0xa0, 0x9c, 0x51, 0x85, 0x2d, 0xdf, 0x7e, 0xc9, 0x0e, 0xce, 0x55, 0x2a, 0xd6, 0xea, 0x2b, 0x84, - 0xcd, 0xf5, 0x40, 0x09, 0x29, 0xa1, 0x33, 0x36, 0xa4, 0xd9, 0x3e, 0xa9, 0x1e, 0xf3, 0x3b, 0x5d, - 0xc8, 0x1c, 0xd6, 0x68, 0x68, 0xbf, 0x66, 0xb7, 0x4b, 0xb1, 0x56, 0x2b, 0x41, 0x59, 0x1e, 0x16, - 0x7a, 0x55, 0x17, 0x75, 0xae, 0x1a, 0x8e, 0xb7, 0xc7, 0x79, 0xdf, 0x39, 0xdf, 0x19, 0x23, 0xbf, - 0x55, 0x0e, 0x05, 0xb4, 0x3f, 0xb0, 0x43, 0x59, 0x77, 0x4f, 0xb1, 0xc0, 0xd0, 0x5c, 0xfd, 0x1f, - 0xe8, 0x35, 0xcf, 0x5a, 0x4c, 0x4f, 0x1f, 0xf4, 0xa1, 0xcd, 0xcb, 0x3d, 0xeb, 0x02, 0x6f, 0xcc, - 0x53, 0xf1, 0x03, 0x39, 0x10, 0x3a, 0xf4, 0x5d, 0x76, 0x43, 0x68, 0x1d, 0xc6, 0x02, 0x63, 0x67, - 0xe2, 0x59, 0x8b, 0x9b, 0xfc, 0xba, 0xd0, 0xfa, 0x85, 0xc0, 0xf8, 0x4c, 0xfc, 0xd8, 0xb8, 0xd6, - 0xe5, 0xc6, 0xb5, 0x7e, 0x6d, 0x5c, 0xeb, 0xfb, 0xd6, 0x1d, 0x5d, 0x6e, 0xdd, 0xd1, 0xcf, 0xad, - 0x3b, 0xfa, 0xb8, 0xbc, 0x50, 0x14, 0x17, 0x91, 0x2f, 0xb3, 0x24, 0x90, 0x59, 0x02, 0x14, 0x9d, - 0xd3, 0xee, 0xc3, 0x2c, 0x45, 0xf0, 0x3f, 0xfb, 0x1a, 0x4d, 0x8c, 0xf7, 0xf1, 0xef, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xaa, 0x0e, 0x3b, 0xa9, 0xde, 0x02, 0x00, 0x00, + proto.RegisterFile("cometbft/services/block_results/v1/block_results.proto", fileDescriptor_3fd862496bf20f1b) +} + +var fileDescriptor_3fd862496bf20f1b = []byte{ + // 396 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x4f, 0xcf, 0x93, 0x40, + 0x10, 0xc6, 0x8b, 0x68, 0xd5, 0x7d, 0x3d, 0x28, 0xe6, 0x6d, 0xb1, 0x31, 0x1b, 0xe4, 0xc4, 0x09, + 0xa4, 0x26, 0xde, 0xbc, 0xb4, 0x31, 0x9a, 0x98, 0x18, 0xb3, 0x51, 0x0f, 0x4d, 0x0c, 0x59, 0xe8, + 0xb6, 0x6c, 0xa4, 0xb0, 0x32, 0xcb, 0xa6, 0x7a, 0xf6, 0x03, 0xf8, 
0xb1, 0x3c, 0xf6, 0xe8, 0xd1, + 0xb4, 0x5f, 0xc4, 0xb0, 0xfc, 0x69, 0x48, 0x35, 0xde, 0xe0, 0x79, 0xe6, 0xf9, 0x31, 0xc3, 0x0c, + 0x7a, 0x9e, 0x14, 0x3b, 0x26, 0xe3, 0x8d, 0x0c, 0x80, 0x95, 0x8a, 0x27, 0x0c, 0x82, 0x38, 0x2b, + 0x92, 0xcf, 0x51, 0xc9, 0xa0, 0xca, 0x24, 0x04, 0x2a, 0x1c, 0x0a, 0xbe, 0x28, 0x0b, 0x59, 0x58, + 0x6e, 0x97, 0xf3, 0xbb, 0x9c, 0x3f, 0x2c, 0x53, 0xe1, 0xec, 0x71, 0xcf, 0xa6, 0x71, 0xc2, 0x6b, + 0x92, 0xfc, 0x2a, 0x58, 0x4b, 0x98, 0xe1, 0xde, 0xd5, 0x6a, 0x6d, 0x0b, 0x5a, 0xd2, 0x5d, 0xeb, + 0xbb, 0x4f, 0xd1, 0xe4, 0x15, 0x93, 0x8b, 0x1a, 0x4a, 0x1a, 0x26, 0x61, 0x5f, 0x2a, 0x06, 0xd2, + 0x9a, 0xa0, 0x71, 0xca, 0xf8, 0x36, 0x95, 0xb6, 0xe1, 0x18, 0x9e, 0x49, 0xda, 0x37, 0xf7, 0xbb, + 0x89, 0xa6, 0x17, 0x11, 0x10, 0x45, 0x0e, 0xec, 0x5f, 0x19, 0xeb, 0x05, 0x42, 0x72, 0xdf, 0x35, + 0x6d, 0xdf, 0x70, 0x4c, 0xef, 0x6a, 0x8e, 0xfd, 0x7e, 0xb8, 0xba, 0x71, 0x5f, 0x85, 0xfe, 0xcb, + 0x3d, 0x4b, 0xde, 0xef, 0x1b, 0x28, 0xb9, 0x2b, 0xdb, 0x27, 0xb0, 0xde, 0xa0, 0xeb, 0x0d, 0xcf, + 0x69, 0xc6, 0xbf, 0xb1, 0xa8, 0x99, 0x9f, 0x29, 0x96, 0x4b, 0xb0, 0x4d, 0x4d, 0x9a, 0xfe, 0x85, + 0x54, 0xfb, 0xe4, 0x61, 0x97, 0xd2, 0xcd, 0x6a, 0x0d, 0xac, 0xb7, 0xe8, 0x81, 0xa2, 0x19, 0x5f, + 0x53, 0x59, 0x94, 0x51, 0x25, 0xd6, 0x54, 0x32, 0xb0, 0x6f, 0x6a, 0xd0, 0x93, 0x4b, 0xd0, 0xc7, + 0xae, 0xf4, 0x83, 0xae, 0x24, 0xf7, 0xd5, 0x50, 0x00, 0x6b, 0x85, 0xa6, 0x49, 0x3d, 0x7c, 0x0e, + 0x15, 0x44, 0xfa, 0xdf, 0xf6, 0xd4, 0x5b, 0x8e, 0xe1, 0x5d, 0xcd, 0xdd, 0x33, 0xb5, 0xd9, 0x8c, + 0x0a, 0xfd, 0x65, 0x97, 0x78, 0xa7, 0x97, 0x41, 0xae, 0x93, 0x81, 0xd0, 0xb1, 0x1f, 0xa1, 0x3b, + 0x54, 0x88, 0x28, 0xa5, 0x90, 0xda, 0x63, 0xc7, 0xf0, 0xee, 0x91, 0xdb, 0x54, 0x88, 0xd7, 0x14, + 0xd2, 0xc5, 0xa7, 0x9f, 0x47, 0x6c, 0x1c, 0x8e, 0xd8, 0xf8, 0x7d, 0xc4, 0xc6, 0x8f, 0x13, 0x1e, + 0x1d, 0x4e, 0x78, 0xf4, 0xeb, 0x84, 0x47, 0xab, 0xe5, 0x96, 0xcb, 0xb4, 0x8a, 0xeb, 0xaf, 0x06, + 0xfd, 0xf6, 0xcf, 0x47, 0x22, 0x78, 0xf0, 0xff, 0x6b, 0x8c, 0xc7, 0xfa, 0x3c, 0x9e, 0xfd, 0x09, + 0x00, 0x00, 0xff, 0xff, 0xf5, 0x7a, 0x3e, 0x1d, 0xba, 0x02, 0x00, 0x00, } func (m *GetBlockResultsRequest) Marshal() (dAtA []byte, err error) { @@ -256,29 +220,6 @@ func (m *GetBlockResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *GetLatestBlockResultsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetLatestBlockResultsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetLatestBlockResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - func (m *GetBlockResultsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -346,10 +287,10 @@ func (m *GetBlockResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) dAtA[i] = 0x1a } } - if len(m.TxsResults) > 0 { - for iNdEx := len(m.TxsResults) - 1; iNdEx >= 0; iNdEx-- { + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.TxsResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -391,15 +332,6 @@ func (m *GetBlockResultsRequest) Size() (n int) { return n } -func (m *GetLatestBlockResultsRequest) Size() (n int) { - if m == nil { - 
return 0 - } - var l int - _ = l - return n -} - func (m *GetBlockResultsResponse) Size() (n int) { if m == nil { return 0 @@ -409,8 +341,8 @@ func (m *GetBlockResultsResponse) Size() (n int) { if m.Height != 0 { n += 1 + sovBlockResults(uint64(m.Height)) } - if len(m.TxsResults) > 0 { - for _, e := range m.TxsResults { + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { l = e.Size() n += 1 + l + sovBlockResults(uint64(l)) } @@ -513,56 +445,6 @@ func (m *GetBlockResultsRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetLatestBlockResultsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBlockResults - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLatestBlockResultsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLatestBlockResultsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBlockResults(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBlockResults - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *GetBlockResultsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -613,7 +495,7 @@ func (m *GetBlockResultsResponse) Unmarshal(dAtA []byte) error { } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxsResults", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -640,8 +522,8 @@ func (m *GetBlockResultsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TxsResults = append(m.TxsResults, &types.ExecTxResult{}) - if err := m.TxsResults[len(m.TxsResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TxResults = append(m.TxResults, &v1.ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -674,7 +556,7 @@ func (m *GetBlockResultsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FinalizeBlockEvents = append(m.FinalizeBlockEvents, &types.Event{}) + m.FinalizeBlockEvents = append(m.FinalizeBlockEvents, &v1.Event{}) if err := m.FinalizeBlockEvents[len(m.FinalizeBlockEvents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -708,7 +590,7 @@ func (m *GetBlockResultsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorUpdates = append(m.ValidatorUpdates, &types.ValidatorUpdate{}) + m.ValidatorUpdates = append(m.ValidatorUpdates, &v1.ValidatorUpdate{}) if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -743,7 +625,7 @@ func (m *GetBlockResultsResponse) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} + m.ConsensusParamUpdates = 
&v11.ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/api/cometbft/services/block_results/v1/block_results_service.pb.go b/api/cometbft/services/block_results/v1/block_results_service.pb.go new file mode 100644 index 00000000000..9361641a85d --- /dev/null +++ b/api/cometbft/services/block_results/v1/block_results_service.pb.go @@ -0,0 +1,130 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/services/block_results/v1/block_results_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("cometbft/services/block_results/v1/block_results_service.proto", fileDescriptor_03cd7b7c1632b595) +} + +var fileDescriptor_03cd7b7c1632b595 = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4b, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x4f, 0xca, + 0xc9, 0x4f, 0xce, 0x8e, 0x2f, 0x4a, 0x2d, 0x2e, 0xcd, 0x29, 0x29, 0xd6, 0x2f, 0x33, 0x44, 0x15, + 0x88, 0x87, 0xaa, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x82, 0xe9, 0xd7, 0x83, 0xe9, + 0xd7, 0x43, 0x51, 0xae, 0x57, 0x66, 0x28, 0x65, 0x46, 0xaa, 0x1d, 0x10, 0xb3, 0x8d, 0x16, 0x31, + 0x72, 0x09, 0x3b, 0x81, 0xc4, 0x83, 0x20, 0xc2, 0xc1, 0x10, 0xdd, 0x42, 0x5d, 0x8c, 0x5c, 0xfc, + 0xee, 0xa9, 0x25, 0xc8, 0x52, 0x42, 0x56, 0x7a, 0x84, 0x1d, 0xa2, 0x87, 0xa6, 0x29, 0x28, 0xb5, + 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0xca, 0x9a, 0x2c, 0xbd, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x4e, + 0xb1, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, + 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xe5, 0x9c, 0x9e, 0x59, 0x92, + 0x51, 0x9a, 0x04, 0x32, 0x5c, 0x1f, 0x1e, 0x02, 0x70, 0x46, 0x62, 0x41, 0xa6, 0x3e, 0xe1, 0x70, + 0x49, 0x62, 0x03, 0x07, 0x85, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc2, 0x14, 0x1e, 0xa8, + 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BlockResultsServiceClient is the client API for BlockResultsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BlockResultsServiceClient interface { + // GetBlockResults returns the BlockResults of the requested height. 
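+	//
+	// A call sketch (the conn value and height are assumptions):
+	//
+	//	client := NewBlockResultsServiceClient(conn)
+	//	res, err := client.GetBlockResults(ctx, &GetBlockResultsRequest{Height: 10})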
+ GetBlockResults(ctx context.Context, in *GetBlockResultsRequest, opts ...grpc.CallOption) (*GetBlockResultsResponse, error) +} + +type blockResultsServiceClient struct { + cc grpc1.ClientConn +} + +func NewBlockResultsServiceClient(cc grpc1.ClientConn) BlockResultsServiceClient { + return &blockResultsServiceClient{cc} +} + +func (c *blockResultsServiceClient) GetBlockResults(ctx context.Context, in *GetBlockResultsRequest, opts ...grpc.CallOption) (*GetBlockResultsResponse, error) { + out := new(GetBlockResultsResponse) + err := c.cc.Invoke(ctx, "/cometbft.services.block_results.v1.BlockResultsService/GetBlockResults", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BlockResultsServiceServer is the server API for BlockResultsService service. +type BlockResultsServiceServer interface { + // GetBlockResults returns the BlockResults of the requested height. + GetBlockResults(context.Context, *GetBlockResultsRequest) (*GetBlockResultsResponse, error) +} + +// UnimplementedBlockResultsServiceServer can be embedded to have forward compatible implementations. +type UnimplementedBlockResultsServiceServer struct { +} + +func (*UnimplementedBlockResultsServiceServer) GetBlockResults(ctx context.Context, req *GetBlockResultsRequest) (*GetBlockResultsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBlockResults not implemented") +} + +func RegisterBlockResultsServiceServer(s grpc1.Server, srv BlockResultsServiceServer) { + s.RegisterService(&_BlockResultsService_serviceDesc, srv) +} + +func _BlockResultsService_GetBlockResults_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBlockResultsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockResultsServiceServer).GetBlockResults(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cometbft.services.block_results.v1.BlockResultsService/GetBlockResults", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockResultsServiceServer).GetBlockResults(ctx, req.(*GetBlockResultsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var BlockResultsService_serviceDesc = _BlockResultsService_serviceDesc +var _BlockResultsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cometbft.services.block_results.v1.BlockResultsService", + HandlerType: (*BlockResultsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetBlockResults", + Handler: _BlockResultsService_GetBlockResults_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cometbft/services/block_results/v1/block_results_service.proto", +} diff --git a/proto/tendermint/services/pruning/v1/pruning.pb.go b/api/cometbft/services/pruning/v1/pruning.pb.go similarity index 91% rename from proto/tendermint/services/pruning/v1/pruning.pb.go rename to api/cometbft/services/pruning/v1/pruning.pb.go index f96a4d18fce..2af72387cf1 100644 --- a/proto/tendermint/services/pruning/v1/pruning.pb.go +++ b/api/cometbft/services/pruning/v1/pruning.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
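The new BlockResultsService introduced above is a plain unary gRPC service, so the generated client can be driven by an ordinary grpc-go connection (a *grpc.ClientConn satisfies the gogoproto grpc1.ClientConn interface the constructor expects). A minimal client sketch follows; the endpoint address is hypothetical, and the request's Height field is inferred from the service's doc comment:

package main

import (
	"context"
	"fmt"
	"log"

	brv1 "github.com/cometbft/cometbft/api/cometbft/services/block_results/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical address; use whatever the node's gRPC listener is configured with.
	conn, err := grpc.Dial("127.0.0.1:26670", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := brv1.NewBlockResultsServiceClient(conn)
	resp, err := client.GetBlockResults(context.Background(), &brv1.GetBlockResultsRequest{Height: 100})
	if err != nil {
		log.Fatalf("GetBlockResults: %v", err)
	}
	// Height and TxResults are the fields visible in the renamed message above.
	fmt.Printf("height %d: %d tx results\n", resp.Height, len(resp.TxResults))
}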
-// source: tendermint/services/pruning/v1/pruning.proto +// source: cometbft/services/pruning/v1/pruning.proto -package tendermint_services_pruning_v1 +package cometbft_services_pruning_v1 import ( fmt "fmt" @@ -22,6 +22,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// SetBlockRetainHeightRequest sets the retain height for blocks. type SetBlockRetainHeightRequest struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -30,7 +31,7 @@ func (m *SetBlockRetainHeightRequest) Reset() { *m = SetBlockRetainHeigh func (m *SetBlockRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*SetBlockRetainHeightRequest) ProtoMessage() {} func (*SetBlockRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{0} + return fileDescriptor_14bf9cf2a477c5d2, []int{0} } func (m *SetBlockRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -66,6 +67,7 @@ func (m *SetBlockRetainHeightRequest) GetHeight() uint64 { return 0 } +// SetBlockRetainHeightResponse is empty. type SetBlockRetainHeightResponse struct { } @@ -73,7 +75,7 @@ func (m *SetBlockRetainHeightResponse) Reset() { *m = SetBlockRetainHeig func (m *SetBlockRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*SetBlockRetainHeightResponse) ProtoMessage() {} func (*SetBlockRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{1} + return fileDescriptor_14bf9cf2a477c5d2, []int{1} } func (m *SetBlockRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,6 +104,7 @@ func (m *SetBlockRetainHeightResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SetBlockRetainHeightResponse proto.InternalMessageInfo +// GetBlockRetainHeightRequest is a request for the retain height. type GetBlockRetainHeightRequest struct { } @@ -109,7 +112,7 @@ func (m *GetBlockRetainHeightRequest) Reset() { *m = GetBlockRetainHeigh func (m *GetBlockRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*GetBlockRetainHeightRequest) ProtoMessage() {} func (*GetBlockRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{2} + return fileDescriptor_14bf9cf2a477c5d2, []int{2} } func (m *GetBlockRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -138,6 +141,7 @@ func (m *GetBlockRetainHeightRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetBlockRetainHeightRequest proto.InternalMessageInfo +// GetBlockRetainHeightResponse returns the retain height for blocks. type GetBlockRetainHeightResponse struct { // The retain height set by the application. 
AppRetainHeight uint64 `protobuf:"varint,1,opt,name=app_retain_height,json=appRetainHeight,proto3" json:"app_retain_height,omitempty"` @@ -150,7 +154,7 @@ func (m *GetBlockRetainHeightResponse) Reset() { *m = GetBlockRetainHeig func (m *GetBlockRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*GetBlockRetainHeightResponse) ProtoMessage() {} func (*GetBlockRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{3} + return fileDescriptor_14bf9cf2a477c5d2, []int{3} } func (m *GetBlockRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -193,6 +197,7 @@ func (m *GetBlockRetainHeightResponse) GetPruningServiceRetainHeight() uint64 { return 0 } +// SetBlockResultsRetainHeightRequest sets the retain height for block results. type SetBlockResultsRetainHeightRequest struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -201,7 +206,7 @@ func (m *SetBlockResultsRetainHeightRequest) Reset() { *m = SetBlockResu func (m *SetBlockResultsRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*SetBlockResultsRetainHeightRequest) ProtoMessage() {} func (*SetBlockResultsRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{4} + return fileDescriptor_14bf9cf2a477c5d2, []int{4} } func (m *SetBlockResultsRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -237,6 +242,7 @@ func (m *SetBlockResultsRetainHeightRequest) GetHeight() uint64 { return 0 } +// SetBlockResultsRetainHeightResponse is empty. type SetBlockResultsRetainHeightResponse struct { } @@ -244,7 +250,7 @@ func (m *SetBlockResultsRetainHeightResponse) Reset() { *m = SetBlockRes func (m *SetBlockResultsRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*SetBlockResultsRetainHeightResponse) ProtoMessage() {} func (*SetBlockResultsRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{5} + return fileDescriptor_14bf9cf2a477c5d2, []int{5} } func (m *SetBlockResultsRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,6 +279,7 @@ func (m *SetBlockResultsRetainHeightResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SetBlockResultsRetainHeightResponse proto.InternalMessageInfo +// GetBlockResultsRetainHeightRequest is a request for the retain height. type GetBlockResultsRetainHeightRequest struct { } @@ -280,7 +287,7 @@ func (m *GetBlockResultsRetainHeightRequest) Reset() { *m = GetBlockResu func (m *GetBlockResultsRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*GetBlockResultsRetainHeightRequest) ProtoMessage() {} func (*GetBlockResultsRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{6} + return fileDescriptor_14bf9cf2a477c5d2, []int{6} } func (m *GetBlockResultsRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -309,6 +316,7 @@ func (m *GetBlockResultsRetainHeightRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetBlockResultsRetainHeightRequest proto.InternalMessageInfo +// GetBlockResultsRetainHeightResponse returns the retain height for block results. type GetBlockResultsRetainHeightResponse struct { // The retain height set by the pruning service (e.g. by the data // companion) specifically for block results. 
@@ -319,7 +327,7 @@ func (m *GetBlockResultsRetainHeightResponse) Reset() { *m = GetBlockRes func (m *GetBlockResultsRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*GetBlockResultsRetainHeightResponse) ProtoMessage() {} func (*GetBlockResultsRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{7} + return fileDescriptor_14bf9cf2a477c5d2, []int{7} } func (m *GetBlockResultsRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,6 +363,7 @@ func (m *GetBlockResultsRetainHeightResponse) GetPruningServiceRetainHeight() ui return 0 } +// SetTxIndexerRetainHeightRequest sets the retain height for the tx indexer. type SetTxIndexerRetainHeightRequest struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -363,7 +372,7 @@ func (m *SetTxIndexerRetainHeightRequest) Reset() { *m = SetTxIndexerRet func (m *SetTxIndexerRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*SetTxIndexerRetainHeightRequest) ProtoMessage() {} func (*SetTxIndexerRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{8} + return fileDescriptor_14bf9cf2a477c5d2, []int{8} } func (m *SetTxIndexerRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -399,6 +408,7 @@ func (m *SetTxIndexerRetainHeightRequest) GetHeight() uint64 { return 0 } +// SetTxIndexerRetainHeightResponse is empty. type SetTxIndexerRetainHeightResponse struct { } @@ -406,7 +416,7 @@ func (m *SetTxIndexerRetainHeightResponse) Reset() { *m = SetTxIndexerRe func (m *SetTxIndexerRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*SetTxIndexerRetainHeightResponse) ProtoMessage() {} func (*SetTxIndexerRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{9} + return fileDescriptor_14bf9cf2a477c5d2, []int{9} } func (m *SetTxIndexerRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -435,6 +445,7 @@ func (m *SetTxIndexerRetainHeightResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SetTxIndexerRetainHeightResponse proto.InternalMessageInfo +// GetTxIndexerRetainHeightRequest is a request for the retain height. type GetTxIndexerRetainHeightRequest struct { } @@ -442,7 +453,7 @@ func (m *GetTxIndexerRetainHeightRequest) Reset() { *m = GetTxIndexerRet func (m *GetTxIndexerRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*GetTxIndexerRetainHeightRequest) ProtoMessage() {} func (*GetTxIndexerRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{10} + return fileDescriptor_14bf9cf2a477c5d2, []int{10} } func (m *GetTxIndexerRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,6 +482,7 @@ func (m *GetTxIndexerRetainHeightRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetTxIndexerRetainHeightRequest proto.InternalMessageInfo +// GetTxIndexerRetainHeightResponse returns the retain height for the tx indexer. 
type GetTxIndexerRetainHeightResponse struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -479,7 +491,7 @@ func (m *GetTxIndexerRetainHeightResponse) Reset() { *m = GetTxIndexerRe func (m *GetTxIndexerRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*GetTxIndexerRetainHeightResponse) ProtoMessage() {} func (*GetTxIndexerRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{11} + return fileDescriptor_14bf9cf2a477c5d2, []int{11} } func (m *GetTxIndexerRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -515,6 +527,7 @@ func (m *GetTxIndexerRetainHeightResponse) GetHeight() uint64 { return 0 } +// SetBlockIndexerRetainHeightRequest sets the retain height for the block indexer. type SetBlockIndexerRetainHeightRequest struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -523,7 +536,7 @@ func (m *SetBlockIndexerRetainHeightRequest) Reset() { *m = SetBlockInde func (m *SetBlockIndexerRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*SetBlockIndexerRetainHeightRequest) ProtoMessage() {} func (*SetBlockIndexerRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{12} + return fileDescriptor_14bf9cf2a477c5d2, []int{12} } func (m *SetBlockIndexerRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -559,6 +572,7 @@ func (m *SetBlockIndexerRetainHeightRequest) GetHeight() uint64 { return 0 } +// SetBlockIndexerRetainHeightResponse is empty. type SetBlockIndexerRetainHeightResponse struct { } @@ -566,7 +580,7 @@ func (m *SetBlockIndexerRetainHeightResponse) Reset() { *m = SetBlockInd func (m *SetBlockIndexerRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*SetBlockIndexerRetainHeightResponse) ProtoMessage() {} func (*SetBlockIndexerRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{13} + return fileDescriptor_14bf9cf2a477c5d2, []int{13} } func (m *SetBlockIndexerRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -595,6 +609,7 @@ func (m *SetBlockIndexerRetainHeightResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SetBlockIndexerRetainHeightResponse proto.InternalMessageInfo +// GetBlockIndexerRetainHeightRequest is a request for the retain height. type GetBlockIndexerRetainHeightRequest struct { } @@ -602,7 +617,7 @@ func (m *GetBlockIndexerRetainHeightRequest) Reset() { *m = GetBlockInde func (m *GetBlockIndexerRetainHeightRequest) String() string { return proto.CompactTextString(m) } func (*GetBlockIndexerRetainHeightRequest) ProtoMessage() {} func (*GetBlockIndexerRetainHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{14} + return fileDescriptor_14bf9cf2a477c5d2, []int{14} } func (m *GetBlockIndexerRetainHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -631,6 +646,7 @@ func (m *GetBlockIndexerRetainHeightRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetBlockIndexerRetainHeightRequest proto.InternalMessageInfo +// GetBlockIndexerRetainHeightResponse returns the retain height for the block indexer. 
type GetBlockIndexerRetainHeightResponse struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -639,7 +655,7 @@ func (m *GetBlockIndexerRetainHeightResponse) Reset() { *m = GetBlockInd func (m *GetBlockIndexerRetainHeightResponse) String() string { return proto.CompactTextString(m) } func (*GetBlockIndexerRetainHeightResponse) ProtoMessage() {} func (*GetBlockIndexerRetainHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_93addfb1f9435773, []int{15} + return fileDescriptor_14bf9cf2a477c5d2, []int{15} } func (m *GetBlockIndexerRetainHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -676,51 +692,51 @@ func (m *GetBlockIndexerRetainHeightResponse) GetHeight() uint64 { } func init() { - proto.RegisterType((*SetBlockRetainHeightRequest)(nil), "tendermint.services.pruning.v1.SetBlockRetainHeightRequest") - proto.RegisterType((*SetBlockRetainHeightResponse)(nil), "tendermint.services.pruning.v1.SetBlockRetainHeightResponse") - proto.RegisterType((*GetBlockRetainHeightRequest)(nil), "tendermint.services.pruning.v1.GetBlockRetainHeightRequest") - proto.RegisterType((*GetBlockRetainHeightResponse)(nil), "tendermint.services.pruning.v1.GetBlockRetainHeightResponse") - proto.RegisterType((*SetBlockResultsRetainHeightRequest)(nil), "tendermint.services.pruning.v1.SetBlockResultsRetainHeightRequest") - proto.RegisterType((*SetBlockResultsRetainHeightResponse)(nil), "tendermint.services.pruning.v1.SetBlockResultsRetainHeightResponse") - proto.RegisterType((*GetBlockResultsRetainHeightRequest)(nil), "tendermint.services.pruning.v1.GetBlockResultsRetainHeightRequest") - proto.RegisterType((*GetBlockResultsRetainHeightResponse)(nil), "tendermint.services.pruning.v1.GetBlockResultsRetainHeightResponse") - proto.RegisterType((*SetTxIndexerRetainHeightRequest)(nil), "tendermint.services.pruning.v1.SetTxIndexerRetainHeightRequest") - proto.RegisterType((*SetTxIndexerRetainHeightResponse)(nil), "tendermint.services.pruning.v1.SetTxIndexerRetainHeightResponse") - proto.RegisterType((*GetTxIndexerRetainHeightRequest)(nil), "tendermint.services.pruning.v1.GetTxIndexerRetainHeightRequest") - proto.RegisterType((*GetTxIndexerRetainHeightResponse)(nil), "tendermint.services.pruning.v1.GetTxIndexerRetainHeightResponse") - proto.RegisterType((*SetBlockIndexerRetainHeightRequest)(nil), "tendermint.services.pruning.v1.SetBlockIndexerRetainHeightRequest") - proto.RegisterType((*SetBlockIndexerRetainHeightResponse)(nil), "tendermint.services.pruning.v1.SetBlockIndexerRetainHeightResponse") - proto.RegisterType((*GetBlockIndexerRetainHeightRequest)(nil), "tendermint.services.pruning.v1.GetBlockIndexerRetainHeightRequest") - proto.RegisterType((*GetBlockIndexerRetainHeightResponse)(nil), "tendermint.services.pruning.v1.GetBlockIndexerRetainHeightResponse") + proto.RegisterType((*SetBlockRetainHeightRequest)(nil), "cometbft.services.pruning.v1.SetBlockRetainHeightRequest") + proto.RegisterType((*SetBlockRetainHeightResponse)(nil), "cometbft.services.pruning.v1.SetBlockRetainHeightResponse") + proto.RegisterType((*GetBlockRetainHeightRequest)(nil), "cometbft.services.pruning.v1.GetBlockRetainHeightRequest") + proto.RegisterType((*GetBlockRetainHeightResponse)(nil), "cometbft.services.pruning.v1.GetBlockRetainHeightResponse") + proto.RegisterType((*SetBlockResultsRetainHeightRequest)(nil), "cometbft.services.pruning.v1.SetBlockResultsRetainHeightRequest") + proto.RegisterType((*SetBlockResultsRetainHeightResponse)(nil), 
"cometbft.services.pruning.v1.SetBlockResultsRetainHeightResponse") + proto.RegisterType((*GetBlockResultsRetainHeightRequest)(nil), "cometbft.services.pruning.v1.GetBlockResultsRetainHeightRequest") + proto.RegisterType((*GetBlockResultsRetainHeightResponse)(nil), "cometbft.services.pruning.v1.GetBlockResultsRetainHeightResponse") + proto.RegisterType((*SetTxIndexerRetainHeightRequest)(nil), "cometbft.services.pruning.v1.SetTxIndexerRetainHeightRequest") + proto.RegisterType((*SetTxIndexerRetainHeightResponse)(nil), "cometbft.services.pruning.v1.SetTxIndexerRetainHeightResponse") + proto.RegisterType((*GetTxIndexerRetainHeightRequest)(nil), "cometbft.services.pruning.v1.GetTxIndexerRetainHeightRequest") + proto.RegisterType((*GetTxIndexerRetainHeightResponse)(nil), "cometbft.services.pruning.v1.GetTxIndexerRetainHeightResponse") + proto.RegisterType((*SetBlockIndexerRetainHeightRequest)(nil), "cometbft.services.pruning.v1.SetBlockIndexerRetainHeightRequest") + proto.RegisterType((*SetBlockIndexerRetainHeightResponse)(nil), "cometbft.services.pruning.v1.SetBlockIndexerRetainHeightResponse") + proto.RegisterType((*GetBlockIndexerRetainHeightRequest)(nil), "cometbft.services.pruning.v1.GetBlockIndexerRetainHeightRequest") + proto.RegisterType((*GetBlockIndexerRetainHeightResponse)(nil), "cometbft.services.pruning.v1.GetBlockIndexerRetainHeightResponse") } func init() { - proto.RegisterFile("tendermint/services/pruning/v1/pruning.proto", fileDescriptor_93addfb1f9435773) -} - -var fileDescriptor_93addfb1f9435773 = []byte{ - // 329 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x4a, 0xc3, 0x40, - 0x14, 0x85, 0x3b, 0x22, 0x5d, 0xdc, 0x8d, 0x98, 0x85, 0x14, 0xdb, 0x4e, 0xeb, 0x54, 0x41, 0x44, - 0x5a, 0x8a, 0xb8, 0x50, 0x74, 0x61, 0x37, 0xa3, 0xdb, 0xd6, 0x7d, 0xa9, 0xed, 0xa5, 0x0d, 0xd6, - 0xc9, 0x38, 0x33, 0x09, 0x7d, 0x09, 0xc1, 0xc7, 0x72, 0xd9, 0xa5, 0x4b, 0x49, 0x5e, 0x44, 0x88, - 0x13, 0x12, 0xc5, 0x24, 0xb4, 0xbb, 0xfc, 0x9c, 0x73, 0xe6, 0x7e, 0xf7, 0x30, 0x70, 0x6e, 0x50, - 0xcc, 0x50, 0xbd, 0xb8, 0xc2, 0xf4, 0x34, 0xaa, 0xc0, 0x9d, 0xa2, 0xee, 0x49, 0xe5, 0x0b, 0x57, - 0xcc, 0x7b, 0x41, 0x3f, 0x79, 0xec, 0x4a, 0xe5, 0x19, 0xcf, 0xa1, 0xa9, 0xba, 0x9b, 0xa8, 0xbb, - 0x89, 0x24, 0xe8, 0xb3, 0x4b, 0xa8, 0x8f, 0xd0, 0x0c, 0x96, 0xde, 0xf4, 0x79, 0x88, 0x66, 0xe2, - 0x8a, 0x7b, 0x74, 0xe7, 0x0b, 0x33, 0xc4, 0x57, 0x1f, 0xb5, 0x71, 0x0e, 0xa0, 0xba, 0x88, 0x3f, - 0xd4, 0x48, 0x9b, 0x9c, 0xee, 0x0e, 0xed, 0x1b, 0xa3, 0xd0, 0xf8, 0xdf, 0xa6, 0xa5, 0x27, 0x34, - 0xb2, 0x26, 0xd4, 0x79, 0x7e, 0x2c, 0x7b, 0x23, 0xd0, 0xe0, 0x05, 0x7e, 0xe7, 0x0c, 0xf6, 0x27, - 0x52, 0x8e, 0x55, 0xfc, 0x6f, 0xfc, 0x6b, 0x84, 0xbd, 0x89, 0x94, 0x59, 0x8f, 0x73, 0x07, 0x4d, - 0x0b, 0x34, 0xb6, 0x84, 0x7f, 0x7c, 0x3b, 0xb1, 0xef, 0xd0, 0x8a, 0x46, 0x3f, 0x9a, 0x6c, 0x04, - 0xbb, 0x01, 0x96, 0xe2, 0x68, 0x7f, 0x69, 0xf4, 0x26, 0xcb, 0x38, 0x81, 0x4e, 0xa1, 0xdb, 0xee, - 0xe4, 0x18, 0x18, 0x2f, 0x3d, 0x84, 0x2d, 0xa0, 0xc3, 0xcb, 0xc3, 0xca, 0xa1, 0x49, 0x29, 0xf4, - 0x15, 0xb4, 0x46, 0x68, 0x1e, 0x57, 0x0f, 0x62, 0x86, 0x2b, 0x54, 0x9b, 0x10, 0x33, 0x68, 0xe7, - 0x5b, 0x2d, 0xee, 0x11, 0xb4, 0x78, 0x71, 0x3c, 0xbb, 0x86, 0x36, 0x2f, 0x89, 0xc9, 0x1d, 0x21, - 0x53, 0xd9, 0x16, 0x00, 0x99, 0xca, 0x8a, 0x18, 0x32, 0x95, 0x15, 0x60, 0xdc, 0xa6, 0x95, 0x6d, - 0x41, 0x32, 0xa8, 0x7d, 0x84, 0x94, 0xac, 0x43, 0x4a, 0xbe, 0x42, 0x4a, 0xde, 0x23, 0x5a, 0x59, - 0x47, 0xb4, 0xf2, 0x19, 0xd1, 0xca, 0x53, 0x35, 0xbe, 0xc3, 0x17, 0xdf, 0x01, 0x00, 0x00, 0xff, - 
0xff, 0x1c, 0x18, 0xad, 0x8c, 0xf3, 0x03, 0x00, 0x00, + proto.RegisterFile("cometbft/services/pruning/v1/pruning.proto", fileDescriptor_14bf9cf2a477c5d2) +} + +var fileDescriptor_14bf9cf2a477c5d2 = []byte{ + // 328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x2f, 0x28, + 0x2a, 0xcd, 0xcb, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x84, 0x31, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, + 0x85, 0x64, 0x60, 0x6a, 0xf5, 0x60, 0x6a, 0xf5, 0x60, 0x0a, 0xca, 0x0c, 0x95, 0x4c, 0xb9, 0xa4, + 0x83, 0x53, 0x4b, 0x9c, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x52, 0x4b, 0x12, 0x33, 0xf3, 0x3c, 0x52, + 0x33, 0xd3, 0x33, 0x4a, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0xc4, 0xb8, 0xd8, 0x32, + 0xc0, 0x02, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x2c, 0x41, 0x50, 0x9e, 0x92, 0x1c, 0x97, 0x0c, 0x76, + 0x6d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x4a, 0xb2, 0x5c, 0xd2, 0xee, 0xb8, 0x8d, 0x55, 0xea, + 0x65, 0xe4, 0x92, 0x71, 0xc7, 0xa3, 0x5f, 0x48, 0x8b, 0x4b, 0x30, 0xb1, 0xa0, 0x20, 0xbe, 0x08, + 0x2c, 0x17, 0x8f, 0xe2, 0x04, 0xfe, 0xc4, 0x82, 0x02, 0x64, 0x3d, 0x42, 0x8e, 0x5c, 0xb2, 0x50, + 0x0f, 0xc5, 0x43, 0x7d, 0x88, 0xa6, 0x8f, 0x09, 0xac, 0x4f, 0x0a, 0xaa, 0x28, 0x18, 0xa2, 0x06, + 0xd9, 0x08, 0x25, 0x1b, 0x2e, 0x25, 0x84, 0x77, 0x8a, 0x4b, 0x73, 0x4a, 0x8a, 0x49, 0x09, 0x0c, + 0x55, 0x2e, 0x65, 0xbc, 0xba, 0xa1, 0x61, 0xa2, 0xc2, 0xa5, 0xe4, 0x4e, 0xd0, 0x12, 0xa5, 0x0c, + 0x2e, 0x65, 0x77, 0xc2, 0x86, 0x11, 0xf6, 0x34, 0x23, 0x41, 0x4f, 0x5b, 0x72, 0xc9, 0x07, 0xa7, + 0x96, 0x84, 0x54, 0x78, 0xe6, 0xa5, 0xa4, 0x56, 0xa4, 0x16, 0x91, 0xe2, 0x63, 0x25, 0x2e, 0x05, + 0xdc, 0x5a, 0xa1, 0xde, 0x55, 0xe4, 0x92, 0x77, 0xc7, 0x6f, 0xbc, 0x92, 0x15, 0x97, 0x82, 0x3b, + 0x01, 0x63, 0x70, 0x3a, 0x01, 0x29, 0xca, 0xc8, 0xf0, 0x00, 0x52, 0x94, 0xe1, 0xf3, 0x03, 0x52, + 0x94, 0xe1, 0xf1, 0x86, 0x2d, 0x22, 0xca, 0xc8, 0xf0, 0x89, 0x93, 0xc4, 0x89, 0x47, 0x72, 0x8c, + 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, + 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0x73, 0xb0, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, + 0xfc, 0xe3, 0x82, 0x21, 0xef, 0x03, 0x00, 0x00, } func (m *SetBlockRetainHeightRequest) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/services/pruning/v1/service.pb.go b/api/cometbft/services/pruning/v1/service.pb.go similarity index 82% rename from proto/tendermint/services/pruning/v1/service.pb.go rename to api/cometbft/services/pruning/v1/service.pb.go index 4e570ba17a1..930c11f0b63 100644 --- a/proto/tendermint/services/pruning/v1/service.pb.go +++ b/api/cometbft/services/pruning/v1/service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/services/pruning/v1/service.proto +// source: cometbft/services/pruning/v1/service.proto -package tendermint_services_pruning_v1 +package cometbft_services_pruning_v1 import ( context "context" @@ -26,30 +26,30 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("tendermint/services/pruning/v1/service.proto", fileDescriptor_eae74ac291bdaa31) -} - -var fileDescriptor_eae74ac291bdaa31 = []byte{ - // 300 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0xd4, 0x31, 0x4e, 0xc3, 0x30, - 0x14, 0x06, 0xe0, 0x7a, 0x61, 0xf0, 0xc0, 0x10, 0x31, 0x54, 0x45, 0xf2, 0x09, 0x90, 0xa3, 0xc2, - 0x08, 0x12, 0x28, 0x1d, 0x1e, 0x6c, 0xa8, 0xe1, 0x02, 0xd0, 0x3e, 0x15, 0x8b, 0xe2, 0x04, 0xdb, - 0x89, 0x7a, 0x0c, 0xd8, 0x61, 0xe5, 0x2c, 0x8c, 0x1d, 0x19, 0x51, 0x72, 0x11, 0xa4, 0x06, 0xab, - 0xa9, 0x88, 0x92, 0xc8, 0xee, 0xfa, 0xf4, 0xbf, 0x3f, 0x5f, 0xa2, 0xa7, 0xd0, 0x13, 0x83, 0x72, - 0x8e, 0xea, 0x59, 0x48, 0x13, 0x6a, 0x54, 0xb9, 0x98, 0xa1, 0x0e, 0x53, 0x95, 0x49, 0x21, 0x17, - 0x61, 0x3e, 0xb6, 0x33, 0x9e, 0xaa, 0xc4, 0x24, 0x01, 0xdb, 0xa6, 0xb9, 0x4d, 0xf3, 0xbf, 0x34, - 0xcf, 0xc7, 0xa3, 0xae, 0x36, 0x1b, 0xdd, 0xb4, 0x9d, 0xbe, 0x53, 0x7a, 0x78, 0x5b, 0x4d, 0xe2, - 0x2a, 0x1c, 0xbc, 0x11, 0x7a, 0x14, 0xa3, 0x89, 0x96, 0xc9, 0xec, 0x69, 0x8a, 0xe6, 0x5e, 0xc8, - 0x6b, 0x14, 0x8b, 0x47, 0x13, 0x9c, 0xf3, 0xf6, 0x47, 0xf3, 0xa6, 0xad, 0x29, 0xbe, 0x64, 0xa8, - 0xcd, 0xe8, 0xc2, 0x6d, 0x59, 0xa7, 0x89, 0xd4, 0x95, 0x09, 0x9c, 0x4c, 0xe0, 0x63, 0x82, 0x36, - 0xd3, 0x27, 0xa1, 0xc7, 0x5b, 0xb4, 0xce, 0x96, 0x46, 0xef, 0xd0, 0xa2, 0xfe, 0x6f, 0xfc, 0x6f, - 0xd9, 0x0a, 0x27, 0x5e, 0x1d, 0x35, 0x28, 0xf8, 0x40, 0x61, 0x0f, 0x50, 0xe8, 0x01, 0xfd, 0x20, - 0x74, 0x18, 0xa3, 0xb9, 0x5b, 0xdd, 0xc8, 0x39, 0xae, 0x50, 0xed, 0x28, 0x2f, 0x7b, 0x7c, 0x8a, - 0xc6, 0x4d, 0x4b, 0xbc, 0x72, 0x2f, 0xa8, 0xf9, 0xc0, 0xd9, 0x07, 0xbe, 0x3e, 0xe8, 0xf2, 0xd5, - 0x2f, 0xb2, 0x89, 0xd8, 0xfb, 0x22, 0x5b, 0x94, 0x13, 0xaf, 0x8e, 0x86, 0x8b, 0x74, 0x82, 0xc2, - 0x1e, 0xa0, 0xd0, 0x0d, 0x8d, 0x86, 0x5f, 0x05, 0x23, 0xeb, 0x82, 0x91, 0x9f, 0x82, 0x91, 0xd7, - 0x92, 0x0d, 0xd6, 0x25, 0x1b, 0x7c, 0x97, 0x6c, 0xf0, 0x70, 0xb0, 0xf9, 0x7f, 0x9e, 0xfd, 0x06, - 0x00, 0x00, 0xff, 0xff, 0xc3, 0x60, 0x40, 0x4e, 0xbd, 0x05, 0x00, 0x00, + proto.RegisterFile("cometbft/services/pruning/v1/service.proto", fileDescriptor_58672b711a903587) +} + +var fileDescriptor_58672b711a903587 = []byte{ + // 298 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x2f, 0x28, + 0x2a, 0xcd, 0xcb, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x84, 0x89, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, + 0x0b, 0xc9, 0xc0, 0xd4, 0xea, 0xc1, 0xd4, 0xea, 0x41, 0xd5, 0xea, 0x95, 0x19, 0x4a, 0xe1, 0x37, + 0x09, 0xa6, 0x10, 0x6c, 0x92, 0xd1, 0x1f, 0x4e, 0x2e, 0xbe, 0x00, 0x88, 0x48, 0x30, 0x44, 0xb1, + 0x50, 0x2f, 0x23, 0x97, 0x48, 0x70, 0x6a, 0x89, 0x53, 0x4e, 0x7e, 0x72, 0x76, 0x50, 0x6a, 0x49, + 0x62, 0x66, 0x9e, 0x47, 0x6a, 0x66, 0x7a, 0x46, 0x89, 0x90, 0xa5, 0x1e, 0x3e, 0x6b, 0xf5, 0xb0, + 0xe9, 0x09, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0xb2, 0x22, 0x47, 0x6b, 0x71, 0x41, 0x7e, + 0x5e, 0x31, 0xc4, 0x3d, 0xee, 0x64, 0xb8, 0xc7, 0x9d, 0x7c, 0xf7, 0xb8, 0xe3, 0x73, 0xcf, 0x22, + 0x46, 0x2e, 0x69, 0x84, 0x83, 0x8b, 0x4b, 0x73, 0x4a, 0x8a, 0x51, 0x9c, 0xe5, 0x40, 
0xac, 0x5f, + 0x31, 0xb4, 0xc2, 0x5c, 0xe7, 0x48, 0x81, 0x09, 0x48, 0x8e, 0x74, 0x27, 0xdf, 0x91, 0xee, 0x14, + 0x3b, 0xd2, 0x9d, 0x08, 0x47, 0xce, 0x64, 0xe4, 0x92, 0x08, 0x4e, 0x2d, 0x09, 0xa9, 0xf0, 0xcc, + 0x4b, 0x49, 0xad, 0x48, 0x2d, 0x42, 0x71, 0xa1, 0x2d, 0xc1, 0x40, 0xc0, 0xaa, 0x0f, 0xe6, 0x3c, + 0x3b, 0x72, 0xb5, 0x23, 0xb9, 0xcd, 0x9d, 0x4c, 0xb7, 0xb9, 0x53, 0xe6, 0x36, 0x77, 0x42, 0x6e, + 0x43, 0x4e, 0x81, 0xd8, 0x9c, 0x47, 0x64, 0x0a, 0xc4, 0xe3, 0x42, 0x47, 0x0a, 0x4c, 0xc0, 0x92, + 0x02, 0xc9, 0x70, 0xa4, 0x3b, 0xc5, 0x8e, 0x74, 0x27, 0xec, 0x48, 0x27, 0x89, 0x13, 0x8f, 0xe4, + 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, + 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0x97, 0x8f, 0xc6, 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x7b, 0x8e, 0x2b, 0x5d, 0x97, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -107,7 +107,7 @@ func NewPruningServiceClient(cc grpc1.ClientConn) PruningServiceClient { func (c *pruningServiceClient) SetBlockRetainHeight(ctx context.Context, in *SetBlockRetainHeightRequest, opts ...grpc.CallOption) (*SetBlockRetainHeightResponse, error) { out := new(SetBlockRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/SetBlockRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/SetBlockRetainHeight", in, out, opts...) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (c *pruningServiceClient) SetBlockRetainHeight(ctx context.Context, in *Set func (c *pruningServiceClient) GetBlockRetainHeight(ctx context.Context, in *GetBlockRetainHeightRequest, opts ...grpc.CallOption) (*GetBlockRetainHeightResponse, error) { out := new(GetBlockRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/GetBlockRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/GetBlockRetainHeight", in, out, opts...) if err != nil { return nil, err } @@ -125,7 +125,7 @@ func (c *pruningServiceClient) GetBlockRetainHeight(ctx context.Context, in *Get func (c *pruningServiceClient) SetBlockResultsRetainHeight(ctx context.Context, in *SetBlockResultsRetainHeightRequest, opts ...grpc.CallOption) (*SetBlockResultsRetainHeightResponse, error) { out := new(SetBlockResultsRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/SetBlockResultsRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/SetBlockResultsRetainHeight", in, out, opts...) if err != nil { return nil, err } @@ -134,7 +134,7 @@ func (c *pruningServiceClient) SetBlockResultsRetainHeight(ctx context.Context, func (c *pruningServiceClient) GetBlockResultsRetainHeight(ctx context.Context, in *GetBlockResultsRetainHeightRequest, opts ...grpc.CallOption) (*GetBlockResultsRetainHeightResponse, error) { out := new(GetBlockResultsRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/GetBlockResultsRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/GetBlockResultsRetainHeight", in, out, opts...) 
if err != nil { return nil, err } @@ -143,7 +143,7 @@ func (c *pruningServiceClient) GetBlockResultsRetainHeight(ctx context.Context, func (c *pruningServiceClient) SetTxIndexerRetainHeight(ctx context.Context, in *SetTxIndexerRetainHeightRequest, opts ...grpc.CallOption) (*SetTxIndexerRetainHeightResponse, error) { out := new(SetTxIndexerRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/SetTxIndexerRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/SetTxIndexerRetainHeight", in, out, opts...) if err != nil { return nil, err } @@ -152,7 +152,7 @@ func (c *pruningServiceClient) SetTxIndexerRetainHeight(ctx context.Context, in func (c *pruningServiceClient) GetTxIndexerRetainHeight(ctx context.Context, in *GetTxIndexerRetainHeightRequest, opts ...grpc.CallOption) (*GetTxIndexerRetainHeightResponse, error) { out := new(GetTxIndexerRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/GetTxIndexerRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/GetTxIndexerRetainHeight", in, out, opts...) if err != nil { return nil, err } @@ -161,7 +161,7 @@ func (c *pruningServiceClient) GetTxIndexerRetainHeight(ctx context.Context, in func (c *pruningServiceClient) SetBlockIndexerRetainHeight(ctx context.Context, in *SetBlockIndexerRetainHeightRequest, opts ...grpc.CallOption) (*SetBlockIndexerRetainHeightResponse, error) { out := new(SetBlockIndexerRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/SetBlockIndexerRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/SetBlockIndexerRetainHeight", in, out, opts...) if err != nil { return nil, err } @@ -170,7 +170,7 @@ func (c *pruningServiceClient) SetBlockIndexerRetainHeight(ctx context.Context, func (c *pruningServiceClient) GetBlockIndexerRetainHeight(ctx context.Context, in *GetBlockIndexerRetainHeightRequest, opts ...grpc.CallOption) (*GetBlockIndexerRetainHeightResponse, error) { out := new(GetBlockIndexerRetainHeightResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.pruning.v1.PruningService/GetBlockIndexerRetainHeight", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.pruning.v1.PruningService/GetBlockIndexerRetainHeight", in, out, opts...) 
if err != nil { return nil, err } @@ -255,7 +255,7 @@ func _PruningService_SetBlockRetainHeight_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/SetBlockRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/SetBlockRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).SetBlockRetainHeight(ctx, req.(*SetBlockRetainHeightRequest)) @@ -273,7 +273,7 @@ func _PruningService_GetBlockRetainHeight_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/GetBlockRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/GetBlockRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).GetBlockRetainHeight(ctx, req.(*GetBlockRetainHeightRequest)) @@ -291,7 +291,7 @@ func _PruningService_SetBlockResultsRetainHeight_Handler(srv interface{}, ctx co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/SetBlockResultsRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/SetBlockResultsRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).SetBlockResultsRetainHeight(ctx, req.(*SetBlockResultsRetainHeightRequest)) @@ -309,7 +309,7 @@ func _PruningService_GetBlockResultsRetainHeight_Handler(srv interface{}, ctx co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/GetBlockResultsRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/GetBlockResultsRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).GetBlockResultsRetainHeight(ctx, req.(*GetBlockResultsRetainHeightRequest)) @@ -327,7 +327,7 @@ func _PruningService_SetTxIndexerRetainHeight_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/SetTxIndexerRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/SetTxIndexerRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).SetTxIndexerRetainHeight(ctx, req.(*SetTxIndexerRetainHeightRequest)) @@ -345,7 +345,7 @@ func _PruningService_GetTxIndexerRetainHeight_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/GetTxIndexerRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/GetTxIndexerRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).GetTxIndexerRetainHeight(ctx, req.(*GetTxIndexerRetainHeightRequest)) @@ -363,7 +363,7 @@ func _PruningService_SetBlockIndexerRetainHeight_Handler(srv interface{}, ctx co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/SetBlockIndexerRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/SetBlockIndexerRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).SetBlockIndexerRetainHeight(ctx, 
req.(*SetBlockIndexerRetainHeightRequest)) @@ -381,7 +381,7 @@ func _PruningService_GetBlockIndexerRetainHeight_Handler(srv interface{}, ctx co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.pruning.v1.PruningService/GetBlockIndexerRetainHeight", + FullMethod: "/cometbft.services.pruning.v1.PruningService/GetBlockIndexerRetainHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PruningServiceServer).GetBlockIndexerRetainHeight(ctx, req.(*GetBlockIndexerRetainHeightRequest)) @@ -389,8 +389,9 @@ func _PruningService_GetBlockIndexerRetainHeight_Handler(srv interface{}, ctx co return interceptor(ctx, in, info, handler) } +var PruningService_serviceDesc = _PruningService_serviceDesc var _PruningService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.services.pruning.v1.PruningService", + ServiceName: "cometbft.services.pruning.v1.PruningService", HandlerType: (*PruningServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -427,5 +428,5 @@ var _PruningService_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/services/pruning/v1/service.proto", + Metadata: "cometbft/services/pruning/v1/service.proto", } diff --git a/proto/tendermint/services/version/v1/version.pb.go b/api/cometbft/services/version/v1/version.pb.go similarity index 85% rename from proto/tendermint/services/version/v1/version.pb.go rename to api/cometbft/services/version/v1/version.pb.go index 52d41705798..db93157155a 100644 --- a/proto/tendermint/services/version/v1/version.pb.go +++ b/api/cometbft/services/version/v1/version.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/services/version/v1/version.proto +// source: cometbft/services/version/v1/version.proto package v1 @@ -22,6 +22,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// GetVersionRequest is the request for the ABCI version. type GetVersionRequest struct { } @@ -29,7 +30,7 @@ func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } func (*GetVersionRequest) ProtoMessage() {} func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cb714fcdb4f96eec, []int{0} + return fileDescriptor_0d34df0ab45614ed, []int{0} } func (m *GetVersionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -58,6 +59,7 @@ func (m *GetVersionRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetVersionRequest proto.InternalMessageInfo +// GetVersionResponse contains the ABCI application version info. 
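The PruningService whose generated code ends above lets a data companion steer the node's pruning. A minimal sketch of setting and reading back the block retain height, under the same hypothetical endpoint assumption as the previous sketch (the node is expected to prune no further than the lower of the application's and the companion's retain heights):

package main

import (
	"context"
	"fmt"
	"log"

	pruningv1 "github.com/cometbft/cometbft/api/cometbft/services/pruning/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:26670", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := pruningv1.NewPruningServiceClient(conn)
	ctx := context.Background()

	// Allow the node to prune blocks below height 5000 on the companion's behalf.
	if _, err := client.SetBlockRetainHeight(ctx, &pruningv1.SetBlockRetainHeightRequest{Height: 5000}); err != nil {
		log.Fatalf("SetBlockRetainHeight: %v", err)
	}

	// Read back both retain heights tracked by the node; the field names match the
	// AppRetainHeight and PruningServiceRetainHeight fields in the message above.
	resp, err := client.GetBlockRetainHeight(ctx, &pruningv1.GetBlockRetainHeightRequest{})
	if err != nil {
		log.Fatalf("GetBlockRetainHeight: %v", err)
	}
	fmt.Printf("app retain height=%d, companion retain height=%d\n", resp.AppRetainHeight, resp.PruningServiceRetainHeight)
}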
type GetVersionResponse struct { Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` Abci string `protobuf:"bytes,2,opt,name=abci,proto3" json:"abci,omitempty"` @@ -69,7 +71,7 @@ func (m *GetVersionResponse) Reset() { *m = GetVersionResponse{} } func (m *GetVersionResponse) String() string { return proto.CompactTextString(m) } func (*GetVersionResponse) ProtoMessage() {} func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb714fcdb4f96eec, []int{1} + return fileDescriptor_0d34df0ab45614ed, []int{1} } func (m *GetVersionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -127,30 +129,30 @@ func (m *GetVersionResponse) GetBlock() uint64 { } func init() { - proto.RegisterType((*GetVersionRequest)(nil), "tendermint.services.version.v1.GetVersionRequest") - proto.RegisterType((*GetVersionResponse)(nil), "tendermint.services.version.v1.GetVersionResponse") + proto.RegisterType((*GetVersionRequest)(nil), "cometbft.services.version.v1.GetVersionRequest") + proto.RegisterType((*GetVersionResponse)(nil), "cometbft.services.version.v1.GetVersionResponse") } func init() { - proto.RegisterFile("tendermint/services/version/v1/version.proto", fileDescriptor_cb714fcdb4f96eec) + proto.RegisterFile("cometbft/services/version/v1/version.proto", fileDescriptor_0d34df0ab45614ed) } -var fileDescriptor_cb714fcdb4f96eec = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0x86, 0x63, 0x1a, 0x90, 0xf0, 0x04, 0x86, 0x21, 0x93, 0x55, 0x75, 0xea, 0x80, 0x62, 0x15, - 0x9e, 0x00, 0x16, 0xf6, 0x0c, 0x0c, 0x30, 0x61, 0xe7, 0x00, 0x0b, 0xe2, 0x33, 0xf6, 0xd5, 0xcf, - 0xc1, 0x63, 0x31, 0x76, 0x64, 0x44, 0xc9, 0x8b, 0xa0, 0x38, 0x8a, 0xca, 0xd4, 0xed, 0xbb, 0x4f, - 0xdf, 0x72, 0x3f, 0xbf, 0x22, 0x70, 0x2d, 0x84, 0xce, 0x3a, 0x52, 0x11, 0x42, 0xb2, 0x06, 0xa2, - 0x4a, 0x10, 0xa2, 0x45, 0xa7, 0xd2, 0x66, 0xc6, 0xda, 0x07, 0x24, 0x14, 0x72, 0x5f, 0xd7, 0x73, - 0x5d, 0xcf, 0x49, 0xda, 0xac, 0x2e, 0xf8, 0xf9, 0x3d, 0xd0, 0xc3, 0x24, 0x1a, 0xf8, 0xdc, 0x42, - 0xa4, 0x55, 0xcb, 0xc5, 0x7f, 0x19, 0x3d, 0xba, 0x08, 0x42, 0xf0, 0xd2, 0x61, 0x0b, 0x15, 0x5b, - 0xb2, 0xf5, 0x69, 0x93, 0x79, 0x74, 0xcf, 0xda, 0xd8, 0xea, 0x68, 0x72, 0x23, 0x8b, 0x33, 0xbe, - 0xf0, 0xd7, 0xbe, 0x5a, 0x2c, 0xd9, 0xba, 0x6c, 0x46, 0x14, 0x97, 0xfc, 0x58, 0x7f, 0xa0, 0x79, - 0xaf, 0xca, 0xec, 0xa6, 0xe3, 0xee, 0xe9, 0xbb, 0x97, 0x6c, 0xd7, 0x4b, 0xf6, 0xdb, 0x4b, 0xf6, - 0x35, 0xc8, 0x62, 0x37, 0xc8, 0xe2, 0x67, 0x90, 0xc5, 0xe3, 0xed, 0xab, 0xa5, 0xb7, 0xad, 0xae, - 0x0d, 0x76, 0xca, 0x60, 0x07, 0xa4, 0x5f, 0x68, 0x0f, 0xf9, 0x31, 0x75, 0x78, 0x05, 0x7d, 0x92, - 0xab, 0x9b, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0xb3, 0xa1, 0xef, 0x2e, 0x01, 0x00, 0x00, +var fileDescriptor_0d34df0ab45614ed = []byte{ + // 218 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x2f, 0x4b, + 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x84, 0x31, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, + 0x85, 0x64, 0x60, 0x6a, 0xf5, 0x60, 0x6a, 0xf5, 0x60, 0x0a, 0xca, 0x0c, 0x95, 0x84, 0xb9, 0x04, + 0xdd, 0x53, 0x4b, 0xc2, 0x20, 0x02, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0x29, 0x5c, + 0x42, 0xc8, 0x82, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0xf9, 0x29, + 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 
0x60, 0x36, 0x48, 0x2c, 0x31, 0x29, 0x39, 0x53, + 0x82, 0x09, 0x22, 0x06, 0x62, 0x0b, 0x09, 0x70, 0x31, 0x17, 0x18, 0x15, 0x48, 0x30, 0x2b, 0x30, + 0x6a, 0xb0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x49, 0x39, 0xf9, 0xc9, 0xd9, 0x12, 0x2c, + 0x60, 0x31, 0x08, 0xc7, 0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, + 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, + 0x6c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xe1, 0x3e, 0x85, 0x33, + 0x12, 0x0b, 0x32, 0xf5, 0xf1, 0xf9, 0x3f, 0x89, 0x0d, 0xec, 0x71, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xb1, 0x8f, 0xf3, 0x3e, 0x26, 0x01, 0x00, 0x00, } func (m *GetVersionRequest) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/services/version/v1/version_service.pb.go b/api/cometbft/services/version/v1/version_service.pb.go similarity index 68% rename from proto/tendermint/services/version/v1/version_service.pb.go rename to api/cometbft/services/version/v1/version_service.pb.go index 06a1d138be0..63b2580239d 100644 --- a/proto/tendermint/services/version/v1/version_service.pb.go +++ b/api/cometbft/services/version/v1/version_service.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/services/version/v1/version_service.proto +// source: cometbft/services/version/v1/version_service.proto package v1 @@ -26,24 +26,23 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("tendermint/services/version/v1/version_service.proto", fileDescriptor_cf84e5a7678f5dd8) + proto.RegisterFile("cometbft/services/version/v1/version_service.proto", fileDescriptor_054267f78f0fa7a9) } -var fileDescriptor_cf84e5a7678f5dd8 = []byte{ - // 193 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x29, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, - 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x84, 0x31, 0xe3, 0xa1, 0x72, 0x7a, - 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x72, 0x08, 0x5d, 0x7a, 0x30, 0x5d, 0x7a, 0x50, 0xa5, 0x7a, - 0x65, 0x86, 0x52, 0x3a, 0xc4, 0x99, 0x0a, 0x31, 0xcd, 0xa8, 0x95, 0x91, 0x8b, 0x2f, 0x0c, 0x22, - 0x12, 0x0c, 0x51, 0x2c, 0x54, 0xcc, 0xc5, 0xe5, 0x9e, 0x5a, 0x02, 0x15, 0x14, 0x32, 0xd4, 0xc3, - 0x6f, 0x9f, 0x1e, 0x42, 0x6d, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x11, 0x29, 0x5a, - 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x9d, 0xa2, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, - 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, - 0x8e, 0x21, 0xca, 0x31, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x3f, 0x39, - 0x3f, 0x37, 0xb5, 0x24, 0x29, 0xad, 0x04, 0xc1, 0x00, 0xfb, 0x42, 0x1f, 0xbf, 0x97, 0x93, 0xd8, - 0xc0, 0xaa, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x17, 0x3d, 0xcf, 0x55, 0x71, 0x01, 0x00, - 0x00, +var fileDescriptor_054267f78f0fa7a9 = []byte{ + // 184 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4a, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0x2f, 0x4b, + 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x84, 0x31, 0xe3, 0xa1, 0x72, 0x7a, 0x05, 0x45, + 0xf9, 0x25, 0xf9, 0x42, 0x32, 0x30, 0x3d, 0x7a, 0x30, 0x3d, 0x7a, 0x50, 0x85, 0x7a, 0x65, 0x86, + 0x52, 0x5a, 
0xc4, 0x98, 0x08, 0x31, 0xc9, 0xa8, 0x91, 0x91, 0x8b, 0x2f, 0x0c, 0x22, 0x12, 0x0c, + 0x51, 0x2c, 0x94, 0xcf, 0xc5, 0xe5, 0x9e, 0x5a, 0x02, 0x15, 0x14, 0xd2, 0xd7, 0xc3, 0x67, 0x97, + 0x1e, 0x42, 0x65, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x01, 0xf1, 0x1a, 0x8a, 0x0b, + 0xf2, 0xf3, 0x8a, 0x53, 0x9d, 0xc2, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, + 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, + 0xca, 0x36, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x09, 0x64, 0xa2, 0x3e, 0xdc, 0x53, 0x70, 0x46, 0x62, + 0x41, 0xa6, 0x3e, 0x3e, 0xaf, 0x26, 0xb1, 0x81, 0xfd, 0x68, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, + 0xbb, 0x08, 0xe5, 0x2b, 0x63, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -73,7 +72,7 @@ func NewVersionServiceClient(cc grpc1.ClientConn) VersionServiceClient { func (c *versionServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { out := new(GetVersionResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.version.v1.VersionService/GetVersion", in, out, opts...) + err := c.cc.Invoke(ctx, "/cometbft.services.version.v1.VersionService/GetVersion", in, out, opts...) if err != nil { return nil, err } @@ -109,7 +108,7 @@ func _VersionService_GetVersion_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.services.version.v1.VersionService/GetVersion", + FullMethod: "/cometbft.services.version.v1.VersionService/GetVersion", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VersionServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) @@ -117,8 +116,9 @@ func _VersionService_GetVersion_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +var VersionService_serviceDesc = _VersionService_serviceDesc var _VersionService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.services.version.v1.VersionService", + ServiceName: "cometbft.services.version.v1.VersionService", HandlerType: (*VersionServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -127,5 +127,5 @@ var _VersionService_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/services/version/v1/version_service.proto", + Metadata: "cometbft/services/version/v1/version_service.proto", } diff --git a/proto/tendermint/state/types.pb.go b/api/cometbft/state/v1/types.pb.go similarity index 78% rename from proto/tendermint/state/types.pb.go rename to api/cometbft/state/v1/types.pb.go index 4426543ed9b..c6cec7108e8 100644 --- a/proto/tendermint/state/types.pb.go +++ b/api/cometbft/state/v1/types.pb.go @@ -1,17 +1,18 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/state/types.proto +// source: cometbft/state/v1/types.proto -package state +package v1 import ( fmt "fmt" - types "github.com/cometbft/cometbft/abci/types" - types1 "github.com/cometbft/cometbft/proto/tendermint/types" - version "github.com/cometbft/cometbft/proto/tendermint/version" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" + v11 "github.com/cometbft/cometbft/api/cometbft/types/v1" + v12 "github.com/cometbft/cometbft/api/cometbft/version/v1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" _ "github.com/cosmos/gogoproto/types" github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "github.com/golang/protobuf/ptypes/duration" io "io" math "math" math_bits "math/bits" @@ -35,16 +36,16 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Note ResponseDeliverTx is renamed to ExecTxResult but they are semantically the same // Kept for backwards compatibility for versions prior to v0.38 type LegacyABCIResponses struct { - DeliverTxs []*types.ExecTxResult `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` - EndBlock *ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` - BeginBlock *ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` + DeliverTxs []*v1.ExecTxResult `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + EndBlock *ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` + BeginBlock *ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` } func (m *LegacyABCIResponses) Reset() { *m = LegacyABCIResponses{} } func (m *LegacyABCIResponses) String() string { return proto.CompactTextString(m) } func (*LegacyABCIResponses) ProtoMessage() {} func (*LegacyABCIResponses) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{0} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{0} } func (m *LegacyABCIResponses) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,7 +74,7 @@ func (m *LegacyABCIResponses) XXX_DiscardUnknown() { var xxx_messageInfo_LegacyABCIResponses proto.InternalMessageInfo -func (m *LegacyABCIResponses) GetDeliverTxs() []*types.ExecTxResult { +func (m *LegacyABCIResponses) GetDeliverTxs() []*v1.ExecTxResult { if m != nil { return m.DeliverTxs } @@ -94,16 +95,17 @@ func (m *LegacyABCIResponses) GetBeginBlock() *ResponseBeginBlock { return nil } -// ResponseBeginBlock is kept for backwards compatibility for versions prior to v0.38 +// ResponseBeginBlock is kept for backward compatibility for versions prior to v0.38, +// as it was then defined in the cometbft.abci packages.
type ResponseBeginBlock struct { - Events []types.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + Events []v1.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{1} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{1} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,25 +134,28 @@ func (m *ResponseBeginBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo -func (m *ResponseBeginBlock) GetEvents() []types.Event { +func (m *ResponseBeginBlock) GetEvents() []v1.Event { if m != nil { return m.Events } return nil } -// ResponseEndBlock is kept for backwards compatibility for versions prior to v0.38 +// ResponseEndBlock is kept for backward compatibility for versions prior to v0.38, +// as its earlier revisions were defined in the cometbft.abci packages. +// It uses an updated definition for the consensus_param_updates field to keep the +// generated data types interoperable with the latest protocol. type ResponseEndBlock struct { - ValidatorUpdates []types.ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []types.Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` + ValidatorUpdates []v1.ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *v11.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []v1.Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` } func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{2} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{2} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -179,21 +184,21 @@ func (m *ResponseEndBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo -func (m *ResponseEndBlock) GetValidatorUpdates() []types.ValidatorUpdate { +func (m *ResponseEndBlock) GetValidatorUpdates() []v1.ValidatorUpdate { if m != nil { return m.ValidatorUpdates } return nil } -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { +func (m *ResponseEndBlock) GetConsensusParamUpdates() *v11.ConsensusParams { if m != nil { return m.ConsensusParamUpdates } return nil } -func (m *ResponseEndBlock) GetEvents() []types.Event { +func (m *ResponseEndBlock) GetEvents() []v1.Event { if m != nil { return m.Events } @@ -202,15 +207,15 @@ func (m *ResponseEndBlock) GetEvents() []types.Event { // ValidatorsInfo represents the latest validator set, or the last height it changed type ValidatorsInfo struct { - ValidatorSet *types1.ValidatorSet
`protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` - LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` + ValidatorSet *v11.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` } func (m *ValidatorsInfo) Reset() { *m = ValidatorsInfo{} } func (m *ValidatorsInfo) String() string { return proto.CompactTextString(m) } func (*ValidatorsInfo) ProtoMessage() {} func (*ValidatorsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{3} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{3} } func (m *ValidatorsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -239,7 +244,7 @@ func (m *ValidatorsInfo) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatorsInfo proto.InternalMessageInfo -func (m *ValidatorsInfo) GetValidatorSet() *types1.ValidatorSet { +func (m *ValidatorsInfo) GetValidatorSet() *v11.ValidatorSet { if m != nil { return m.ValidatorSet } @@ -255,15 +260,15 @@ func (m *ValidatorsInfo) GetLastHeightChanged() int64 { // ConsensusParamsInfo represents the latest consensus params, or the last height it changed type ConsensusParamsInfo struct { - ConsensusParams types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` - LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` + ConsensusParams v11.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` } func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } func (*ConsensusParamsInfo) ProtoMessage() {} func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{4} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{4} } func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -292,11 +297,11 @@ func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo -func (m *ConsensusParamsInfo) GetConsensusParams() types1.ConsensusParams { +func (m *ConsensusParamsInfo) GetConsensusParams() v11.ConsensusParams { if m != nil { return m.ConsensusParams } - return types1.ConsensusParams{} + return v11.ConsensusParams{} } func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { @@ -306,17 +311,19 @@ func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { return 0 } +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. 
type ABCIResponsesInfo struct { - LegacyAbciResponses *LegacyABCIResponses `protobuf:"bytes,1,opt,name=legacy_abci_responses,json=legacyAbciResponses,proto3" json:"legacy_abci_responses,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - ResponseFinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,3,opt,name=response_finalize_block,json=responseFinalizeBlock,proto3" json:"response_finalize_block,omitempty"` + // Retains the responses of the legacy ABCI calls during block processing. + LegacyAbciResponses *LegacyABCIResponses `protobuf:"bytes,1,opt,name=legacy_abci_responses,json=legacyAbciResponses,proto3" json:"legacy_abci_responses,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + FinalizeBlock *v1.FinalizeBlockResponse `protobuf:"bytes,3,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"` } func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} } func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) } func (*ABCIResponsesInfo) ProtoMessage() {} func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{5} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{5} } func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,23 +366,24 @@ func (m *ABCIResponsesInfo) GetHeight() int64 { return 0 } -func (m *ABCIResponsesInfo) GetResponseFinalizeBlock() *types.ResponseFinalizeBlock { +func (m *ABCIResponsesInfo) GetFinalizeBlock() *v1.FinalizeBlockResponse { if m != nil { - return m.ResponseFinalizeBlock + return m.FinalizeBlock } return nil } +// Version is a message for storing versioning information. type Version struct { - Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"` - Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` + Consensus v12.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"` + Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` } func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{6} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{6} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -404,11 +412,11 @@ func (m *Version) XXX_DiscardUnknown() { var xxx_messageInfo_Version proto.InternalMessageInfo -func (m *Version) GetConsensus() version.Consensus { +func (m *Version) GetConsensus() v12.Consensus { if m != nil { return m.Consensus } - return version.Consensus{} + return v12.Consensus{} } func (m *Version) GetSoftware() string { @@ -418,40 +426,44 @@ func (m *Version) GetSoftware() string { return "" } +// State represents the state of the blockchain. type State struct { Version Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` // immutable ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` // LastBlockHeight=0 at genesis (ie. 
block(H=0) does not exist) - LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockID types1.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` - LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID v11.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` // LastValidators is used to validate block.LastCommit. // Validators are persisted to the database separately every time they change, // so we can query for historical validator sets. // Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 // Extra +1 due to nextValSet delay. - NextValidators *types1.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` - Validators *types1.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` - LastValidators *types1.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` - LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + NextValidators *v11.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *v11.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *v11.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` // Consensus parameters used for validating blocks. // Changes returned by EndBlock and updated after Commit. - ConsensusParams types1.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` - LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + ConsensusParams v11.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` // Merkle root of the results from executing prev block LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` // the latest AppHash we've received from calling abci.Commit() AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + // delay between the time when this block is committed and the next height is started. 
+ // previously `timeout_commit` in config.toml + NextBlockDelay time.Duration `protobuf:"bytes,15,opt,name=next_block_delay,json=nextBlockDelay,proto3,stdduration" json:"next_block_delay"` } func (m *State) Reset() { *m = State{} } func (m *State) String() string { return proto.CompactTextString(m) } func (*State) ProtoMessage() {} func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{7} + return fileDescriptor_eb6b2e03ecdbc0c2, []int{7} } func (m *State) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -508,11 +520,11 @@ func (m *State) GetLastBlockHeight() int64 { return 0 } -func (m *State) GetLastBlockID() types1.BlockID { +func (m *State) GetLastBlockID() v11.BlockID { if m != nil { return m.LastBlockID } - return types1.BlockID{} + return v11.BlockID{} } func (m *State) GetLastBlockTime() time.Time { @@ -522,21 +534,21 @@ func (m *State) GetLastBlockTime() time.Time { return time.Time{} } -func (m *State) GetNextValidators() *types1.ValidatorSet { +func (m *State) GetNextValidators() *v11.ValidatorSet { if m != nil { return m.NextValidators } return nil } -func (m *State) GetValidators() *types1.ValidatorSet { +func (m *State) GetValidators() *v11.ValidatorSet { if m != nil { return m.Validators } return nil } -func (m *State) GetLastValidators() *types1.ValidatorSet { +func (m *State) GetLastValidators() *v11.ValidatorSet { if m != nil { return m.LastValidators } @@ -550,11 +562,11 @@ func (m *State) GetLastHeightValidatorsChanged() int64 { return 0 } -func (m *State) GetConsensusParams() types1.ConsensusParams { +func (m *State) GetConsensusParams() v11.ConsensusParams { if m != nil { return m.ConsensusParams } - return types1.ConsensusParams{} + return v11.ConsensusParams{} } func (m *State) GetLastHeightConsensusParamsChanged() int64 { @@ -578,82 +590,91 @@ func (m *State) GetAppHash() []byte { return nil } +func (m *State) GetNextBlockDelay() time.Duration { + if m != nil { + return m.NextBlockDelay + } + return 0 +} + func init() { - proto.RegisterType((*LegacyABCIResponses)(nil), "tendermint.state.LegacyABCIResponses") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.state.ResponseBeginBlock") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.state.ResponseEndBlock") - proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") - proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo") - proto.RegisterType((*ABCIResponsesInfo)(nil), "tendermint.state.ABCIResponsesInfo") - proto.RegisterType((*Version)(nil), "tendermint.state.Version") - proto.RegisterType((*State)(nil), "tendermint.state.State") -} - -func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } - -var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 963 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4b, 0x6f, 0xdb, 0x46, - 0x17, 0x35, 0xe3, 0x44, 0x8f, 0x2b, 0xcb, 0x96, 0x47, 0x9f, 0x13, 0x45, 0xf9, 0x22, 0xa9, 0x42, - 0x12, 0x18, 0x45, 0x41, 0x01, 0xc9, 0xaa, 0x9b, 0x14, 0x96, 0xec, 0xd6, 0x02, 0xdc, 0xa2, 0xa0, - 0xdd, 0x00, 0xe9, 0x22, 0xc4, 0x88, 0x1c, 0x49, 0x83, 0x4a, 0x24, 0xc1, 0x19, 0xa9, 0x72, 0xf7, - 0xdd, 0x75, 0x91, 0x6d, 0xff, 0x51, 0x96, 0x59, 0x76, 0x53, 0xb7, 0x95, 0x81, 0x2e, 0xfa, 0x2b, - 0x8a, 0x79, 0xf0, 0x25, 0xba, 0xa8, 0x8b, 0xec, 0xc8, 0xb9, 0xe7, 0x9e, 0x7b, 0xee, 0x99, 0xb9, - 0x43, 0xc2, 0xff, 0x39, 0xf1, 0x5c, 0x12, 0xce, 0xa9, 0xc7, 0x7b, 0x8c, 0x63, 0x4e, 0x7a, 
0xfc, - 0x32, 0x20, 0xcc, 0x0c, 0x42, 0x9f, 0xfb, 0xa8, 0x96, 0x44, 0x4d, 0x19, 0x6d, 0xfe, 0x6f, 0xe2, - 0x4f, 0x7c, 0x19, 0xec, 0x89, 0x27, 0x85, 0x6b, 0x3e, 0x4a, 0xb1, 0xe0, 0x91, 0x43, 0xd3, 0x24, - 0xcd, 0x74, 0x09, 0xb9, 0x9e, 0x89, 0x76, 0x72, 0xd1, 0x25, 0x9e, 0x51, 0x17, 0x73, 0x3f, 0xd4, - 0x88, 0xc7, 0x39, 0x44, 0x80, 0x43, 0x3c, 0x8f, 0x08, 0x5a, 0xa9, 0xf0, 0x92, 0x84, 0x8c, 0xfa, - 0x5e, 0xa6, 0x40, 0x7b, 0xe2, 0xfb, 0x93, 0x19, 0xe9, 0xc9, 0xb7, 0xd1, 0x62, 0xdc, 0xe3, 0x74, - 0x4e, 0x18, 0xc7, 0xf3, 0x40, 0x01, 0xba, 0xbf, 0x1a, 0x50, 0x3f, 0x23, 0x13, 0xec, 0x5c, 0x1e, - 0xf5, 0x07, 0x43, 0x8b, 0xb0, 0xc0, 0xf7, 0x18, 0x61, 0xe8, 0x25, 0x54, 0x5c, 0x32, 0xa3, 0x4b, - 0x12, 0xda, 0x7c, 0xc5, 0x1a, 0x46, 0x67, 0xfb, 0xb0, 0xf2, 0xfc, 0xb1, 0x99, 0xb2, 0x44, 0xb4, - 0x6a, 0x9e, 0xac, 0x88, 0x73, 0xb1, 0xb2, 0x08, 0x5b, 0xcc, 0xb8, 0x05, 0x3a, 0xe3, 0x62, 0xc5, - 0xd0, 0x67, 0x50, 0x26, 0x9e, 0x6b, 0x8f, 0x66, 0xbe, 0xf3, 0x5d, 0xe3, 0x4e, 0xc7, 0x38, 0xac, - 0x3c, 0xef, 0x9a, 0x9b, 0x86, 0x9a, 0x51, 0xbd, 0x13, 0xcf, 0xed, 0x0b, 0xa4, 0x55, 0x22, 0xfa, - 0x09, 0x9d, 0x40, 0x65, 0x44, 0x26, 0xd4, 0xd3, 0x14, 0xdb, 0x92, 0xe2, 0xc9, 0x3f, 0x53, 0xf4, - 0x05, 0x58, 0x91, 0xc0, 0x28, 0x7e, 0xee, 0xbe, 0x01, 0x94, 0x47, 0xa0, 0x53, 0x28, 0x90, 0x25, - 0xf1, 0x78, 0xd4, 0xd8, 0xfd, 0x7c, 0x63, 0x22, 0xdc, 0x6f, 0xbc, 0xbb, 0x6a, 0x6f, 0xfd, 0x75, - 0xd5, 0xae, 0x29, 0xf4, 0x27, 0xfe, 0x9c, 0x72, 0x32, 0x0f, 0xf8, 0xa5, 0xa5, 0xf3, 0xbb, 0x3f, - 0xdd, 0x81, 0xda, 0x66, 0x17, 0xe8, 0x1c, 0xf6, 0xe3, 0x7d, 0xb4, 0x17, 0x81, 0x8b, 0x39, 0x89, - 0x2a, 0x75, 0x72, 0x95, 0x5e, 0x45, 0xc8, 0x6f, 0x24, 0xb0, 0x7f, 0x57, 0xd4, 0xb4, 0x6a, 0xcb, - 0xec, 0x32, 0x43, 0xaf, 0xe1, 0x81, 0x23, 0xaa, 0x78, 0x6c, 0xc1, 0x6c, 0x79, 0x08, 0x62, 0x6a, - 0xe5, 0xef, 0x47, 0x69, 0x6a, 0x75, 0x08, 0x06, 0x51, 0xc2, 0xd7, 0xf2, 0xd0, 0x58, 0x07, 0x4e, - 0x66, 0x21, 0xa2, 0x4e, 0xec, 0xd8, 0xfe, 0x40, 0x3b, 0x7e, 0x34, 0x60, 0x37, 0x6e, 0x88, 0x0d, - 0xbd, 0xb1, 0x8f, 0x06, 0x50, 0x4d, 0xcc, 0x60, 0x84, 0x37, 0x0c, 0xa9, 0xb6, 0x95, 0x57, 0x1b, - 0x27, 0x9e, 0x13, 0x6e, 0xed, 0x2c, 0x53, 0x6f, 0xc8, 0x84, 0xfa, 0x0c, 0x33, 0x6e, 0x4f, 0x09, - 0x9d, 0x4c, 0xb9, 0xed, 0x4c, 0xb1, 0x37, 0x21, 0xae, 0x6c, 0x7c, 0xdb, 0xda, 0x17, 0xa1, 0x53, - 0x19, 0x19, 0xa8, 0x40, 0xf7, 0x67, 0x03, 0xea, 0x1b, 0xcd, 0x4b, 0x31, 0x16, 0xd4, 0x36, 0x4c, - 0x64, 0x5a, 0xcf, 0xbf, 0xbb, 0xa7, 0x77, 0x66, 0x2f, 0xeb, 0x21, 0xfb, 0xcf, 0xda, 0xfe, 0x34, - 0x60, 0x3f, 0x33, 0x6c, 0x52, 0xd9, 0x6b, 0x38, 0x98, 0xc9, 0x39, 0xb4, 0x85, 0xe1, 0x76, 0x18, - 0x05, 0xb5, 0xbc, 0xa7, 0xf9, 0x93, 0x7f, 0xc3, 0xd8, 0x5a, 0x75, 0xc5, 0x71, 0x34, 0x72, 0x68, - 0x32, 0xcb, 0xf7, 0xa1, 0xa0, 0xb4, 0x69, 0x4d, 0xfa, 0x0d, 0xbd, 0x81, 0x07, 0x51, 0x19, 0x7b, - 0x4c, 0x3d, 0x3c, 0xa3, 0x3f, 0x90, 0xcc, 0xb8, 0x3d, 0xcb, 0x9d, 0x83, 0x88, 0xf4, 0x73, 0x0d, - 0x57, 0x03, 0x77, 0x10, 0xde, 0xb4, 0xdc, 0x9d, 0x42, 0xf1, 0x95, 0xba, 0x93, 0xd0, 0x11, 0x94, - 0x63, 0xdb, 0x74, 0x47, 0x99, 0xcb, 0x44, 0xdf, 0x5d, 0x89, 0xe5, 0xda, 0xec, 0x24, 0x0b, 0x35, - 0xa1, 0xc4, 0xfc, 0x31, 0xff, 0x1e, 0x87, 0x44, 0xf6, 0x51, 0xb6, 0xe2, 0xf7, 0xee, 0x1f, 0x05, - 0xb8, 0x77, 0x2e, 0x4c, 0x41, 0x9f, 0x42, 0x51, 0x73, 0xe9, 0x32, 0x0f, 0xf3, 0xc6, 0x69, 0x51, - 0xba, 0x44, 0x84, 0x47, 0xcf, 0xa0, 0xe4, 0x4c, 0x31, 0xf5, 0x6c, 0xaa, 0x36, 0xaf, 0xdc, 0xaf, - 0xac, 0xaf, 0xda, 0xc5, 0x81, 0x58, 0x1b, 0x1e, 0x5b, 0x45, 0x19, 0x1c, 0xba, 0xe8, 0x29, 0xec, - 0x52, 0x8f, 0x72, 0x8a, 0x67, 0x7a, 0xcb, 0x1b, 0xbb, 0xd2, 0xd6, 0xaa, 0x5e, 0x55, 0xbb, 0x8d, - 0x3e, 0x06, 0xb9, 
0xf7, 0xca, 0xd0, 0x08, 0xb9, 0x2d, 0x91, 0x7b, 0x22, 0x20, 0x3d, 0xd2, 0x58, - 0x0b, 0xaa, 0x29, 0x2c, 0x75, 0x1b, 0x77, 0xf3, 0xda, 0xd5, 0x99, 0x94, 0x59, 0xc3, 0xe3, 0x7e, - 0x5d, 0x68, 0x5f, 0x5f, 0xb5, 0x2b, 0x67, 0x11, 0xd5, 0xf0, 0xd8, 0xaa, 0xc4, 0xbc, 0x43, 0x17, - 0x9d, 0xc1, 0x5e, 0x8a, 0x53, 0xdc, 0xfb, 0x8d, 0x7b, 0x92, 0xb5, 0x69, 0xaa, 0x8f, 0x82, 0x19, - 0x7d, 0x14, 0xcc, 0x8b, 0xe8, 0xa3, 0xd0, 0x2f, 0x09, 0xda, 0xb7, 0xbf, 0xb5, 0x0d, 0xab, 0x1a, - 0x73, 0x89, 0x28, 0xfa, 0x02, 0xf6, 0x3c, 0xb2, 0xe2, 0x76, 0x3c, 0x95, 0xac, 0x51, 0xb8, 0xd5, - 0x1c, 0xef, 0x8a, 0xb4, 0xe4, 0x4a, 0x40, 0x2f, 0x01, 0x52, 0x1c, 0xc5, 0x5b, 0x71, 0xa4, 0x32, - 0x84, 0x10, 0xd9, 0x56, 0x8a, 0xa4, 0x74, 0x3b, 0x21, 0x22, 0x2d, 0x25, 0x64, 0x00, 0xad, 0xf4, - 0xd8, 0x26, 0x7c, 0xf1, 0x04, 0x97, 0xe5, 0x66, 0x3d, 0x4a, 0x26, 0x38, 0xc9, 0xd6, 0xb3, 0x7c, - 0xe3, 0x7d, 0x02, 0x1f, 0x78, 0x9f, 0x7c, 0x05, 0x4f, 0x32, 0xf7, 0xc9, 0x06, 0x7f, 0x2c, 0xaf, - 0x22, 0xe5, 0x75, 0x52, 0x17, 0x4c, 0x96, 0x28, 0xd2, 0x18, 0x1d, 0xc4, 0x50, 0x7e, 0xa5, 0x99, - 0x3d, 0xc5, 0x6c, 0xda, 0xd8, 0xe9, 0x18, 0x87, 0x3b, 0xea, 0x20, 0xaa, 0xaf, 0x37, 0x3b, 0xc5, - 0x6c, 0x8a, 0x1e, 0x42, 0x09, 0x07, 0x81, 0x82, 0x54, 0x25, 0xa4, 0x88, 0x83, 0x40, 0x84, 0xfa, - 0x5f, 0xbe, 0x5b, 0xb7, 0x8c, 0xf7, 0xeb, 0x96, 0xf1, 0xfb, 0xba, 0x65, 0xbc, 0xbd, 0x6e, 0x6d, - 0xbd, 0xbf, 0x6e, 0x6d, 0xfd, 0x72, 0xdd, 0xda, 0xfa, 0xf6, 0xc5, 0x84, 0xf2, 0xe9, 0x62, 0x64, - 0x3a, 0xfe, 0xbc, 0xe7, 0xf8, 0x73, 0xc2, 0x47, 0x63, 0x9e, 0x3c, 0xa8, 0xff, 0xa5, 0xcd, 0x3f, - 0xad, 0x51, 0x41, 0xae, 0xbf, 0xf8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xec, 0x26, 0xcf, 0x93, 0x84, - 0x09, 0x00, 0x00, + proto.RegisterType((*LegacyABCIResponses)(nil), "cometbft.state.v1.LegacyABCIResponses") + proto.RegisterType((*ResponseBeginBlock)(nil), "cometbft.state.v1.ResponseBeginBlock") + proto.RegisterType((*ResponseEndBlock)(nil), "cometbft.state.v1.ResponseEndBlock") + proto.RegisterType((*ValidatorsInfo)(nil), "cometbft.state.v1.ValidatorsInfo") + proto.RegisterType((*ConsensusParamsInfo)(nil), "cometbft.state.v1.ConsensusParamsInfo") + proto.RegisterType((*ABCIResponsesInfo)(nil), "cometbft.state.v1.ABCIResponsesInfo") + proto.RegisterType((*Version)(nil), "cometbft.state.v1.Version") + proto.RegisterType((*State)(nil), "cometbft.state.v1.State") +} + +func init() { proto.RegisterFile("cometbft/state/v1/types.proto", fileDescriptor_eb6b2e03ecdbc0c2) } + +var fileDescriptor_eb6b2e03ecdbc0c2 = []byte{ + // 1008 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x36, 0xa3, 0xc4, 0x92, 0x46, 0xd6, 0x8f, 0x57, 0x4d, 0xc3, 0xb8, 0x8d, 0xe4, 0xa8, 0x4d, + 0x6a, 0x14, 0x05, 0x05, 0xbb, 0xb7, 0x5e, 0xd2, 0x50, 0x4a, 0x60, 0x01, 0x6e, 0x50, 0xd0, 0x6e, + 0x0e, 0xbe, 0x10, 0x2b, 0x72, 0x25, 0x2d, 0x4a, 0x91, 0x84, 0x76, 0xc5, 0xca, 0x7d, 0x80, 0x5e, + 0x9b, 0x63, 0xd1, 0x07, 0xe9, 0x33, 0xe4, 0x98, 0x5b, 0x7b, 0x72, 0x03, 0xf9, 0xd6, 0xa7, 0x28, + 0x76, 0xb9, 0xfc, 0xd1, 0x0f, 0x0a, 0x17, 0xb9, 0x91, 0x3b, 0x33, 0xdf, 0x7c, 0xf3, 0xed, 0xcc, + 0xee, 0xc2, 0x23, 0x27, 0x98, 0x12, 0x3e, 0x1c, 0xf1, 0x2e, 0xe3, 0x98, 0x93, 0x6e, 0x74, 0xdc, + 0xe5, 0x57, 0x21, 0x61, 0x46, 0x38, 0x0b, 0x78, 0x80, 0xf6, 0x13, 0xb3, 0x21, 0xcd, 0x46, 0x74, + 0x7c, 0xf0, 0x69, 0x1a, 0x81, 0x87, 0x0e, 0x5d, 0x0b, 0x38, 0x68, 0xa5, 0x56, 0xb9, 0x2a, 0xcc, + 0x21, 0x9e, 0xe1, 0x69, 0x62, 0x7f, 0xb4, 0x69, 0xcf, 0x87, 0x3f, 0xde, 0x34, 0x47, 0xd8, 0xa3, + 0x2e, 0xe6, 0xc1, 0x4c, 0xb9, 0xb4, 0x53, 0x97, 
0x88, 0xcc, 0x18, 0x0d, 0xfc, 0x75, 0x8c, 0x8f, + 0xc6, 0xc1, 0x38, 0x90, 0x9f, 0x5d, 0xf1, 0x95, 0x84, 0x8d, 0x83, 0x60, 0xec, 0x91, 0xae, 0xfc, + 0x1b, 0xce, 0x47, 0x5d, 0x4e, 0xa7, 0x84, 0x71, 0x3c, 0x0d, 0x13, 0xe6, 0xeb, 0x0e, 0xee, 0x7c, + 0x86, 0x39, 0x0d, 0xfc, 0xd8, 0xde, 0x79, 0xaf, 0x41, 0xf3, 0x8c, 0x8c, 0xb1, 0x73, 0xf5, 0xdc, + 0xec, 0x0d, 0x2c, 0xc2, 0xc2, 0xc0, 0x67, 0x84, 0xa1, 0x67, 0x50, 0x71, 0x89, 0x47, 0x23, 0x32, + 0xb3, 0xf9, 0x82, 0xe9, 0xda, 0x61, 0xe1, 0xa8, 0x72, 0xd2, 0x32, 0x52, 0xe1, 0x84, 0x4a, 0x46, + 0x74, 0x6c, 0xbc, 0x58, 0x10, 0xe7, 0x62, 0x61, 0x11, 0x36, 0xf7, 0xb8, 0x05, 0x2a, 0xe4, 0x62, + 0xc1, 0xd0, 0xb7, 0x50, 0x26, 0xbe, 0x6b, 0x0f, 0xbd, 0xc0, 0xf9, 0x51, 0xbf, 0x73, 0xa8, 0x1d, + 0x55, 0x4e, 0x3e, 0x33, 0x36, 0x74, 0x37, 0x92, 0x8c, 0x2f, 0x7c, 0xd7, 0x14, 0xae, 0x56, 0x89, + 0xa8, 0x2f, 0xf4, 0x12, 0x2a, 0x43, 0x32, 0xa6, 0xbe, 0xc2, 0x28, 0x48, 0x8c, 0x27, 0xff, 0x81, + 0x61, 0x0a, 0xef, 0x18, 0x05, 0x86, 0xe9, 0x77, 0xc7, 0x06, 0xb4, 0xe9, 0x81, 0x06, 0xb0, 0x4b, + 0x22, 0xe2, 0xf3, 0xa4, 0xb6, 0x07, 0x5b, 0x6a, 0x13, 0x76, 0x53, 0x7f, 0x7b, 0xdd, 0xde, 0xf9, + 0xe7, 0xba, 0xdd, 0x88, 0xdd, 0xbf, 0x0a, 0xa6, 0x94, 0x93, 0x69, 0xc8, 0xaf, 0x2c, 0x05, 0xd0, + 0xf9, 0xf5, 0x0e, 0x34, 0xd6, 0xeb, 0x40, 0x17, 0xb0, 0x9f, 0xee, 0xb1, 0x3d, 0x0f, 0x5d, 0xcc, + 0x49, 0x92, 0xea, 0xf1, 0x66, 0xaa, 0xd7, 0x89, 0xeb, 0x0f, 0xd2, 0xd3, 0xbc, 0x2b, 0x92, 0x5a, + 0x8d, 0x68, 0x75, 0x99, 0xa1, 0x4b, 0x78, 0xe0, 0x88, 0x34, 0x3e, 0x9b, 0x33, 0x5b, 0xb6, 0x60, + 0x8a, 0x1d, 0x6b, 0xdc, 0xc9, 0xb0, 0xe3, 0xee, 0x89, 0x8e, 0x8d, 0x5e, 0x12, 0xf1, 0xbd, 0xec, + 0x59, 0xeb, 0xbe, 0xb3, 0xb2, 0x90, 0x60, 0x67, 0x8a, 0x14, 0x3e, 0x54, 0x91, 0x5f, 0x34, 0xa8, + 0xa5, 0x25, 0xb1, 0x81, 0x3f, 0x0a, 0x50, 0x1f, 0xaa, 0x99, 0x1e, 0x8c, 0x70, 0x5d, 0x93, 0x7c, + 0xdb, 0x5b, 0xf8, 0xa6, 0x91, 0xe7, 0x84, 0x5b, 0x7b, 0x51, 0xee, 0x0f, 0x19, 0xd0, 0xf4, 0x30, + 0xe3, 0xf6, 0x84, 0xd0, 0xf1, 0x84, 0xdb, 0xce, 0x04, 0xfb, 0x63, 0xe2, 0xca, 0xda, 0x0b, 0xd6, + 0xbe, 0x30, 0x9d, 0x4a, 0x4b, 0x2f, 0x36, 0x74, 0x7e, 0xd7, 0xa0, 0xb9, 0x56, 0xbe, 0x64, 0x73, + 0x0e, 0x8d, 0x35, 0x1d, 0x99, 0x22, 0x74, 0x0b, 0x01, 0xd5, 0xee, 0xd4, 0x57, 0x65, 0x64, 0xff, + 0x9b, 0xdc, 0x9f, 0x1a, 0xec, 0xaf, 0x4c, 0x9d, 0xa4, 0x76, 0x09, 0xf7, 0x3d, 0x39, 0x90, 0xb6, + 0x50, 0xdd, 0x9e, 0x25, 0x46, 0xc5, 0xef, 0xe9, 0x96, 0x01, 0xd8, 0x32, 0xc0, 0x56, 0x33, 0x06, + 0x79, 0x3e, 0x74, 0x68, 0x36, 0xd5, 0x1f, 0xc3, 0x6e, 0x4c, 0x4e, 0x91, 0x52, 0x7f, 0xe8, 0x15, + 0xd4, 0x46, 0xd4, 0xc7, 0x1e, 0xfd, 0x99, 0xac, 0x4c, 0xdb, 0x17, 0x9b, 0x2d, 0xf0, 0x52, 0xf9, + 0xc5, 0x73, 0xa6, 0x90, 0xad, 0xea, 0x28, 0xbf, 0xdc, 0xa1, 0x50, 0x7c, 0x1d, 0x1f, 0x63, 0xc8, + 0x84, 0x72, 0xaa, 0x93, 0x2a, 0x21, 0x77, 0x8c, 0xa8, 0xc3, 0x6e, 0x45, 0x64, 0x25, 0x6f, 0x16, + 0x86, 0x0e, 0xa0, 0xc4, 0x82, 0x11, 0xff, 0x09, 0xcf, 0x88, 0x24, 0x5e, 0xb6, 0xd2, 0xff, 0xce, + 0x1f, 0x45, 0xb8, 0x77, 0x2e, 0x84, 0x40, 0xdf, 0x40, 0x51, 0xc1, 0xa9, 0x3c, 0x07, 0x5b, 0xa4, + 0x52, 0xb4, 0x54, 0x8e, 0x24, 0x00, 0x3d, 0x85, 0x92, 0x33, 0xc1, 0xd4, 0xb7, 0x69, 0xbc, 0x5f, + 0x65, 0xb3, 0xb2, 0xbc, 0x6e, 0x17, 0x7b, 0x62, 0x6d, 0xd0, 0xb7, 0x8a, 0xd2, 0x38, 0x70, 0xd1, + 0x13, 0xa8, 0x51, 0x9f, 0x72, 0x8a, 0x3d, 0xb5, 0xcb, 0x7a, 0x4d, 0x0a, 0x59, 0x55, 0xab, 0xf1, + 0x06, 0xa3, 0x2f, 0x41, 0x6e, 0x77, 0xac, 0x65, 0xe2, 0x59, 0x90, 0x9e, 0x75, 0x61, 0x90, 0x2a, + 0x29, 0xdf, 0x73, 0xa8, 0xe6, 0x7c, 0xa9, 0xab, 0xdf, 0x5d, 0x27, 0x9f, 0xf6, 0xa1, 0x0c, 0x1b, + 0xf4, 0xcd, 0xa6, 0x20, 0xbf, 0xbc, 0x6e, 0x57, 0xce, 0x12, 0xac, 0x41, 
0xdf, 0xaa, 0xa4, 0xc0, + 0x03, 0x17, 0x9d, 0x41, 0x3d, 0x07, 0x2a, 0x2e, 0x05, 0xfd, 0x9e, 0x82, 0x8d, 0x2f, 0x04, 0x23, + 0xb9, 0x10, 0x8c, 0x8b, 0xe4, 0xc6, 0x30, 0x4b, 0x02, 0xf6, 0xcd, 0xdf, 0x6d, 0xcd, 0xaa, 0xa6, + 0x58, 0xc2, 0x8a, 0x4e, 0xa1, 0xee, 0x93, 0x05, 0xb7, 0xd3, 0x51, 0x64, 0xfa, 0xee, 0xed, 0xa6, + 0xb7, 0x26, 0xe2, 0xb2, 0x93, 0x00, 0x3d, 0x03, 0xc8, 0x81, 0x14, 0x6f, 0x07, 0x92, 0x0b, 0x11, + 0x54, 0x64, 0x61, 0x39, 0x94, 0xd2, 0x2d, 0xa9, 0x88, 0xb8, 0x1c, 0x95, 0x1e, 0xb4, 0xf2, 0xd3, + 0x9a, 0x01, 0xa6, 0x83, 0x5b, 0x96, 0x1b, 0xf6, 0x49, 0x36, 0xb8, 0x59, 0xb4, 0x1a, 0xe1, 0xad, + 0xe7, 0x08, 0x7c, 0xe8, 0x39, 0xf2, 0x0a, 0x3e, 0x5f, 0x39, 0x47, 0xd6, 0x12, 0xa4, 0xfc, 0x2a, + 0x92, 0xdf, 0x61, 0xee, 0x60, 0x59, 0x05, 0x4a, 0x48, 0x26, 0xdd, 0x38, 0x93, 0xb7, 0x34, 0xb3, + 0x27, 0x98, 0x4d, 0xf4, 0xbd, 0x43, 0xed, 0x68, 0x2f, 0xee, 0xc6, 0xf8, 0xf6, 0x66, 0xa7, 0x98, + 0x4d, 0xd0, 0x43, 0x28, 0xe1, 0x30, 0x8c, 0x5d, 0xaa, 0xd2, 0xa5, 0x88, 0xc3, 0x50, 0x9a, 0xbe, + 0x83, 0x86, 0xec, 0x82, 0xb8, 0xa7, 0x5c, 0xe2, 0xe1, 0x2b, 0xbd, 0x2e, 0x6b, 0x7d, 0xb8, 0xd1, + 0x54, 0x7d, 0xf5, 0xca, 0x88, 0x7b, 0xea, 0x37, 0xd1, 0x53, 0xb2, 0x15, 0x64, 0x4f, 0xf5, 0x45, + 0xa8, 0x79, 0xf6, 0x76, 0xd9, 0xd2, 0xde, 0x2d, 0x5b, 0xda, 0xfb, 0x65, 0x4b, 0x7b, 0x73, 0xd3, + 0xda, 0x79, 0x77, 0xd3, 0xda, 0xf9, 0xeb, 0xa6, 0xb5, 0x73, 0x79, 0x32, 0xa6, 0x7c, 0x32, 0x1f, + 0x0a, 0x01, 0xbb, 0xe9, 0xb3, 0x28, 0x7b, 0x9f, 0x85, 0xb4, 0xbb, 0xf1, 0xbc, 0x1b, 0xee, 0xca, + 0xd4, 0x5f, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x5a, 0xd2, 0xd5, 0x93, 0xfa, 0x09, 0x00, 0x00, } func (m *LegacyABCIResponses) Marshal() (dAtA []byte, err error) { @@ -915,9 +936,9 @@ func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ResponseFinalizeBlock != nil { + if m.FinalizeBlock != nil { { - size, err := m.ResponseFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1007,6 +1028,14 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + n9, err9 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.NextBlockDelay, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.NextBlockDelay):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x7a if m.InitialHeight != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) i-- @@ -1082,12 +1111,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n13, err13 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime):]) - if err13 != nil { - return 0, err13 + n14, err14 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime):]) + if err14 != nil { + return 0, err14 } - i -= n13 - i = encodeVarintTypes(dAtA, i, uint64(n13)) + i -= n14 + i = encodeVarintTypes(dAtA, i, uint64(n14)) i-- dAtA[i] = 0x2a { @@ -1242,8 +1271,8 @@ func (m *ABCIResponsesInfo) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.ResponseFinalizeBlock != nil { - l = m.ResponseFinalizeBlock.Size() + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() n += 1 + l + sovTypes(uint64(l)) } return n @@ -1314,6 +1343,8 @@ func (m *State) Size() (n int) { if m.InitialHeight != 0 { n += 1 + 
sovTypes(uint64(m.InitialHeight)) } + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.NextBlockDelay) + n += 1 + l + sovTypes(uint64(l)) return n } @@ -1381,7 +1412,7 @@ func (m *LegacyABCIResponses) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeliverTxs = append(m.DeliverTxs, &types.ExecTxResult{}) + m.DeliverTxs = append(m.DeliverTxs, &v1.ExecTxResult{}) if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1537,7 +1568,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, types.Event{}) + m.Events = append(m.Events, v1.Event{}) if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1621,7 +1652,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorUpdates = append(m.ValidatorUpdates, types.ValidatorUpdate{}) + m.ValidatorUpdates = append(m.ValidatorUpdates, v1.ValidatorUpdate{}) if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1656,7 +1687,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} + m.ConsensusParamUpdates = &v11.ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1691,7 +1722,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, types.Event{}) + m.Events = append(m.Events, v1.Event{}) if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1776,7 +1807,7 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ValidatorSet == nil { - m.ValidatorSet = &types1.ValidatorSet{} + m.ValidatorSet = &v11.ValidatorSet{} } if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2010,7 +2041,7 @@ func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { } case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseFinalizeBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2037,10 +2068,10 @@ func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResponseFinalizeBlock == nil { - m.ResponseFinalizeBlock = &types.ResponseFinalizeBlock{} + if m.FinalizeBlock == nil { + m.FinalizeBlock = &v1.FinalizeBlockResponse{} } - if err := m.ResponseFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2389,7 +2420,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.NextValidators == nil { - m.NextValidators = &types1.ValidatorSet{} + m.NextValidators = &v11.ValidatorSet{} } if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2425,7 +2456,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Validators == nil { - m.Validators = &types1.ValidatorSet{} + m.Validators = &v11.ValidatorSet{} } if err := 
m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2461,7 +2492,7 @@ func (m *State) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastValidators == nil { - m.LastValidators = &types1.ValidatorSet{} + m.LastValidators = &v11.ValidatorSet{} } if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2625,6 +2656,39 @@ func (m *State) Unmarshal(dAtA []byte) error { break } } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextBlockDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.NextBlockDelay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/api/cometbft/state/v1beta1/types.pb.go b/api/cometbft/state/v1beta1/types.pb.go new file mode 100644 index 00000000000..8da19c6d938 --- /dev/null +++ b/api/cometbft/state/v1beta1/types.pb.go @@ -0,0 +1,2181 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/state/v1beta1/types.proto + +package v1beta1 + +import ( + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1" + v1beta11 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + v1 "github.com/cometbft/cometbft/api/cometbft/version/v1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. 
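// [Editor's aside — illustrative only.] The new v1beta1 package freezes the
// pre-v0.38 wire format, so state stores written by older nodes remain
// decodable. A minimal sketch; raw stands in for bytes read from the legacy
// state-store key, which this diff does not show:
//
//	func decodeLegacy(raw []byte) error {
//		var legacy v1beta1.ABCIResponses
//		if err := legacy.Unmarshal(raw); err != nil {
//			return err
//		}
//		for _, res := range legacy.GetDeliverTxs() {
//			_ = res.Code // per-tx result code carried by the legacy ResponseDeliverTx
//		}
//		return nil
//	}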
+type ABCIResponses struct { + DeliverTxs []*v1beta1.ResponseDeliverTx `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + EndBlock *v1beta1.ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` + BeginBlock *v1beta1.ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` +} + +func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } +func (m *ABCIResponses) String() string { return proto.CompactTextString(m) } +func (*ABCIResponses) ProtoMessage() {} +func (*ABCIResponses) Descriptor() ([]byte, []int) { + return fileDescriptor_3973a75f85b1930f, []int{0} +} +func (m *ABCIResponses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponses) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponses.Merge(m, src) +} +func (m *ABCIResponses) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponses) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponses.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo + +func (m *ABCIResponses) GetDeliverTxs() []*v1beta1.ResponseDeliverTx { + if m != nil { + return m.DeliverTxs + } + return nil +} + +func (m *ABCIResponses) GetEndBlock() *v1beta1.ResponseEndBlock { + if m != nil { + return m.EndBlock + } + return nil +} + +func (m *ABCIResponses) GetBeginBlock() *v1beta1.ResponseBeginBlock { + if m != nil { + return m.BeginBlock + } + return nil +} + +// ValidatorsInfo represents the latest validator set, or the last height it changed +type ValidatorsInfo struct { + ValidatorSet *v1beta11.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` +} + +func (m *ValidatorsInfo) Reset() { *m = ValidatorsInfo{} } +func (m *ValidatorsInfo) String() string { return proto.CompactTextString(m) } +func (*ValidatorsInfo) ProtoMessage() {} +func (*ValidatorsInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_3973a75f85b1930f, []int{1} +} +func (m *ValidatorsInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorsInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorsInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorsInfo.Merge(m, src) +} +func (m *ValidatorsInfo) XXX_Size() int { + return m.Size() +} +func (m *ValidatorsInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorsInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorsInfo proto.InternalMessageInfo + +func (m *ValidatorsInfo) GetValidatorSet() *v1beta11.ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +func (m *ValidatorsInfo) GetLastHeightChanged() int64 { + if m != nil { + return m.LastHeightChanged + } + return 0 +} + +// ConsensusParamsInfo represents the 
latest consensus params, or the last height it changed +type ConsensusParamsInfo struct { + ConsensusParams v1beta11.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` +} + +func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } +func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } +func (*ConsensusParamsInfo) ProtoMessage() {} +func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_3973a75f85b1930f, []int{2} +} +func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParamsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParamsInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParamsInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParamsInfo.Merge(m, src) +} +func (m *ConsensusParamsInfo) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParamsInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo + +func (m *ConsensusParamsInfo) GetConsensusParams() v1beta11.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return v1beta11.ConsensusParams{} +} + +func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { + if m != nil { + return m.LastHeightChanged + } + return 0 +} + +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. +type ABCIResponsesInfo struct { + AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} } +func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) } +func (*ABCIResponsesInfo) ProtoMessage() {} +func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_3973a75f85b1930f, []int{3} +} +func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponsesInfo.Merge(m, src) +} +func (m *ABCIResponsesInfo) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponsesInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo + +func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses { + if m != nil { + return m.AbciResponses + } + return nil +} + +func (m *ABCIResponsesInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// Version is a message for storing versioning information. 
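// [Editor's aside — illustrative only.] Version pairs the protocol-level
// consensus version with the node's software version string. A sketch of
// populating it; the versionv1 alias and the field values are made up for
// this example:
//
//	import versionv1 "github.com/cometbft/cometbft/api/cometbft/version/v1"
//
//	v := v1beta1.Version{
//		Consensus: versionv1.Consensus{Block: 11, App: 1},
//		Software:  "v0.38.0",
//	}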
+type Version struct { + Consensus v1.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"` + Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_3973a75f85b1930f, []int{4} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return m.Size() +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetConsensus() v1.Consensus { + if m != nil { + return m.Consensus + } + return v1.Consensus{} +} + +func (m *Version) GetSoftware() string { + if m != nil { + return m.Software + } + return "" +} + +// State represents the state of the blockchain. +type State struct { + Version Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + // immutable + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID v1beta11.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + NextValidators *v1beta11.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *v1beta11.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *v1beta11.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. 
+ ConsensusParams v1beta11.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + // Merkle root of the results from executing prev block + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // the latest AppHash we've received from calling abci.Commit() + AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *State) Reset() { *m = State{} } +func (m *State) String() string { return proto.CompactTextString(m) } +func (*State) ProtoMessage() {} +func (*State) Descriptor() ([]byte, []int) { + return fileDescriptor_3973a75f85b1930f, []int{5} +} +func (m *State) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_State.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *State) XXX_Merge(src proto.Message) { + xxx_messageInfo_State.Merge(m, src) +} +func (m *State) XXX_Size() int { + return m.Size() +} +func (m *State) XXX_DiscardUnknown() { + xxx_messageInfo_State.DiscardUnknown(m) +} + +var xxx_messageInfo_State proto.InternalMessageInfo + +func (m *State) GetVersion() Version { + if m != nil { + return m.Version + } + return Version{} +} + +func (m *State) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *State) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +func (m *State) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight + } + return 0 +} + +func (m *State) GetLastBlockID() v1beta11.BlockID { + if m != nil { + return m.LastBlockID + } + return v1beta11.BlockID{} +} + +func (m *State) GetLastBlockTime() time.Time { + if m != nil { + return m.LastBlockTime + } + return time.Time{} +} + +func (m *State) GetNextValidators() *v1beta11.ValidatorSet { + if m != nil { + return m.NextValidators + } + return nil +} + +func (m *State) GetValidators() *v1beta11.ValidatorSet { + if m != nil { + return m.Validators + } + return nil +} + +func (m *State) GetLastValidators() *v1beta11.ValidatorSet { + if m != nil { + return m.LastValidators + } + return nil +} + +func (m *State) GetLastHeightValidatorsChanged() int64 { + if m != nil { + return m.LastHeightValidatorsChanged + } + return 0 +} + +func (m *State) GetConsensusParams() v1beta11.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return v1beta11.ConsensusParams{} +} + +func (m *State) GetLastHeightConsensusParamsChanged() int64 { + if m != nil { + return m.LastHeightConsensusParamsChanged + } + return 0 +} + +func (m *State) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *State) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func init() { + proto.RegisterType((*ABCIResponses)(nil), "cometbft.state.v1beta1.ABCIResponses") + proto.RegisterType((*ValidatorsInfo)(nil), "cometbft.state.v1beta1.ValidatorsInfo") + proto.RegisterType((*ConsensusParamsInfo)(nil), 
"cometbft.state.v1beta1.ConsensusParamsInfo") + proto.RegisterType((*ABCIResponsesInfo)(nil), "cometbft.state.v1beta1.ABCIResponsesInfo") + proto.RegisterType((*Version)(nil), "cometbft.state.v1beta1.Version") + proto.RegisterType((*State)(nil), "cometbft.state.v1beta1.State") +} + +func init() { + proto.RegisterFile("cometbft/state/v1beta1/types.proto", fileDescriptor_3973a75f85b1930f) +} + +var fileDescriptor_3973a75f85b1930f = []byte{ + // 826 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x36, 0xeb, 0xc4, 0x92, 0x86, 0x96, 0xdc, 0xac, 0x8b, 0x80, 0x55, 0x01, 0xca, 0x55, 0xf3, + 0xe3, 0xf6, 0x40, 0x22, 0xe9, 0xa1, 0xc7, 0xa2, 0x94, 0x0a, 0x84, 0x85, 0x5b, 0xb4, 0x4c, 0x10, + 0x04, 0xbd, 0x10, 0x4b, 0x71, 0x4d, 0x2e, 0x2a, 0x93, 0x04, 0x77, 0xad, 0x3a, 0x0f, 0xd0, 0x53, + 0x2f, 0x79, 0x83, 0xbe, 0x4e, 0x8e, 0x39, 0xf6, 0xe4, 0x16, 0xf2, 0xb5, 0x0f, 0x51, 0xec, 0x1f, + 0x49, 0x19, 0x32, 0x02, 0x03, 0xb9, 0x71, 0x77, 0xbe, 0xf9, 0xe6, 0x9b, 0xd9, 0x99, 0x21, 0x4c, + 0x17, 0xe5, 0x19, 0xe1, 0xc9, 0x29, 0xf7, 0x19, 0xc7, 0x9c, 0xf8, 0xab, 0x27, 0x09, 0xe1, 0xf8, + 0x89, 0xcf, 0x5f, 0x57, 0x84, 0x79, 0x55, 0x5d, 0xf2, 0x12, 0xdd, 0x37, 0x18, 0x4f, 0x62, 0x3c, + 0x8d, 0x19, 0x7f, 0x92, 0x95, 0x59, 0x29, 0x21, 0xbe, 0xf8, 0x52, 0xe8, 0xf1, 0xe7, 0x0d, 0x23, + 0x4e, 0x16, 0x74, 0x1b, 0xe1, 0xb8, 0x0d, 0x2a, 0x6f, 0xb7, 0x62, 0x1e, 0xdd, 0x80, 0x59, 0xe1, + 0x25, 0x4d, 0x31, 0x2f, 0x6b, 0x8d, 0xfb, 0xe2, 0x06, 0x5c, 0x85, 0x6b, 0x7c, 0x66, 0xc8, 0x26, + 0x0d, 0x68, 0x45, 0x6a, 0x46, 0xcb, 0xc2, 0x5f, 0x6d, 0x46, 0x9b, 0x64, 0x65, 0x99, 0x2d, 0x89, + 0x2f, 0x4f, 0xc9, 0xf9, 0xa9, 0xcf, 0xe9, 0x19, 0x61, 0x1c, 0x9f, 0x55, 0x0a, 0x30, 0xfd, 0xcf, + 0x82, 0xe1, 0x77, 0xc1, 0x2c, 0x8c, 0x08, 0xab, 0xca, 0x82, 0x11, 0x86, 0x42, 0xb0, 0x53, 0xb2, + 0xa4, 0x2b, 0x52, 0xc7, 0xfc, 0x82, 0x39, 0xd6, 0xd1, 0xee, 0xb1, 0xfd, 0xf4, 0xd8, 0x6b, 0x6a, + 0x25, 0xb2, 0x37, 0xa5, 0xf2, 0x8c, 0xdb, 0x5c, 0x79, 0xbc, 0xb8, 0x88, 0x20, 0x35, 0x9f, 0x0c, + 0xcd, 0x61, 0x40, 0x8a, 0x34, 0x4e, 0x96, 0xe5, 0xe2, 0x37, 0xe7, 0xa3, 0x23, 0xeb, 0xd8, 0x7e, + 0xfa, 0xf8, 0x3d, 0x44, 0xdf, 0x17, 0x69, 0x20, 0xe0, 0x51, 0x9f, 0xe8, 0x2f, 0xf4, 0x03, 0xd8, + 0x09, 0xc9, 0x68, 0xa1, 0x79, 0x76, 0x25, 0xcf, 0x97, 0xef, 0xe1, 0x09, 0x84, 0x87, 0x62, 0x82, + 0xa4, 0xf9, 0x9e, 0xfe, 0x69, 0xc1, 0xe8, 0xa5, 0xa9, 0x34, 0x0b, 0x8b, 0xd3, 0x12, 0x85, 0x30, + 0x6c, 0x6a, 0x1f, 0x33, 0xc2, 0x1d, 0x4b, 0x06, 0x78, 0xd0, 0x06, 0x50, 0x05, 0x35, 0x11, 0x1a, + 0xf7, 0xe7, 0x84, 0x47, 0xfb, 0xab, 0xce, 0x09, 0x79, 0x70, 0xb8, 0xc4, 0x8c, 0xc7, 0x39, 0xa1, + 0x59, 0xce, 0xe3, 0x45, 0x8e, 0x8b, 0x8c, 0xa4, 0x32, 0xf3, 0xdd, 0xe8, 0x9e, 0x30, 0x3d, 0x93, + 0x96, 0x99, 0x32, 0x4c, 0xff, 0xb2, 0xe0, 0x70, 0x26, 0xd4, 0x16, 0xec, 0x9c, 0xfd, 0x2c, 0x1f, + 0x56, 0x4a, 0x7a, 0x05, 0x1f, 0x2f, 0xcc, 0x75, 0xac, 0x1e, 0x5c, 0xab, 0x7a, 0x7c, 0x93, 0xaa, + 0x6b, 0x34, 0xc1, 0x9d, 0xb7, 0x97, 0x93, 0x9d, 0xe8, 0x60, 0xb1, 0x79, 0x7d, 0x6b, 0x85, 0xaf, + 0xe1, 0xde, 0x46, 0x77, 0x48, 0x79, 0x27, 0x30, 0x12, 0x35, 0x8f, 0x6b, 0x73, 0xab, 0xc5, 0x3d, + 0xf4, 0xb6, 0x0f, 0x94, 0xb7, 0x41, 0x11, 0x0d, 0x85, 0x73, 0xdb, 0x6f, 0xf7, 0x61, 0x4f, 0xa9, + 0xd1, 0x2a, 0xf4, 0x69, 0x4a, 0xa1, 0xf7, 0x52, 0x35, 0x35, 0x0a, 0x60, 0xd0, 0x24, 0xa2, 0x63, + 0xb9, 0x6d, 0x2c, 0xdd, 0xfa, 0xde, 0xaa, 0x53, 0x05, 0x9d, 0x7f, 0xeb, 0x86, 0xc6, 0xd0, 0x67, + 0xe5, 0x29, 0xff, 0x1d, 0xd7, 0x44, 0x06, 0x1a, 0x44, 0xcd, 0x79, 0xfa, 0x47, 0x0f, 0xee, 0x3e, + 0x17, 0x8a, 0xd1, 0xb7, 
0xd0, 0xd3, 0x74, 0x3a, 0xce, 0xe4, 0xa6, 0x9c, 0xb4, 0x36, 0x1d, 0xc8, + 0x78, 0xa1, 0x47, 0xd0, 0x5f, 0xe4, 0x98, 0x16, 0x31, 0x55, 0x55, 0x1d, 0x04, 0xf6, 0xfa, 0x72, + 0xd2, 0x9b, 0x89, 0xbb, 0x70, 0x1e, 0xf5, 0xa4, 0x31, 0x4c, 0xd1, 0x43, 0x18, 0xd1, 0x82, 0x72, + 0x8a, 0x97, 0xfa, 0x2d, 0x9c, 0x91, 0xcc, 0x7e, 0xa8, 0x6f, 0xd5, 0x33, 0xa0, 0xaf, 0x40, 0x3e, + 0x8a, 0x6a, 0x7d, 0x83, 0xdc, 0x95, 0xc8, 0x03, 0x61, 0x90, 0x5d, 0xad, 0xb1, 0xaf, 0x60, 0xd8, + 0xc1, 0xd2, 0xd4, 0xb9, 0x73, 0x3d, 0x83, 0xcd, 0x96, 0x91, 0xbe, 0xe1, 0x3c, 0x38, 0x14, 0x19, + 0xac, 0x2f, 0x27, 0xf6, 0x89, 0x21, 0x0c, 0xe7, 0x91, 0xdd, 0xb0, 0x87, 0x29, 0x3a, 0x81, 0x83, + 0x0e, 0xb3, 0x58, 0x21, 0xce, 0x5d, 0xc9, 0x3d, 0xf6, 0xd4, 0x7e, 0xf1, 0xcc, 0x7e, 0xf1, 0x5e, + 0x98, 0xfd, 0x12, 0xf4, 0x05, 0xed, 0x9b, 0x7f, 0x26, 0x56, 0x34, 0x6c, 0xb8, 0x84, 0x15, 0xfd, + 0x08, 0x07, 0x05, 0xb9, 0xe0, 0x71, 0x33, 0x3a, 0xcc, 0xd9, 0xbb, 0xc5, 0xc8, 0x8d, 0x84, 0x73, + 0x3b, 0xc3, 0x68, 0x0e, 0xd0, 0x61, 0xea, 0xdd, 0x82, 0xa9, 0xe3, 0x27, 0x44, 0xc9, 0x14, 0x3b, + 0x54, 0xfd, 0xdb, 0x88, 0x12, 0xce, 0x1d, 0x51, 0x33, 0x70, 0xbb, 0x73, 0xd6, 0xb2, 0x36, 0x23, + 0x37, 0x90, 0x8f, 0xf8, 0x59, 0x3b, 0x72, 0xad, 0xb7, 0x1e, 0xbe, 0xad, 0x6b, 0x00, 0x3e, 0xc8, + 0x1a, 0xf8, 0x09, 0x1e, 0x6c, 0xac, 0x81, 0x6b, 0x51, 0x1a, 0x91, 0xb6, 0x14, 0x79, 0xd4, 0xd9, + 0x0b, 0x9b, 0x44, 0x46, 0xa9, 0x69, 0xd3, 0x9a, 0xb0, 0xf3, 0x25, 0x67, 0x71, 0x8e, 0x59, 0xee, + 0xec, 0x1f, 0x59, 0xc7, 0xfb, 0xaa, 0x4d, 0x23, 0x75, 0xff, 0x0c, 0xb3, 0x1c, 0x7d, 0x0a, 0x7d, + 0x5c, 0x55, 0x0a, 0x32, 0x94, 0x90, 0x1e, 0xae, 0x2a, 0x61, 0x0a, 0x7e, 0x79, 0xbb, 0x76, 0xad, + 0x77, 0x6b, 0xd7, 0xfa, 0x77, 0xed, 0x5a, 0x6f, 0xae, 0xdc, 0x9d, 0x77, 0x57, 0xee, 0xce, 0xdf, + 0x57, 0xee, 0xce, 0xaf, 0xdf, 0x64, 0x94, 0xe7, 0xe7, 0x89, 0x48, 0xdb, 0x6f, 0xfe, 0x79, 0xed, + 0x0f, 0xb9, 0xa2, 0xfe, 0xf6, 0xff, 0x7d, 0xb2, 0x27, 0x3b, 0xf3, 0xeb, 0xff, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x1a, 0xd0, 0x8c, 0x88, 0x10, 0x08, 0x00, 0x00, +} + +func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DeliverTxs) > 0 { + for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidatorsInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorsInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastHeightChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightChanged)) + i-- + dAtA[i] = 0x10 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusParamsInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParamsInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastHeightChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightChanged)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.AbciResponses != nil { + { + size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Software) > 0 { + i -= len(m.Software) + copy(dAtA[i:], m.Software) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Software))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Consensus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *State) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *State) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x70 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x6a + } + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- + dAtA[i] = 0x62 + } + if m.LastHeightConsensusParamsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightConsensusParamsChanged)) + i-- + dAtA[i] = 0x58 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.LastHeightValidatorsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightValidatorsChanged)) + i-- + dAtA[i] = 0x48 + } + if m.LastValidators != nil { + { + size, err := m.LastValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Validators != nil { + { + size, err := m.Validators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.NextValidators != nil { + { + size, err := m.NextValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + n11, err11 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastBlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.LastBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ABCIResponses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DeliverTxs) > 0 { + for _, e := range m.DeliverTxs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ValidatorsInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastHeightChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightChanged)) + } + return n +} + +func (m *ConsensusParamsInfo) Size() (n int) { + if m == nil { + return 0 
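encodeVarintTypes above is a base-128 varint writer: seven payload bits per byte, high bit set on every byte except the last, written at offset - sovTypes(v) because the buffer is filled backwards. The sovTypes helper (defined a little further below) computes the encoded width straight from the bit length, and sozTypes zig-zag maps signed values first so small negative numbers stay short. A standalone check of the same formulas:

package main

import (
	"fmt"
	"math/bits"
)

// putUvarint mirrors the encoding loop in encodeVarintTypes.
func putUvarint(buf []byte, v uint64) int {
	n := 0
	for v >= 1<<7 {
		buf[n] = byte(v&0x7f | 0x80)
		v >>= 7
		n++
	}
	buf[n] = byte(v)
	return n + 1
}

// sov mirrors sovTypes: varint width in bytes from the bit length.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// soz mirrors sozTypes: zig-zag first (0->0, -1->1, 1->2, -2->3, ...).
func soz(x int64) int { return sov(uint64(x<<1) ^ uint64(x>>63)) }

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n])                              // ac 02
	fmt.Println(sov(127), sov(128), sov(300), n == sov(300)) // 1 2 2 true
	fmt.Println(soz(-1))                                      // 1 byte, not 10
}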
+ } + var l int + _ = l + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightChanged)) + } + return n +} + +func (m *ABCIResponsesInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AbciResponses != nil { + l = m.AbciResponses.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *Version) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Consensus.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Software) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *State) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LastBlockHeight)) + } + l = m.LastBlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime) + n += 1 + l + sovTypes(uint64(l)) + if m.NextValidators != nil { + l = m.NextValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validators != nil { + l = m.Validators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastValidators != nil { + l = m.LastValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastHeightValidatorsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightValidatorsChanged)) + } + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightConsensusParamsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightConsensusParamsChanged)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ABCIResponses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeliverTxs = append(m.DeliverTxs, &v1beta1.ResponseDeliverTx{}) + if err := 
m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EndBlock == nil { + m.EndBlock = &v1beta1.ResponseEndBlock{} + } + if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeginBlock == nil { + m.BeginBlock = &v1beta1.ResponseBeginBlock{} + } + if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorsInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorsInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &v1beta11.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightChanged", wireType) + } + m.LastHeightChanged = 0 + for shift := uint(0); ; 
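Every Unmarshal loop in this file starts the same way: read one varint key, then split it into the field number (high bits) and the wire type (low three bits). Wire type 4 (end-group) is rejected at the top level, as is field number 0. The same split in isolation:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeKey splits a protobuf field key the way the generated Unmarshal
// methods do: fieldNum = wire >> 3, wireType = wire & 0x7.
func decodeKey(data []byte) (fieldNum int32, wireType int, err error) {
	wire, n := binary.Uvarint(data)
	if n <= 0 {
		return 0, 0, fmt.Errorf("truncated or oversized key varint")
	}
	return int32(wire >> 3), int(wire & 0x7), nil
}

func main() {
	// 0x1a is what the marshaller wrote for begin_block: field 3, wire type 2.
	f, w, _ := decodeKey([]byte{0x1a})
	fmt.Println(f, w) // 3 2
}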
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParamsInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParamsInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightChanged", wireType) + } + m.LastHeightChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AbciResponses == nil { + m.AbciResponses = &ABCIResponses{} + } + if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Consensus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Consensus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Software", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Software = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } 
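The repeated pattern postIndex := iNdEx + msglen followed by a postIndex < 0 check guards against integer overflow, not just out-of-range lengths: a hostile length prefix near MaxInt wraps the sum negative, and the later postIndex > l comparison alone would miss it. A quick demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	iNdEx := 10
	msglen := math.MaxInt // a hostile, attacker-chosen length prefix
	postIndex := iNdEx + msglen
	// Signed overflow wraps in Go, so the sum is negative: it would pass a
	// naive postIndex > len(data) check, and slicing would then panic.
	fmt.Println(postIndex < 0) // true
}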
+ + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *State) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: State: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + } + m.LastBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.LastBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NextValidators == nil { + m.NextValidators = &v1beta11.ValidatorSet{} + } + if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validators == nil { + m.Validators = &v1beta11.ValidatorSet{} + } + if err := m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastValidators == nil { + m.LastValidators = &v1beta11.ValidatorSet{} + } + if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightValidatorsChanged", wireType) + } + m.LastHeightValidatorsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightValidatorsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightConsensusParamsChanged", wireType) + } + m.LastHeightConsensusParamsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightConsensusParamsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
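The bytes fields (LastResultsHash, AppHash) are decoded with append(m.X[:0], ...), which reuses the slice's existing backing array when the same message is unmarshaled into repeatedly; the nil check right after normalizes a present-but-empty field to []byte{} rather than nil. The same idiom in isolation:

package main

import "fmt"

// setBytes mirrors the generated idiom: appending into dst[:0] reuses any
// existing backing array instead of allocating, and the nil check makes a
// present-but-empty field decode to []byte{} rather than nil.
func setBytes(dst, src []byte) []byte {
	dst = append(dst[:0], src...)
	if dst == nil {
		dst = []byte{}
	}
	return dst
}

func main() {
	var hash []byte
	hash = setBytes(hash, []byte{0xde, 0xad})
	fmt.Printf("% x\n", hash) // de ad
	hash = setBytes(hash, nil)          // an empty payload on the wire
	fmt.Println(hash == nil, len(hash)) // false 0
}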
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/state/v1beta2/types.pb.go b/api/cometbft/state/v1beta2/types.pb.go new file mode 100644 index 00000000000..af6b09a791f --- /dev/null +++ b/api/cometbft/state/v1beta2/types.pb.go @@ -0,0 +1,1738 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/state/v1beta2/types.proto + +package v1beta2 + +import ( + fmt "fmt" + v1beta2 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta2" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/state/v1beta1" + v1beta11 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + v1beta21 "github.com/cometbft/cometbft/api/cometbft/types/v1beta2" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. 
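The skipTypes function above is what lets unknown field numbers fall into the default: branches without breaking decoding: given a wire type, it knows how to step over the field's payload (including nested deprecated groups, via the depth counter). This is the mechanism behind protobuf's forward compatibility, since data written by a newer schema still parses. The dispatch, restated compactly:

package main

import "fmt"

// payloadHint restates the wire-type dispatch in skipTypes: each wire type
// implies how to find the end of the field's payload.
func payloadHint(wireType int) string {
	switch wireType {
	case 0:
		return "varint: scan until a byte < 0x80"
	case 1:
		return "fixed64: skip 8 bytes"
	case 2:
		return "length-delimited: read a varint length, skip that many bytes"
	case 3, 4:
		return "group start/end (deprecated): tracked with a depth counter"
	case 5:
		return "fixed32: skip 4 bytes"
	default:
		return "illegal wire type"
	}
}

func main() {
	for wt := 0; wt <= 5; wt++ {
		fmt.Printf("wire type %d: %s\n", wt, payloadHint(wt))
	}
}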
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. +type ABCIResponses struct { + DeliverTxs []*v1beta2.ResponseDeliverTx `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + EndBlock *v1beta2.ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` + BeginBlock *v1beta2.ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` +} + +func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } +func (m *ABCIResponses) String() string { return proto.CompactTextString(m) } +func (*ABCIResponses) ProtoMessage() {} +func (*ABCIResponses) Descriptor() ([]byte, []int) { + return fileDescriptor_f65ea0fc5c80e5be, []int{0} +} +func (m *ABCIResponses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponses) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponses.Merge(m, src) +} +func (m *ABCIResponses) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponses) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponses.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo + +func (m *ABCIResponses) GetDeliverTxs() []*v1beta2.ResponseDeliverTx { + if m != nil { + return m.DeliverTxs + } + return nil +} + +func (m *ABCIResponses) GetEndBlock() *v1beta2.ResponseEndBlock { + if m != nil { + return m.EndBlock + } + return nil +} + +func (m *ABCIResponses) GetBeginBlock() *v1beta2.ResponseBeginBlock { + if m != nil { + return m.BeginBlock + } + return nil +} + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +type ConsensusParamsInfo struct { + ConsensusParams v1beta21.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` +} + +func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } +func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } +func (*ConsensusParamsInfo) ProtoMessage() {} +func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f65ea0fc5c80e5be, []int{1} +} +func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParamsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParamsInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
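All the generated getters follow the if m != nil shape seen above, which makes chained access safe even on a nil message; non-nullable fields (see GetConsensusParams just below) return a zero value instead of nil for the same reason. A stand-in sketch of the pattern (the types here are illustrative, not the generated ones):

package main

import "fmt"

type EndBlock struct{ Events []string }

type Responses struct{ EndBlock *EndBlock }

// Nil-safe getters: calling a method on a nil pointer receiver is legal in
// Go, so the m != nil guard turns nil messages into nil results.
func (m *Responses) GetEndBlock() *EndBlock {
	if m != nil {
		return m.EndBlock
	}
	return nil
}

func (m *EndBlock) GetEvents() []string {
	if m != nil {
		return m.Events
	}
	return nil
}

func main() {
	var r *Responses // nil message
	fmt.Println(r.GetEndBlock().GetEvents() == nil) // true, no panic
}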
+func (m *ConsensusParamsInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParamsInfo.Merge(m, src) +} +func (m *ConsensusParamsInfo) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParamsInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo + +func (m *ConsensusParamsInfo) GetConsensusParams() v1beta21.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return v1beta21.ConsensusParams{} +} + +func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { + if m != nil { + return m.LastHeightChanged + } + return 0 +} + +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. +type ABCIResponsesInfo struct { + AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} } +func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) } +func (*ABCIResponsesInfo) ProtoMessage() {} +func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f65ea0fc5c80e5be, []int{2} +} +func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponsesInfo.Merge(m, src) +} +func (m *ABCIResponsesInfo) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponsesInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo + +func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses { + if m != nil { + return m.AbciResponses + } + return nil +} + +func (m *ABCIResponsesInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// State represents the state of the blockchain. +type State struct { + Version v1beta1.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + // immutable + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID v1beta11.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. 
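LastBlockTime is declared as a plain time.Time thanks to the stdtime option in its field tag, but on the wire it is still a google.protobuf.Timestamp: field 1 carries the seconds as a varint, field 2 the nanoseconds. A sketch of that wire shape (gogoproto's StdTimeMarshalTo additionally validates ranges and handles cases this sketch skips):

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

// encodeTimestamp shows the Timestamp wire shape behind LastBlockTime:
// key 0x08 (field 1, varint) + seconds, key 0x10 (field 2, varint) + nanos.
func encodeTimestamp(t time.Time) []byte {
	buf := make([]byte, 0, 24)
	buf = append(buf, 0x08)
	buf = binary.AppendUvarint(buf, uint64(t.Unix()))
	if ns := t.Nanosecond(); ns != 0 {
		buf = append(buf, 0x10)
		buf = binary.AppendUvarint(buf, uint64(ns))
	}
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeTimestamp(time.Unix(1_700_000_000, 0).UTC()))
}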
+ NextValidators *v1beta11.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *v1beta11.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *v1beta11.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. + ConsensusParams v1beta21.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + // Merkle root of the results from executing prev block + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // the latest AppHash we've received from calling abci.Commit() + AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *State) Reset() { *m = State{} } +func (m *State) String() string { return proto.CompactTextString(m) } +func (*State) ProtoMessage() {} +func (*State) Descriptor() ([]byte, []int) { + return fileDescriptor_f65ea0fc5c80e5be, []int{3} +} +func (m *State) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_State.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *State) XXX_Merge(src proto.Message) { + xxx_messageInfo_State.Merge(m, src) +} +func (m *State) XXX_Size() int { + return m.Size() +} +func (m *State) XXX_DiscardUnknown() { + xxx_messageInfo_State.DiscardUnknown(m) +} + +var xxx_messageInfo_State proto.InternalMessageInfo + +func (m *State) GetVersion() v1beta1.Version { + if m != nil { + return m.Version + } + return v1beta1.Version{} +} + +func (m *State) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *State) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +func (m *State) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight + } + return 0 +} + +func (m *State) GetLastBlockID() v1beta11.BlockID { + if m != nil { + return m.LastBlockID + } + return v1beta11.BlockID{} +} + +func (m *State) GetLastBlockTime() time.Time { + if m != nil { + return m.LastBlockTime + } + return time.Time{} +} + +func (m *State) GetNextValidators() *v1beta11.ValidatorSet { + if m != nil { + return m.NextValidators + } + return nil +} + +func (m *State) GetValidators() *v1beta11.ValidatorSet { + if m != nil { + return m.Validators + } + return nil +} + +func (m *State) GetLastValidators() *v1beta11.ValidatorSet { + if m != nil { + return m.LastValidators + } + return nil +} + +func (m *State) GetLastHeightValidatorsChanged() int64 { + if m != nil { + return m.LastHeightValidatorsChanged + } + return 0 +} + +func (m *State) GetConsensusParams() 
v1beta21.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return v1beta21.ConsensusParams{} +} + +func (m *State) GetLastHeightConsensusParamsChanged() int64 { + if m != nil { + return m.LastHeightConsensusParamsChanged + } + return 0 +} + +func (m *State) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *State) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func init() { + proto.RegisterType((*ABCIResponses)(nil), "cometbft.state.v1beta2.ABCIResponses") + proto.RegisterType((*ConsensusParamsInfo)(nil), "cometbft.state.v1beta2.ConsensusParamsInfo") + proto.RegisterType((*ABCIResponsesInfo)(nil), "cometbft.state.v1beta2.ABCIResponsesInfo") + proto.RegisterType((*State)(nil), "cometbft.state.v1beta2.State") +} + +func init() { + proto.RegisterFile("cometbft/state/v1beta2/types.proto", fileDescriptor_f65ea0fc5c80e5be) +} + +var fileDescriptor_f65ea0fc5c80e5be = []byte{ + // 749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x8d, 0x49, 0xdb, 0xa4, 0xeb, 0x26, 0xa1, 0x2e, 0xaa, 0x4c, 0x91, 0x92, 0x10, 0xda, 0x12, + 0x38, 0xd8, 0x4a, 0x38, 0x70, 0x44, 0x38, 0x41, 0xaa, 0x51, 0x41, 0xe0, 0x56, 0x55, 0xc5, 0xc5, + 0x5a, 0xc7, 0x5b, 0xdb, 0x22, 0xb1, 0xad, 0xec, 0x26, 0x6a, 0x3f, 0x80, 0x7b, 0xff, 0x80, 0xdf, + 0xe9, 0xb1, 0x47, 0x4e, 0x05, 0xa5, 0x57, 0x3e, 0x02, 0xed, 0xae, 0xd7, 0x71, 0x22, 0x57, 0x55, + 0x25, 0x6e, 0xeb, 0x99, 0x37, 0x6f, 0xdf, 0xcc, 0xce, 0x93, 0x41, 0x6b, 0x10, 0x8d, 0x10, 0x71, + 0xce, 0x88, 0x8e, 0x09, 0x24, 0x48, 0x9f, 0x76, 0x1c, 0x44, 0x60, 0x57, 0x27, 0x17, 0x31, 0xc2, + 0x5a, 0x3c, 0x8e, 0x48, 0xa4, 0x6c, 0x0b, 0x8c, 0xc6, 0x30, 0x5a, 0x82, 0xd9, 0x79, 0x9e, 0xd6, + 0x42, 0x67, 0x10, 0xe4, 0x95, 0xee, 0xe4, 0xd3, 0x77, 0xee, 0xc0, 0xb0, 0x68, 0x2e, 0x66, 0xff, + 0x0e, 0xcc, 0x14, 0x0e, 0x03, 0x17, 0x92, 0x68, 0x9c, 0xe0, 0x5e, 0xe4, 0xe2, 0xba, 0x7a, 0x0c, + 0xc7, 0x70, 0x24, 0xc8, 0x9e, 0x78, 0x91, 0x17, 0xb1, 0xa3, 0x4e, 0x4f, 0x49, 0xb4, 0xe1, 0x45, + 0x91, 0x37, 0x44, 0x3a, 0xfb, 0x72, 0x26, 0x67, 0x3a, 0x09, 0x46, 0x08, 0x13, 0x38, 0x8a, 0x39, + 0xa0, 0xf5, 0x57, 0x02, 0x95, 0xf7, 0x46, 0xcf, 0xb4, 0x10, 0x8e, 0xa3, 0x10, 0x23, 0xac, 0x98, + 0x40, 0x76, 0xd1, 0x30, 0x98, 0xa2, 0xb1, 0x4d, 0xce, 0xb1, 0x2a, 0x35, 0x8b, 0x6d, 0xb9, 0xdb, + 0xd6, 0xd2, 0x71, 0xd1, 0xb1, 0x88, 0x69, 0x69, 0xa2, 0xac, 0xcf, 0x2b, 0x8e, 0xcf, 0x2d, 0xe0, + 0x8a, 0x23, 0x56, 0xfa, 0x60, 0x1d, 0x85, 0xae, 0xed, 0x0c, 0xa3, 0xc1, 0x77, 0xf5, 0x51, 0x53, + 0x6a, 0xcb, 0xdd, 0x97, 0xf7, 0x10, 0x7d, 0x08, 0x5d, 0x83, 0xc2, 0xad, 0x32, 0x4a, 0x4e, 0xca, + 0x47, 0x20, 0x3b, 0xc8, 0x0b, 0xc2, 0x84, 0xa7, 0xc8, 0x78, 0x5e, 0xdd, 0xc3, 0x63, 0xd0, 0x0a, + 0xce, 0x04, 0x9c, 0xf4, 0xdc, 0xfa, 0x29, 0x81, 0xad, 0x1e, 0xcd, 0x87, 0x78, 0x82, 0xbf, 0xb0, + 0xf9, 0x99, 0xe1, 0x59, 0xa4, 0x9c, 0x82, 0xc7, 0x03, 0x11, 0xb6, 0xf9, 0x5c, 0x55, 0x69, 0x59, + 0x30, 0x7f, 0x3b, 0x71, 0xd3, 0x12, 0x8d, 0xb1, 0x72, 0x75, 0xd3, 0x28, 0x58, 0xb5, 0xc1, 0x62, + 0x58, 0xd1, 0xc0, 0xd6, 0x10, 0x62, 0x62, 0xfb, 0x28, 0xf0, 0x7c, 0x62, 0x0f, 0x7c, 0x18, 0x7a, + 0xc8, 0x65, 0xd3, 0x28, 0x5a, 0x9b, 0x34, 0x75, 0xc0, 0x32, 0x3d, 0x9e, 0x68, 0x5d, 0x80, 0xcd, + 0x85, 0xf7, 0x60, 0xf2, 0x0e, 0x41, 0x95, 0x76, 0x69, 0x8f, 0x45, 0x34, 0x11, 0xb7, 0xa7, 0xe5, + 0x6f, 0xb1, 0xb6, 0x40, 0x61, 0x55, 0x68, 0xf1, 0xfc, 0x85, 0xb7, 0xc1, 0x1a, 0x57, 0x93, 0xa8, + 0x48, 0xbe, 0x5a, 0x3f, 0x4a, 0x60, 0xf5, 0x88, 0xd2, 0x28, 0xef, 
0x40, 0x69, 0x8a, 0xc6, 0x38, + 0x88, 0xc2, 0xe4, 0xa2, 0x46, 0xfe, 0x45, 0x1d, 0xed, 0x84, 0xc3, 0x92, 0xee, 0x45, 0x95, 0xb2, + 0x0f, 0xca, 0x03, 0x1f, 0x06, 0xa1, 0x1d, 0xf0, 0x56, 0xd7, 0x0d, 0x79, 0x76, 0xd3, 0x28, 0xf5, + 0x68, 0xcc, 0xec, 0x5b, 0x25, 0x96, 0x34, 0x5d, 0x65, 0x0f, 0x54, 0x83, 0x30, 0x20, 0x01, 0x1c, + 0x26, 0x03, 0x52, 0xab, 0x4c, 0x52, 0x25, 0x89, 0xf2, 0xd9, 0x28, 0xaf, 0x01, 0x9b, 0x14, 0xdf, + 0x00, 0x81, 0x2c, 0x32, 0x64, 0x8d, 0x26, 0xd8, 0xe3, 0x26, 0xd8, 0x53, 0x50, 0xc9, 0x60, 0x03, + 0x57, 0x5d, 0x59, 0xee, 0x20, 0xfb, 0x8e, 0x1d, 0x8d, 0xd5, 0x9a, 0x7d, 0x63, 0x8b, 0x76, 0x30, + 0xbb, 0x69, 0xc8, 0x87, 0x82, 0xd0, 0xec, 0x5b, 0x72, 0xca, 0x6e, 0xba, 0xca, 0x21, 0xa8, 0x65, + 0x98, 0xa9, 0x93, 0xd4, 0x55, 0xc6, 0xbd, 0xa3, 0x71, 0x9b, 0x69, 0xc2, 0x66, 0xda, 0xb1, 0xb0, + 0x99, 0x51, 0xa6, 0xb4, 0x97, 0xbf, 0x1b, 0x92, 0x55, 0x49, 0xb9, 0x68, 0x56, 0xf9, 0x04, 0x6a, + 0x21, 0x3a, 0x27, 0x76, 0xea, 0x76, 0xac, 0xae, 0x31, 0xb6, 0xdd, 0xbb, 0x94, 0x9e, 0x08, 0xe4, + 0x11, 0x22, 0x56, 0x95, 0x16, 0xa7, 0x11, 0xea, 0x35, 0x90, 0x61, 0x2a, 0x3d, 0x80, 0x29, 0x53, + 0x47, 0x45, 0xb1, 0x16, 0x33, 0x54, 0xe5, 0x87, 0x88, 0xa2, 0xc5, 0x19, 0x51, 0x3d, 0x50, 0xcf, + 0x2e, 0xff, 0x9c, 0x35, 0xf5, 0xc1, 0x3a, 0x7b, 0xc4, 0x67, 0x73, 0x1f, 0xcc, 0xab, 0x13, 0x47, + 0xe4, 0x7a, 0x13, 0xfc, 0x17, 0x6f, 0x7e, 0x06, 0xbb, 0x0b, 0xde, 0x5c, 0xba, 0x25, 0x15, 0x29, + 0x33, 0x91, 0xcd, 0x8c, 0x59, 0x17, 0x89, 0x84, 0x52, 0xb1, 0xa6, 0x63, 0x84, 0x27, 0x43, 0x82, + 0x6d, 0x1f, 0x62, 0x5f, 0xdd, 0x68, 0x4a, 0xed, 0x0d, 0xbe, 0xa6, 0x16, 0x8f, 0x1f, 0x40, 0xec, + 0x2b, 0x4f, 0x41, 0x19, 0xc6, 0x31, 0x87, 0x54, 0x18, 0xa4, 0x04, 0xe3, 0x98, 0xa6, 0x8c, 0xaf, + 0x57, 0xb3, 0xba, 0x74, 0x3d, 0xab, 0x4b, 0x7f, 0x66, 0x75, 0xe9, 0xf2, 0xb6, 0x5e, 0xb8, 0xbe, + 0xad, 0x17, 0x7e, 0xdd, 0xd6, 0x0b, 0xdf, 0xde, 0x7a, 0x01, 0xf1, 0x27, 0x0e, 0x6d, 0x5b, 0x4f, + 0x7f, 0x0a, 0xf3, 0x1f, 0x56, 0x1c, 0xe8, 0xf9, 0x7f, 0x3e, 0x67, 0x8d, 0x6d, 0xe6, 0x9b, 0x7f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x60, 0xa6, 0x4a, 0xde, 0x1a, 0x07, 0x00, 0x00, +} + +func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DeliverTxs) > 0 { + for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ConsensusParamsInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ConsensusParamsInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastHeightChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightChanged)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.AbciResponses != nil { + { + size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *State) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *State) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x70 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x6a + } + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- + dAtA[i] = 0x62 + } + if m.LastHeightConsensusParamsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightConsensusParamsChanged)) + i-- + dAtA[i] = 0x58 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.LastHeightValidatorsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightValidatorsChanged)) + i-- + dAtA[i] = 0x48 + } + if m.LastValidators != nil { + { + size, err := m.LastValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Validators != nil { + { + size, err := m.Validators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.NextValidators != nil { + { + size, err := m.NextValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + n9, err9 := 
github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastBlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.LastBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ABCIResponses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DeliverTxs) > 0 { + for _, e := range m.DeliverTxs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ConsensusParamsInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightChanged)) + } + return n +} + +func (m *ABCIResponsesInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AbciResponses != nil { + l = m.AbciResponses.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *State) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LastBlockHeight)) + } + l = m.LastBlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime) + n += 1 + l + sovTypes(uint64(l)) + if m.NextValidators != nil { + l = m.NextValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validators != nil { + l = m.Validators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastValidators != nil { + l = m.LastValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastHeightValidatorsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightValidatorsChanged)) + } + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightConsensusParamsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightConsensusParamsChanged)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x 
<< 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ABCIResponses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeliverTxs = append(m.DeliverTxs, &v1beta2.ResponseDeliverTx{}) + if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EndBlock == nil { + m.EndBlock = &v1beta2.ResponseEndBlock{} + } + if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeginBlock == nil { + m.BeginBlock = &v1beta2.ResponseBeginBlock{} + } + if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParamsInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParamsInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightChanged", wireType) + } + m.LastHeightChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AbciResponses == nil { + m.AbciResponses = &ABCIResponses{} + } + if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *State) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: State: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + } + m.LastBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.LastBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NextValidators == nil { + m.NextValidators = &v1beta11.ValidatorSet{} + } + if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validators == nil { + m.Validators = &v1beta11.ValidatorSet{} + } + if err := m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastValidators == nil { + m.LastValidators = &v1beta11.ValidatorSet{} + } + if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightValidatorsChanged", wireType) + } + m.LastHeightValidatorsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightValidatorsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightConsensusParamsChanged", wireType) + } + m.LastHeightConsensusParamsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightConsensusParamsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/state/v1beta3/types.pb.go b/api/cometbft/state/v1beta3/types.pb.go new file mode 100644 index 00000000000..aa3e36e3b9c --- /dev/null +++ b/api/cometbft/state/v1beta3/types.pb.go @@ -0,0 +1,2303 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/state/v1beta3/types.proto + +package v1beta3 + +import ( + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1" + v1beta2 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta2" + v1beta3 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta3" + v1beta11 "github.com/cometbft/cometbft/api/cometbft/state/v1beta1" + v1 "github.com/cometbft/cometbft/api/cometbft/types/v1" + v1beta12 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// LegacyABCIResponses retains the responses +// of the legacy ABCI calls during block processing. +// Note that ResponseDeliverTx is renamed to ExecTxResult, but they are semantically the same. +// Kept for backwards compatibility for versions prior to v0.38. +type LegacyABCIResponses struct { + DeliverTxs []*v1beta3.ExecTxResult `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + EndBlock *ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` + BeginBlock *ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` +} + +func (m *LegacyABCIResponses) Reset() { *m = LegacyABCIResponses{} } +func (m *LegacyABCIResponses) String() string { return proto.CompactTextString(m) } +func (*LegacyABCIResponses) ProtoMessage() {} +func (*LegacyABCIResponses) Descriptor() ([]byte, []int) { + return fileDescriptor_941a8825417feee0, []int{0} +} +func (m *LegacyABCIResponses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LegacyABCIResponses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LegacyABCIResponses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LegacyABCIResponses) XXX_Merge(src proto.Message) { + xxx_messageInfo_LegacyABCIResponses.Merge(m, src) +} +func (m *LegacyABCIResponses) XXX_Size() int { + return m.Size() +} +func (m *LegacyABCIResponses) XXX_DiscardUnknown() { + xxx_messageInfo_LegacyABCIResponses.DiscardUnknown(m) +} + +var xxx_messageInfo_LegacyABCIResponses proto.InternalMessageInfo + +func (m *LegacyABCIResponses) GetDeliverTxs() []*v1beta3.ExecTxResult { + if m != nil { + return m.DeliverTxs + } + return nil +} + +func (m *LegacyABCIResponses) GetEndBlock() *ResponseEndBlock { + if m != nil { + return m.EndBlock + } + return nil +} + +func (m *LegacyABCIResponses) GetBeginBlock() *ResponseBeginBlock { + if m != nil { + return m.BeginBlock + } + return nil +} + +// ResponseBeginBlock is kept for backward compatibility for versions prior to v0.38, +// as it was then defined in the cometbft.abci packages.
+type ResponseBeginBlock struct { + Events []v1beta2.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } +func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseBeginBlock) ProtoMessage() {} +func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_941a8825417feee0, []int{1} +} +func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBeginBlock.Merge(m, src) +} +func (m *ResponseBeginBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo + +func (m *ResponseBeginBlock) GetEvents() []v1beta2.Event { + if m != nil { + return m.Events + } + return nil +} + +// ResponseEndBlock is kept for backward compatibility for versions prior to v0.38; +// its earlier revisions were defined in the cometbft.abci packages. +// It uses an updated definition for the consensus_param_updates field to keep the +// generated data types interoperable with the latest protocol. +type ResponseEndBlock struct { + ValidatorUpdates []v1beta1.ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *v1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []v1beta2.Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } +func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseEndBlock) ProtoMessage() {} +func (*ResponseEndBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_941a8825417feee0, []int{2} +} +func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEndBlock.Merge(m, src) +} +func (m *ResponseEndBlock) XXX_Size() int { + return m.Size() +} +func (m *ResponseEndBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo + +func (m *ResponseEndBlock) GetValidatorUpdates() []v1beta1.ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseEndBlock) GetConsensusParamUpdates() *v1.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseEndBlock) GetEvents() []v1beta2.Event { + if m != nil { + return m.Events + } + return nil +} + +// 
ConsensusParamsInfo represents the latest consensus params and the last height at which they changed. +type ConsensusParamsInfo struct { + ConsensusParams v1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` +} + +func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } +func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } +func (*ConsensusParamsInfo) ProtoMessage() {} +func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_941a8825417feee0, []int{3} +} +func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParamsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParamsInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParamsInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParamsInfo.Merge(m, src) +} +func (m *ConsensusParamsInfo) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParamsInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo + +func (m *ConsensusParamsInfo) GetConsensusParams() v1.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return v1.ConsensusParams{} +} + +func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { + if m != nil { + return m.LastHeightChanged + } + return 0 +} + +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. +type ABCIResponsesInfo struct { + // Retains the responses of the legacy ABCI calls during block processing.
+ LegacyAbciResponses *LegacyABCIResponses `protobuf:"bytes,1,opt,name=legacy_abci_responses,json=legacyAbciResponses,proto3" json:"legacy_abci_responses,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + ResponseFinalizeBlock *v1beta3.ResponseFinalizeBlock `protobuf:"bytes,3,opt,name=response_finalize_block,json=responseFinalizeBlock,proto3" json:"response_finalize_block,omitempty"` +} + +func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} } +func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) } +func (*ABCIResponsesInfo) ProtoMessage() {} +func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_941a8825417feee0, []int{4} +} +func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponsesInfo.Merge(m, src) +} +func (m *ABCIResponsesInfo) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponsesInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo + +func (m *ABCIResponsesInfo) GetLegacyAbciResponses() *LegacyABCIResponses { + if m != nil { + return m.LegacyAbciResponses + } + return nil +} + +func (m *ABCIResponsesInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ABCIResponsesInfo) GetResponseFinalizeBlock() *v1beta3.ResponseFinalizeBlock { + if m != nil { + return m.ResponseFinalizeBlock + } + return nil +} + +// State represents the state of the blockchain. +type State struct { + Version v1beta11.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + // immutable + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID v1beta12.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. 
+ NextValidators *v1beta12.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *v1beta12.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *v1beta12.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. + ConsensusParams v1.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + // Merkle root of the results from executing prev block + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // the latest AppHash we've received from calling abci.Commit() + AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *State) Reset() { *m = State{} } +func (m *State) String() string { return proto.CompactTextString(m) } +func (*State) ProtoMessage() {} +func (*State) Descriptor() ([]byte, []int) { + return fileDescriptor_941a8825417feee0, []int{5} +} +func (m *State) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_State.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *State) XXX_Merge(src proto.Message) { + xxx_messageInfo_State.Merge(m, src) +} +func (m *State) XXX_Size() int { + return m.Size() +} +func (m *State) XXX_DiscardUnknown() { + xxx_messageInfo_State.DiscardUnknown(m) +} + +var xxx_messageInfo_State proto.InternalMessageInfo + +func (m *State) GetVersion() v1beta11.Version { + if m != nil { + return m.Version + } + return v1beta11.Version{} +} + +func (m *State) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *State) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +func (m *State) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight + } + return 0 +} + +func (m *State) GetLastBlockID() v1beta12.BlockID { + if m != nil { + return m.LastBlockID + } + return v1beta12.BlockID{} +} + +func (m *State) GetLastBlockTime() time.Time { + if m != nil { + return m.LastBlockTime + } + return time.Time{} +} + +func (m *State) GetNextValidators() *v1beta12.ValidatorSet { + if m != nil { + return m.NextValidators + } + return nil +} + +func (m *State) GetValidators() *v1beta12.ValidatorSet { + if m != nil { + return m.Validators + } + return nil +} + +func (m *State) GetLastValidators() *v1beta12.ValidatorSet { + if m != nil { + return m.LastValidators + } + return nil +} + +func (m *State) GetLastHeightValidatorsChanged() int64 { + if m != nil { + return m.LastHeightValidatorsChanged + } + return 0 +} + +func (m *State) GetConsensusParams() 
v1.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return v1.ConsensusParams{} +} + +func (m *State) GetLastHeightConsensusParamsChanged() int64 { + if m != nil { + return m.LastHeightConsensusParamsChanged + } + return 0 +} + +func (m *State) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *State) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func init() { + proto.RegisterType((*LegacyABCIResponses)(nil), "cometbft.state.v1beta3.LegacyABCIResponses") + proto.RegisterType((*ResponseBeginBlock)(nil), "cometbft.state.v1beta3.ResponseBeginBlock") + proto.RegisterType((*ResponseEndBlock)(nil), "cometbft.state.v1beta3.ResponseEndBlock") + proto.RegisterType((*ConsensusParamsInfo)(nil), "cometbft.state.v1beta3.ConsensusParamsInfo") + proto.RegisterType((*ABCIResponsesInfo)(nil), "cometbft.state.v1beta3.ABCIResponsesInfo") + proto.RegisterType((*State)(nil), "cometbft.state.v1beta3.State") +} + +func init() { + proto.RegisterFile("cometbft/state/v1beta3/types.proto", fileDescriptor_941a8825417feee0) +} + +var fileDescriptor_941a8825417feee0 = []byte{ + // 921 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0xdb, 0x46, + 0x10, 0x35, 0xa3, 0xc4, 0x92, 0x57, 0xf1, 0xd7, 0xaa, 0x4e, 0x54, 0xb7, 0x90, 0x5c, 0x35, 0x35, + 0x8c, 0x34, 0x20, 0x21, 0xe7, 0xd0, 0x63, 0x11, 0xca, 0x2e, 0x22, 0xd4, 0x0d, 0x5a, 0xda, 0x0d, + 0xda, 0x5c, 0x88, 0x25, 0xb9, 0xa6, 0x16, 0xa5, 0x48, 0x82, 0xbb, 0x12, 0xe4, 0x9e, 0xfa, 0x13, + 0x72, 0x6d, 0x7f, 0x51, 0x8e, 0x39, 0xf6, 0xe4, 0x16, 0xf2, 0xad, 0x97, 0x02, 0xfd, 0x05, 0xc5, + 0x7e, 0x51, 0xa4, 0x42, 0x21, 0x09, 0xda, 0xdb, 0x6a, 0xf7, 0xcd, 0x9b, 0xf7, 0x66, 0x77, 0x46, + 0x04, 0x3d, 0x3f, 0x19, 0x63, 0xe6, 0x5d, 0x32, 0x8b, 0x32, 0xc4, 0xb0, 0x35, 0xed, 0x7b, 0x98, + 0xa1, 0xc7, 0x16, 0xbb, 0x4a, 0x31, 0x35, 0xd3, 0x2c, 0x61, 0x09, 0xbc, 0xa7, 0x31, 0xa6, 0xc0, + 0x98, 0x0a, 0xb3, 0x5f, 0x1d, 0xdb, 0x2f, 0xc6, 0xee, 0x7f, 0x92, 0x63, 0x90, 0xe7, 0x93, 0x77, + 0x87, 0x1c, 0xbf, 0x1d, 0x52, 0x12, 0x59, 0x10, 0x23, 0x76, 0x2b, 0x33, 0x1d, 0xae, 0xc0, 0x4c, + 0x51, 0x44, 0x02, 0xc4, 0x92, 0x4c, 0xe1, 0x3a, 0x6f, 0xe0, 0xac, 0x14, 0x65, 0x68, 0xac, 0x79, + 0x3e, 0x08, 0x93, 0x30, 0x11, 0x4b, 0x8b, 0xaf, 0xd4, 0x6e, 0x37, 0x4c, 0x92, 0x30, 0xc2, 0x96, + 0xf8, 0xe5, 0x4d, 0x2e, 0x2d, 0x46, 0xc6, 0x98, 0x32, 0x34, 0x4e, 0x25, 0xa0, 0xf7, 0xb7, 0x01, + 0x5a, 0x67, 0x38, 0x44, 0xfe, 0xd5, 0x13, 0x7b, 0x30, 0x74, 0x30, 0x4d, 0x93, 0x98, 0x62, 0x0a, + 0x4f, 0x40, 0x33, 0xc0, 0x11, 0x99, 0xe2, 0xcc, 0x65, 0x33, 0xda, 0x36, 0x0e, 0x6a, 0x47, 0xcd, + 0xe3, 0x4f, 0xcd, 0xbc, 0xea, 0xdc, 0xb3, 0x2e, 0xba, 0x79, 0x3a, 0xc3, 0xfe, 0xc5, 0xcc, 0xc1, + 0x74, 0x12, 0x31, 0x07, 0xa8, 0xb8, 0x8b, 0x19, 0x85, 0xa7, 0x60, 0x03, 0xc7, 0x81, 0xeb, 0x45, + 0x89, 0xff, 0x53, 0xfb, 0xd6, 0x81, 0x71, 0xd4, 0x3c, 0x3e, 0x32, 0xab, 0x6f, 0xce, 0xd4, 0xb9, + 0x4f, 0xe3, 0xc0, 0xe6, 0x78, 0xa7, 0x81, 0xd5, 0x0a, 0x7e, 0x0d, 0x9a, 0x1e, 0x0e, 0x49, 0xac, + 0x88, 0x6a, 0x82, 0xe8, 0xe1, 0xdb, 0x88, 0x6c, 0x1e, 0x22, 0xa9, 0x80, 0x97, 0xaf, 0x7b, 0x01, + 0x80, 0x6f, 0x22, 0xe0, 0x33, 0xb0, 0x8e, 0xa7, 0x38, 0x66, 0xda, 0xea, 0xc7, 0x95, 0x56, 0x8f, + 0xcd, 0x53, 0x0e, 0xb2, 0xdb, 0xaf, 0xae, 0xbb, 0x6b, 0x7f, 0x5d, 0x77, 0x77, 0x64, 0xcc, 0xa3, + 0x64, 0x4c, 0x18, 0x1e, 0xa7, 0xec, 0xca, 0x51, 0x2c, 0xbd, 0x5f, 0x6f, 0x81, 0x9d, 0x65, 0x47, + 0xf0, 0x47, 0xb0, 0x9b, 0x5f, 0xab, 0x3b, 0x49, 0x03, 0xc4, 0xb0, 0xce, 0x77, 
0x58, 0x99, 0xaf, + 0x6f, 0x3e, 0xd7, 0xf8, 0xef, 0x05, 0xdc, 0xbe, 0xcd, 0x33, 0x3b, 0x3b, 0xd3, 0xf2, 0x36, 0x85, + 0x2f, 0xc0, 0x7d, 0x9f, 0xe7, 0x8a, 0xe9, 0x84, 0xba, 0xe2, 0x61, 0xe4, 0x09, 0x64, 0xdd, 0x7b, + 0x8b, 0x04, 0xf2, 0xf9, 0x4d, 0xfb, 0xe6, 0x40, 0x47, 0x7c, 0x2b, 0x5e, 0x92, 0xb3, 0xe7, 0x97, + 0x36, 0x34, 0xf7, 0xa2, 0x36, 0xb5, 0xff, 0xa5, 0x36, 0xbf, 0x19, 0xa0, 0xb5, 0x94, 0x7a, 0x18, + 0x5f, 0x26, 0xf0, 0x1c, 0xec, 0x2c, 0x79, 0xe0, 0xd5, 0x79, 0x47, 0xf1, 0xaa, 0x32, 0xdb, 0x65, + 0x0b, 0x14, 0x9a, 0xa0, 0x15, 0x21, 0xca, 0xdc, 0x11, 0x26, 0xe1, 0x88, 0xb9, 0xfe, 0x08, 0xc5, + 0x21, 0x0e, 0x44, 0x51, 0x6a, 0xce, 0x2e, 0x3f, 0x7a, 0x2a, 0x4e, 0x06, 0xf2, 0xa0, 0xf7, 0x8f, + 0x01, 0x76, 0x4b, 0xad, 0x20, 0xa4, 0xb9, 0x60, 0x2f, 0x12, 0x5d, 0xe2, 0x72, 0xc7, 0x6e, 0xa6, + 0x0f, 0x95, 0xbe, 0xcf, 0x57, 0xbd, 0xc5, 0x8a, 0xd6, 0x72, 0x5a, 0x92, 0xe9, 0x89, 0xe7, 0x93, + 0x45, 0xbf, 0xdd, 0x03, 0xeb, 0x52, 0xa1, 0x52, 0xa6, 0x7e, 0xc1, 0x00, 0xdc, 0xd7, 0xc9, 0xdc, + 0x4b, 0x12, 0xa3, 0x88, 0xfc, 0x8c, 0x4b, 0x6d, 0xf0, 0x68, 0x45, 0x4f, 0x6a, 0xea, 0xaf, 0x54, + 0x90, 0x6c, 0x84, 0xbd, 0xac, 0x6a, 0xbb, 0xf7, 0x4b, 0x1d, 0xdc, 0x39, 0xe7, 0xc2, 0xe1, 0x97, + 0xa0, 0x3e, 0xc5, 0x19, 0x25, 0x49, 0xac, 0xac, 0x75, 0xab, 0xad, 0xf5, 0xcd, 0xe7, 0x12, 0xa6, + 0xea, 0xae, 0xa3, 0xe0, 0x21, 0x68, 0xf8, 0x23, 0x44, 0x62, 0x97, 0xc8, 0x22, 0x6f, 0xd8, 0xcd, + 0xf9, 0x75, 0xb7, 0x3e, 0xe0, 0x7b, 0xc3, 0x13, 0xa7, 0x2e, 0x0e, 0x87, 0x01, 0xfc, 0x0c, 0x6c, + 0x91, 0x98, 0x30, 0x82, 0x22, 0x75, 0x35, 0xed, 0x2d, 0x61, 0x7c, 0x53, 0xed, 0xca, 0x5b, 0x81, + 0x0f, 0x81, 0xb8, 0x23, 0x69, 0x59, 0x23, 0x6b, 0x02, 0xb9, 0xcd, 0x0f, 0x84, 0x7e, 0x85, 0xfd, + 0x01, 0x6c, 0x16, 0xb0, 0x24, 0x68, 0xdf, 0x5e, 0x76, 0xa0, 0x1f, 0x8f, 0x74, 0x20, 0x62, 0x87, + 0x27, 0x76, 0x8b, 0x3b, 0x98, 0x5f, 0x77, 0x9b, 0x67, 0x9a, 0x70, 0x78, 0xe2, 0x34, 0x73, 0xf6, + 0x61, 0x00, 0xcf, 0xc0, 0x76, 0x81, 0x99, 0xcf, 0xd0, 0xf6, 0x1d, 0xc1, 0xbd, 0x6f, 0xca, 0x01, + 0x6b, 0xea, 0x01, 0x6b, 0x5e, 0xe8, 0x01, 0x6b, 0x37, 0x38, 0xed, 0xcb, 0x3f, 0xba, 0x86, 0xb3, + 0x99, 0x73, 0xf1, 0x53, 0xf8, 0x0d, 0xd8, 0x8e, 0xf1, 0x8c, 0xb9, 0x79, 0x13, 0xd3, 0xf6, 0xba, + 0x60, 0x7b, 0xb0, 0x4a, 0x69, 0x3e, 0x05, 0xce, 0x31, 0x73, 0xb6, 0x78, 0x70, 0xbe, 0xc3, 0x47, + 0x35, 0x28, 0x30, 0xd5, 0xdf, 0x83, 0xa9, 0x10, 0xc7, 0x45, 0x09, 0x8b, 0x05, 0xaa, 0xc6, 0xfb, + 0x88, 0xe2, 0xc1, 0x05, 0x51, 0x03, 0xd0, 0x29, 0xb6, 0xdd, 0x82, 0x35, 0xef, 0xc0, 0x0d, 0x71, + 0x89, 0x1f, 0x2d, 0x3a, 0x70, 0x11, 0xad, 0x7a, 0xb1, 0x72, 0x20, 0x80, 0xff, 0x3a, 0x10, 0x9e, + 0x81, 0x07, 0xa5, 0x81, 0xb0, 0x94, 0x20, 0xd7, 0xd7, 0x14, 0xfa, 0x0e, 0x0a, 0x13, 0xa2, 0x4c, + 0xa4, 0x45, 0xea, 0x17, 0x9a, 0x89, 0xbf, 0x3f, 0xea, 0x8e, 0x10, 0x1d, 0xb5, 0xef, 0x1e, 0x18, + 0x47, 0x77, 0xe5, 0x0b, 0x95, 0x7f, 0x8b, 0xf4, 0x29, 0xa2, 0x23, 0xf8, 0x21, 0x68, 0xa0, 0x34, + 0x95, 0x90, 0x4d, 0x01, 0xa9, 0xa3, 0x34, 0xe5, 0x47, 0xf6, 0x77, 0xaf, 0xe6, 0x1d, 0xe3, 0xf5, + 0xbc, 0x63, 0xfc, 0x39, 0xef, 0x18, 0x2f, 0x6f, 0x3a, 0x6b, 0xaf, 0x6f, 0x3a, 0x6b, 0xbf, 0xdf, + 0x74, 0xd6, 0x5e, 0x7c, 0x11, 0x12, 0x36, 0x9a, 0x78, 0xdc, 0xb1, 0x95, 0x7f, 0x04, 0x2c, 0x3e, + 0x3e, 0x52, 0x62, 0x55, 0x7f, 0x2f, 0x79, 0xeb, 0xe2, 0x51, 0x3e, 0xfe, 0x37, 0x00, 0x00, 0xff, + 0xff, 0x20, 0xeb, 0x80, 0xec, 0x50, 0x09, 0x00, 0x00, +} + +func (m *LegacyABCIResponses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*LegacyABCIResponses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LegacyABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DeliverTxs) > 0 { + for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ConsensusParamsInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParamsInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastHeightChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightChanged)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResponseFinalizeBlock != nil { + { + size, err := m.ResponseFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.LegacyAbciResponses != nil { + { + size, err := m.LegacyAbciResponses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *State) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *State) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x70 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x6a + } + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- + dAtA[i] = 0x62 + } + if m.LastHeightConsensusParamsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightConsensusParamsChanged)) + i-- + dAtA[i] = 0x58 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.LastHeightValidatorsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightValidatorsChanged)) + i-- + dAtA[i] = 0x48 + } + if m.LastValidators != nil { + { + size, err := m.LastValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Validators != nil { + { + size, err := m.Validators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.NextValidators != nil { + { + size, err := m.NextValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + n11, err11 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastBlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.LastBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LegacyABCIResponses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DeliverTxs) > 0 { + for _, e := range m.DeliverTxs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ConsensusParamsInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightChanged)) + } + return n +} + +func (m *ABCIResponsesInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LegacyAbciResponses != nil { + l = m.LegacyAbciResponses.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.ResponseFinalizeBlock != nil { + l = m.ResponseFinalizeBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *State) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LastBlockHeight)) + } + l = m.LastBlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LastBlockTime) + n += 1 + l + sovTypes(uint64(l)) + if m.NextValidators != nil { + l = 
m.NextValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validators != nil { + l = m.Validators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastValidators != nil { + l = m.LastValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastHeightValidatorsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightValidatorsChanged)) + } + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightConsensusParamsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightConsensusParamsChanged)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LegacyABCIResponses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LegacyABCIResponses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LegacyABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeliverTxs = append(m.DeliverTxs, &v1beta3.ExecTxResult{}) + if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EndBlock == nil { + m.EndBlock = &ResponseEndBlock{} + } + if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeginBlock == nil { + m.BeginBlock = &ResponseBeginBlock{} + } + if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, v1beta2.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes 
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, v1beta1.ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &v1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, v1beta2.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParamsInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParamsInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightChanged", wireType) + } + m.LastHeightChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyAbciResponses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LegacyAbciResponses == nil { + m.LegacyAbciResponses = &LegacyABCIResponses{} + } + if err := m.LegacyAbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseFinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseFinalizeBlock == nil { + m.ResponseFinalizeBlock = &v1beta3.ResponseFinalizeBlock{} + } + if err := m.ResponseFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { 
+ return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *State) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: State: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + } + m.LastBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
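(Aside: the seven-bit shift loop repeated throughout these generated Unmarshal methods is the protobuf base-128 varint decoder — every tag, length, and integer field in this file is read this way. A minimal standalone sketch of the same technique; decodeUvarint is a hypothetical helper name, not part of the generated API:

import (
	"fmt"
	"io"
)

func decodeUvarint(buf []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64") // cf. ErrIntOverflowTypes above
		}
		if n >= len(buf) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift // each byte contributes 7 payload bits
		if b < 0x80 {                // a clear high bit marks the final byte
			return v, n, nil
		}
	}
}

The generated code inlines this loop at every use site instead of calling a helper, trading size for zero call overhead.)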
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.LastBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NextValidators == nil { + m.NextValidators = &v1beta12.ValidatorSet{} + } + if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validators == nil { + m.Validators = &v1beta12.ValidatorSet{} + } + if err := m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastValidators == nil { + m.LastValidators = &v1beta12.ValidatorSet{} + } + if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightValidatorsChanged", wireType) + } + m.LastHeightValidatorsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightValidatorsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + 
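(Aside: the `fieldNum := int32(wire >> 3)` / `wireType := int(wire & 0x7)` split performed at the top of each Unmarshal above comes straight from the protobuf wire format — a field key is itself a varint holding `(field_number << 3) | wire_type`. An illustrative sketch, with hypothetical names; the constants mirror the wire types the generated switch handles:

const (
	wireVarint     = 0 // e.g. Height, InitialHeight
	wireFixed64    = 1
	wireBytes      = 2 // length-delimited: strings, bytes, embedded messages
	wireStartGroup = 3
	wireEndGroup   = 4
	wireFixed32    = 5
)

// splitKey separates a decoded key varint into its components, exactly as
// the generated code does inline.
func splitKey(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

This is also why the generated code rejects `wireType == 4` at the top level: an end-group marker can never begin a message.)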
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightConsensusParamsChanged", wireType) + } + m.LastHeightConsensusParamsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightConsensusParamsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
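(Aside: the `append(m.AppHash[:0], dAtA[iNdEx:postIndex]...)` idiom just above copies the bytes field out of the input buffer rather than aliasing it — the result never points into `dAtA`, which the caller may reuse — while reusing the destination's backing array when its capacity suffices. The `nil` check afterwards distinguishes an absent field from a present-but-empty one. A sketch of the same copy, with a hypothetical helper name:

// copyBytes fills dst with a private copy of src, never retaining src's
// backing array.
func copyBytes(dst, src []byte) []byte {
	dst = append(dst[:0], src...) // reuse dst's capacity if possible
	if dst == nil {
		dst = []byte{} // a field that was present decodes to empty, not nil
	}
	return dst
}
)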
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/statesync/message.go b/api/cometbft/statesync/v1/message.go similarity index 84% rename from proto/tendermint/statesync/message.go rename to api/cometbft/statesync/v1/message.go index 357e8eac5c5..af6c0ea1010 100644 --- a/proto/tendermint/statesync/message.go +++ b/api/cometbft/statesync/v1/message.go @@ -1,18 +1,11 @@ -package statesync +package v1 import ( "fmt" "github.com/cosmos/gogoproto/proto" - - "github.com/cometbft/cometbft/p2p" ) -var _ p2p.Wrapper = &ChunkRequest{} -var _ p2p.Wrapper = &ChunkResponse{} -var _ p2p.Wrapper = &SnapshotsRequest{} -var _ p2p.Wrapper = &SnapshotsResponse{} - func (m *SnapshotsResponse) Wrap() proto.Message { sm := &Message{} sm.Sum = &Message_SnapshotsResponse{SnapshotsResponse: m} diff --git a/proto/tendermint/statesync/types.pb.go b/api/cometbft/statesync/v1/types.pb.go similarity index 90% rename from proto/tendermint/statesync/types.pb.go rename to api/cometbft/statesync/v1/types.pb.go index 84a637f5e21..cfdad4a898b 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/api/cometbft/statesync/v1/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
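(Aside on the message.go change above: the `Wrap` methods place a concrete statesync message into the `Message` oneof envelope before it is marshaled for the wire; this diff drops the `p2p.Wrapper` interface assertions so the generated package no longer imports `p2p`. A hedged usage sketch, assuming the generated `SnapshotsResponse`, its `Wrap` method, and `proto.Marshal` from gogoproto as shown in this diff; the wrapper function name is hypothetical:

// A possible call site (illustrative only).
func marshalSnapshotsResponse() ([]byte, error) {
	resp := &SnapshotsResponse{Height: 100, Format: 1}
	msg := resp.Wrap() // envelope: Message{Sum: &Message_SnapshotsResponse{...}}
	return proto.Marshal(msg)
}
)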
-// source: tendermint/statesync/types.proto +// source: cometbft/statesync/v1/types.proto -package statesync +package v1 import ( fmt "fmt" @@ -22,7 +22,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Message is the top-level message type for the statesync service. type Message struct { + // The message type. + // // Types that are valid to be assigned to Sum: // // *Message_SnapshotsRequest @@ -36,7 +39,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_a1c2869546ca7914, []int{0} + return fileDescriptor_95fd383b29885bb3, []int{0} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,6 +137,7 @@ func (*Message) XXX_OneofWrappers() []interface{} { } } +// SnapshotsRequest is sent to request a snapshot. type SnapshotsRequest struct { } @@ -141,7 +145,7 @@ func (m *SnapshotsRequest) Reset() { *m = SnapshotsRequest{} } func (m *SnapshotsRequest) String() string { return proto.CompactTextString(m) } func (*SnapshotsRequest) ProtoMessage() {} func (*SnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a1c2869546ca7914, []int{1} + return fileDescriptor_95fd383b29885bb3, []int{1} } func (m *SnapshotsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -170,6 +174,7 @@ func (m *SnapshotsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_SnapshotsRequest proto.InternalMessageInfo +// SnapshotsResponse contains the snapshot metadata. type SnapshotsResponse struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` @@ -182,7 +187,7 @@ func (m *SnapshotsResponse) Reset() { *m = SnapshotsResponse{} } func (m *SnapshotsResponse) String() string { return proto.CompactTextString(m) } func (*SnapshotsResponse) ProtoMessage() {} func (*SnapshotsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a1c2869546ca7914, []int{2} + return fileDescriptor_95fd383b29885bb3, []int{2} } func (m *SnapshotsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,6 +251,7 @@ func (m *SnapshotsResponse) GetMetadata() []byte { return nil } +// ChunkRequest is sent to request a chunk. type ChunkRequest struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` @@ -256,7 +262,7 @@ func (m *ChunkRequest) Reset() { *m = ChunkRequest{} } func (m *ChunkRequest) String() string { return proto.CompactTextString(m) } func (*ChunkRequest) ProtoMessage() {} func (*ChunkRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a1c2869546ca7914, []int{3} + return fileDescriptor_95fd383b29885bb3, []int{3} } func (m *ChunkRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -306,6 +312,7 @@ func (m *ChunkRequest) GetIndex() uint32 { return 0 } +// ChunkResponse contains a chunk of the snapshot. 
type ChunkResponse struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` @@ -318,7 +325,7 @@ func (m *ChunkResponse) Reset() { *m = ChunkResponse{} } func (m *ChunkResponse) String() string { return proto.CompactTextString(m) } func (*ChunkResponse) ProtoMessage() {} func (*ChunkResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a1c2869546ca7914, []int{4} + return fileDescriptor_95fd383b29885bb3, []int{4} } func (m *ChunkResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,42 +390,42 @@ func (m *ChunkResponse) GetMissing() bool { } func init() { - proto.RegisterType((*Message)(nil), "tendermint.statesync.Message") - proto.RegisterType((*SnapshotsRequest)(nil), "tendermint.statesync.SnapshotsRequest") - proto.RegisterType((*SnapshotsResponse)(nil), "tendermint.statesync.SnapshotsResponse") - proto.RegisterType((*ChunkRequest)(nil), "tendermint.statesync.ChunkRequest") - proto.RegisterType((*ChunkResponse)(nil), "tendermint.statesync.ChunkResponse") -} - -func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } - -var fileDescriptor_a1c2869546ca7914 = []byte{ - // 397 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0x3f, 0x6b, 0xdb, 0x40, - 0x1c, 0x95, 0xfc, 0x9f, 0x5f, 0xad, 0x62, 0x1f, 0xa6, 0x88, 0x0e, 0xc2, 0xa8, 0xd0, 0x76, 0x92, - 0xa0, 0x1d, 0xba, 0xbb, 0x8b, 0x0b, 0xed, 0xd0, 0x6b, 0x03, 0x21, 0x4b, 0x38, 0xcb, 0x67, 0x49, - 0x04, 0x9d, 0x14, 0xfd, 0x4e, 0x10, 0x7f, 0x80, 0x4c, 0x59, 0xf2, 0xb1, 0x32, 0x7a, 0x0c, 0x99, - 0x82, 0xfd, 0x45, 0x82, 0x4e, 0xb2, 0xac, 0x38, 0x26, 0x21, 0x90, 0xed, 0xde, 0xd3, 0xd3, 0xbb, - 0xf7, 0x1e, 0x1c, 0x8c, 0x25, 0x17, 0x73, 0x9e, 0x46, 0xa1, 0x90, 0x2e, 0x4a, 0x26, 0x39, 0x2e, - 0x85, 0xe7, 0xca, 0x65, 0xc2, 0xd1, 0x49, 0xd2, 0x58, 0xc6, 0x64, 0xb4, 0x53, 0x38, 0x95, 0xc2, - 0xbe, 0x6b, 0x40, 0xf7, 0x0f, 0x47, 0x64, 0x3e, 0x27, 0x47, 0x30, 0x44, 0xc1, 0x12, 0x0c, 0x62, - 0x89, 0xa7, 0x29, 0x3f, 0xcf, 0x38, 0x4a, 0x53, 0x1f, 0xeb, 0x5f, 0xdf, 0x7d, 0xfb, 0xec, 0x1c, - 0xfa, 0xdb, 0xf9, 0xb7, 0x95, 0xd3, 0x42, 0x3d, 0xd5, 0xe8, 0x00, 0xf7, 0x38, 0x72, 0x0c, 0xa4, - 0x6e, 0x8b, 0x49, 0x2c, 0x90, 0x9b, 0x0d, 0xe5, 0xfb, 0xe5, 0x45, 0xdf, 0x42, 0x3e, 0xd5, 0xe8, - 0x10, 0xf7, 0x49, 0xf2, 0x0b, 0x0c, 0x2f, 0xc8, 0xc4, 0x59, 0x15, 0xb6, 0xa9, 0x4c, 0xed, 0xc3, - 0xa6, 0x3f, 0x73, 0xe9, 0x2e, 0x68, 0xdf, 0xab, 0x61, 0xf2, 0x1b, 0xde, 0x6f, 0xad, 0xca, 0x80, - 0x2d, 0xe5, 0xf5, 0xe9, 0x59, 0xaf, 0x2a, 0x9c, 0xe1, 0xd5, 0x89, 0x49, 0x1b, 0x9a, 0x98, 0x45, - 0x36, 0x81, 0xc1, 0xfe, 0x42, 0xf6, 0x95, 0x0e, 0xc3, 0x27, 0xf5, 0xc8, 0x07, 0xe8, 0x04, 0x3c, - 0xf4, 0x83, 0x62, 0xef, 0x16, 0x2d, 0x51, 0xce, 0x2f, 0xe2, 0x34, 0x62, 0x52, 0xed, 0x65, 0xd0, - 0x12, 0xe5, 0xbc, 0xba, 0x11, 0x55, 0x65, 0x83, 0x96, 0x88, 0x10, 0x68, 0x05, 0x0c, 0x03, 0x15, - 0xbe, 0x4f, 0xd5, 0x99, 0x7c, 0x84, 0x5e, 0xc4, 0x25, 0x9b, 0x33, 0xc9, 0xcc, 0xb6, 0xe2, 0x2b, - 0x6c, 0xff, 0x87, 0x7e, 0x7d, 0x96, 0x57, 0xe7, 0x18, 0x41, 0x3b, 0x14, 0x73, 0x7e, 0x51, 0xc6, - 0x28, 0x80, 0x7d, 0xa9, 0x83, 0xf1, 0x68, 0xa1, 0xb7, 0xf1, 0xcd, 0x59, 0xd5, 0xb3, 0xac, 0x57, - 0x00, 0x62, 0x42, 0x37, 0x0a, 0x11, 0x43, 0xe1, 0xab, 0x7a, 0x3d, 0xba, 0x85, 0x93, 0xbf, 0x37, - 0x6b, 0x4b, 0x5f, 0xad, 0x2d, 0xfd, 0x7e, 0x6d, 0xe9, 0xd7, 0x1b, 0x4b, 0x5b, 0x6d, 0x2c, 0xed, - 0x76, 0x63, 0x69, 0x27, 0x3f, 0xfc, 0x50, 0x06, 0xd9, 0xcc, 0xf1, 0xe2, 
0xc8, 0xf5, 0xe2, 0x88, - 0xcb, 0xd9, 0x42, 0xee, 0x0e, 0xea, 0xc1, 0xb8, 0x87, 0x5e, 0xd4, 0xac, 0xa3, 0xbe, 0x7d, 0x7f, - 0x08, 0x00, 0x00, 0xff, 0xff, 0x04, 0xbe, 0xb0, 0x90, 0x70, 0x03, 0x00, 0x00, + proto.RegisterType((*Message)(nil), "cometbft.statesync.v1.Message") + proto.RegisterType((*SnapshotsRequest)(nil), "cometbft.statesync.v1.SnapshotsRequest") + proto.RegisterType((*SnapshotsResponse)(nil), "cometbft.statesync.v1.SnapshotsResponse") + proto.RegisterType((*ChunkRequest)(nil), "cometbft.statesync.v1.ChunkRequest") + proto.RegisterType((*ChunkResponse)(nil), "cometbft.statesync.v1.ChunkResponse") +} + +func init() { proto.RegisterFile("cometbft/statesync/v1/types.proto", fileDescriptor_95fd383b29885bb3) } + +var fileDescriptor_95fd383b29885bb3 = []byte{ + // 399 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x6a, 0xdb, 0x40, + 0x18, 0x94, 0xfc, 0xcf, 0x57, 0xab, 0xd8, 0x4b, 0x5b, 0x44, 0x0f, 0xa2, 0x55, 0x0b, 0xf5, 0x49, + 0xc2, 0x2d, 0xf4, 0x01, 0xdc, 0x8b, 0x29, 0x18, 0xca, 0xb6, 0x14, 0x92, 0x4b, 0x58, 0xcb, 0x6b, + 0x49, 0x04, 0xfd, 0xc4, 0xdf, 0xca, 0xc4, 0x0f, 0x90, 0x53, 0x2e, 0x79, 0xac, 0x1c, 0x7d, 0xcc, + 0x29, 0x04, 0xfb, 0x45, 0x82, 0x56, 0x3f, 0x51, 0x1c, 0x27, 0x21, 0x90, 0xdb, 0xce, 0x30, 0x1e, + 0xcf, 0x8c, 0xf8, 0xe0, 0xb3, 0x13, 0x05, 0x5c, 0x4c, 0xe7, 0xc2, 0x46, 0xc1, 0x04, 0xc7, 0x55, + 0xe8, 0xd8, 0xcb, 0xa1, 0x2d, 0x56, 0x31, 0x47, 0x2b, 0x5e, 0x44, 0x22, 0x22, 0xef, 0x0b, 0x89, + 0x55, 0x4a, 0xac, 0xe5, 0xd0, 0xbc, 0xae, 0x41, 0x7b, 0xc2, 0x11, 0x99, 0xcb, 0xc9, 0x7f, 0xe8, + 0x63, 0xc8, 0x62, 0xf4, 0x22, 0x81, 0x47, 0x0b, 0x7e, 0x92, 0x70, 0x14, 0xba, 0xfa, 0x49, 0x1d, + 0xbc, 0xf9, 0xfe, 0xcd, 0xda, 0xfb, 0x73, 0xeb, 0x6f, 0xa1, 0xa7, 0x99, 0x7c, 0xac, 0xd0, 0x1e, + 0xee, 0x70, 0xe4, 0x00, 0x48, 0xd5, 0x17, 0xe3, 0x28, 0x44, 0xae, 0xd7, 0xa4, 0xf1, 0xe0, 0x79, + 0xe3, 0x4c, 0x3f, 0x56, 0x68, 0x1f, 0x77, 0x49, 0xf2, 0x1b, 0x34, 0xc7, 0x4b, 0xc2, 0xe3, 0x32, + 0x6e, 0x5d, 0xba, 0x7e, 0x79, 0xc4, 0xf5, 0x57, 0xaa, 0xbd, 0x8b, 0xda, 0x75, 0x2a, 0x98, 0x4c, + 0xe0, 0x6d, 0xe1, 0x95, 0x47, 0x6c, 0x48, 0xb3, 0xaf, 0x4f, 0x9b, 0x95, 0xf1, 0x34, 0xa7, 0x4a, + 0x8c, 0x9a, 0x50, 0xc7, 0x24, 0x30, 0x09, 0xf4, 0x76, 0x47, 0x32, 0xcf, 0x55, 0xe8, 0x3f, 0x28, + 0x48, 0x3e, 0x40, 0xcb, 0xe3, 0xbe, 0xeb, 0x65, 0x9b, 0x37, 0x68, 0x8e, 0x52, 0x7e, 0x1e, 0x2d, + 0x02, 0x26, 0xe4, 0x64, 0x1a, 0xcd, 0x51, 0xca, 0xcb, 0x7f, 0x44, 0x59, 0x5a, 0xa3, 0x39, 0x22, + 0x04, 0x1a, 0x1e, 0x43, 0x4f, 0xa6, 0xef, 0x52, 0xf9, 0x26, 0x1f, 0xa1, 0x13, 0x70, 0xc1, 0x66, + 0x4c, 0x30, 0xbd, 0x29, 0xf9, 0x12, 0x9b, 0xff, 0xa0, 0x5b, 0xdd, 0xe5, 0xc5, 0x39, 0xde, 0x41, + 0xd3, 0x0f, 0x67, 0xfc, 0x34, 0x8f, 0x91, 0x01, 0xf3, 0x4c, 0x05, 0xed, 0xde, 0x42, 0xaf, 0xe3, + 0x9b, 0xb2, 0xb2, 0x67, 0x5e, 0x2f, 0x03, 0x44, 0x87, 0x76, 0xe0, 0x23, 0xfa, 0xa1, 0x2b, 0xeb, + 0x75, 0x68, 0x01, 0x47, 0x7f, 0x2e, 0x37, 0x86, 0xba, 0xde, 0x18, 0xea, 0xcd, 0xc6, 0x50, 0x2f, + 0xb6, 0x86, 0xb2, 0xde, 0x1a, 0xca, 0xd5, 0xd6, 0x50, 0x0e, 0x7f, 0xba, 0xbe, 0xf0, 0x92, 0x69, + 0xfa, 0x75, 0xed, 0xf2, 0x7e, 0xca, 0x07, 0x8b, 0x7d, 0x7b, 0xef, 0x55, 0x4d, 0x5b, 0xf2, 0xa0, + 0x7e, 0xdc, 0x06, 0x00, 0x00, 0xff, 0xff, 0x99, 0x49, 0xe4, 0x5a, 0x75, 0x03, 0x00, 0x00, } func (m *Message) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/store/types.pb.go b/api/cometbft/store/v1/types.pb.go similarity index 83% rename from proto/tendermint/store/types.pb.go rename to api/cometbft/store/v1/types.pb.go index e7e553e0e2c..81978c26944 
100644 --- a/proto/tendermint/store/types.pb.go +++ b/api/cometbft/store/v1/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/store/types.proto +// source: cometbft/store/v1/types.proto -package store +package v1 import ( fmt "fmt" @@ -22,6 +22,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// BlockStoreState represents the state of the block store. type BlockStoreState struct { Base int64 `protobuf:"varint,1,opt,name=base,proto3" json:"base,omitempty"` Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` @@ -31,7 +32,7 @@ func (m *BlockStoreState) Reset() { *m = BlockStoreState{} } func (m *BlockStoreState) String() string { return proto.CompactTextString(m) } func (*BlockStoreState) ProtoMessage() {} func (*BlockStoreState) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9e53a0a74267f7, []int{0} + return fileDescriptor_39bdcbdd79a94f5f, []int{0} } func (m *BlockStoreState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,24 +76,24 @@ func (m *BlockStoreState) GetHeight() int64 { } func init() { - proto.RegisterType((*BlockStoreState)(nil), "tendermint.store.BlockStoreState") + proto.RegisterType((*BlockStoreState)(nil), "cometbft.store.v1.BlockStoreState") } -func init() { proto.RegisterFile("tendermint/store/types.proto", fileDescriptor_ff9e53a0a74267f7) } +func init() { proto.RegisterFile("cometbft/store/v1/types.proto", fileDescriptor_39bdcbdd79a94f5f) } -var fileDescriptor_ff9e53a0a74267f7 = []byte{ - // 171 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0xa9, 0x2c, - 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0x95, - 0x6c, 0xb9, 0xf8, 0x9d, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x41, 0xbc, 0xe0, 0x92, 0xc4, 0x92, 0x54, - 0x21, 0x21, 0x2e, 0x96, 0xa4, 0xc4, 0xe2, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xe6, 0x20, 0x30, - 0x5b, 0x48, 0x8c, 0x8b, 0x2d, 0x23, 0x35, 0x33, 0x3d, 0xa3, 0x44, 0x82, 0x09, 0x2c, 0x0a, 0xe5, - 0x39, 0xf9, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, - 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x71, 0x7a, 0x66, - 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x6e, 0x6a, 0x49, 0x52, 0x5a, - 0x09, 0x82, 0x01, 0x76, 0x8e, 0x3e, 0xba, 0x5b, 0x93, 0xd8, 0xc0, 0xe2, 0xc6, 0x80, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xb7, 0x2b, 0x34, 0x2a, 0xc6, 0x00, 0x00, 0x00, +var fileDescriptor_39bdcbdd79a94f5f = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0xa9, + 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x84, 0x49, 0xeb, 0x81, 0xa5, + 0xf5, 0xca, 0x0c, 0x95, 0x6c, 0xb9, 0xf8, 0x9d, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x41, 0x02, 0xc1, + 0x25, 0x89, 0x25, 0xa9, 0x42, 0x42, 0x5c, 0x2c, 0x49, 0x89, 0xc5, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, + 0x1a, 0xcc, 0x41, 0x60, 0xb6, 0x90, 0x18, 0x17, 0x5b, 0x46, 0x6a, 0x66, 0x7a, 0x46, 0x89, 0x04, + 0x13, 0x58, 0x14, 0xca, 0x73, 0xf2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, + 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 
0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, + 0x28, 0xa3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb8, 0xab, 0xe0, + 0x8c, 0xc4, 0x82, 0x4c, 0x7d, 0x0c, 0xb7, 0x26, 0xb1, 0x81, 0x9d, 0x69, 0x0c, 0x08, 0x00, 0x00, + 0xff, 0xff, 0x25, 0xd4, 0x78, 0x17, 0xc7, 0x00, 0x00, 0x00, } func (m *BlockStoreState) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/types/block.pb.go b/api/cometbft/types/v1/block.pb.go similarity index 83% rename from proto/tendermint/types/block.pb.go rename to api/cometbft/types/v1/block.pb.go index 3b3e3811ffc..311eeaafd37 100644 --- a/proto/tendermint/types/block.pb.go +++ b/api/cometbft/types/v1/block.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/block.proto +// source: cometbft/types/v1/block.proto -package types +package v1 import ( fmt "fmt" @@ -23,6 +23,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Block defines the structure of a block in the CometBFT blockchain. type Block struct { Header Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` Data Data `protobuf:"bytes,2,opt,name=data,proto3" json:"data"` @@ -34,7 +35,7 @@ func (m *Block) Reset() { *m = Block{} } func (m *Block) String() string { return proto.CompactTextString(m) } func (*Block) ProtoMessage() {} func (*Block) Descriptor() ([]byte, []int) { - return fileDescriptor_70840e82f4357ab1, []int{0} + return fileDescriptor_fbda8644c7f5ead0, []int{0} } func (m *Block) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -92,30 +93,30 @@ func (m *Block) GetLastCommit() *Commit { } func init() { - proto.RegisterType((*Block)(nil), "tendermint.types.Block") + proto.RegisterType((*Block)(nil), "cometbft.types.v1.Block") } -func init() { proto.RegisterFile("tendermint/types/block.proto", fileDescriptor_70840e82f4357ab1) } +func init() { proto.RegisterFile("cometbft/types/v1/block.proto", fileDescriptor_fbda8644c7f5ead0) } -var fileDescriptor_70840e82f4357ab1 = []byte{ - // 272 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xca, 0xc9, - 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0xa5, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x14, 0xa6, 0x29, 0x60, - 0x12, 0x2a, 0x2b, 0x8f, 0x21, 0x9b, 0x5a, 0x96, 0x99, 0x92, 0x9a, 0x97, 0x9c, 0x0a, 0x51, 0xa0, - 0xf4, 0x8e, 0x91, 0x8b, 0xd5, 0x09, 0x64, 0xad, 0x90, 0x19, 0x17, 0x5b, 0x46, 0x6a, 0x62, 0x4a, - 0x6a, 0x91, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x84, 0x1e, 0xba, 0x0b, 0xf4, 0x3c, 0xc0, - 0xf2, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x41, 0x55, 0x0b, 0x19, 0x70, 0xb1, 0xa4, 0x24, - 0x96, 0x24, 0x4a, 0x30, 0x81, 0x75, 0x89, 0x61, 0xea, 0x72, 0x49, 0x2c, 0x49, 0x84, 0xea, 0x01, - 0xab, 0x14, 0x72, 0xe0, 0xe2, 0x80, 0xb9, 0x42, 0x82, 0x19, 0xac, 0x4b, 0x0e, 0x53, 0x97, 0x2b, - 0x54, 0x85, 0x4f, 0x66, 0x71, 0x09, 0x54, 0x37, 0x5c, 0x97, 0x90, 0x25, 0x17, 0x77, 0x4e, 0x62, - 0x71, 0x49, 0x7c, 0x72, 0x7e, 0x6e, 0x6e, 0x66, 0x89, 0x04, 0x0b, 0x2e, 0x07, 0x3b, 0x83, 0xe5, - 0x83, 0xb8, 0x40, 0x8a, 0x21, 0x6c, 0x27, 0xdf, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, - 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, - 0x63, 0x88, 0x32, 0x4e, 0xcf, 0x2c, 
0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, - 0xcf, 0x4d, 0x2d, 0x49, 0x4a, 0x2b, 0x41, 0x30, 0x20, 0x01, 0x8f, 0x1e, 0x9c, 0x49, 0x6c, 0x60, - 0x71, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x15, 0xdf, 0xde, 0x0a, 0xcd, 0x01, 0x00, 0x00, +var fileDescriptor_fbda8644c7f5ead0 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x33, 0xd4, 0x4f, 0xca, + 0xc9, 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x84, 0x49, 0xeb, 0x81, 0xa5, + 0xf5, 0xca, 0x0c, 0xa5, 0xb0, 0xe8, 0x80, 0xc8, 0x81, 0x75, 0x48, 0x29, 0x60, 0x4a, 0xa7, 0x96, + 0x65, 0xa6, 0xa4, 0xe6, 0x25, 0xa7, 0x42, 0x55, 0x88, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x99, 0xfa, + 0x20, 0x16, 0x44, 0x54, 0xe9, 0x13, 0x23, 0x17, 0xab, 0x13, 0xc8, 0x66, 0x21, 0x73, 0x2e, 0xb6, + 0x8c, 0xd4, 0xc4, 0x94, 0xd4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x49, 0x3d, 0x0c, + 0x47, 0xe8, 0x79, 0x80, 0x15, 0x38, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0x55, 0x2e, 0x64, + 0xc8, 0xc5, 0x92, 0x92, 0x58, 0x92, 0x28, 0xc1, 0x04, 0xd6, 0x26, 0x8e, 0x45, 0x9b, 0x4b, 0x62, + 0x49, 0x22, 0x54, 0x13, 0x58, 0xa9, 0x90, 0x23, 0x17, 0x07, 0xcc, 0x75, 0x12, 0xcc, 0x60, 0x6d, + 0xf2, 0x58, 0xb4, 0xb9, 0x42, 0x95, 0xf8, 0x64, 0x16, 0x97, 0x40, 0xb5, 0xc3, 0xb5, 0x09, 0x59, + 0x71, 0x71, 0xe7, 0x24, 0x16, 0x97, 0xc4, 0x27, 0xe7, 0xe7, 0xe6, 0x66, 0x96, 0x48, 0xb0, 0xe0, + 0x74, 0xb3, 0x33, 0x58, 0x41, 0x10, 0x17, 0x48, 0x35, 0x84, 0xed, 0xe4, 0x73, 0xe2, 0x91, 0x1c, + 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, + 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x46, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x20, 0x63, + 0xf4, 0xe1, 0x21, 0x0a, 0x67, 0x24, 0x16, 0x64, 0xea, 0x63, 0x84, 0x73, 0x12, 0x1b, 0x38, 0x24, + 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x58, 0x78, 0xe4, 0xc7, 0xd4, 0x01, 0x00, 0x00, } func (m *Block) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/types/canonical.pb.go b/api/cometbft/types/v1/canonical.pb.go similarity index 89% rename from proto/tendermint/types/canonical.pb.go rename to api/cometbft/types/v1/canonical.pb.go index 7e776e02d58..99d65dd9240 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/api/cometbft/types/v1/canonical.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/canonical.proto +// source: cometbft/types/v1/canonical.proto -package types +package v1 import ( encoding_binary "encoding/binary" @@ -28,6 +28,8 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// CanonicalBlockID is a canonical representation of a BlockID, which gets +// serialized and signed. 
type CanonicalBlockID struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` PartSetHeader CanonicalPartSetHeader `protobuf:"bytes,2,opt,name=part_set_header,json=partSetHeader,proto3" json:"part_set_header"` @@ -37,7 +39,7 @@ func (m *CanonicalBlockID) Reset() { *m = CanonicalBlockID{} } func (m *CanonicalBlockID) String() string { return proto.CompactTextString(m) } func (*CanonicalBlockID) ProtoMessage() {} func (*CanonicalBlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_8d1a1a84ff7267ed, []int{0} + return fileDescriptor_bd60568638662265, []int{0} } func (m *CanonicalBlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,6 +82,8 @@ func (m *CanonicalBlockID) GetPartSetHeader() CanonicalPartSetHeader { return CanonicalPartSetHeader{} } +// CanonicalPartSetHeader is a canonical representation of a PartSetHeader, +// which gets serialized and signed. type CanonicalPartSetHeader struct { Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` @@ -89,7 +93,7 @@ func (m *CanonicalPartSetHeader) Reset() { *m = CanonicalPartSetHeader{} func (m *CanonicalPartSetHeader) String() string { return proto.CompactTextString(m) } func (*CanonicalPartSetHeader) ProtoMessage() {} func (*CanonicalPartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_8d1a1a84ff7267ed, []int{1} + return fileDescriptor_bd60568638662265, []int{1} } func (m *CanonicalPartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,8 +136,10 @@ func (m *CanonicalPartSetHeader) GetHash() []byte { return nil } +// CanonicalProposal is a canonical representation of a Proposal, which gets +// serialized and signed. type CanonicalProposal struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` POLRound int64 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` @@ -146,7 +152,7 @@ func (m *CanonicalProposal) Reset() { *m = CanonicalProposal{} } func (m *CanonicalProposal) String() string { return proto.CompactTextString(m) } func (*CanonicalProposal) ProtoMessage() {} func (*CanonicalProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_8d1a1a84ff7267ed, []int{2} + return fileDescriptor_bd60568638662265, []int{2} } func (m *CanonicalProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -224,8 +230,10 @@ func (m *CanonicalProposal) GetChainID() string { return "" } +// CanonicalVote is a canonical representation of a Vote, which gets +// serialized and signed. 
type CanonicalVote struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` BlockID *CanonicalBlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` @@ -237,7 +245,7 @@ func (m *CanonicalVote) Reset() { *m = CanonicalVote{} } func (m *CanonicalVote) String() string { return proto.CompactTextString(m) } func (*CanonicalVote) ProtoMessage() {} func (*CanonicalVote) Descriptor() ([]byte, []int) { - return fileDescriptor_8d1a1a84ff7267ed, []int{3} + return fileDescriptor_bd60568638662265, []int{3} } func (m *CanonicalVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -321,7 +329,7 @@ func (m *CanonicalVoteExtension) Reset() { *m = CanonicalVoteExtension{} func (m *CanonicalVoteExtension) String() string { return proto.CompactTextString(m) } func (*CanonicalVoteExtension) ProtoMessage() {} func (*CanonicalVoteExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_8d1a1a84ff7267ed, []int{4} + return fileDescriptor_bd60568638662265, []int{4} } func (m *CanonicalVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -379,50 +387,50 @@ func (m *CanonicalVoteExtension) GetChainId() string { } func init() { - proto.RegisterType((*CanonicalBlockID)(nil), "tendermint.types.CanonicalBlockID") - proto.RegisterType((*CanonicalPartSetHeader)(nil), "tendermint.types.CanonicalPartSetHeader") - proto.RegisterType((*CanonicalProposal)(nil), "tendermint.types.CanonicalProposal") - proto.RegisterType((*CanonicalVote)(nil), "tendermint.types.CanonicalVote") - proto.RegisterType((*CanonicalVoteExtension)(nil), "tendermint.types.CanonicalVoteExtension") -} - -func init() { proto.RegisterFile("tendermint/types/canonical.proto", fileDescriptor_8d1a1a84ff7267ed) } - -var fileDescriptor_8d1a1a84ff7267ed = []byte{ - // 525 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xc1, 0x6e, 0x9b, 0x40, - 0x10, 0x35, 0x0e, 0xb6, 0x61, 0x13, 0xb7, 0xee, 0x2a, 0x8a, 0xa8, 0x15, 0x01, 0xe2, 0x50, 0xd1, - 0x0b, 0x48, 0xf1, 0x1f, 0x90, 0x56, 0xaa, 0xab, 0x46, 0x8d, 0x48, 0x94, 0x43, 0x2f, 0xd6, 0x02, - 0x1b, 0x40, 0x05, 0x16, 0xc1, 0x5a, 0x6a, 0x2e, 0xed, 0x2f, 0xe4, 0x3b, 0xfa, 0x25, 0x39, 0xe6, - 0xd8, 0x5e, 0xdc, 0x0a, 0xff, 0x48, 0xb5, 0x0b, 0x06, 0x2b, 0xa9, 0x2c, 0x55, 0xad, 0x7a, 0x41, - 0x33, 0x6f, 0xde, 0xce, 0x3c, 0xbd, 0x61, 0x17, 0xe8, 0x14, 0x67, 0x01, 0x2e, 0xd2, 0x38, 0xa3, - 0x36, 0xbd, 0xc9, 0x71, 0x69, 0xfb, 0x28, 0x23, 0x59, 0xec, 0xa3, 0xc4, 0xca, 0x0b, 0x42, 0x09, - 0x9c, 0x74, 0x0c, 0x8b, 0x33, 0xa6, 0x87, 0x21, 0x09, 0x09, 0x2f, 0xda, 0x2c, 0xaa, 0x79, 0xd3, - 0xe3, 0x47, 0x9d, 0xf8, 0xb7, 0xa9, 0x6a, 0x21, 0x21, 0x61, 0x82, 0x6d, 0x9e, 0x79, 0xcb, 0x6b, - 0x9b, 0xc6, 0x29, 0x2e, 0x29, 0x4a, 0xf3, 0x9a, 0x60, 0x7c, 0x06, 0x93, 0xd3, 0xcd, 0x64, 0x27, - 0x21, 0xfe, 0xc7, 0xf9, 0x2b, 0x08, 0x81, 0x18, 0xa1, 0x32, 0x52, 0x04, 0x5d, 0x30, 0x0f, 0x5c, - 0x1e, 0xc3, 0x2b, 0xf0, 0x34, 0x47, 0x05, 0x5d, 0x94, 0x98, 0x2e, 0x22, 0x8c, 0x02, 0x5c, 0x28, - 0x7d, 0x5d, 0x30, 0xf7, 0x4f, 0x4c, 0xeb, 0xa1, 0x50, 0xab, 0x6d, 0x78, 0x8e, 0x0a, 0x7a, 0x81, - 0xe9, 0x1b, 0xce, 0x77, 0xc4, 0xbb, 0x95, 0xd6, 0x73, 0xc7, 0xf9, 0x36, 0x68, 
0x38, 0xe0, 0xe8, - 0xf7, 0x74, 0x78, 0x08, 0x06, 0x94, 0x50, 0x94, 0x70, 0x19, 0x63, 0xb7, 0x4e, 0x5a, 0x6d, 0xfd, - 0x4e, 0x9b, 0xf1, 0xbd, 0x0f, 0x9e, 0x75, 0x4d, 0x0a, 0x92, 0x93, 0x12, 0x25, 0x70, 0x06, 0x44, - 0x26, 0x87, 0x1f, 0x7f, 0x72, 0xa2, 0x3d, 0x96, 0x79, 0x11, 0x87, 0x19, 0x0e, 0xce, 0xca, 0xf0, - 0xf2, 0x26, 0xc7, 0x2e, 0x27, 0xc3, 0x23, 0x30, 0x8c, 0x70, 0x1c, 0x46, 0x94, 0x0f, 0x98, 0xb8, - 0x4d, 0xc6, 0xc4, 0x14, 0x64, 0x99, 0x05, 0xca, 0x1e, 0x87, 0xeb, 0x04, 0xbe, 0x04, 0x72, 0x4e, - 0x92, 0x45, 0x5d, 0x11, 0x75, 0xc1, 0xdc, 0x73, 0x0e, 0xaa, 0x95, 0x26, 0x9d, 0xbf, 0x7f, 0xe7, - 0x32, 0xcc, 0x95, 0x72, 0x92, 0xf0, 0x08, 0xbe, 0x05, 0x92, 0xc7, 0xec, 0x5d, 0xc4, 0x81, 0x32, - 0xe0, 0xc6, 0x19, 0x3b, 0x8c, 0x6b, 0x36, 0xe1, 0xec, 0x57, 0x2b, 0x6d, 0xd4, 0x24, 0xee, 0x88, - 0x37, 0x98, 0x07, 0xd0, 0x01, 0x72, 0xbb, 0x46, 0x65, 0xc8, 0x9b, 0x4d, 0xad, 0x7a, 0xd1, 0xd6, - 0x66, 0xd1, 0xd6, 0xe5, 0x86, 0xe1, 0x48, 0xcc, 0xf7, 0xdb, 0x1f, 0x9a, 0xe0, 0x76, 0xc7, 0xe0, - 0x0b, 0x20, 0xf9, 0x11, 0x8a, 0x33, 0xa6, 0x67, 0xa4, 0x0b, 0xa6, 0x5c, 0xcf, 0x3a, 0x65, 0x18, - 0x9b, 0xc5, 0x8b, 0xf3, 0xc0, 0xf8, 0xda, 0x07, 0xe3, 0x56, 0xd6, 0x15, 0xa1, 0xf8, 0x7f, 0xf8, - 0xba, 0x6d, 0x96, 0xf8, 0x2f, 0xcd, 0x1a, 0xfc, 0xbd, 0x59, 0xc3, 0x1d, 0x66, 0x7d, 0xd9, 0xfa, - 0x99, 0x99, 0x57, 0xaf, 0x3f, 0x51, 0x9c, 0x95, 0x31, 0xc9, 0xe0, 0x31, 0x90, 0xf1, 0x26, 0x69, - 0xee, 0x55, 0x07, 0xfc, 0xa1, 0x3b, 0xcf, 0xb7, 0xd4, 0x30, 0x77, 0xe4, 0x56, 0x80, 0x73, 0x76, - 0x57, 0xa9, 0xc2, 0x7d, 0xa5, 0x0a, 0x3f, 0x2b, 0x55, 0xb8, 0x5d, 0xab, 0xbd, 0xfb, 0xb5, 0xda, - 0xfb, 0xb6, 0x56, 0x7b, 0x1f, 0x66, 0x61, 0x4c, 0xa3, 0xa5, 0x67, 0xf9, 0x24, 0xb5, 0x7d, 0x92, - 0x62, 0xea, 0x5d, 0xd3, 0x2e, 0xa8, 0x5f, 0x95, 0x87, 0x2f, 0x89, 0x37, 0xe4, 0xf8, 0xec, 0x57, - 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x22, 0x5b, 0x0b, 0xae, 0x04, 0x00, 0x00, + proto.RegisterType((*CanonicalBlockID)(nil), "cometbft.types.v1.CanonicalBlockID") + proto.RegisterType((*CanonicalPartSetHeader)(nil), "cometbft.types.v1.CanonicalPartSetHeader") + proto.RegisterType((*CanonicalProposal)(nil), "cometbft.types.v1.CanonicalProposal") + proto.RegisterType((*CanonicalVote)(nil), "cometbft.types.v1.CanonicalVote") + proto.RegisterType((*CanonicalVoteExtension)(nil), "cometbft.types.v1.CanonicalVoteExtension") +} + +func init() { proto.RegisterFile("cometbft/types/v1/canonical.proto", fileDescriptor_bd60568638662265) } + +var fileDescriptor_bd60568638662265 = []byte{ + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xdf, 0x6a, 0xd3, 0x50, + 0x1c, 0xc7, 0x9b, 0x2e, 0x6d, 0x93, 0xb3, 0x55, 0xb7, 0xc3, 0x18, 0xb1, 0x68, 0x12, 0x2b, 0x48, + 0x77, 0x93, 0xb0, 0xea, 0x13, 0x64, 0x0a, 0x16, 0x27, 0x8e, 0x6c, 0x28, 0x78, 0x53, 0x4e, 0x92, + 0xb3, 0x24, 0x98, 0xe6, 0x1c, 0x92, 0xd3, 0xe1, 0xae, 0xf6, 0x0a, 0x7b, 0x10, 0x1f, 0x64, 0x97, + 0xbb, 0x14, 0x84, 0x2a, 0xe9, 0x8b, 0xc8, 0x39, 0x69, 0xd2, 0x42, 0x47, 0x41, 0x94, 0xdd, 0xfd, + 0xfe, 0xff, 0xbe, 0x7c, 0x7e, 0xc9, 0x01, 0xcf, 0x7d, 0x32, 0xc1, 0xcc, 0xbb, 0x60, 0x36, 0xbb, + 0xa2, 0x38, 0xb7, 0x2f, 0x8f, 0x6c, 0x1f, 0xa5, 0x24, 0x8d, 0x7d, 0x94, 0x58, 0x34, 0x23, 0x8c, + 0xc0, 0xbd, 0xaa, 0xc4, 0x12, 0x25, 0xd6, 0xe5, 0x51, 0x6f, 0x3f, 0x24, 0x21, 0x11, 0x59, 0x9b, + 0x5b, 0x65, 0x61, 0xef, 0xd9, 0xfa, 0xac, 0xb2, 0xa3, 0x4c, 0x1b, 0x21, 0x21, 0x61, 0x82, 0x6d, + 0xe1, 0x79, 0xd3, 0x0b, 0x9b, 0xc5, 0x13, 0x9c, 0x33, 0x34, 0xa1, 0x65, 0x41, 0xff, 0x1a, 0xec, + 0x1e, 0x57, 0xbb, 0x9d, 0x84, 0xf8, 0x5f, 0x47, 0x6f, 
0x20, 0x04, 0x72, 0x84, 0xf2, 0x48, 0x93, + 0x4c, 0x69, 0xb0, 0xe3, 0x0a, 0x1b, 0x7e, 0x06, 0x8f, 0x29, 0xca, 0xd8, 0x38, 0xc7, 0x6c, 0x1c, + 0x61, 0x14, 0xe0, 0x4c, 0x6b, 0x9a, 0xd2, 0x60, 0x7b, 0x78, 0x68, 0xad, 0x49, 0xb5, 0xea, 0x89, + 0xa7, 0x28, 0x63, 0x67, 0x98, 0xbd, 0x13, 0x0d, 0x8e, 0x7c, 0x3b, 0x33, 0x1a, 0x6e, 0x97, 0xae, + 0x06, 0xfb, 0x0e, 0x38, 0xb8, 0xbf, 0x1c, 0xee, 0x83, 0x16, 0x23, 0x0c, 0x25, 0x42, 0x47, 0xd7, + 0x2d, 0x9d, 0x5a, 0x5c, 0x73, 0x29, 0xae, 0xff, 0xb3, 0x09, 0xf6, 0x96, 0x43, 0x32, 0x42, 0x49, + 0x8e, 0x12, 0xf8, 0x1a, 0xc8, 0x5c, 0x91, 0x68, 0x7f, 0x34, 0x34, 0xef, 0xd1, 0x79, 0x16, 0x87, + 0x29, 0x0e, 0x3e, 0xe4, 0xe1, 0xf9, 0x15, 0xc5, 0xae, 0xa8, 0x86, 0x07, 0xa0, 0x1d, 0xe1, 0x38, + 0x8c, 0x98, 0xd8, 0xb0, 0xeb, 0x2e, 0x3c, 0xae, 0x26, 0x23, 0xd3, 0x34, 0xd0, 0xb6, 0x44, 0xb8, + 0x74, 0xe0, 0x21, 0x50, 0x29, 0x49, 0xc6, 0x65, 0x46, 0x36, 0xa5, 0xc1, 0x96, 0xb3, 0x53, 0xcc, + 0x0c, 0xe5, 0xf4, 0xe3, 0x89, 0xcb, 0x63, 0xae, 0x42, 0x49, 0x22, 0x2c, 0xf8, 0x1e, 0x28, 0x1e, + 0x07, 0x3c, 0x8e, 0x03, 0xad, 0x25, 0xd0, 0xbd, 0xd8, 0x84, 0x6e, 0x71, 0x0c, 0x67, 0xbb, 0x98, + 0x19, 0x9d, 0x85, 0xe3, 0x76, 0xc4, 0x84, 0x51, 0x00, 0x1d, 0xa0, 0xd6, 0x97, 0xd4, 0xda, 0x62, + 0x5a, 0xcf, 0x2a, 0x6f, 0x6d, 0x55, 0xb7, 0xb6, 0xce, 0xab, 0x0a, 0x47, 0xe1, 0xe4, 0x6f, 0x7e, + 0x19, 0x92, 0xbb, 0x6c, 0x83, 0x2f, 0x81, 0xe2, 0x47, 0x28, 0x4e, 0xb9, 0xa0, 0x8e, 0x29, 0x0d, + 0xd4, 0x72, 0xd7, 0x31, 0x8f, 0xf1, 0x5d, 0x22, 0x39, 0x0a, 0xfa, 0xdf, 0x9b, 0xa0, 0x5b, 0xcb, + 0xfa, 0x44, 0x18, 0x7e, 0x10, 0xb2, 0xab, 0xb8, 0xe4, 0xff, 0x8a, 0xab, 0xf5, 0xef, 0xb8, 0xda, + 0x1b, 0x70, 0x5d, 0xaf, 0x7c, 0xd0, 0x9c, 0xd6, 0xdb, 0x6f, 0x0c, 0xa7, 0x79, 0x4c, 0x52, 0xf8, + 0x14, 0xa8, 0xb8, 0x72, 0x16, 0x3f, 0xd7, 0x32, 0xf0, 0x97, 0x78, 0x9e, 0xac, 0xa8, 0xe1, 0x78, + 0xd4, 0x5a, 0x80, 0x73, 0x72, 0x5b, 0xe8, 0xd2, 0x5d, 0xa1, 0x4b, 0xbf, 0x0b, 0x5d, 0xba, 0x99, + 0xeb, 0x8d, 0xbb, 0xb9, 0xde, 0xf8, 0x31, 0xd7, 0x1b, 0x5f, 0x86, 0x61, 0xcc, 0xa2, 0xa9, 0xc7, + 0x39, 0xda, 0xf5, 0xbb, 0x51, 0x1b, 0x88, 0xc6, 0xf6, 0xda, 0x6b, 0xe2, 0xb5, 0x05, 0x9f, 0x57, + 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x35, 0xdb, 0x57, 0x1c, 0xb5, 0x04, 0x00, 0x00, } func (m *CanonicalBlockID) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/types/events.pb.go b/api/cometbft/types/v1/events.pb.go similarity index 83% rename from proto/tendermint/types/events.pb.go rename to api/cometbft/types/v1/events.pb.go index 02607e6d9bc..ceb0b571ed1 100644 --- a/proto/tendermint/types/events.pb.go +++ b/api/cometbft/types/v1/events.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/events.proto +// source: cometbft/types/v1/events.proto -package types +package v1 import ( fmt "fmt" @@ -22,6 +22,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// EventDataRoundState is emitted with each new round step. 
type EventDataRoundState struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` @@ -32,7 +33,7 @@ func (m *EventDataRoundState) Reset() { *m = EventDataRoundState{} } func (m *EventDataRoundState) String() string { return proto.CompactTextString(m) } func (*EventDataRoundState) ProtoMessage() {} func (*EventDataRoundState) Descriptor() ([]byte, []int) { - return fileDescriptor_72cfafd446dedf7c, []int{0} + return fileDescriptor_52423bfa52525bbb, []int{0} } func (m *EventDataRoundState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -83,26 +84,26 @@ func (m *EventDataRoundState) GetStep() string { } func init() { - proto.RegisterType((*EventDataRoundState)(nil), "tendermint.types.EventDataRoundState") + proto.RegisterType((*EventDataRoundState)(nil), "cometbft.types.v1.EventDataRoundState") } -func init() { proto.RegisterFile("tendermint/types/events.proto", fileDescriptor_72cfafd446dedf7c) } +func init() { proto.RegisterFile("cometbft/types/v1/events.proto", fileDescriptor_52423bfa52525bbb) } -var fileDescriptor_72cfafd446dedf7c = []byte{ - // 195 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2d, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x2d, 0x4b, - 0xcd, 0x2b, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0x48, 0xeb, 0x81, 0xa5, - 0x95, 0xc2, 0xb9, 0x84, 0x5d, 0x41, 0x2a, 0x5c, 0x12, 0x4b, 0x12, 0x83, 0xf2, 0x4b, 0xf3, 0x52, - 0x82, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0xc4, 0xb8, 0xd8, 0x32, 0x52, 0x33, 0xd3, 0x33, 0x4a, 0x24, - 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0xa0, 0x3c, 0x21, 0x11, 0x2e, 0xd6, 0x22, 0x90, 0x2a, 0x09, - 0x26, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x08, 0x47, 0x48, 0x88, 0x8b, 0xa5, 0xb8, 0x24, 0xb5, 0x40, - 0x82, 0x59, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc, 0x76, 0xf2, 0x3d, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, - 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, - 0xc6, 0x63, 0x39, 0x86, 0x28, 0xe3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, - 0xfd, 0xe4, 0xfc, 0xdc, 0xd4, 0x92, 0xa4, 0xb4, 0x12, 0x04, 0x03, 0xec, 0x50, 0x7d, 0x74, 0x6f, - 0x24, 0xb1, 0x81, 0xc5, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xb3, 0x7f, 0x37, 0xe1, - 0x00, 0x00, 0x00, +var fileDescriptor_52423bfa52525bbb = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x33, 0xd4, 0x4f, 0x2d, + 0x4b, 0xcd, 0x2b, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x84, 0xc9, 0xeb, 0x81, + 0xe5, 0xf5, 0xca, 0x0c, 0x95, 0xc2, 0xb9, 0x84, 0x5d, 0x41, 0x4a, 0x5c, 0x12, 0x4b, 0x12, 0x83, + 0xf2, 0x4b, 0xf3, 0x52, 0x82, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0xc4, 0xb8, 0xd8, 0x32, 0x52, 0x33, + 0xd3, 0x33, 0x4a, 0x24, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0xa0, 0x3c, 0x21, 0x11, 0x2e, 0xd6, + 0x22, 0x90, 0x2a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x08, 0x47, 0x48, 0x88, 0x8b, 0xa5, + 0xb8, 0x24, 0xb5, 0x40, 0x82, 0x59, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc, 0x76, 0xf2, 0x39, 0xf1, + 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, + 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xa3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, + 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb8, 0x83, 0xe1, 0x8c, 
0xc4, 0x82, 0x4c, 0x7d, 0x0c, 0x6f, 0x24, + 0xb1, 0x81, 0x3d, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x86, 0x40, 0xdc, 0x57, 0xe2, 0x00, + 0x00, 0x00, } func (m *EventDataRoundState) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/types/evidence.pb.go b/api/cometbft/types/v1/evidence.pb.go similarity index 88% rename from proto/tendermint/types/evidence.pb.go rename to api/cometbft/types/v1/evidence.pb.go index 1022d4daf45..cdc9bc82673 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/api/cometbft/types/v1/evidence.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/evidence.proto +// source: cometbft/types/v1/evidence.proto -package types +package v1 import ( fmt "fmt" @@ -27,7 +27,10 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Evidence is a generic type for wrapping evidence of misbehavior by a validator. type Evidence struct { + // The type of evidence. + // // Types that are valid to be assigned to Sum: // *Evidence_DuplicateVoteEvidence // *Evidence_LightClientAttackEvidence @@ -38,7 +41,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{0} + return fileDescriptor_4c96acf1a4e66b9a, []int{0} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -125,7 +128,7 @@ func (m *DuplicateVoteEvidence) Reset() { *m = DuplicateVoteEvidence{} } func (m *DuplicateVoteEvidence) String() string { return proto.CompactTextString(m) } func (*DuplicateVoteEvidence) ProtoMessage() {} func (*DuplicateVoteEvidence) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{1} + return fileDescriptor_4c96acf1a4e66b9a, []int{1} } func (m *DuplicateVoteEvidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -202,7 +205,7 @@ func (m *LightClientAttackEvidence) Reset() { *m = LightClientAttackEvid func (m *LightClientAttackEvidence) String() string { return proto.CompactTextString(m) } func (*LightClientAttackEvidence) ProtoMessage() {} func (*LightClientAttackEvidence) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{2} + return fileDescriptor_4c96acf1a4e66b9a, []int{2} } func (m *LightClientAttackEvidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -266,6 +269,7 @@ func (m *LightClientAttackEvidence) GetTimestamp() time.Time { return time.Time{} } +// EvidenceList is a list of evidence. 
type EvidenceList struct { Evidence []Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence"` } @@ -274,7 +278,7 @@ func (m *EvidenceList) Reset() { *m = EvidenceList{} } func (m *EvidenceList) String() string { return proto.CompactTextString(m) } func (*EvidenceList) ProtoMessage() {} func (*EvidenceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{3} + return fileDescriptor_4c96acf1a4e66b9a, []int{3} } func (m *EvidenceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -311,50 +315,50 @@ func (m *EvidenceList) GetEvidence() []Evidence { } func init() { - proto.RegisterType((*Evidence)(nil), "tendermint.types.Evidence") - proto.RegisterType((*DuplicateVoteEvidence)(nil), "tendermint.types.DuplicateVoteEvidence") - proto.RegisterType((*LightClientAttackEvidence)(nil), "tendermint.types.LightClientAttackEvidence") - proto.RegisterType((*EvidenceList)(nil), "tendermint.types.EvidenceList") + proto.RegisterType((*Evidence)(nil), "cometbft.types.v1.Evidence") + proto.RegisterType((*DuplicateVoteEvidence)(nil), "cometbft.types.v1.DuplicateVoteEvidence") + proto.RegisterType((*LightClientAttackEvidence)(nil), "cometbft.types.v1.LightClientAttackEvidence") + proto.RegisterType((*EvidenceList)(nil), "cometbft.types.v1.EvidenceList") } -func init() { proto.RegisterFile("tendermint/types/evidence.proto", fileDescriptor_6825fabc78e0a168) } +func init() { proto.RegisterFile("cometbft/types/v1/evidence.proto", fileDescriptor_4c96acf1a4e66b9a) } -var fileDescriptor_6825fabc78e0a168 = []byte{ +var fileDescriptor_4c96acf1a4e66b9a = []byte{ // 533 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xe3, 0x38, 0xa9, 0xc2, 0xb6, 0x40, 0x58, 0x5a, 0x48, 0x43, 0xe4, 0x44, 0xe1, 0xd0, - 0x48, 0x80, 0x2d, 0xb5, 0x57, 0x2e, 0x35, 0x20, 0x15, 0x29, 0x20, 0x64, 0xa1, 0x1e, 0xb8, 0x58, - 0xeb, 0xcd, 0xc6, 0x59, 0xd5, 0xde, 0x8d, 0xe2, 0x49, 0x50, 0x79, 0x8a, 0x3c, 0x56, 0x2f, 0x48, - 0x3d, 0x72, 0x02, 0x94, 0xf0, 0x20, 0xc8, 0xeb, 0x3f, 0x89, 0xea, 0x98, 0x13, 0x97, 0xc8, 0x99, - 0xf9, 0x7d, 0x3b, 0x33, 0x9f, 0x67, 0x8d, 0xba, 0xc0, 0xc4, 0x88, 0xcd, 0x42, 0x2e, 0xc0, 0x82, - 0xeb, 0x29, 0x8b, 0x2c, 0xb6, 0xe0, 0x23, 0x26, 0x28, 0x33, 0xa7, 0x33, 0x09, 0x12, 0x37, 0x37, - 0x80, 0xa9, 0x80, 0xf6, 0xa1, 0x2f, 0x7d, 0xa9, 0x92, 0x56, 0xfc, 0x94, 0x70, 0xed, 0xae, 0x2f, - 0xa5, 0x1f, 0x30, 0x4b, 0xfd, 0xf3, 0xe6, 0x63, 0x0b, 0x78, 0xc8, 0x22, 0x20, 0xe1, 0x34, 0x05, - 0x3a, 0x85, 0x4a, 0xea, 0x37, 0xcd, 0xf6, 0x0a, 0xd9, 0x05, 0x09, 0xf8, 0x88, 0x80, 0x9c, 0x25, - 0x44, 0xff, 0x8f, 0x86, 0x1a, 0xef, 0xd2, 0xde, 0x30, 0x41, 0x4f, 0x47, 0xf3, 0x69, 0xc0, 0x29, - 0x01, 0xe6, 0x2e, 0x24, 0x30, 0x37, 0x6b, 0xbb, 0xa5, 0xf5, 0xb4, 0xc1, 0xfe, 0xe9, 0x89, 0x79, - 0xb7, 0x6f, 0xf3, 0x6d, 0x26, 0xb8, 0x94, 0xc0, 0xb2, 0x93, 0x2e, 0x2a, 0xce, 0xd1, 0x68, 0x57, - 0x02, 0x0b, 0xd4, 0x09, 0xb8, 0x3f, 0x01, 0x97, 0x06, 0x9c, 0x09, 0x70, 0x09, 0x00, 0xa1, 0x57, - 0x9b, 0x3a, 0x55, 0x55, 0xe7, 0x45, 0xb1, 0xce, 0x30, 0x56, 0xbd, 0x51, 0xa2, 0x73, 0xa5, 0xd9, - 0xaa, 0x75, 0x1c, 0x94, 0x25, 0xed, 0x3a, 0xd2, 0xa3, 0x79, 0xd8, 0x5f, 0x56, 0xd1, 0xd1, 0xce, - 0x4e, 0xf1, 0x2b, 0xb4, 0xa7, 0x26, 0x25, 0xe9, 0x88, 0x4f, 0x8a, 0xa5, 0x63, 0xde, 0xa9, 0xc7, - 0xd4, 0x79, 0x8e, 0x7b, 0x69, 0xa7, 0xff, 0xc4, 0x6d, 0xfc, 0x12, 0x61, 0x90, 0x40, 0x82, 0xd8, - 0x4d, 0x2e, 0x7c, 0x77, 0x2a, 0xbf, 0xb2, 0x59, 0x4b, 0xef, 0x69, 0x03, 0xdd, 0x69, 0xaa, 0xcc, - 0xa5, 0x4a, 0x7c, 0x8a, 0xe3, 
0xf8, 0x04, 0x3d, 0xcc, 0xdf, 0x4f, 0x8a, 0xd6, 0x14, 0xfa, 0x20, - 0x0f, 0x27, 0xa0, 0x8d, 0xee, 0xe5, 0x8b, 0xd0, 0xaa, 0xab, 0x46, 0xda, 0x66, 0xb2, 0x2a, 0x66, - 0xb6, 0x2a, 0xe6, 0xe7, 0x8c, 0xb0, 0x1b, 0x37, 0x3f, 0xbb, 0x95, 0xe5, 0xaf, 0xae, 0xe6, 0x6c, - 0x64, 0xfd, 0xef, 0x55, 0x74, 0x5c, 0x6a, 0x2a, 0x7e, 0x8f, 0x1e, 0x51, 0x29, 0xc6, 0x01, 0xa7, - 0xaa, 0x6f, 0x2f, 0x90, 0xf4, 0x2a, 0x75, 0xa8, 0x53, 0xf2, 0x72, 0xec, 0x98, 0x71, 0x9a, 0x5b, - 0x32, 0x15, 0xc1, 0xcf, 0xd1, 0x7d, 0x2a, 0xc3, 0x50, 0x0a, 0x77, 0xc2, 0x62, 0x4e, 0x39, 0xa7, - 0x3b, 0x07, 0x49, 0xf0, 0x42, 0xc5, 0xf0, 0x47, 0x74, 0xe8, 0x5d, 0x7f, 0x23, 0x02, 0xb8, 0x60, - 0x6e, 0x3e, 0x6d, 0xd4, 0xd2, 0x7b, 0xfa, 0x60, 0xff, 0xf4, 0xd9, 0x0e, 0x97, 0x33, 0xc6, 0x79, - 0x9c, 0x0b, 0xf3, 0x58, 0x54, 0x62, 0x7c, 0xad, 0xc4, 0xf8, 0xff, 0xe1, 0xe7, 0x10, 0x1d, 0x64, - 0xee, 0x0d, 0x79, 0x04, 0xf8, 0x35, 0x6a, 0x6c, 0xdd, 0x1e, 0x5d, 0x1d, 0x59, 0x98, 0x22, 0xdf, - 0xd3, 0x5a, 0x7c, 0xa4, 0x93, 0x2b, 0xec, 0x0f, 0x37, 0x2b, 0x43, 0xbb, 0x5d, 0x19, 0xda, 0xef, - 0x95, 0xa1, 0x2d, 0xd7, 0x46, 0xe5, 0x76, 0x6d, 0x54, 0x7e, 0xac, 0x8d, 0xca, 0x97, 0x33, 0x9f, - 0xc3, 0x64, 0xee, 0x99, 0x54, 0x86, 0x16, 0x95, 0x21, 0x03, 0x6f, 0x0c, 0x9b, 0x87, 0xe4, 0x0b, - 0x72, 0xf7, 0xda, 0x7b, 0x7b, 0x2a, 0x7e, 0xf6, 0x37, 0x00, 0x00, 0xff, 0xff, 0xab, 0xbe, 0xb8, - 0x21, 0x99, 0x04, 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x38, 0xa9, 0xc2, 0xb6, 0x40, 0xbb, 0xb4, 0x6a, 0x1a, 0x5a, 0x27, 0x84, 0x03, + 0x39, 0x54, 0xb6, 0x1a, 0xce, 0x1c, 0x6a, 0x40, 0xaa, 0x50, 0x10, 0xc8, 0x42, 0x3d, 0x70, 0xb1, + 0xd6, 0xce, 0xc6, 0x59, 0xd5, 0xf6, 0x5a, 0xf1, 0x24, 0xa8, 0x3c, 0x45, 0xc5, 0x53, 0xf5, 0x46, + 0x8f, 0x9c, 0x00, 0x25, 0x12, 0xcf, 0x81, 0x76, 0x6d, 0x6f, 0x23, 0xc5, 0xe6, 0xc4, 0x6d, 0x33, + 0xf3, 0xfd, 0x3b, 0x33, 0xff, 0xc4, 0x8b, 0x7a, 0x3e, 0x8f, 0x28, 0x78, 0x13, 0xb0, 0xe0, 0x3a, + 0xa1, 0xa9, 0xb5, 0x38, 0xb3, 0xe8, 0x82, 0x8d, 0x69, 0xec, 0x53, 0x33, 0x99, 0x71, 0xe0, 0x78, + 0xaf, 0x20, 0x4c, 0x49, 0x98, 0x8b, 0xb3, 0xce, 0xc9, 0xa6, 0x28, 0xcb, 0x49, 0x45, 0xe7, 0xd9, + 0x66, 0x7a, 0x41, 0x42, 0x36, 0x26, 0xc0, 0x67, 0x39, 0xb2, 0x1f, 0xf0, 0x80, 0xcb, 0xa3, 0x25, + 0x4e, 0x79, 0xb4, 0x1b, 0x70, 0x1e, 0x84, 0xd4, 0x92, 0xbf, 0xbc, 0xf9, 0xc4, 0x02, 0x16, 0xd1, + 0x14, 0x48, 0x94, 0x64, 0x40, 0xff, 0x8f, 0x86, 0x5a, 0x6f, 0xf3, 0xf6, 0xb0, 0x87, 0x0e, 0xc7, + 0xf3, 0x24, 0x64, 0x3e, 0x01, 0xea, 0x2e, 0x38, 0x50, 0xb7, 0xe8, 0xbc, 0xad, 0xf5, 0xb4, 0xc1, + 0xf6, 0x70, 0x60, 0x6e, 0xb4, 0x6e, 0xbe, 0x29, 0x14, 0x97, 0x1c, 0x68, 0x71, 0xd5, 0x45, 0xcd, + 0x39, 0x18, 0x97, 0x25, 0x30, 0x47, 0xc7, 0x21, 0x0b, 0xa6, 0xe0, 0xfa, 0x21, 0xa3, 0x31, 0xb8, + 0x04, 0x80, 0xf8, 0x57, 0xf7, 0x85, 0xea, 0xb2, 0xd0, 0x69, 0x49, 0xa1, 0x91, 0x90, 0xbd, 0x96, + 0xaa, 0x73, 0x29, 0x5a, 0x2b, 0x76, 0x14, 0x56, 0x25, 0xed, 0x26, 0xd2, 0xd3, 0x79, 0xd4, 0xff, + 0x56, 0x47, 0x07, 0xa5, 0xad, 0x62, 0x13, 0x6d, 0xc9, 0x59, 0x49, 0x3e, 0xe4, 0x61, 0x49, 0x6d, + 0x21, 0x70, 0x9a, 0x02, 0x3b, 0x57, 0xbc, 0x97, 0xf7, 0xfa, 0x6f, 0xde, 0xc6, 0xa7, 0x08, 0x03, + 0x07, 0x12, 0x0a, 0x47, 0x59, 0x1c, 0xb8, 0x09, 0xff, 0x42, 0x67, 0x6d, 0xbd, 0xa7, 0x0d, 0x74, + 0x67, 0x57, 0x66, 0x2e, 0x65, 0xe2, 0xa3, 0x88, 0xe3, 0x17, 0xe8, 0xb1, 0x5a, 0x6d, 0x8e, 0x36, + 0x24, 0xfa, 0x48, 0x85, 0x33, 0xd0, 0x46, 0x0f, 0xd4, 0x32, 0xdb, 0x4d, 0xd9, 0x49, 0xc7, 0xcc, + 0xd6, 0x6d, 0x16, 0xeb, 0x36, 0x3f, 0x15, 0x84, 0xdd, 0xba, 0xfd, 0xd9, 0xad, 0xdd, 0xfc, 0xea, + 0x6a, 0xce, 0xbd, 0xac, 
0xff, 0xbd, 0x8e, 0x8e, 0x2a, 0x6d, 0xc5, 0xef, 0xd0, 0x9e, 0xcf, 0xe3, + 0x49, 0xc8, 0x7c, 0xd9, 0xb7, 0x17, 0x72, 0xff, 0x2a, 0xf7, 0xe8, 0xa4, 0x6a, 0x3f, 0xb6, 0x80, + 0x9c, 0xdd, 0x35, 0x9d, 0x8c, 0xe0, 0xe7, 0xe8, 0xa1, 0xcf, 0xa3, 0x88, 0xc7, 0xee, 0x94, 0x0a, + 0x4e, 0x7a, 0xa7, 0x3b, 0x3b, 0x59, 0xf0, 0x42, 0xc6, 0xf0, 0x07, 0xb4, 0xef, 0x5d, 0x7f, 0x25, + 0x31, 0xb0, 0x98, 0xba, 0x6a, 0xdc, 0xb4, 0xad, 0xf7, 0xf4, 0xc1, 0xf6, 0xf0, 0xb8, 0xcc, 0xe7, + 0x02, 0x72, 0x9e, 0x28, 0xa5, 0x8a, 0xa5, 0x15, 0xd6, 0x37, 0x2a, 0xac, 0xff, 0x1f, 0x8e, 0xbe, + 0x47, 0x3b, 0x85, 0x7f, 0x23, 0x96, 0x02, 0x7e, 0x85, 0x5a, 0x6b, 0xdf, 0x90, 0x18, 0xe3, 0x69, + 0xc9, 0x18, 0xea, 0xcf, 0xda, 0x10, 0x77, 0x3a, 0x4a, 0x62, 0x8f, 0x6e, 0x97, 0x86, 0x76, 0xb7, + 0x34, 0xb4, 0xdf, 0x4b, 0x43, 0xbb, 0x59, 0x19, 0xb5, 0xbb, 0x95, 0x51, 0xfb, 0xb1, 0x32, 0x6a, + 0x9f, 0x87, 0x01, 0x83, 0xe9, 0xdc, 0x13, 0x97, 0x59, 0xea, 0x75, 0x50, 0x07, 0x92, 0x30, 0x6b, + 0xe3, 0xcd, 0xf0, 0xb6, 0xe4, 0x14, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x50, 0xcc, 0xfa, + 0x44, 0xa3, 0x04, 0x00, 0x00, } func (m *Evidence) Marshal() (dAtA []byte, err error) { diff --git a/api/cometbft/types/v1/params.pb.go b/api/cometbft/types/v1/params.pb.go new file mode 100644 index 00000000000..7f3af701696 --- /dev/null +++ b/api/cometbft/types/v1/params.pb.go @@ -0,0 +1,2739 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1/params.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + types "github.com/cosmos/gogoproto/types" + _ "github.com/golang/protobuf/ptypes/duration" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. +type ConsensusParams struct { + Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` + Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` + Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Abci *ABCIParams `protobuf:"bytes,5,opt,name=abci,proto3" json:"abci,omitempty"` // Deprecated: Do not use. 
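// Editor's aside — a hedged sketch, not generated output. The Abci field is
// kept only for backward compatibility; per the ABCIParams deprecation note
// later in this file, its single setting now lives on FeatureParams. The
// height value 100 below is a made-up example:
//
//	cp := &ConsensusParams{
//		Feature: &FeatureParams{
//			VoteExtensionsEnableHeight: &types.Int64Value{Value: 100},
//		},
//		// Abci is left nil in new code.
//	}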
+ Synchrony *SynchronyParams `protobuf:"bytes,6,opt,name=synchrony,proto3" json:"synchrony,omitempty"` + Feature *FeatureParams `protobuf:"bytes,7,opt,name=feature,proto3" json:"feature,omitempty"` +} + +func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } +func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } +func (*ConsensusParams) ProtoMessage() {} +func (*ConsensusParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{0} +} +func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParams.Merge(m, src) +} +func (m *ConsensusParams) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParams) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo + +func (m *ConsensusParams) GetBlock() *BlockParams { + if m != nil { + return m.Block + } + return nil +} + +func (m *ConsensusParams) GetEvidence() *EvidenceParams { + if m != nil { + return m.Evidence + } + return nil +} + +func (m *ConsensusParams) GetValidator() *ValidatorParams { + if m != nil { + return m.Validator + } + return nil +} + +func (m *ConsensusParams) GetVersion() *VersionParams { + if m != nil { + return m.Version + } + return nil +} + +// Deprecated: Do not use. +func (m *ConsensusParams) GetAbci() *ABCIParams { + if m != nil { + return m.Abci + } + return nil +} + +func (m *ConsensusParams) GetSynchrony() *SynchronyParams { + if m != nil { + return m.Synchrony + } + return nil +} + +func (m *ConsensusParams) GetFeature() *FeatureParams { + if m != nil { + return m.Feature + } + return nil +} + +// BlockParams define limits on the block size and gas. +type BlockParams struct { + // Maximum size of a block, in bytes. + // + // Must be greater or equal to -1 and cannot be greater than the hard-coded + // maximum block size, which is 100MB. + // + // If set to -1, the limit is the hard-coded maximum block size. + MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Maximum gas wanted by transactions included in a block. + // + // Must be greater or equal to -1. If set to -1, no limit is enforced. 
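// Editor's aside — illustrative only. The generated getters nil-check their
// receivers, so they chain safely even on a nil *ConsensusParams:
//
//	var cp *ConsensusParams
//	maxBytes := cp.GetBlock().GetMaxBytes() // yields 0, never panics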
+ MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` +} + +func (m *BlockParams) Reset() { *m = BlockParams{} } +func (m *BlockParams) String() string { return proto.CompactTextString(m) } +func (*BlockParams) ProtoMessage() {} +func (*BlockParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{1} +} +func (m *BlockParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockParams.Merge(m, src) +} +func (m *BlockParams) XXX_Size() int { + return m.Size() +} +func (m *BlockParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockParams.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockParams proto.InternalMessageInfo + +func (m *BlockParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} + +func (m *BlockParams) GetMaxGas() int64 { + if m != nil { + return m.MaxGas + } + return 0 +} + +// EvidenceParams determine the validity of evidence of Byzantine behavior. +type EvidenceParams struct { + // Maximum age of evidence, in blocks. + // + // The recommended formula for calculating it is max_age_duration / {average + // block time}. + MaxAgeNumBlocks int64 `protobuf:"varint,1,opt,name=max_age_num_blocks,json=maxAgeNumBlocks,proto3" json:"max_age_num_blocks,omitempty"` + // Maximum age of evidence, in time. + // + // The recommended value should correspond to the application's + // "unbonding period" or other similar mechanism for handling + // Nothing-At-Stake attacks. + // See: https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed. + MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` + // Maximum size in bytes of evidence allowed to be included in a block. + // + // It should fall comfortably under the maximum size of a block. 
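// Editor's aside — a minimal sketch of the sentinel semantics documented
// above (the 4 MB figure is an arbitrary example, not a recommendation):
//
//	bp := &BlockParams{
//		MaxBytes: 4 * 1024 * 1024, // explicit cap, well under the 100MB hard limit
//		MaxGas:   -1,              // -1 disables the gas limit
//	}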
+ MaxBytes int64 `protobuf:"varint,3,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` +} + +func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } +func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } +func (*EvidenceParams) ProtoMessage() {} +func (*EvidenceParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{2} +} +func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvidenceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvidenceParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvidenceParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceParams.Merge(m, src) +} +func (m *EvidenceParams) XXX_Size() int { + return m.Size() +} +func (m *EvidenceParams) XXX_DiscardUnknown() { + xxx_messageInfo_EvidenceParams.DiscardUnknown(m) +} + +var xxx_messageInfo_EvidenceParams proto.InternalMessageInfo + +func (m *EvidenceParams) GetMaxAgeNumBlocks() int64 { + if m != nil { + return m.MaxAgeNumBlocks + } + return 0 +} + +func (m *EvidenceParams) GetMaxAgeDuration() time.Duration { + if m != nil { + return m.MaxAgeDuration + } + return 0 +} + +func (m *EvidenceParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} + +// ValidatorParams restrict the public key types validators can use. +// +// NOTE: uses ABCI public keys naming, not Amino names. +type ValidatorParams struct { + PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes,proto3" json:"pub_key_types,omitempty"` +} + +func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } +func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } +func (*ValidatorParams) ProtoMessage() {} +func (*ValidatorParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{3} +} +func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorParams.Merge(m, src) +} +func (m *ValidatorParams) XXX_Size() int { + return m.Size() +} +func (m *ValidatorParams) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorParams proto.InternalMessageInfo + +func (m *ValidatorParams) GetPubKeyTypes() []string { + if m != nil { + return m.PubKeyTypes + } + return nil +} + +// VersionParams contain the version of specific components of CometBFT. +type VersionParams struct { + // The ABCI application version. + // + // It was named app_version in CometBFT 0.34. 
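// Editor's aside — applying the recommended formula above; the 6s average
// block time and 48h unbonding period are assumptions for illustration:
//
//	avgBlockTime := 6 * time.Second
//	maxAge := 48 * time.Hour
//	ep := &EvidenceParams{
//		MaxAgeDuration:  maxAge,
//		MaxAgeNumBlocks: int64(maxAge / avgBlockTime), // 28800 blocks
//	}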
+ App uint64 `protobuf:"varint,1,opt,name=app,proto3" json:"app,omitempty"` +} + +func (m *VersionParams) Reset() { *m = VersionParams{} } +func (m *VersionParams) String() string { return proto.CompactTextString(m) } +func (*VersionParams) ProtoMessage() {} +func (*VersionParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{4} +} +func (m *VersionParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VersionParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VersionParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VersionParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionParams.Merge(m, src) +} +func (m *VersionParams) XXX_Size() int { + return m.Size() +} +func (m *VersionParams) XXX_DiscardUnknown() { + xxx_messageInfo_VersionParams.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionParams proto.InternalMessageInfo + +func (m *VersionParams) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + +// HashedParams is a subset of ConsensusParams. +// +// It is hashed into the Header.ConsensusHash. +type HashedParams struct { + BlockMaxBytes int64 `protobuf:"varint,1,opt,name=block_max_bytes,json=blockMaxBytes,proto3" json:"block_max_bytes,omitempty"` + BlockMaxGas int64 `protobuf:"varint,2,opt,name=block_max_gas,json=blockMaxGas,proto3" json:"block_max_gas,omitempty"` +} + +func (m *HashedParams) Reset() { *m = HashedParams{} } +func (m *HashedParams) String() string { return proto.CompactTextString(m) } +func (*HashedParams) ProtoMessage() {} +func (*HashedParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{5} +} +func (m *HashedParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashedParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashedParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashedParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashedParams.Merge(m, src) +} +func (m *HashedParams) XXX_Size() int { + return m.Size() +} +func (m *HashedParams) XXX_DiscardUnknown() { + xxx_messageInfo_HashedParams.DiscardUnknown(m) +} + +var xxx_messageInfo_HashedParams proto.InternalMessageInfo + +func (m *HashedParams) GetBlockMaxBytes() int64 { + if m != nil { + return m.BlockMaxBytes + } + return 0 +} + +func (m *HashedParams) GetBlockMaxGas() int64 { + if m != nil { + return m.BlockMaxGas + } + return 0 +} + +// SynchronyParams determine the validity of block timestamps. +// +// These parameters are part of the Proposer-Based Timestamps (PBTS) algorithm. +// For more information on the relationship of the synchrony parameters to +// block timestamps validity, refer to the PBTS specification: +// https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/README.md +type SynchronyParams struct { + // Bound for how skewed a proposer's clock may be from any validator on the + // network while still producing valid proposals. 
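// Editor's aside — a hedged sketch of how the hashed subset relates to the
// full parameters (the direct field-to-field mapping is an assumption based
// on the field names, not taken from this diff):
//
//	cp := &ConsensusParams{Block: &BlockParams{MaxBytes: 1048576, MaxGas: -1}}
//	hp := HashedParams{
//		BlockMaxBytes: cp.GetBlock().GetMaxBytes(),
//		BlockMaxGas:   cp.GetBlock().GetMaxGas(),
//	}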
+ Precision *time.Duration `protobuf:"bytes,1,opt,name=precision,proto3,stdduration" json:"precision,omitempty"` + // Bound for how long a proposal message may take to reach all validators on + // a network and still be considered valid. + MessageDelay *time.Duration `protobuf:"bytes,2,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay,omitempty"` +} + +func (m *SynchronyParams) Reset() { *m = SynchronyParams{} } +func (m *SynchronyParams) String() string { return proto.CompactTextString(m) } +func (*SynchronyParams) ProtoMessage() {} +func (*SynchronyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{6} +} +func (m *SynchronyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SynchronyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SynchronyParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SynchronyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_SynchronyParams.Merge(m, src) +} +func (m *SynchronyParams) XXX_Size() int { + return m.Size() +} +func (m *SynchronyParams) XXX_DiscardUnknown() { + xxx_messageInfo_SynchronyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_SynchronyParams proto.InternalMessageInfo + +func (m *SynchronyParams) GetPrecision() *time.Duration { + if m != nil { + return m.Precision + } + return nil +} + +func (m *SynchronyParams) GetMessageDelay() *time.Duration { + if m != nil { + return m.MessageDelay + } + return nil +} + +// FeatureParams configure the height from which features of CometBFT are enabled. +type FeatureParams struct { + // Height during which vote extensions will be enabled. + // + // A value of 0 means vote extensions are disabled. A value > 0 denotes + // the height at which vote extensions will be (or have been) enabled. + // + // During the specified height, and for all subsequent heights, precommit + // messages that do not contain valid extension data will be considered + // invalid. Prior to this height, or when this height is set to 0, vote + // extensions will not be used or accepted by validators on the network. + // + // Once enabled, vote extensions will be created by the application in + // ExtendVote, validated by the application in VerifyVoteExtension, and + // used by the application in PrepareProposal, when proposing the next block. + // + // Cannot be set to heights lower or equal to the current blockchain height. + VoteExtensionsEnableHeight *types.Int64Value `protobuf:"bytes,1,opt,name=vote_extensions_enable_height,json=voteExtensionsEnableHeight,proto3" json:"vote_extensions_enable_height,omitempty"` + // Height at which Proposer-Based Timestamps (PBTS) will be enabled. + // + // A value of 0 means PBTS is disabled. A value > 0 denotes the height at + // which PBTS will be (or has been) enabled. + // + // From the specified height, and for all subsequent heights, the PBTS + // algorithm will be used to produce and validate block timestamps. Prior to + // this height, or when this height is set to 0, the legacy BFT Time + // algorithm is used to produce and validate timestamps. + // + // Cannot be set to heights lower or equal to the current blockchain height. 
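// Editor's aside — enabling PBTS typically pairs an enable height with the
// synchrony bounds defined above; every concrete value here is an assumption
// for illustration, not a recommended setting:
//
//	precision := 500 * time.Millisecond // clock-skew bound
//	delay := 2 * time.Second            // message-delay bound
//	sp := &SynchronyParams{Precision: &precision, MessageDelay: &delay}
//	fp := &FeatureParams{PbtsEnableHeight: &types.Int64Value{Value: 1000}}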
+ PbtsEnableHeight *types.Int64Value `protobuf:"bytes,2,opt,name=pbts_enable_height,json=pbtsEnableHeight,proto3" json:"pbts_enable_height,omitempty"` +} + +func (m *FeatureParams) Reset() { *m = FeatureParams{} } +func (m *FeatureParams) String() string { return proto.CompactTextString(m) } +func (*FeatureParams) ProtoMessage() {} +func (*FeatureParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{7} +} +func (m *FeatureParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FeatureParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FeatureParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FeatureParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeatureParams.Merge(m, src) +} +func (m *FeatureParams) XXX_Size() int { + return m.Size() +} +func (m *FeatureParams) XXX_DiscardUnknown() { + xxx_messageInfo_FeatureParams.DiscardUnknown(m) +} + +var xxx_messageInfo_FeatureParams proto.InternalMessageInfo + +func (m *FeatureParams) GetVoteExtensionsEnableHeight() *types.Int64Value { + if m != nil { + return m.VoteExtensionsEnableHeight + } + return nil +} + +func (m *FeatureParams) GetPbtsEnableHeight() *types.Int64Value { + if m != nil { + return m.PbtsEnableHeight + } + return nil +} + +// ABCIParams is deprecated and its contents moved to FeatureParams +// +// Deprecated: Do not use. +type ABCIParams struct { + // vote_extensions_enable_height has been deprecated. + // Instead, use FeatureParams.vote_extensions_enable_height. + VoteExtensionsEnableHeight int64 `protobuf:"varint,1,opt,name=vote_extensions_enable_height,json=voteExtensionsEnableHeight,proto3" json:"vote_extensions_enable_height,omitempty"` +} + +func (m *ABCIParams) Reset() { *m = ABCIParams{} } +func (m *ABCIParams) String() string { return proto.CompactTextString(m) } +func (*ABCIParams) ProtoMessage() {} +func (*ABCIParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8c2f6d19461b2fe7, []int{8} +} +func (m *ABCIParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIParams.Merge(m, src) +} +func (m *ABCIParams) XXX_Size() int { + return m.Size() +} +func (m *ABCIParams) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIParams proto.InternalMessageInfo + +func (m *ABCIParams) GetVoteExtensionsEnableHeight() int64 { + if m != nil { + return m.VoteExtensionsEnableHeight + } + return 0 +} + +func init() { + proto.RegisterType((*ConsensusParams)(nil), "cometbft.types.v1.ConsensusParams") + proto.RegisterType((*BlockParams)(nil), "cometbft.types.v1.BlockParams") + proto.RegisterType((*EvidenceParams)(nil), "cometbft.types.v1.EvidenceParams") + proto.RegisterType((*ValidatorParams)(nil), "cometbft.types.v1.ValidatorParams") + proto.RegisterType((*VersionParams)(nil), "cometbft.types.v1.VersionParams") + proto.RegisterType((*HashedParams)(nil), "cometbft.types.v1.HashedParams") + proto.RegisterType((*SynchronyParams)(nil), 
"cometbft.types.v1.SynchronyParams") + proto.RegisterType((*FeatureParams)(nil), "cometbft.types.v1.FeatureParams") + proto.RegisterType((*ABCIParams)(nil), "cometbft.types.v1.ABCIParams") +} + +func init() { proto.RegisterFile("cometbft/types/v1/params.proto", fileDescriptor_8c2f6d19461b2fe7) } + +var fileDescriptor_8c2f6d19461b2fe7 = []byte{ + // 729 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xbf, 0x4e, 0xdb, 0x50, + 0x14, 0xc6, 0x73, 0xe3, 0x00, 0xc9, 0x09, 0x21, 0xe9, 0x55, 0xa5, 0xba, 0x20, 0x1c, 0xea, 0xa1, + 0x42, 0x42, 0xb2, 0x05, 0xa5, 0x1d, 0x90, 0x50, 0x4b, 0x80, 0x02, 0xad, 0x68, 0x91, 0xa9, 0x18, + 0x58, 0xac, 0xeb, 0xe4, 0xe2, 0x58, 0xc4, 0x7f, 0xe4, 0x6b, 0xa7, 0xc9, 0x5b, 0x74, 0xaa, 0x3a, + 0x32, 0xb6, 0x6f, 0xd0, 0xbe, 0x40, 0xc5, 0xc8, 0xd8, 0x89, 0x56, 0x61, 0xe9, 0x63, 0x54, 0xbe, + 0xb6, 0x13, 0x12, 0x42, 0x61, 0xb3, 0x7d, 0xbe, 0xdf, 0xe7, 0xef, 0x9e, 0x73, 0x6c, 0x90, 0xea, + 0xae, 0x4d, 0x03, 0xe3, 0x24, 0x50, 0x83, 0xae, 0x47, 0x99, 0xda, 0x5e, 0x56, 0x3d, 0xe2, 0x13, + 0x9b, 0x29, 0x9e, 0xef, 0x06, 0x2e, 0x7e, 0x90, 0xd6, 0x15, 0x5e, 0x57, 0xda, 0xcb, 0xb3, 0x0f, + 0x4d, 0xd7, 0x74, 0x79, 0x55, 0x8d, 0xae, 0x62, 0xe1, 0xac, 0x64, 0xba, 0xae, 0xd9, 0xa2, 0x2a, + 0xbf, 0x33, 0xc2, 0x13, 0xb5, 0x11, 0xfa, 0x24, 0xb0, 0x5c, 0xe7, 0xb6, 0xfa, 0x47, 0x9f, 0x78, + 0x1e, 0xf5, 0x93, 0x17, 0xc9, 0x3f, 0x04, 0x28, 0x6f, 0xba, 0x0e, 0xa3, 0x0e, 0x0b, 0xd9, 0x01, + 0x8f, 0x80, 0x57, 0x61, 0xc2, 0x68, 0xb9, 0xf5, 0x53, 0x11, 0x2d, 0xa0, 0xc5, 0xe2, 0x8a, 0xa4, + 0xdc, 0x08, 0xa3, 0xd4, 0xa2, 0x7a, 0x2c, 0xd7, 0x62, 0x31, 0x5e, 0x87, 0x3c, 0x6d, 0x5b, 0x0d, + 0xea, 0xd4, 0xa9, 0x98, 0xe5, 0xe0, 0x93, 0x31, 0xe0, 0x76, 0x22, 0x49, 0xd8, 0x3e, 0x82, 0x5f, + 0x41, 0xa1, 0x4d, 0x5a, 0x56, 0x83, 0x04, 0xae, 0x2f, 0x0a, 0x9c, 0x97, 0xc7, 0xf0, 0x47, 0xa9, + 0x26, 0x31, 0x18, 0x40, 0x78, 0x0d, 0xa6, 0xda, 0xd4, 0x67, 0x96, 0xeb, 0x88, 0x39, 0xce, 0x2f, + 0x8c, 0xe3, 0x63, 0x45, 0x42, 0xa7, 0x00, 0x7e, 0x0e, 0x39, 0x62, 0xd4, 0x2d, 0x71, 0x82, 0x83, + 0xf3, 0x63, 0xc0, 0x8d, 0xda, 0xe6, 0x5e, 0x4c, 0xd5, 0xb2, 0x22, 0xd2, 0xb8, 0x3c, 0x0a, 0xcd, + 0xba, 0x4e, 0xbd, 0xe9, 0xbb, 0x4e, 0x57, 0x9c, 0xbc, 0x35, 0xf4, 0x61, 0xaa, 0x49, 0x43, 0xf7, + 0xa1, 0x28, 0xf4, 0x09, 0x25, 0x41, 0xe8, 0x53, 0x71, 0xea, 0xd6, 0xd0, 0xaf, 0x63, 0x45, 0x1a, + 0x3a, 0x01, 0xe4, 0x3d, 0x28, 0x5e, 0x9b, 0x03, 0x9e, 0x83, 0x82, 0x4d, 0x3a, 0xba, 0xd1, 0x0d, + 0x28, 0xe3, 0xa3, 0x13, 0xb4, 0xbc, 0x4d, 0x3a, 0xb5, 0xe8, 0x1e, 0x3f, 0x82, 0xa9, 0xa8, 0x68, + 0x12, 0xc6, 0x87, 0x23, 0x68, 0x93, 0x36, 0xe9, 0xec, 0x10, 0xf6, 0x26, 0x97, 0x17, 0x2a, 0x39, + 0xf9, 0x1b, 0x82, 0x99, 0xe1, 0xd1, 0xe0, 0x25, 0xc0, 0x11, 0x41, 0x4c, 0xaa, 0x3b, 0xa1, 0xad, + 0xf3, 0x21, 0xa7, 0xbe, 0x65, 0x9b, 0x74, 0x36, 0x4c, 0xfa, 0x2e, 0xb4, 0x79, 0x00, 0x86, 0xf7, + 0xa1, 0x92, 0x8a, 0xd3, 0x05, 0x4c, 0x96, 0xe0, 0xb1, 0x12, 0x6f, 0xa0, 0x92, 0x6e, 0xa0, 0xb2, + 0x95, 0x08, 0x6a, 0xf9, 0xf3, 0xcb, 0x6a, 0xe6, 0xcb, 0xef, 0x2a, 0xd2, 0x66, 0x62, 0xbf, 0xb4, + 0x32, 0x7c, 0x14, 0x61, 0xf8, 0x28, 0xf2, 0x4b, 0x28, 0x8f, 0x6c, 0x01, 0x96, 0xa1, 0xe4, 0x85, + 0x86, 0x7e, 0x4a, 0xbb, 0x3a, 0x6f, 0x9a, 0x88, 0x16, 0x84, 0xc5, 0x82, 0x56, 0xf4, 0x42, 0xe3, + 0x2d, 0xed, 0x7e, 0x88, 0x1e, 0xad, 0xe5, 0xbf, 0x9f, 0x55, 0xd1, 0xdf, 0xb3, 0x2a, 0x92, 0x97, + 0xa0, 0x34, 0xb4, 0x06, 0xb8, 0x02, 0x02, 0xf1, 0x3c, 0x7e, 0xb6, 0x9c, 0x16, 0x5d, 0x5e, 0x13, + 0x1f, 0xc3, 0xf4, 0x2e, 0x61, 0x4d, 0xda, 0x48, 0xb4, 0x4f, 0xa1, 0xcc, 0x5b, 0xa1, 0x8f, 0xf6, + 0xba, 0xc4, 0x1f, 
0xef, 0xa7, 0x0d, 0x97, 0xa1, 0x34, 0xd0, 0x0d, 0xda, 0x5e, 0x4c, 0x55, 0x3b, + 0x84, 0xc9, 0x9f, 0x11, 0x94, 0x47, 0x76, 0x03, 0xaf, 0x43, 0xc1, 0xf3, 0x69, 0xdd, 0xe2, 0x7b, + 0x8c, 0xee, 0x6a, 0x61, 0x8e, 0xb7, 0x6f, 0x40, 0xe0, 0x2d, 0x28, 0xd9, 0x94, 0x31, 0x3e, 0x08, + 0xda, 0x22, 0xdd, 0xbb, 0xa7, 0x10, 0x5b, 0x4c, 0x27, 0xd4, 0x56, 0x04, 0xc9, 0x3f, 0x11, 0x94, + 0x86, 0x96, 0x0e, 0x37, 0x60, 0xbe, 0xed, 0x06, 0x54, 0xa7, 0x9d, 0x80, 0x3a, 0xd1, 0x9b, 0x98, + 0x4e, 0x1d, 0x62, 0xb4, 0xa8, 0xde, 0xa4, 0x96, 0xd9, 0x0c, 0x92, 0xa8, 0x73, 0x37, 0xde, 0xb3, + 0xe7, 0x04, 0x2f, 0x56, 0x8f, 0x48, 0x2b, 0xa4, 0xb5, 0xdc, 0xf9, 0x65, 0x15, 0x69, 0xb3, 0x91, + 0xcf, 0x76, 0xdf, 0x66, 0x9b, 0xbb, 0xec, 0x72, 0x13, 0xfc, 0x1e, 0xb0, 0x67, 0x04, 0xa3, 0xd6, + 0xd9, 0xfb, 0x5a, 0x57, 0x22, 0xf8, 0xba, 0xa1, 0x7c, 0x08, 0x30, 0xf8, 0x70, 0xf1, 0xc6, 0x7d, + 0x0e, 0x21, 0xfc, 0x2f, 0xe1, 0x5a, 0x56, 0x44, 0xb5, 0x83, 0xaf, 0x3d, 0x09, 0x9d, 0xf7, 0x24, + 0x74, 0xd1, 0x93, 0xd0, 0x9f, 0x9e, 0x84, 0x3e, 0x5d, 0x49, 0x99, 0x8b, 0x2b, 0x29, 0xf3, 0xeb, + 0x4a, 0xca, 0x1c, 0xaf, 0x98, 0x56, 0xd0, 0x0c, 0x8d, 0xe8, 0x33, 0x56, 0xfb, 0x7f, 0xf9, 0xfe, + 0x05, 0xf1, 0x2c, 0xf5, 0xc6, 0xbf, 0xdf, 0x98, 0xe4, 0x67, 0x7a, 0xf6, 0x2f, 0x00, 0x00, 0xff, + 0xff, 0xd6, 0x46, 0x5b, 0x10, 0x17, 0x06, 0x00, 0x00, +} + +func (this *ConsensusParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ConsensusParams) + if !ok { + that2, ok := that.(ConsensusParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Block.Equal(that1.Block) { + return false + } + if !this.Evidence.Equal(that1.Evidence) { + return false + } + if !this.Validator.Equal(that1.Validator) { + return false + } + if !this.Version.Equal(that1.Version) { + return false + } + if !this.Abci.Equal(that1.Abci) { + return false + } + if !this.Synchrony.Equal(that1.Synchrony) { + return false + } + if !this.Feature.Equal(that1.Feature) { + return false + } + return true +} +func (this *BlockParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockParams) + if !ok { + that2, ok := that.(BlockParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + if this.MaxGas != that1.MaxGas { + return false + } + return true +} +func (this *EvidenceParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EvidenceParams) + if !ok { + that2, ok := that.(EvidenceParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxAgeNumBlocks != that1.MaxAgeNumBlocks { + return false + } + if this.MaxAgeDuration != that1.MaxAgeDuration { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + return true +} +func (this *ValidatorParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ValidatorParams) + if !ok { + that2, ok := that.(ValidatorParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.PubKeyTypes) != len(that1.PubKeyTypes) { + return false + } + for i := range 
this.PubKeyTypes { + if this.PubKeyTypes[i] != that1.PubKeyTypes[i] { + return false + } + } + return true +} +func (this *VersionParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VersionParams) + if !ok { + that2, ok := that.(VersionParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.App != that1.App { + return false + } + return true +} +func (this *HashedParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HashedParams) + if !ok { + that2, ok := that.(HashedParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BlockMaxBytes != that1.BlockMaxBytes { + return false + } + if this.BlockMaxGas != that1.BlockMaxGas { + return false + } + return true +} +func (this *SynchronyParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SynchronyParams) + if !ok { + that2, ok := that.(SynchronyParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Precision != nil && that1.Precision != nil { + if *this.Precision != *that1.Precision { + return false + } + } else if this.Precision != nil { + return false + } else if that1.Precision != nil { + return false + } + if this.MessageDelay != nil && that1.MessageDelay != nil { + if *this.MessageDelay != *that1.MessageDelay { + return false + } + } else if this.MessageDelay != nil { + return false + } else if that1.MessageDelay != nil { + return false + } + return true +} +func (this *FeatureParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FeatureParams) + if !ok { + that2, ok := that.(FeatureParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.VoteExtensionsEnableHeight.Equal(that1.VoteExtensionsEnableHeight) { + return false + } + if !this.PbtsEnableHeight.Equal(that1.PbtsEnableHeight) { + return false + } + return true +} +func (this *ABCIParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ABCIParams) + if !ok { + that2, ok := that.(ABCIParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.VoteExtensionsEnableHeight != that1.VoteExtensionsEnableHeight { + return false + } + return true +} +func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Feature != nil { + { + size, err := m.Feature.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Synchrony != nil { + { + size, err := 
m.Synchrony.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Abci != nil { + { + size, err := m.Abci.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Version != nil { + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Validator != nil { + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Evidence != nil { + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxGas != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxGas)) + i-- + dAtA[i] = 0x10 + } + if m.MaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EvidenceParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvidenceParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxBytes)) + i-- + dAtA[i] = 0x18 + } + n8, err8 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintParams(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0x12 + if m.MaxAgeNumBlocks != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxAgeNumBlocks)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PubKeyTypes) > 0 { + for iNdEx := len(m.PubKeyTypes) - 1; iNdEx >= 0; 
iNdEx-- { + i -= len(m.PubKeyTypes[iNdEx]) + copy(dAtA[i:], m.PubKeyTypes[iNdEx]) + i = encodeVarintParams(dAtA, i, uint64(len(m.PubKeyTypes[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VersionParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VersionParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VersionParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.App != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.App)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HashedParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashedParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashedParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockMaxGas != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.BlockMaxGas)) + i-- + dAtA[i] = 0x10 + } + if m.BlockMaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.BlockMaxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SynchronyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SynchronyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MessageDelay != nil { + n9, err9 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(*m.MessageDelay):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintParams(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x12 + } + if m.Precision != nil { + n10, err10 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(*m.Precision):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintParams(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FeatureParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FeatureParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FeatureParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PbtsEnableHeight != nil { + { + size, err := m.PbtsEnableHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.VoteExtensionsEnableHeight != nil { + { + size, err := m.VoteExtensionsEnableHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ABCIParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VoteExtensionsEnableHeight != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.VoteExtensionsEnableHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedValidatorParams(r randyParams, easy bool) *ValidatorParams { + this := &ValidatorParams{} + v1 := r.Intn(10) + this.PubKeyTypes = make([]string, v1) + for i := 0; i < v1; i++ { + this.PubKeyTypes[i] = string(randStringParams(r)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVersionParams(r randyParams, easy bool) *VersionParams { + this := &VersionParams{} + this.App = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyParams interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneParams(r randyParams) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringParams(r randyParams) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneParams(r) + } + return string(tmps) +} +func randUnrecognizedParams(r randyParams, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldParams(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldParams(dAtA []byte, r randyParams, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateParams(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateParams(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateParams(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *ConsensusParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
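// Editor's annotation, not generated output: every non-nil field below adds
// 1 tag byte + a varint length prefix + the submessage payload — the
// recurring "n += 1 + l + sovParams(uint64(l))" pattern, where sovParams(x)
// is the byte count of x's base-128 varint. Worked example: a 300-byte Block
// submessage costs 1 (tag 0x0a) + 2 (length bytes 0xac 0x02, since
// sovParams(300) == 2) + 300 = 303 encoded bytes.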
if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Abci != nil { + l = m.Abci.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Synchrony != nil { + l = m.Synchrony.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Feature != nil { + l = m.Feature.Size() + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func (m *BlockParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBytes != 0 { + n += 1 + sovParams(uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { + n += 1 + sovParams(uint64(m.MaxGas)) + } + return n +} + +func (m *EvidenceParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxAgeNumBlocks != 0 { + n += 1 + sovParams(uint64(m.MaxAgeNumBlocks)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.MaxAgeDuration) + n += 1 + l + sovParams(uint64(l)) + if m.MaxBytes != 0 { + n += 1 + sovParams(uint64(m.MaxBytes)) + } + return n +} + +func (m *ValidatorParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PubKeyTypes) > 0 { + for _, s := range m.PubKeyTypes { + l = len(s) + n += 1 + l + sovParams(uint64(l)) + } + } + return n +} + +func (m *VersionParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.App != 0 { + n += 1 + sovParams(uint64(m.App)) + } + return n +} + +func (m *HashedParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockMaxBytes != 0 { + n += 1 + sovParams(uint64(m.BlockMaxBytes)) + } + if m.BlockMaxGas != 0 { + n += 1 + sovParams(uint64(m.BlockMaxGas)) + } + return n +} + +func (m *SynchronyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Precision != nil { + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(*m.Precision) + n += 1 + l + sovParams(uint64(l)) + } + if m.MessageDelay != nil { + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(*m.MessageDelay) + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func (m *FeatureParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteExtensionsEnableHeight != nil { + l = m.VoteExtensionsEnableHeight.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.PbtsEnableHeight != nil { + l = m.PbtsEnableHeight.Size() + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func (m *ABCIParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteExtensionsEnableHeight != 0 { + n += 1 + sovParams(uint64(m.VoteExtensionsEnableHeight)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ConsensusParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParams: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &BlockParams{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Evidence == nil { + m.Evidence = &EvidenceParams{} + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validator == nil { + m.Validator = &ValidatorParams{} + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &VersionParams{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Abci", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Abci == nil { + m.Abci = &ABCIParams{} + } + if err := m.Abci.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Synchrony", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Synchrony == nil { + m.Synchrony = &SynchronyParams{} + } + if err := m.Synchrony.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Feature", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Feature == nil { + m.Feature = &FeatureParams{} + } + if err := m.Feature.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + } + m.MaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxGas |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvidenceParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvidenceParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvidenceParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAgeNumBlocks", wireType) + } + m.MaxAgeNumBlocks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxAgeNumBlocks |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAgeDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.MaxAgeDuration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyTypes", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyTypes = append(m.PubKeyTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VersionParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field App", wireType) + } + m.App = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.App |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashedParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashedParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashedParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMaxBytes", wireType) + } + m.BlockMaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockMaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMaxGas", wireType) + } + m.BlockMaxGas = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockMaxGas |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SynchronyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SynchronyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SynchronyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Precision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Precision == nil { + m.Precision = new(time.Duration) + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(m.Precision, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MessageDelay == nil { + m.MessageDelay = new(time.Duration) + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FeatureParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
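+ // The varint decoded above is the protobuf field key: the field number sits in the upper bits (wire >> 3) and the wire type in the lowest three bits (wire & 0x7); a set high bit (0x80) on each byte signals that the varint continues.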
+ fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FeatureParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FeatureParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtensionsEnableHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteExtensionsEnableHeight == nil { + m.VoteExtensionsEnableHeight = &types.Int64Value{} + } + if err := m.VoteExtensionsEnableHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PbtsEnableHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PbtsEnableHeight == nil { + m.PbtsEnableHeight = &types.Int64Value{} + } + if err := m.PbtsEnableHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ABCIParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtensionsEnableHeight", wireType) + } + m.VoteExtensionsEnableHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VoteExtensionsEnableHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/types.pb.go b/api/cometbft/types/v1/types.pb.go similarity index 90% rename from proto/tendermint/types/types.pb.go rename to api/cometbft/types/v1/types.pb.go index 2b2c819b4f4..b9fef6df44a 100644 --- a/proto/tendermint/types/types.pb.go +++ b/api/cometbft/types/v1/types.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/types.proto +// source: cometbft/types/v1/types.proto -package types +package v1 import ( fmt "fmt" - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - version "github.com/cometbft/cometbft/proto/tendermint/version" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + v11 "github.com/cometbft/cometbft/api/cometbft/version/v1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" _ "github.com/cosmos/gogoproto/types" @@ -33,11 +33,13 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type SignedMsgType int32 const ( + // Unknown UnknownType SignedMsgType = 0 - // Votes - PrevoteType SignedMsgType = 1 + // Prevote + PrevoteType SignedMsgType = 1 + // Precommit PrecommitType SignedMsgType = 2 - // Proposals + // Proposal ProposalType SignedMsgType = 32 ) @@ -60,10 +62,10 @@ func (x SignedMsgType) String() string { } func (SignedMsgType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{0} + return fileDescriptor_8ea20b664d765b5f, []int{0} } -// PartsetHeader +// Header of the parts set for a block. 
type PartSetHeader struct { Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` @@ -73,7 +75,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{0} + return fileDescriptor_8ea20b664d765b5f, []int{0} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -116,17 +118,18 @@ func (m *PartSetHeader) GetHash() []byte { return nil } +// Part of the block. type Part struct { - Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` - Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"` - Proof crypto.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof"` + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"` + Proof v1.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof"` } func (m *Part) Reset() { *m = Part{} } func (m *Part) String() string { return proto.CompactTextString(m) } func (*Part) ProtoMessage() {} func (*Part) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{1} + return fileDescriptor_8ea20b664d765b5f, []int{1} } func (m *Part) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -169,14 +172,14 @@ func (m *Part) GetBytes() []byte { return nil } -func (m *Part) GetProof() crypto.Proof { +func (m *Part) GetProof() v1.Proof { if m != nil { return m.Proof } - return crypto.Proof{} + return v1.Proof{} } -// BlockID +// BlockID defines the unique ID of a block as its hash and its `PartSetHeader`. type BlockID struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` PartSetHeader PartSetHeader `protobuf:"bytes,2,opt,name=part_set_header,json=partSetHeader,proto3" json:"part_set_header"` @@ -186,7 +189,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{2} + return fileDescriptor_8ea20b664d765b5f, []int{2} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -232,10 +235,10 @@ func (m *BlockID) GetPartSetHeader() PartSetHeader { // Header defines the structure of a block header. 
type Header struct { // basic block info - Version version.Consensus `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` - ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + Version v11.Consensus `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` // prev block info LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` // hashes of block data @@ -256,7 +259,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{3} + return fileDescriptor_8ea20b664d765b5f, []int{3} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -285,11 +288,11 @@ func (m *Header) XXX_DiscardUnknown() { var xxx_messageInfo_Header proto.InternalMessageInfo -func (m *Header) GetVersion() version.Consensus { +func (m *Header) GetVersion() v11.Consensus { if m != nil { return m.Version } - return version.Consensus{} + return v11.Consensus{} } func (m *Header) GetChainID() string { @@ -395,7 +398,7 @@ func (m *Data) Reset() { *m = Data{} } func (m *Data) String() string { return proto.CompactTextString(m) } func (*Data) ProtoMessage() {} func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{4} + return fileDescriptor_8ea20b664d765b5f, []int{4} } func (m *Data) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -434,7 +437,7 @@ func (m *Data) GetTxs() [][]byte { // Vote represents a prevote or precommit vote from validators for // consensus. type Vote struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` @@ -457,7 +460,7 @@ func (m *Vote) Reset() { *m = Vote{} } func (m *Vote) String() string { return proto.CompactTextString(m) } func (*Vote) ProtoMessage() {} func (*Vote) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{5} + return fileDescriptor_8ea20b664d765b5f, []int{5} } func (m *Vote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -568,7 +571,7 @@ func (m *Commit) Reset() { *m = Commit{} } func (m *Commit) String() string { return proto.CompactTextString(m) } func (*Commit) ProtoMessage() {} func (*Commit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{6} + return fileDescriptor_8ea20b664d765b5f, []int{6} } func (m *Commit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -627,7 +630,7 @@ func (m *Commit) GetSignatures() []CommitSig { // CommitSig is a part of the Vote included in a Commit. 
type CommitSig struct { - BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1.BlockIDFlag" json:"block_id_flag,omitempty"` ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` @@ -637,7 +640,7 @@ func (m *CommitSig) Reset() { *m = CommitSig{} } func (m *CommitSig) String() string { return proto.CompactTextString(m) } func (*CommitSig) ProtoMessage() {} func (*CommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{7} + return fileDescriptor_8ea20b664d765b5f, []int{7} } func (m *CommitSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -694,6 +697,7 @@ func (m *CommitSig) GetSignature() []byte { return nil } +// ExtendedCommit is a Commit with ExtendedCommitSig. type ExtendedCommit struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` @@ -705,7 +709,7 @@ func (m *ExtendedCommit) Reset() { *m = ExtendedCommit{} } func (m *ExtendedCommit) String() string { return proto.CompactTextString(m) } func (*ExtendedCommit) ProtoMessage() {} func (*ExtendedCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{8} + return fileDescriptor_8ea20b664d765b5f, []int{8} } func (m *ExtendedCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +770,7 @@ func (m *ExtendedCommit) GetExtendedSignatures() []ExtendedCommitSig { // extension-related fields. We use two signatures to ensure backwards compatibility. // That is the digest of the original signature is still the same in prior versions type ExtendedCommitSig struct { - BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1.BlockIDFlag" json:"block_id_flag,omitempty"` ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` @@ -780,7 +784,7 @@ func (m *ExtendedCommitSig) Reset() { *m = ExtendedCommitSig{} } func (m *ExtendedCommitSig) String() string { return proto.CompactTextString(m) } func (*ExtendedCommitSig) ProtoMessage() {} func (*ExtendedCommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{9} + return fileDescriptor_8ea20b664d765b5f, []int{9} } func (m *ExtendedCommitSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -851,8 +855,9 @@ func (m *ExtendedCommitSig) GetExtensionSignature() []byte { return nil } +// Block proposal. 
type Proposal struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1.SignedMsgType" json:"type,omitempty"` Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` PolRound int32 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` @@ -865,7 +870,7 @@ func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{10} + return fileDescriptor_8ea20b664d765b5f, []int{10} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,6 +948,7 @@ func (m *Proposal) GetSignature() []byte { return nil } +// SignedHeader contains a Header(H) and Commit(H+1) with signatures of validators who signed it. type SignedHeader struct { Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` Commit *Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` @@ -952,7 +958,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } func (m *SignedHeader) String() string { return proto.CompactTextString(m) } func (*SignedHeader) ProtoMessage() {} func (*SignedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{11} + return fileDescriptor_8ea20b664d765b5f, []int{11} } func (m *SignedHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -995,6 +1001,7 @@ func (m *SignedHeader) GetCommit() *Commit { return nil } +// LightBlock is a combination of SignedHeader and ValidatorSet. It is used by light clients. type LightBlock struct { SignedHeader *SignedHeader `protobuf:"bytes,1,opt,name=signed_header,json=signedHeader,proto3" json:"signed_header,omitempty"` ValidatorSet *ValidatorSet `protobuf:"bytes,2,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` @@ -1004,7 +1011,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } func (m *LightBlock) String() string { return proto.CompactTextString(m) } func (*LightBlock) ProtoMessage() {} func (*LightBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{12} + return fileDescriptor_8ea20b664d765b5f, []int{12} } func (m *LightBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1047,6 +1054,7 @@ func (m *LightBlock) GetValidatorSet() *ValidatorSet { return nil } +// BlockMeta contains meta information about a block. type BlockMeta struct { BlockID BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id"` BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` @@ -1058,7 +1066,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} } func (m *BlockMeta) String() string { return proto.CompactTextString(m) } func (*BlockMeta) ProtoMessage() {} func (*BlockMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{13} + return fileDescriptor_8ea20b664d765b5f, []int{13} } func (m *BlockMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1117,16 +1125,16 @@ func (m *BlockMeta) GetNumTxs() int64 { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. 
type TxProof struct { - RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Proof *crypto.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Proof *v1.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` } func (m *TxProof) Reset() { *m = TxProof{} } func (m *TxProof) String() string { return proto.CompactTextString(m) } func (*TxProof) ProtoMessage() {} func (*TxProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{14} + return fileDescriptor_8ea20b664d765b5f, []int{14} } func (m *TxProof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1169,7 +1177,7 @@ func (m *TxProof) GetData() []byte { return nil } -func (m *TxProof) GetProof() *crypto.Proof { +func (m *TxProof) GetProof() *v1.Proof { if m != nil { return m.Proof } @@ -1177,110 +1185,111 @@ func (m *TxProof) GetProof() *crypto.Proof { } func init() { - proto.RegisterEnum("tendermint.types.SignedMsgType", SignedMsgType_name, SignedMsgType_value) - proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") - proto.RegisterType((*Part)(nil), "tendermint.types.Part") - proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") - proto.RegisterType((*Header)(nil), "tendermint.types.Header") - proto.RegisterType((*Data)(nil), "tendermint.types.Data") - proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") - proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") - proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") - proto.RegisterType((*ExtendedCommit)(nil), "tendermint.types.ExtendedCommit") - proto.RegisterType((*ExtendedCommitSig)(nil), "tendermint.types.ExtendedCommitSig") - proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") - proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") - proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") - proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") - proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") -} - -func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } - -var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1310 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xda, 0xeb, 0x5f, 0xcf, 0x76, 0xe2, 0x2c, 0x11, 0x75, 0xdd, 0xc6, 0xb1, 0x5c, 0x01, - 0xa1, 0xa0, 0x4d, 0x95, 0x22, 0x04, 0x07, 0x0e, 0xf9, 0x45, 0x1b, 0x51, 0x27, 0xd6, 0xda, 0x2d, - 0xa2, 0x97, 0xd5, 0xda, 0x3b, 0xb1, 0x97, 0xda, 0x3b, 0xab, 0xdd, 0x71, 0x70, 0xfa, 0x17, 0xa0, - 0x9e, 0x7a, 0xe2, 0xd6, 0x13, 0x1c, 0xb8, 0x83, 0xc4, 0x15, 0x71, 0xea, 0xb1, 0x37, 0xb8, 0x50, - 0x20, 0x95, 0xf8, 0x3b, 0xd0, 0xbc, 0x99, 0xdd, 0xb5, 0xe3, 0x18, 0xaa, 0xa8, 0x02, 0x89, 0x8b, - 0xb5, 0xf3, 0xde, 0xf7, 0xde, 0xbc, 0x79, 0xdf, 0x37, 0xa3, 0x67, 0xb8, 0xca, 0x88, 0x6b, 0x13, - 0x7f, 0xe8, 0xb8, 0x6c, 0x83, 0x9d, 0x78, 0x24, 0x10, 0xbf, 0xba, 0xe7, 0x53, 0x46, 0xb5, 0x52, - 0xec, 0xd5, 0xd1, 0x5e, 0x59, 0xe9, 0xd1, 0x1e, 0x45, 0xe7, 0x06, 0xff, 0x12, 0xb8, 0xca, 0x5a, - 0x8f, 0xd2, 0xde, 0x80, 0x6c, 0xe0, 0xaa, 0x33, 0x3a, 0xda, 0x60, 0xce, 0x90, 0x04, 0xcc, 0x1a, - 0x7a, 0x12, 
0xb0, 0x3a, 0xb1, 0x4d, 0xd7, 0x3f, 0xf1, 0x18, 0xe5, 0x58, 0x7a, 0x24, 0xdd, 0xd5, - 0x09, 0xf7, 0x31, 0xf1, 0x03, 0x87, 0xba, 0x93, 0x75, 0x54, 0x6a, 0x33, 0x55, 0x1e, 0x5b, 0x03, - 0xc7, 0xb6, 0x18, 0xf5, 0x05, 0xa2, 0xfe, 0x21, 0x14, 0x9b, 0x96, 0xcf, 0x5a, 0x84, 0xdd, 0x26, - 0x96, 0x4d, 0x7c, 0x6d, 0x05, 0x52, 0x8c, 0x32, 0x6b, 0x50, 0x56, 0x6a, 0xca, 0x7a, 0xd1, 0x10, - 0x0b, 0x4d, 0x03, 0xb5, 0x6f, 0x05, 0xfd, 0x72, 0xa2, 0xa6, 0xac, 0x17, 0x0c, 0xfc, 0xae, 0xf7, - 0x41, 0xe5, 0xa1, 0x3c, 0xc2, 0x71, 0x6d, 0x32, 0x0e, 0x23, 0x70, 0xc1, 0xad, 0x9d, 0x13, 0x46, - 0x02, 0x19, 0x22, 0x16, 0xda, 0x7b, 0x90, 0xc2, 0xfa, 0xcb, 0xc9, 0x9a, 0xb2, 0x9e, 0xdf, 0x2c, - 0xeb, 0x13, 0x8d, 0x12, 0xe7, 0xd3, 0x9b, 0xdc, 0xbf, 0xad, 0x3e, 0x7d, 0xbe, 0xb6, 0x60, 0x08, - 0x70, 0x7d, 0x00, 0x99, 0xed, 0x01, 0xed, 0x3e, 0xd8, 0xdf, 0x8d, 0x0a, 0x51, 0xe2, 0x42, 0xb4, - 0x06, 0x2c, 0x79, 0x96, 0xcf, 0xcc, 0x80, 0x30, 0xb3, 0x8f, 0xa7, 0xc0, 0x4d, 0xf3, 0x9b, 0x6b, - 0xfa, 0x59, 0x1e, 0xf4, 0xa9, 0xc3, 0xca, 0x5d, 0x8a, 0xde, 0xa4, 0xb1, 0xfe, 0xa7, 0x0a, 0x69, - 0xd9, 0x8c, 0x8f, 0x20, 0x23, 0xdb, 0x8a, 0x1b, 0xe6, 0x37, 0x57, 0x27, 0x33, 0x4a, 0x97, 0xbe, - 0x43, 0xdd, 0x80, 0xb8, 0xc1, 0x28, 0x90, 0xf9, 0xc2, 0x18, 0xed, 0x4d, 0xc8, 0x76, 0xfb, 0x96, - 0xe3, 0x9a, 0x8e, 0x8d, 0x15, 0xe5, 0xb6, 0xf3, 0xa7, 0xcf, 0xd7, 0x32, 0x3b, 0xdc, 0xb6, 0xbf, - 0x6b, 0x64, 0xd0, 0xb9, 0x6f, 0x6b, 0xaf, 0x43, 0xba, 0x4f, 0x9c, 0x5e, 0x9f, 0x61, 0x5b, 0x92, - 0x86, 0x5c, 0x69, 0x1f, 0x80, 0xca, 0x05, 0x51, 0x56, 0x71, 0xef, 0x8a, 0x2e, 0xd4, 0xa2, 0x87, - 0x6a, 0xd1, 0xdb, 0xa1, 0x5a, 0xb6, 0xb3, 0x7c, 0xe3, 0xc7, 0xbf, 0xad, 0x29, 0x06, 0x46, 0x68, - 0x3b, 0x50, 0x1c, 0x58, 0x01, 0x33, 0x3b, 0xbc, 0x6d, 0x7c, 0xfb, 0x14, 0xa6, 0xb8, 0x3c, 0xdb, - 0x10, 0xd9, 0x58, 0x59, 0x7a, 0x9e, 0x47, 0x09, 0x93, 0xad, 0xad, 0x43, 0x09, 0x93, 0x74, 0xe9, - 0x70, 0xe8, 0x30, 0x13, 0xfb, 0x9e, 0xc6, 0xbe, 0x2f, 0x72, 0xfb, 0x0e, 0x9a, 0x6f, 0x73, 0x06, - 0xae, 0x40, 0xce, 0xb6, 0x98, 0x25, 0x20, 0x19, 0x84, 0x64, 0xb9, 0x01, 0x9d, 0x6f, 0xc1, 0x52, - 0xa4, 0xba, 0x40, 0x40, 0xb2, 0x22, 0x4b, 0x6c, 0x46, 0xe0, 0x0d, 0x58, 0x71, 0xc9, 0x98, 0x99, - 0x67, 0xd1, 0x39, 0x44, 0x6b, 0xdc, 0x77, 0x6f, 0x3a, 0xe2, 0x0d, 0x58, 0xec, 0x86, 0xcd, 0x17, - 0x58, 0x40, 0x6c, 0x31, 0xb2, 0x22, 0xec, 0x32, 0x64, 0x2d, 0xcf, 0x13, 0x80, 0x3c, 0x02, 0x32, - 0x96, 0xe7, 0xa1, 0xeb, 0x3a, 0x2c, 0xe3, 0x19, 0x7d, 0x12, 0x8c, 0x06, 0x4c, 0x26, 0x29, 0x20, - 0x66, 0x89, 0x3b, 0x0c, 0x61, 0x47, 0xec, 0x35, 0x28, 0x92, 0x63, 0xc7, 0x26, 0x6e, 0x97, 0x08, - 0x5c, 0x11, 0x71, 0x85, 0xd0, 0x88, 0xa0, 0xb7, 0xa1, 0xe4, 0xf9, 0xd4, 0xa3, 0x01, 0xf1, 0x4d, - 0xcb, 0xb6, 0x7d, 0x12, 0x04, 0xe5, 0x45, 0x91, 0x2f, 0xb4, 0x6f, 0x09, 0x73, 0xbd, 0x0c, 0xea, - 0xae, 0xc5, 0x2c, 0xad, 0x04, 0x49, 0x36, 0x0e, 0xca, 0x4a, 0x2d, 0xb9, 0x5e, 0x30, 0xf8, 0x67, - 0xfd, 0x87, 0x24, 0xa8, 0xf7, 0x28, 0x23, 0xda, 0x4d, 0x50, 0x39, 0x4d, 0xa8, 0xbe, 0xc5, 0xf3, - 0xf4, 0xdc, 0x72, 0x7a, 0x2e, 0xb1, 0x1b, 0x41, 0xaf, 0x7d, 0xe2, 0x11, 0x03, 0xc1, 0x13, 0x72, - 0x4a, 0x4c, 0xc9, 0x69, 0x05, 0x52, 0x3e, 0x1d, 0xb9, 0x36, 0xaa, 0x2c, 0x65, 0x88, 0x85, 0xb6, - 0x07, 0xd9, 0x48, 0x25, 0xea, 0x3f, 0xa9, 0x64, 0x89, 0xab, 0x84, 0x6b, 0x58, 0x1a, 0x8c, 0x4c, - 0x47, 0x8a, 0x65, 0x1b, 0x72, 0xd1, 0xe3, 0x25, 0xd5, 0xf6, 0x72, 0x82, 0x8d, 0xc3, 0xb4, 0x77, - 0x60, 0x39, 0xe2, 0x3e, 0x6a, 0x9e, 0x50, 0x5c, 0x29, 0x72, 0xc8, 0xee, 0x4d, 0xc9, 0xca, 0x14, - 0x0f, 0x50, 0x06, 0xcf, 0x15, 0xcb, 0x6a, 0x1f, 0x5f, 0xa2, 0xab, 0x90, 0x0b, 0x9c, 0x9e, 0x6b, - 0xb1, 0x91, 0x4f, 0xa4, 0xf2, 0x62, 
0x03, 0xf7, 0x92, 0x31, 0x23, 0x2e, 0x5e, 0x72, 0xa1, 0xb4, - 0xd8, 0xa0, 0x6d, 0xc0, 0x6b, 0xd1, 0xc2, 0x8c, 0xb3, 0x08, 0x95, 0x69, 0x91, 0xab, 0x15, 0x7a, - 0xea, 0x3f, 0x2a, 0x90, 0x16, 0x17, 0x63, 0x82, 0x06, 0xe5, 0x7c, 0x1a, 0x12, 0xf3, 0x68, 0x48, - 0x5e, 0x9c, 0x86, 0x2d, 0x80, 0xa8, 0xcc, 0xa0, 0xac, 0xd6, 0x92, 0xeb, 0xf9, 0xcd, 0x2b, 0xb3, - 0x89, 0x44, 0x89, 0x2d, 0xa7, 0x27, 0xef, 0xfd, 0x44, 0x50, 0xfd, 0x57, 0x05, 0x72, 0x91, 0x5f, - 0xdb, 0x82, 0x62, 0x58, 0x97, 0x79, 0x34, 0xb0, 0x7a, 0x52, 0x8a, 0xab, 0x73, 0x8b, 0xfb, 0x78, - 0x60, 0xf5, 0x8c, 0xbc, 0xac, 0x87, 0x2f, 0xce, 0xa7, 0x35, 0x31, 0x87, 0xd6, 0x29, 0x1d, 0x25, - 0x2f, 0xa6, 0xa3, 0x29, 0xc6, 0xd5, 0x33, 0x8c, 0xd7, 0xff, 0x50, 0x60, 0x71, 0x6f, 0x8c, 0xe5, - 0xdb, 0xff, 0x25, 0x55, 0xf7, 0xa5, 0xb6, 0x6c, 0x62, 0x9b, 0x33, 0x9c, 0x5d, 0x9b, 0xcd, 0x38, - 0x5d, 0x73, 0xcc, 0x9d, 0x16, 0x66, 0x69, 0xc5, 0x1c, 0x7e, 0x9f, 0x80, 0xe5, 0x19, 0xfc, 0xff, - 0x8f, 0xcb, 0xe9, 0xdb, 0x9b, 0x7a, 0xc9, 0xdb, 0x9b, 0x9e, 0x7b, 0x7b, 0xbf, 0x4b, 0x40, 0xb6, - 0x89, 0xaf, 0xb4, 0x35, 0xf8, 0x37, 0xde, 0xde, 0x2b, 0x90, 0xf3, 0xe8, 0xc0, 0x14, 0x1e, 0x15, - 0x3d, 0x59, 0x8f, 0x0e, 0x8c, 0x19, 0x99, 0xa5, 0x5e, 0xd1, 0xc3, 0x9c, 0x7e, 0x05, 0x24, 0x64, - 0xce, 0x5e, 0x28, 0x1f, 0x0a, 0xa2, 0x15, 0x72, 0x6a, 0xba, 0xc1, 0x7b, 0x80, 0x63, 0x98, 0x32, - 0x3b, 0xe5, 0x89, 0xb2, 0x05, 0xd2, 0x90, 0x38, 0x1e, 0x21, 0x86, 0x0c, 0x39, 0xb8, 0x95, 0xe7, - 0xbd, 0x58, 0x86, 0xc4, 0xd5, 0xbf, 0x52, 0x00, 0xee, 0xf0, 0xce, 0xe2, 0x79, 0xf9, 0xbc, 0x13, - 0x60, 0x09, 0xe6, 0xd4, 0xce, 0xd5, 0x79, 0xa4, 0xc9, 0xfd, 0x0b, 0xc1, 0x64, 0xdd, 0x3b, 0x50, - 0x8c, 0xb5, 0x1d, 0x90, 0xb0, 0x98, 0x73, 0x92, 0x44, 0x63, 0x48, 0x8b, 0x30, 0xa3, 0x70, 0x3c, - 0xb1, 0xaa, 0xff, 0xa4, 0x40, 0x0e, 0x6b, 0x6a, 0x10, 0x66, 0x4d, 0x71, 0xa8, 0x5c, 0x9c, 0xc3, - 0x55, 0x00, 0x91, 0x26, 0x70, 0x1e, 0x12, 0xa9, 0xac, 0x1c, 0x5a, 0x5a, 0xce, 0x43, 0xa2, 0xbd, - 0x1f, 0x35, 0x3c, 0xf9, 0xf7, 0x0d, 0x97, 0x2f, 0x46, 0xd8, 0xf6, 0x4b, 0x90, 0x71, 0x47, 0x43, - 0x93, 0x0f, 0x1f, 0xaa, 0x50, 0xab, 0x3b, 0x1a, 0xb6, 0xc7, 0x41, 0xfd, 0x73, 0xc8, 0xb4, 0xc7, - 0x38, 0x88, 0x73, 0x89, 0xfa, 0x94, 0xca, 0xe9, 0x4f, 0x4c, 0xdd, 0x59, 0x6e, 0xc0, 0x61, 0x47, - 0x03, 0x95, 0x8f, 0x79, 0xe1, 0xdf, 0x02, 0xfe, 0xad, 0xe9, 0x2f, 0x39, 0xe2, 0xcb, 0xe1, 0xfe, - 0xfa, 0xcf, 0x0a, 0x14, 0xa7, 0x6e, 0x92, 0xf6, 0x2e, 0x5c, 0x6a, 0xed, 0xdf, 0x3a, 0xd8, 0xdb, - 0x35, 0x1b, 0xad, 0x5b, 0x66, 0xfb, 0xb3, 0xe6, 0x9e, 0x79, 0xf7, 0xe0, 0x93, 0x83, 0xc3, 0x4f, - 0x0f, 0x4a, 0x0b, 0x95, 0xa5, 0x47, 0x4f, 0x6a, 0xf9, 0xbb, 0xee, 0x03, 0x97, 0x7e, 0xe1, 0xce, - 0x43, 0x37, 0x8d, 0xbd, 0x7b, 0x87, 0xed, 0xbd, 0x92, 0x22, 0xd0, 0x4d, 0x9f, 0x1c, 0x53, 0x46, - 0x10, 0x7d, 0x03, 0x2e, 0x9f, 0x83, 0xde, 0x39, 0x6c, 0x34, 0xf6, 0xdb, 0xa5, 0x44, 0x65, 0xf9, - 0xd1, 0x93, 0x5a, 0xb1, 0xe9, 0x13, 0xa1, 0x32, 0x8c, 0xd0, 0xa1, 0x3c, 0x1b, 0x71, 0xd8, 0x3c, - 0x6c, 0x6d, 0xdd, 0x29, 0xd5, 0x2a, 0xa5, 0x47, 0x4f, 0x6a, 0x85, 0xf0, 0xc9, 0xe0, 0xf8, 0x4a, - 0xf6, 0xcb, 0xaf, 0xab, 0x0b, 0xdf, 0x7e, 0x53, 0x55, 0xb6, 0x1b, 0x4f, 0x4f, 0xab, 0xca, 0xb3, - 0xd3, 0xaa, 0xf2, 0xfb, 0x69, 0x55, 0x79, 0xfc, 0xa2, 0xba, 0xf0, 0xec, 0x45, 0x75, 0xe1, 0x97, - 0x17, 0xd5, 0x85, 0xfb, 0x37, 0x7b, 0x0e, 0xeb, 0x8f, 0x3a, 0x7a, 0x97, 0x0e, 0x37, 0xba, 0x74, - 0x48, 0x58, 0xe7, 0x88, 0xc5, 0x1f, 0xe2, 0x6f, 0xe2, 0xd9, 0xbf, 0x6e, 0x9d, 0x34, 0xda, 0x6f, - 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xb6, 0xa1, 0x4e, 0x7b, 0x0e, 0x00, 0x00, + proto.RegisterEnum("cometbft.types.v1.SignedMsgType", SignedMsgType_name, 
SignedMsgType_value) + proto.RegisterType((*PartSetHeader)(nil), "cometbft.types.v1.PartSetHeader") + proto.RegisterType((*Part)(nil), "cometbft.types.v1.Part") + proto.RegisterType((*BlockID)(nil), "cometbft.types.v1.BlockID") + proto.RegisterType((*Header)(nil), "cometbft.types.v1.Header") + proto.RegisterType((*Data)(nil), "cometbft.types.v1.Data") + proto.RegisterType((*Vote)(nil), "cometbft.types.v1.Vote") + proto.RegisterType((*Commit)(nil), "cometbft.types.v1.Commit") + proto.RegisterType((*CommitSig)(nil), "cometbft.types.v1.CommitSig") + proto.RegisterType((*ExtendedCommit)(nil), "cometbft.types.v1.ExtendedCommit") + proto.RegisterType((*ExtendedCommitSig)(nil), "cometbft.types.v1.ExtendedCommitSig") + proto.RegisterType((*Proposal)(nil), "cometbft.types.v1.Proposal") + proto.RegisterType((*SignedHeader)(nil), "cometbft.types.v1.SignedHeader") + proto.RegisterType((*LightBlock)(nil), "cometbft.types.v1.LightBlock") + proto.RegisterType((*BlockMeta)(nil), "cometbft.types.v1.BlockMeta") + proto.RegisterType((*TxProof)(nil), "cometbft.types.v1.TxProof") +} + +func init() { proto.RegisterFile("cometbft/types/v1/types.proto", fileDescriptor_8ea20b664d765b5f) } + +var fileDescriptor_8ea20b664d765b5f = []byte{ + // 1314 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0xeb, 0x7f, 0xcf, 0x76, 0xe2, 0x2c, 0x11, 0x75, 0xdc, 0xd6, 0x31, 0xe6, 0x5f, + 0x28, 0xc8, 0x6e, 0x02, 0x08, 0xb8, 0x20, 0xd5, 0x49, 0xda, 0x46, 0x34, 0x89, 0xb5, 0x76, 0x8b, + 0x80, 0xc3, 0x6a, 0xed, 0x9d, 0xd8, 0xab, 0xda, 0x3b, 0xab, 0xdd, 0xb1, 0x49, 0xfa, 0x09, 0x50, + 0x4f, 0x3d, 0x72, 0xe9, 0x09, 0x0e, 0x7c, 0x81, 0x1e, 0xb8, 0x22, 0x0e, 0x3d, 0xf6, 0x06, 0xa7, + 0x82, 0x92, 0x0b, 0x1f, 0x03, 0xcd, 0x9b, 0xd9, 0xdd, 0x38, 0xb6, 0xd5, 0x88, 0x56, 0x20, 0x71, + 0x9b, 0x79, 0xef, 0xf7, 0xde, 0xbc, 0x7d, 0xbf, 0xdf, 0x8c, 0xde, 0xc2, 0xd5, 0x2e, 0x1d, 0x12, + 0xd6, 0x39, 0x64, 0x75, 0x76, 0xec, 0x12, 0xbf, 0x3e, 0xde, 0x10, 0x8b, 0x9a, 0xeb, 0x51, 0x46, + 0xb5, 0xe5, 0xc0, 0x5d, 0x13, 0xd6, 0xf1, 0x46, 0xa9, 0x1c, 0x46, 0x74, 0xbd, 0x63, 0x97, 0x51, + 0x1e, 0xe2, 0x7a, 0x94, 0x1e, 0x8a, 0x90, 0xd2, 0x1b, 0xd3, 0x19, 0xc7, 0xe6, 0xc0, 0xb6, 0x4c, + 0x46, 0x3d, 0x09, 0x59, 0x0b, 0x21, 0x63, 0xe2, 0xf9, 0x36, 0x75, 0xce, 0x1d, 0x5b, 0x5a, 0xe9, + 0xd1, 0x1e, 0xc5, 0x65, 0x9d, 0xaf, 0x82, 0xb0, 0x1e, 0xa5, 0xbd, 0x01, 0xa9, 0xe3, 0xae, 0x33, + 0x3a, 0xac, 0x33, 0x7b, 0x48, 0x7c, 0x66, 0x0e, 0x5d, 0x01, 0xa8, 0x7e, 0x06, 0xf9, 0xa6, 0xe9, + 0xb1, 0x16, 0x61, 0xb7, 0x89, 0x69, 0x11, 0x4f, 0x5b, 0x81, 0x04, 0xa3, 0xcc, 0x1c, 0x14, 0x95, + 0x8a, 0xb2, 0x9e, 0xd7, 0xc5, 0x46, 0xd3, 0x40, 0xed, 0x9b, 0x7e, 0xbf, 0x18, 0xab, 0x28, 0xeb, + 0x39, 0x1d, 0xd7, 0x55, 0x1b, 0x54, 0x1e, 0xca, 0x23, 0x6c, 0xc7, 0x22, 0x47, 0x41, 0x04, 0x6e, + 0xb8, 0xb5, 0x73, 0xcc, 0x88, 0x2f, 0x43, 0xc4, 0x46, 0xfb, 0x18, 0x12, 0xf8, 0xe1, 0xc5, 0x78, + 0x45, 0x59, 0xcf, 0x6e, 0xae, 0xd6, 0xc2, 0x66, 0x89, 0xce, 0xd4, 0xc6, 0x1b, 0xb5, 0x26, 0x07, + 0x34, 0xd4, 0xa7, 0xcf, 0xd7, 0x16, 0x74, 0x81, 0xae, 0x0e, 0x21, 0xd5, 0x18, 0xd0, 0xee, 0xfd, + 0xdd, 0xed, 0xb0, 0x12, 0x25, 0xaa, 0x44, 0xdb, 0x87, 0x25, 0xd7, 0xf4, 0x98, 0xe1, 0x13, 0x66, + 0xf4, 0xf1, 0x33, 0xf0, 0xd4, 0xec, 0x66, 0xa5, 0x36, 0x45, 0x46, 0x6d, 0xe2, 0x73, 0xe5, 0x31, + 0x79, 0xf7, 0xac, 0xb1, 0xfa, 0x97, 0x0a, 0x49, 0xd9, 0x8e, 0xcf, 0x21, 0x25, 0x1b, 0x8e, 0x27, + 0x66, 0x37, 0xcb, 0x51, 0x4a, 0xe9, 0xe0, 0x49, 0xb7, 0xa8, 0xe3, 0x13, 0xc7, 0x1f, 0xf9, 0x32, + 0x61, 0x10, 0xa4, 
0xbd, 0x03, 0xe9, 0x6e, 0xdf, 0xb4, 0x1d, 0xc3, 0xb6, 0xb0, 0xa6, 0x4c, 0x23, + 0x7b, 0xf2, 0x7c, 0x2d, 0xb5, 0xc5, 0x6d, 0xbb, 0xdb, 0x7a, 0x0a, 0x9d, 0xbb, 0x96, 0xf6, 0x3a, + 0x24, 0xfb, 0xc4, 0xee, 0xf5, 0x19, 0x76, 0x26, 0xae, 0xcb, 0x9d, 0xf6, 0x29, 0xa8, 0x9c, 0xb2, + 0xa2, 0x8a, 0x87, 0x97, 0x6a, 0x82, 0xcf, 0x5a, 0xc0, 0x67, 0xad, 0x1d, 0xf0, 0xd9, 0x48, 0xf3, + 0x83, 0x1f, 0xfd, 0xb1, 0xa6, 0xe8, 0x18, 0xa1, 0x6d, 0x43, 0x7e, 0x60, 0xfa, 0xcc, 0xe8, 0xf0, + 0xc6, 0xf1, 0xe3, 0x13, 0x32, 0xc5, 0x74, 0x4b, 0x64, 0x6f, 0x65, 0xed, 0x59, 0x1e, 0x26, 0x4c, + 0x96, 0xb6, 0x0e, 0x05, 0xcc, 0xd2, 0xa5, 0xc3, 0xa1, 0xcd, 0x0c, 0x6c, 0x7d, 0x12, 0x5b, 0xbf, + 0xc8, 0xed, 0x5b, 0x68, 0xbe, 0xcd, 0x49, 0xb8, 0x0c, 0x19, 0xcb, 0x64, 0xa6, 0x80, 0xa4, 0x10, + 0x92, 0xe6, 0x06, 0x74, 0xbe, 0x0b, 0x4b, 0xa1, 0xa2, 0x7d, 0x01, 0x49, 0x8b, 0x2c, 0x91, 0x19, + 0x81, 0xd7, 0x61, 0xc5, 0x21, 0x47, 0xcc, 0x38, 0x8f, 0xce, 0x20, 0x5a, 0xe3, 0xbe, 0x7b, 0x93, + 0x11, 0x6f, 0xc3, 0x62, 0x37, 0xe8, 0xbe, 0xc0, 0x02, 0x62, 0xf3, 0xa1, 0x15, 0x61, 0xab, 0x90, + 0x36, 0x5d, 0x57, 0x00, 0xb2, 0x08, 0x48, 0x99, 0xae, 0x8b, 0xae, 0x6b, 0xb0, 0x8c, 0xdf, 0xe8, + 0x11, 0x7f, 0x34, 0x60, 0x32, 0x49, 0x0e, 0x31, 0x4b, 0xdc, 0xa1, 0x0b, 0x3b, 0x62, 0xdf, 0x84, + 0x3c, 0x19, 0xdb, 0x16, 0x71, 0xba, 0x44, 0xe0, 0xf2, 0x88, 0xcb, 0x05, 0x46, 0x04, 0xbd, 0x07, + 0x05, 0xd7, 0xa3, 0x2e, 0xf5, 0x89, 0x67, 0x98, 0x96, 0xe5, 0x11, 0xdf, 0x2f, 0x2e, 0x8a, 0x7c, + 0x81, 0xfd, 0x86, 0x30, 0x57, 0x8b, 0xa0, 0x6e, 0x9b, 0xcc, 0xd4, 0x0a, 0x10, 0x67, 0x47, 0x7e, + 0x51, 0xa9, 0xc4, 0xd7, 0x73, 0x3a, 0x5f, 0x56, 0x7f, 0x8e, 0x83, 0x7a, 0x8f, 0x32, 0xa2, 0x7d, + 0x04, 0x2a, 0x67, 0x0a, 0xf5, 0xb7, 0x38, 0x53, 0xd2, 0x2d, 0xbb, 0xe7, 0x10, 0x6b, 0xcf, 0xef, + 0xb5, 0x8f, 0x5d, 0xa2, 0x23, 0xfa, 0x8c, 0xa0, 0x62, 0x13, 0x82, 0x5a, 0x81, 0x84, 0x47, 0x47, + 0x8e, 0x85, 0x3a, 0x4b, 0xe8, 0x62, 0xa3, 0xdd, 0x84, 0x74, 0xa8, 0x13, 0xf5, 0x85, 0x3a, 0x59, + 0xe2, 0x3a, 0xe1, 0x32, 0x96, 0x06, 0x3d, 0xd5, 0x91, 0x72, 0x69, 0x40, 0x26, 0x7c, 0x61, 0x42, + 0xc1, 0x5d, 0x44, 0xb3, 0x51, 0x98, 0xf6, 0x3e, 0x2c, 0x87, 0xec, 0x87, 0xed, 0x13, 0x9a, 0x2b, + 0x84, 0x0e, 0xd9, 0xbf, 0x09, 0x61, 0x19, 0xe2, 0x19, 0x4a, 0xe1, 0x87, 0x45, 0xc2, 0xda, 0xc5, + 0xf7, 0xe8, 0x0a, 0x64, 0x7c, 0xbb, 0xe7, 0x98, 0x6c, 0xe4, 0x11, 0xa9, 0xbd, 0xc8, 0xc0, 0xbd, + 0xe4, 0x88, 0x11, 0x07, 0x2f, 0xba, 0xd0, 0x5a, 0x64, 0xd0, 0xea, 0xf0, 0x5a, 0xb8, 0x31, 0xa2, + 0x2c, 0x42, 0x67, 0x5a, 0xe8, 0x6a, 0x05, 0x9e, 0xea, 0x2f, 0x0a, 0x24, 0xc5, 0xd5, 0x38, 0xc3, + 0x83, 0x32, 0x9b, 0x87, 0xd8, 0x3c, 0x1e, 0xe2, 0x2f, 0xc5, 0x03, 0x84, 0x75, 0xfa, 0x45, 0xb5, + 0x12, 0x5f, 0xcf, 0x6e, 0x5e, 0x99, 0x91, 0x49, 0x14, 0xd9, 0xb2, 0x7b, 0xf2, 0xee, 0x9f, 0x89, + 0xaa, 0x3e, 0x57, 0x20, 0x13, 0xfa, 0xb5, 0x06, 0xe4, 0x83, 0xca, 0x8c, 0xc3, 0x81, 0xd9, 0x93, + 0x72, 0x2c, 0xcf, 0x2f, 0xef, 0xe6, 0xc0, 0xec, 0xe9, 0x59, 0x59, 0x11, 0xdf, 0xcc, 0x66, 0x36, + 0x36, 0x87, 0xd9, 0x09, 0x29, 0xc5, 0xff, 0x99, 0x94, 0x26, 0x48, 0x57, 0xcf, 0x91, 0x5e, 0x3d, + 0x55, 0x60, 0x71, 0x87, 0x93, 0x67, 0x11, 0xeb, 0x3f, 0x65, 0xeb, 0x1b, 0xa9, 0x2f, 0x8b, 0x58, + 0xc6, 0x14, 0x6d, 0x6f, 0xcd, 0x48, 0x39, 0x59, 0x75, 0x44, 0x9f, 0x16, 0xa4, 0x69, 0x45, 0x34, + 0x3e, 0x89, 0xc1, 0xf2, 0x14, 0xfe, 0x7f, 0x48, 0xe7, 0xe4, 0x1d, 0x4e, 0x5c, 0xf0, 0x0e, 0x27, + 0xe7, 0xde, 0xe1, 0x27, 0x31, 0x48, 0x37, 0xf1, 0xb5, 0x36, 0x07, 0xff, 0xca, 0x1b, 0x7c, 0x19, + 0x32, 0x2e, 0x1d, 0x18, 0xc2, 0xa3, 0xa2, 0x27, 0xed, 0xd2, 0x81, 0x3e, 0x25, 0xb5, 0xc4, 0xab, + 0x7a, 0xa0, 0x93, 0xaf, 0x80, 0x86, 0xd4, 
0xf9, 0x5b, 0xc5, 0x20, 0x27, 0x7a, 0x21, 0x27, 0xa8, + 0x0d, 0xde, 0x04, 0x9c, 0xc9, 0x94, 0xf3, 0x33, 0x5f, 0x58, 0xb7, 0x80, 0xea, 0x12, 0xc8, 0x43, + 0xc4, 0xbc, 0x21, 0xc7, 0xb8, 0xd5, 0xb9, 0x2f, 0x97, 0x2e, 0x81, 0xd5, 0xef, 0x15, 0x80, 0x3b, + 0xbc, 0xb9, 0xf8, 0xc5, 0x7c, 0xf8, 0xf1, 0xb1, 0x08, 0x63, 0xe2, 0xec, 0xb5, 0xb9, 0xc4, 0xc9, + 0x0a, 0x72, 0xfe, 0xd9, 0xd2, 0xb7, 0x21, 0x1f, 0x09, 0xdc, 0x27, 0x41, 0x39, 0xb3, 0xb2, 0x84, + 0x43, 0x49, 0x8b, 0x30, 0x3d, 0x37, 0x3e, 0xb3, 0xab, 0xfe, 0xaa, 0x40, 0x06, 0xab, 0xda, 0x23, + 0xcc, 0x9c, 0x20, 0x52, 0x79, 0x09, 0x22, 0xaf, 0x02, 0x88, 0x3c, 0xbe, 0xfd, 0x80, 0x48, 0x7d, + 0x65, 0xd0, 0xd2, 0xb2, 0x1f, 0x10, 0xed, 0x93, 0xb0, 0xeb, 0xf1, 0x17, 0x74, 0x5d, 0x3e, 0x1d, + 0x41, 0xef, 0x2f, 0x41, 0xca, 0x19, 0x0d, 0x0d, 0x3e, 0x8c, 0xa8, 0x42, 0xb4, 0xce, 0x68, 0xd8, + 0x3e, 0xf2, 0xab, 0xf7, 0x21, 0xd5, 0x3e, 0xc2, 0xd9, 0x9c, 0x2b, 0xd5, 0xa3, 0x54, 0x4e, 0x83, + 0x62, 0x10, 0x4f, 0x73, 0x03, 0x0e, 0x3f, 0x1a, 0xa8, 0x7c, 0xec, 0x0b, 0x7e, 0x15, 0xf8, 0x5a, + 0xab, 0x5f, 0x74, 0xec, 0x97, 0x03, 0xff, 0xb5, 0xdf, 0x14, 0xc8, 0x4f, 0xdc, 0x28, 0xed, 0x03, + 0xb8, 0xd4, 0xda, 0xbd, 0xb5, 0xbf, 0xb3, 0x6d, 0xec, 0xb5, 0x6e, 0x19, 0xed, 0xaf, 0x9a, 0x3b, + 0xc6, 0xdd, 0xfd, 0x2f, 0xf6, 0x0f, 0xbe, 0xdc, 0x2f, 0x2c, 0x94, 0x96, 0x1e, 0x3e, 0xae, 0x64, + 0xef, 0x3a, 0xf7, 0x1d, 0xfa, 0xad, 0x33, 0x0f, 0xdd, 0xd4, 0x77, 0xee, 0x1d, 0xb4, 0x77, 0x0a, + 0x8a, 0x40, 0x37, 0x3d, 0x32, 0xa6, 0x8c, 0x20, 0xfa, 0x3a, 0xac, 0xce, 0x40, 0x6f, 0x1d, 0xec, + 0xed, 0xed, 0xb6, 0x0b, 0xb1, 0xd2, 0xf2, 0xc3, 0xc7, 0x95, 0x7c, 0xd3, 0x23, 0x42, 0x6a, 0x18, + 0x51, 0x83, 0xe2, 0x74, 0xc4, 0x41, 0xf3, 0xa0, 0x75, 0xe3, 0x4e, 0xa1, 0x52, 0x2a, 0x3c, 0x7c, + 0x5c, 0xc9, 0x05, 0x6f, 0x07, 0xc7, 0x97, 0xd2, 0xdf, 0xfd, 0x50, 0x5e, 0xf8, 0xe9, 0xc7, 0xb2, + 0xd2, 0xb8, 0xf3, 0xf4, 0xa4, 0xac, 0x3c, 0x3b, 0x29, 0x2b, 0x7f, 0x9e, 0x94, 0x95, 0x47, 0xa7, + 0xe5, 0x85, 0x67, 0xa7, 0xe5, 0x85, 0xdf, 0x4f, 0xcb, 0x0b, 0x5f, 0x6f, 0xf6, 0x6c, 0xd6, 0x1f, + 0x75, 0x78, 0x6f, 0xea, 0xd1, 0x0f, 0x63, 0xb0, 0x30, 0x5d, 0xbb, 0x3e, 0xf5, 0x9b, 0xd8, 0x49, + 0xe2, 0x9d, 0xfd, 0xf0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x98, 0x74, 0x84, 0x94, 0x0e, + 0x00, 0x00, } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { @@ -5264,7 +5273,7 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Proof == nil { - m.Proof = &crypto.Proof{} + m.Proof = &v1.Proof{} } if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/api/cometbft/types/v1/validator.pb.go b/api/cometbft/types/v1/validator.pb.go new file mode 100644 index 00000000000..dd507d40213 --- /dev/null +++ b/api/cometbft/types/v1/validator.pb.go @@ -0,0 +1,1109 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1/validator.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BlockIdFlag indicates which BlockID the signature is for +type BlockIDFlag int32 + +const ( + // Indicates an error condition + BlockIDFlagUnknown BlockIDFlag = 0 + // The vote was not received + BlockIDFlagAbsent BlockIDFlag = 1 + // Voted for the block that received the majority + BlockIDFlagCommit BlockIDFlag = 2 + // Voted for nil + BlockIDFlagNil BlockIDFlag = 3 +) + +var BlockIDFlag_name = map[int32]string{ + 0: "BLOCK_ID_FLAG_UNKNOWN", + 1: "BLOCK_ID_FLAG_ABSENT", + 2: "BLOCK_ID_FLAG_COMMIT", + 3: "BLOCK_ID_FLAG_NIL", +} + +var BlockIDFlag_value = map[string]int32{ + "BLOCK_ID_FLAG_UNKNOWN": 0, + "BLOCK_ID_FLAG_ABSENT": 1, + "BLOCK_ID_FLAG_COMMIT": 2, + "BLOCK_ID_FLAG_NIL": 3, +} + +func (x BlockIDFlag) String() string { + return proto.EnumName(BlockIDFlag_name, int32(x)) +} + +func (BlockIDFlag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_20d37f2fd54e559e, []int{0} +} + +// ValidatorSet defines a set of validators. +type ValidatorSet struct { + Validators []*Validator `protobuf:"bytes,1,rep,name=validators,proto3" json:"validators,omitempty"` + Proposer *Validator `protobuf:"bytes,2,opt,name=proposer,proto3" json:"proposer,omitempty"` + TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +} + +func (m *ValidatorSet) Reset() { *m = ValidatorSet{} } +func (m *ValidatorSet) String() string { return proto.CompactTextString(m) } +func (*ValidatorSet) ProtoMessage() {} +func (*ValidatorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_20d37f2fd54e559e, []int{0} +} +func (m *ValidatorSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSet.Merge(m, src) +} +func (m *ValidatorSet) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSet) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorSet proto.InternalMessageInfo + +func (m *ValidatorSet) GetValidators() []*Validator { + if m != nil { + return m.Validators + } + return nil +} + +func (m *ValidatorSet) GetProposer() *Validator { + if m != nil { + return m.Proposer + } + return nil +} + +func (m *ValidatorSet) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +// Validator represents a node participating in the consensus protocol. +type Validator struct { + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + PubKey *v1.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` // Deprecated: Do not use. 
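+ // Voting power of the validator.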
+ VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` + ProposerPriority int64 `protobuf:"varint,4,opt,name=proposer_priority,json=proposerPriority,proto3" json:"proposer_priority,omitempty"` + PubKeyBytes []byte `protobuf:"bytes,5,opt,name=pub_key_bytes,json=pubKeyBytes,proto3" json:"pub_key_bytes,omitempty"` + PubKeyType string `protobuf:"bytes,6,opt,name=pub_key_type,json=pubKeyType,proto3" json:"pub_key_type,omitempty"` +} + +func (m *Validator) Reset() { *m = Validator{} } +func (m *Validator) String() string { return proto.CompactTextString(m) } +func (*Validator) ProtoMessage() {} +func (*Validator) Descriptor() ([]byte, []int) { + return fileDescriptor_20d37f2fd54e559e, []int{1} +} +func (m *Validator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Validator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Validator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validator.Merge(m, src) +} +func (m *Validator) XXX_Size() int { + return m.Size() +} +func (m *Validator) XXX_DiscardUnknown() { + xxx_messageInfo_Validator.DiscardUnknown(m) +} + +var xxx_messageInfo_Validator proto.InternalMessageInfo + +func (m *Validator) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + +// Deprecated: Do not use. +func (m *Validator) GetPubKey() *v1.PublicKey { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *Validator) GetVotingPower() int64 { + if m != nil { + return m.VotingPower + } + return 0 +} + +func (m *Validator) GetProposerPriority() int64 { + if m != nil { + return m.ProposerPriority + } + return 0 +} + +func (m *Validator) GetPubKeyBytes() []byte { + if m != nil { + return m.PubKeyBytes + } + return nil +} + +func (m *Validator) GetPubKeyType() string { + if m != nil { + return m.PubKeyType + } + return "" +} + +// SimpleValidator is a Validator, which is serialized and hashed in consensus. +// Address is removed because it's redundant with the pubkey. +// Proposer priority is removed because it changes every round. 
+type SimpleValidator struct { + PubKey *v1.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + VotingPower int64 `protobuf:"varint,2,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` +} + +func (m *SimpleValidator) Reset() { *m = SimpleValidator{} } +func (m *SimpleValidator) String() string { return proto.CompactTextString(m) } +func (*SimpleValidator) ProtoMessage() {} +func (*SimpleValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_20d37f2fd54e559e, []int{2} +} +func (m *SimpleValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SimpleValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SimpleValidator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SimpleValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleValidator.Merge(m, src) +} +func (m *SimpleValidator) XXX_Size() int { + return m.Size() +} +func (m *SimpleValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleValidator proto.InternalMessageInfo + +func (m *SimpleValidator) GetPubKey() *v1.PublicKey { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *SimpleValidator) GetVotingPower() int64 { + if m != nil { + return m.VotingPower + } + return 0 +} + +func init() { + proto.RegisterEnum("cometbft.types.v1.BlockIDFlag", BlockIDFlag_name, BlockIDFlag_value) + proto.RegisterType((*ValidatorSet)(nil), "cometbft.types.v1.ValidatorSet") + proto.RegisterType((*Validator)(nil), "cometbft.types.v1.Validator") + proto.RegisterType((*SimpleValidator)(nil), "cometbft.types.v1.SimpleValidator") +} + +func init() { proto.RegisterFile("cometbft/types/v1/validator.proto", fileDescriptor_20d37f2fd54e559e) } + +var fileDescriptor_20d37f2fd54e559e = []byte{ + // 541 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x8e, 0xd2, 0x50, + 0x14, 0xc6, 0xb9, 0xa0, 0xf3, 0xe7, 0x82, 0x0a, 0x37, 0x33, 0xa6, 0x69, 0x9c, 0xa6, 0xc3, 0x0a, + 0xff, 0x84, 0x86, 0x31, 0x31, 0xc6, 0xb8, 0xa1, 0x8c, 0x63, 0x08, 0x4c, 0x21, 0xc0, 0x8c, 0x89, + 0x9b, 0xa6, 0x85, 0x2b, 0xde, 0x50, 0xb8, 0x37, 0xed, 0xa5, 0x93, 0xbe, 0x81, 0x61, 0xe5, 0x0b, + 0xb0, 0xd2, 0x85, 0x0f, 0xe0, 0x43, 0xb8, 0x9c, 0x9d, 0x2e, 0x0d, 0x3c, 0x84, 0x5b, 0xd3, 0x96, + 0xb6, 0x4c, 0xaa, 0xd1, 0xdd, 0xed, 0x39, 0xdf, 0xaf, 0xe7, 0xfb, 0x4e, 0x72, 0xe0, 0xf1, 0x90, + 0x4e, 0x31, 0x37, 0xdf, 0x71, 0x85, 0x7b, 0x0c, 0x3b, 0x8a, 0x5b, 0x53, 0x5c, 0xc3, 0x22, 0x23, + 0x83, 0x53, 0xbb, 0xca, 0x6c, 0xca, 0x29, 0x2a, 0x45, 0x92, 0x6a, 0x20, 0xa9, 0xba, 0x35, 0xf1, + 0x28, 0xa6, 0x86, 0xb6, 0xc7, 0x38, 0xf5, 0xb1, 0x09, 0xf6, 0x9c, 0x90, 0x10, 0x0f, 0xc6, 0x74, + 0x4c, 0x83, 0xa7, 0xe2, 0xbf, 0xc2, 0x6a, 0xf9, 0x2b, 0x80, 0x85, 0xcb, 0xe8, 0xdf, 0x7d, 0xcc, + 0xd1, 0x4b, 0x08, 0xe3, 0x59, 0x8e, 0x00, 0xe4, 0x5c, 0x25, 0x7f, 0xf2, 0xa0, 0x9a, 0x9a, 0x56, + 0x8d, 0xa1, 0xde, 0x96, 0x1e, 0x3d, 0x87, 0x7b, 0xcc, 0xa6, 0x8c, 0x3a, 0xd8, 0x16, 0xb2, 0x32, + 0xf8, 0x27, 0x1b, 0xab, 0xd1, 0x13, 0x88, 0x38, 0xe5, 0x86, 0xa5, 0xbb, 0x94, 0x93, 0xd9, 0x58, + 0x67, 0xf4, 0x0a, 0xdb, 0x42, 0x4e, 0x06, 0x95, 0x5c, 0xaf, 0x18, 0x74, 0x2e, 0x83, 0x46, 0xd7, + 0xaf, 0x97, 0x7f, 0x01, 0xb8, 0x1f, 0xff, 0x05, 0x09, 0x70, 0xd7, 0x18, 0x8d, 0x6c, 0xec, 0xf8, + 0x86, 0x41, 0xa5, 
0xd0, 0x8b, 0x3e, 0xd1, 0x0b, 0xb8, 0xcb, 0xe6, 0xa6, 0x3e, 0xc1, 0xde, 0xc6, + 0xce, 0x51, 0x62, 0x27, 0xdc, 0x92, 0xef, 0xa7, 0x3b, 0x37, 0x2d, 0x32, 0x6c, 0x61, 0x4f, 0xcd, + 0x0a, 0xa0, 0xb7, 0xc3, 0xe6, 0x66, 0x0b, 0x7b, 0xe8, 0x18, 0x16, 0xfe, 0xe0, 0x25, 0xef, 0x26, + 0x36, 0xd0, 0x63, 0x58, 0x8a, 0x02, 0xe8, 0xcc, 0x26, 0xd4, 0x26, 0xdc, 0x13, 0x6e, 0x85, 0x9e, + 0xa3, 0x46, 0x77, 0x53, 0x47, 0x65, 0x78, 0x67, 0xe3, 0x45, 0x37, 0x3d, 0x8e, 0x1d, 0xe1, 0x76, + 0xe0, 0x35, 0x1f, 0x8e, 0x53, 0xfd, 0x12, 0x92, 0x61, 0x21, 0xd2, 0xf8, 0xdb, 0x12, 0x76, 0x64, + 0x50, 0xd9, 0xef, 0xc1, 0x50, 0x32, 0xf0, 0x18, 0x2e, 0x5b, 0xf0, 0x5e, 0x9f, 0x4c, 0x99, 0x85, + 0x93, 0xf8, 0xcf, 0x92, 0x90, 0xe0, 0x3f, 0x42, 0xfe, 0x35, 0x60, 0x36, 0x15, 0xf0, 0xd1, 0x77, + 0x00, 0xf3, 0xaa, 0x45, 0x87, 0x93, 0xe6, 0xe9, 0x99, 0x65, 0x8c, 0x51, 0x0d, 0x1e, 0xaa, 0xed, + 0x4e, 0xa3, 0xa5, 0x37, 0x4f, 0xf5, 0xb3, 0x76, 0xfd, 0xb5, 0x7e, 0xa1, 0xb5, 0xb4, 0xce, 0x1b, + 0xad, 0x98, 0x11, 0xef, 0x2f, 0x96, 0x32, 0xda, 0xd2, 0x5e, 0xcc, 0x26, 0x33, 0x7a, 0x35, 0x43, + 0x0a, 0x3c, 0xb8, 0x89, 0xd4, 0xd5, 0xfe, 0x2b, 0x6d, 0x50, 0x04, 0xe2, 0xe1, 0x62, 0x29, 0x97, + 0xb6, 0x88, 0xba, 0xe9, 0xe0, 0x19, 0x4f, 0x03, 0x8d, 0xce, 0xf9, 0x79, 0x73, 0x50, 0xcc, 0xa6, + 0x80, 0x06, 0x9d, 0x4e, 0x09, 0x47, 0x0f, 0x61, 0xe9, 0x26, 0xa0, 0x35, 0xdb, 0xc5, 0x9c, 0x88, + 0x16, 0x4b, 0xf9, 0xee, 0x96, 0x5a, 0x23, 0x96, 0xb8, 0xf7, 0xe1, 0x93, 0x94, 0xf9, 0xf2, 0x59, + 0x02, 0x6a, 0xfb, 0xdb, 0x4a, 0x02, 0xd7, 0x2b, 0x09, 0xfc, 0x5c, 0x49, 0xe0, 0xe3, 0x5a, 0xca, + 0x5c, 0xaf, 0xa5, 0xcc, 0x8f, 0xb5, 0x94, 0x79, 0x7b, 0x32, 0x26, 0xfc, 0xfd, 0xdc, 0xf4, 0x77, + 0xa8, 0x24, 0x27, 0x15, 0x3d, 0x0c, 0x46, 0x94, 0xd4, 0x79, 0x9a, 0x3b, 0xc1, 0x35, 0x3d, 0xfd, + 0x1d, 0x00, 0x00, 0xff, 0xff, 0x20, 0xc2, 0xf3, 0x5f, 0xba, 0x03, 0x00, 0x00, +} + +func (m *ValidatorSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TotalVotingPower != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.Proposer != nil { + { + size, err := m.Proposer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Validator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PubKeyType) > 0 { + i -= len(m.PubKeyType) + copy(dAtA[i:], m.PubKeyType) + i = encodeVarintValidator(dAtA, 
i, uint64(len(m.PubKeyType))) + i-- + dAtA[i] = 0x32 + } + if len(m.PubKeyBytes) > 0 { + i -= len(m.PubKeyBytes) + copy(dAtA[i:], m.PubKeyBytes) + i = encodeVarintValidator(dAtA, i, uint64(len(m.PubKeyBytes))) + i-- + dAtA[i] = 0x2a + } + if m.ProposerPriority != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.ProposerPriority)) + i-- + dAtA[i] = 0x20 + } + if m.VotingPower != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.VotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.PubKey != nil { + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintValidator(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SimpleValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SimpleValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SimpleValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VotingPower != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.VotingPower)) + i-- + dAtA[i] = 0x10 + } + if m.PubKey != nil { + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintValidator(dAtA []byte, offset int, v uint64) int { + offset -= sovValidator(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ValidatorSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovValidator(uint64(l)) + } + } + if m.Proposer != nil { + l = m.Proposer.Size() + n += 1 + l + sovValidator(uint64(l)) + } + if m.TotalVotingPower != 0 { + n += 1 + sovValidator(uint64(m.TotalVotingPower)) + } + return n +} + +func (m *Validator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovValidator(uint64(l)) + } + if m.PubKey != nil { + l = m.PubKey.Size() + n += 1 + l + sovValidator(uint64(l)) + } + if m.VotingPower != 0 { + n += 1 + sovValidator(uint64(m.VotingPower)) + } + if m.ProposerPriority != 0 { + n += 1 + sovValidator(uint64(m.ProposerPriority)) + } + l = len(m.PubKeyBytes) + if l > 0 { + n += 1 + l + sovValidator(uint64(l)) + } + l = len(m.PubKeyType) + if l > 0 { + n += 1 + l + sovValidator(uint64(l)) + } + return n +} + +func (m *SimpleValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKey != nil { + l = m.PubKey.Size() + n += 1 + l + sovValidator(uint64(l)) + } + if m.VotingPower != 0 { + n += 1 + sovValidator(uint64(m.VotingPower)) + } + return n +} + +func sovValidator(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozValidator(x uint64) (n int) { + return sovValidator(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ValidatorSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, &Validator{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proposer == nil { + m.Proposer = &Validator{} + } + if err := m.Proposer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipValidator(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Address", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) + if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PubKey == nil { + m.PubKey = &v1.PublicKey{} + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) + } + m.VotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerPriority", wireType) + } + m.ProposerPriority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProposerPriority |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyBytes = append(m.PubKeyBytes[:0], dAtA[iNdEx:postIndex]...) 
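+	// Annotation (editor's note, not protoc-gen-gogo output): the nil check
+	// below preserves the nil/empty distinction for bytes fields; a
+	// present-but-empty field decodes to a non-nil empty slice, so the
+	// message re-marshals byte-for-byte instead of dropping the field.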
+ if m.PubKeyBytes == nil { + m.PubKeyBytes = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipValidator(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SimpleValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SimpleValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SimpleValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PubKey == nil { + m.PubKey = &v1.PublicKey{} + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) + } + m.VotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipValidator(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipValidator(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidator + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidator + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidator + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthValidator + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupValidator + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthValidator + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthValidator = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowValidator = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupValidator = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/types/v1beta1/block.pb.go b/api/cometbft/types/v1beta1/block.pb.go new file mode 100644 index 00000000000..0a4c6780deb --- /dev/null +++ b/api/cometbft/types/v1beta1/block.pb.go @@ -0,0 +1,494 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1beta1/block.proto + +package v1beta1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Block defines the structure of a block in the CometBFT blockchain. 
+type Block struct { + Header Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` + Data Data `protobuf:"bytes,2,opt,name=data,proto3" json:"data"` + Evidence EvidenceList `protobuf:"bytes,3,opt,name=evidence,proto3" json:"evidence"` + LastCommit *Commit `protobuf:"bytes,4,opt,name=last_commit,json=lastCommit,proto3" json:"last_commit,omitempty"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { + return fileDescriptor_9a94804f701ae91c, []int{0} +} +func (m *Block) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Block.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Block.Merge(m, src) +} +func (m *Block) XXX_Size() int { + return m.Size() +} +func (m *Block) XXX_DiscardUnknown() { + xxx_messageInfo_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Block proto.InternalMessageInfo + +func (m *Block) GetHeader() Header { + if m != nil { + return m.Header + } + return Header{} +} + +func (m *Block) GetData() Data { + if m != nil { + return m.Data + } + return Data{} +} + +func (m *Block) GetEvidence() EvidenceList { + if m != nil { + return m.Evidence + } + return EvidenceList{} +} + +func (m *Block) GetLastCommit() *Commit { + if m != nil { + return m.LastCommit + } + return nil +} + +func init() { + proto.RegisterType((*Block)(nil), "cometbft.types.v1beta1.Block") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta1/block.proto", fileDescriptor_9a94804f701ae91c) +} + +var fileDescriptor_9a94804f701ae91c = []byte{ + // 277 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd4, 0x4f, 0xca, 0xc9, 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x83, 0xa9, 0xd1, 0x03, 0xab, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, + 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x70, 0x99, 0x08, 0xd1, 0x0b, 0x51, 0xa3, 0x8a, 0x43, 0x4d, + 0x6a, 0x59, 0x66, 0x4a, 0x6a, 0x5e, 0x72, 0x2a, 0x44, 0x99, 0x52, 0x1b, 0x13, 0x17, 0xab, 0x13, + 0xc8, 0x21, 0x42, 0x36, 0x5c, 0x6c, 0x19, 0xa9, 0x89, 0x29, 0xa9, 0x45, 0x12, 0x8c, 0x0a, 0x8c, + 0x1a, 0xdc, 0x46, 0x72, 0x7a, 0xd8, 0xdd, 0xa4, 0xe7, 0x01, 0x56, 0xe5, 0xc4, 0x72, 0xe2, 0x9e, + 0x3c, 0x43, 0x10, 0x54, 0x8f, 0x90, 0x19, 0x17, 0x4b, 0x4a, 0x62, 0x49, 0xa2, 0x04, 0x13, 0x58, + 0xaf, 0x0c, 0x2e, 0xbd, 0x2e, 0x89, 0x25, 0x89, 0x50, 0x9d, 0x60, 0xf5, 0x42, 0x6e, 0x5c, 0x1c, + 0x30, 0x17, 0x49, 0x30, 0x83, 0xf5, 0xaa, 0xe0, 0xd2, 0xeb, 0x0a, 0x55, 0xe7, 0x93, 0x59, 0x5c, + 0x02, 0x35, 0x03, 0xae, 0x57, 0xc8, 0x9e, 0x8b, 0x3b, 0x27, 0xb1, 0xb8, 0x24, 0x3e, 0x39, 0x3f, + 0x37, 0x37, 0xb3, 0x44, 0x82, 0x05, 0xbf, 0x17, 0x9c, 0xc1, 0xaa, 0x82, 0xb8, 0x40, 0x5a, 0x20, + 0x6c, 0xa7, 0xc0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, + 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x32, 0x4f, 0xcf, + 0x2c, 0xc9, 0x28, 0x4d, 0x02, 0x99, 0xa5, 0x0f, 0x0f, 0x54, 0x38, 0x23, 0xb1, 0x20, 0x53, 0x1f, + 0x7b, 0x50, 
0x27, 0xb1, 0x81, 0x83, 0xd8, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x24, 0x27, + 0xcb, 0x01, 0x02, 0x00, 0x00, +} + +func (m *Block) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Block) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastCommit != nil { + { + size, err := m.LastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintBlock(dAtA []byte, offset int, v uint64) int { + offset -= sovBlock(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Block) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Header.Size() + n += 1 + l + sovBlock(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovBlock(uint64(l)) + l = m.Evidence.Size() + n += 1 + l + sovBlock(uint64(l)) + if m.LastCommit != nil { + l = m.LastCommit.Size() + n += 1 + l + sovBlock(uint64(l)) + } + return n +} + +func sovBlock(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBlock(x uint64) (n int) { + return sovBlock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastCommit == nil { + m.LastCommit = &Commit{} + } + if err := m.LastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBlock(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBlock(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBlock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBlock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBlock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBlock + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBlock + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthBlock + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBlock = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBlock = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBlock = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/types/v1beta1/canonical.pb.go b/api/cometbft/types/v1beta1/canonical.pb.go new file mode 100644 index 00000000000..2e03ff7d4a0 --- /dev/null +++ b/api/cometbft/types/v1beta1/canonical.pb.go @@ -0,0 +1,1389 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1beta1/canonical.proto + +package v1beta1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// CanonicalBlockID is a canonical representation of a BlockID, which gets +// serialized and signed. +type CanonicalBlockID struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + PartSetHeader CanonicalPartSetHeader `protobuf:"bytes,2,opt,name=part_set_header,json=partSetHeader,proto3" json:"part_set_header"` +} + +func (m *CanonicalBlockID) Reset() { *m = CanonicalBlockID{} } +func (m *CanonicalBlockID) String() string { return proto.CompactTextString(m) } +func (*CanonicalBlockID) ProtoMessage() {} +func (*CanonicalBlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_476991b7998505b4, []int{0} +} +func (m *CanonicalBlockID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalBlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalBlockID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalBlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalBlockID.Merge(m, src) +} +func (m *CanonicalBlockID) XXX_Size() int { + return m.Size() +} +func (m *CanonicalBlockID) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalBlockID.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalBlockID proto.InternalMessageInfo + +func (m *CanonicalBlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *CanonicalBlockID) GetPartSetHeader() CanonicalPartSetHeader { + if m != nil { + return m.PartSetHeader + } + return CanonicalPartSetHeader{} +} + +// CanonicalPartSetHeader is a canonical representation of a PartSetHeader, +// which gets serialized and signed. 
+type CanonicalPartSetHeader struct { + Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *CanonicalPartSetHeader) Reset() { *m = CanonicalPartSetHeader{} } +func (m *CanonicalPartSetHeader) String() string { return proto.CompactTextString(m) } +func (*CanonicalPartSetHeader) ProtoMessage() {} +func (*CanonicalPartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_476991b7998505b4, []int{1} +} +func (m *CanonicalPartSetHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalPartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalPartSetHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalPartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalPartSetHeader.Merge(m, src) +} +func (m *CanonicalPartSetHeader) XXX_Size() int { + return m.Size() +} +func (m *CanonicalPartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalPartSetHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalPartSetHeader proto.InternalMessageInfo + +func (m *CanonicalPartSetHeader) GetTotal() uint32 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *CanonicalPartSetHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// CanonicalProposal is a canonical representation of a Proposal, which gets +// serialized and signed. +type CanonicalProposal struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` + Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` + POLRound int64 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` + BlockID *CanonicalBlockID `protobuf:"bytes,5,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ChainID string `protobuf:"bytes,7,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *CanonicalProposal) Reset() { *m = CanonicalProposal{} } +func (m *CanonicalProposal) String() string { return proto.CompactTextString(m) } +func (*CanonicalProposal) ProtoMessage() {} +func (*CanonicalProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_476991b7998505b4, []int{2} +} +func (m *CanonicalProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalProposal.Merge(m, src) +} +func (m *CanonicalProposal) XXX_Size() int { + return m.Size() +} +func (m *CanonicalProposal) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalProposal proto.InternalMessageInfo + +func (m *CanonicalProposal) GetType() SignedMsgType { + if m != nil { + return 
m.Type + } + return UnknownType +} + +func (m *CanonicalProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *CanonicalProposal) GetRound() int64 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CanonicalProposal) GetPOLRound() int64 { + if m != nil { + return m.POLRound + } + return 0 +} + +func (m *CanonicalProposal) GetBlockID() *CanonicalBlockID { + if m != nil { + return m.BlockID + } + return nil +} + +func (m *CanonicalProposal) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *CanonicalProposal) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +// CanonicalVote is a canonical representation of a Vote, which gets +// serialized and signed. +type CanonicalVote struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` + Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` + BlockID *CanonicalBlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ChainID string `protobuf:"bytes,6,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *CanonicalVote) Reset() { *m = CanonicalVote{} } +func (m *CanonicalVote) String() string { return proto.CompactTextString(m) } +func (*CanonicalVote) ProtoMessage() {} +func (*CanonicalVote) Descriptor() ([]byte, []int) { + return fileDescriptor_476991b7998505b4, []int{3} +} +func (m *CanonicalVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalVote.Merge(m, src) +} +func (m *CanonicalVote) XXX_Size() int { + return m.Size() +} +func (m *CanonicalVote) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalVote.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalVote proto.InternalMessageInfo + +func (m *CanonicalVote) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *CanonicalVote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *CanonicalVote) GetRound() int64 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CanonicalVote) GetBlockID() *CanonicalBlockID { + if m != nil { + return m.BlockID + } + return nil +} + +func (m *CanonicalVote) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *CanonicalVote) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func init() { + proto.RegisterType((*CanonicalBlockID)(nil), "cometbft.types.v1beta1.CanonicalBlockID") + proto.RegisterType((*CanonicalPartSetHeader)(nil), "cometbft.types.v1beta1.CanonicalPartSetHeader") + proto.RegisterType((*CanonicalProposal)(nil), "cometbft.types.v1beta1.CanonicalProposal") + proto.RegisterType((*CanonicalVote)(nil), "cometbft.types.v1beta1.CanonicalVote") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta1/canonical.proto", 
fileDescriptor_476991b7998505b4) +} + +var fileDescriptor_476991b7998505b4 = []byte{ + // 498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xb3, 0xa9, 0x93, 0x38, 0xdb, 0x06, 0xca, 0xaa, 0x8a, 0xac, 0x1c, 0xec, 0xc8, 0x12, + 0x95, 0xb9, 0xd8, 0x6a, 0x39, 0x20, 0xae, 0x2e, 0x07, 0x22, 0x81, 0x08, 0x6e, 0xc5, 0x01, 0x21, + 0x45, 0x6b, 0x7b, 0x6b, 0x5b, 0x38, 0xde, 0x95, 0xbd, 0x41, 0xea, 0x9d, 0x07, 0xe8, 0xd3, 0xf0, + 0x0c, 0x3d, 0xf6, 0xc8, 0x29, 0x20, 0x47, 0xbc, 0x07, 0xda, 0xdd, 0xc4, 0xe9, 0x21, 0x11, 0x07, + 0x10, 0xb7, 0x99, 0xf1, 0x3f, 0x33, 0x9f, 0xff, 0xd1, 0xc2, 0xd3, 0x88, 0xce, 0x09, 0x0f, 0xaf, + 0xb9, 0xc7, 0x6f, 0x18, 0xa9, 0xbc, 0x2f, 0x67, 0x21, 0xe1, 0xf8, 0xcc, 0x8b, 0x70, 0x41, 0x8b, + 0x2c, 0xc2, 0xb9, 0xcb, 0x4a, 0xca, 0x29, 0x1a, 0x6e, 0x74, 0xae, 0xd4, 0xb9, 0x6b, 0xdd, 0xe8, + 0x24, 0xa1, 0x09, 0x95, 0x12, 0x4f, 0x44, 0x4a, 0x3d, 0xb2, 0xf7, 0x4c, 0x55, 0xbd, 0x4a, 0x63, + 0x25, 0x94, 0x26, 0x39, 0xf1, 0x64, 0x16, 0x2e, 0xae, 0x3d, 0x9e, 0xcd, 0x49, 0xc5, 0xf1, 0x9c, + 0x29, 0x81, 0xfd, 0x15, 0xc0, 0xe3, 0x8b, 0x0d, 0x86, 0x9f, 0xd3, 0xe8, 0xf3, 0xe4, 0x15, 0x42, + 0x50, 0x4b, 0x71, 0x95, 0x1a, 0x60, 0x0c, 0x9c, 0xa3, 0x40, 0xc6, 0xe8, 0x13, 0x7c, 0xcc, 0x70, + 0xc9, 0x67, 0x15, 0xe1, 0xb3, 0x94, 0xe0, 0x98, 0x94, 0x46, 0x7b, 0x0c, 0x9c, 0xc3, 0x73, 0xd7, + 0xdd, 0x4d, 0xed, 0x36, 0x63, 0xa7, 0xb8, 0xe4, 0x97, 0x84, 0xbf, 0x96, 0x5d, 0xbe, 0x76, 0xb7, + 0xb4, 0x5a, 0xc1, 0x80, 0x3d, 0x2c, 0xda, 0x3e, 0x1c, 0xee, 0x96, 0xa3, 0x13, 0xd8, 0xe1, 0x94, + 0xe3, 0x5c, 0xc2, 0x0c, 0x02, 0x95, 0x34, 0x84, 0xed, 0x2d, 0xa1, 0xfd, 0xab, 0x0d, 0x9f, 0x6c, + 0x87, 0x94, 0x94, 0xd1, 0x0a, 0xe7, 0xe8, 0x25, 0xd4, 0x04, 0x96, 0x6c, 0x7f, 0x74, 0xfe, 0x74, + 0x1f, 0xec, 0x65, 0x96, 0x14, 0x24, 0x7e, 0x5b, 0x25, 0x57, 0x37, 0x8c, 0x04, 0xb2, 0x05, 0x0d, + 0x61, 0x37, 0x25, 0x59, 0x92, 0x72, 0xb9, 0xe6, 0x38, 0x58, 0x67, 0x02, 0xa9, 0xa4, 0x8b, 0x22, + 0x36, 0x0e, 0x64, 0x59, 0x25, 0xe8, 0x19, 0xec, 0x33, 0x9a, 0xcf, 0xd4, 0x17, 0x6d, 0x0c, 0x9c, + 0x03, 0xff, 0xa8, 0x5e, 0x5a, 0xfa, 0xf4, 0xdd, 0x9b, 0x40, 0xd4, 0x02, 0x9d, 0xd1, 0x5c, 0x46, + 0x68, 0x0a, 0xf5, 0x50, 0x58, 0x3d, 0xcb, 0x62, 0xa3, 0x23, 0x4d, 0x74, 0xfe, 0x68, 0xe2, 0xfa, + 0x36, 0xfe, 0x61, 0xbd, 0xb4, 0x7a, 0xeb, 0x24, 0xe8, 0xc9, 0x31, 0x93, 0x18, 0xf9, 0xb0, 0xdf, + 0x5c, 0xd6, 0xe8, 0xca, 0x91, 0x23, 0x57, 0xdd, 0xde, 0xdd, 0xdc, 0xde, 0xbd, 0xda, 0x28, 0x7c, + 0x5d, 0xdc, 0xe0, 0xf6, 0x87, 0x05, 0x82, 0x6d, 0x1b, 0x3a, 0x85, 0x7a, 0x94, 0xe2, 0xac, 0x10, + 0x54, 0xbd, 0x31, 0x70, 0xfa, 0x6a, 0xd7, 0x85, 0xa8, 0x89, 0x5d, 0xf2, 0xe3, 0x24, 0xb6, 0xbf, + 0xb5, 0xe1, 0xa0, 0xc1, 0xfa, 0x40, 0x39, 0xf9, 0x7f, 0x1e, 0x3f, 0x34, 0x4e, 0xfb, 0xf7, 0xc6, + 0x75, 0xfe, 0xde, 0xb8, 0xee, 0x7e, 0xe3, 0xfc, 0xf7, 0x77, 0xb5, 0x09, 0xee, 0x6b, 0x13, 0xfc, + 0xac, 0x4d, 0x70, 0xbb, 0x32, 0x5b, 0xf7, 0x2b, 0xb3, 0xf5, 0x7d, 0x65, 0xb6, 0x3e, 0xbe, 0x48, + 0x32, 0x9e, 0x2e, 0x42, 0xf1, 0x2f, 0x5e, 0xf3, 0xaa, 0x9b, 0x00, 0xb3, 0xcc, 0xdb, 0xfd, 0xd6, + 0xc3, 0xae, 0x64, 0x7c, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x10, 0x33, 0xb1, 0x97, 0x62, 0x04, + 0x00, 0x00, +} + +func (m *CanonicalBlockID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalBlockID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
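+// Annotation (editor's note, not part of the generated output): the
+// MarshalToSizedBuffer variants below fill dAtA back to front, writing the
+// highest-numbered field first and prepending each value's tag byte and
+// varint length as they go; the returned int is the number of bytes
+// occupied at the tail of the buffer.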
+func (m *CanonicalBlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCanonical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CanonicalPartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalPartSetHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalPartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Total != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CanonicalProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x3a + } + n2, err2 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintCanonical(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x32 + if m.BlockID != nil { + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCanonical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.POLRound != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.POLRound)) + i-- + dAtA[i] = 0x20 + } + if m.Round != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Round)) + i-- + dAtA[i] = 0x19 + } + if m.Height != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Height)) + i-- + dAtA[i] = 0x11 + } + if m.Type != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CanonicalVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x32 + } 
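+	// Annotation (editor's note, not generated output): Timestamp (field 5,
+	// wire type 2, hence tag byte 0x2a) is written through gogoproto's
+	// stdtime helpers, which encode Go's time.Time as a nested
+	// google.protobuf.Timestamp message.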
+ n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintCanonical(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x2a + if m.BlockID != nil { + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCanonical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Round != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Round)) + i-- + dAtA[i] = 0x19 + } + if m.Height != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Height)) + i-- + dAtA[i] = 0x11 + } + if m.Type != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintCanonical(dAtA []byte, offset int, v uint64) int { + offset -= sovCanonical(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CanonicalBlockID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + l = m.PartSetHeader.Size() + n += 1 + l + sovCanonical(uint64(l)) + return n +} + +func (m *CanonicalPartSetHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovCanonical(uint64(m.Total)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + +func (m *CanonicalProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCanonical(uint64(m.Type)) + } + if m.Height != 0 { + n += 9 + } + if m.Round != 0 { + n += 9 + } + if m.POLRound != 0 { + n += 1 + sovCanonical(uint64(m.POLRound)) + } + if m.BlockID != nil { + l = m.BlockID.Size() + n += 1 + l + sovCanonical(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovCanonical(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + +func (m *CanonicalVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCanonical(uint64(m.Type)) + } + if m.Height != 0 { + n += 9 + } + if m.Round != 0 { + n += 9 + } + if m.BlockID != nil { + l = m.BlockID.Size() + n += 1 + l + sovCanonical(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovCanonical(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + +func sovCanonical(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCanonical(x uint64) (n int) { + return sovCanonical(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalBlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalBlockID: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalPartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalPartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Height = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Round = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field POLRound", wireType) + } + m.POLRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.POLRound |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockID == nil { + m.BlockID = &CanonicalBlockID{} + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + 
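+			// Annotation (editor's note, not generated output): the postIndex < 0
+			// check below catches int overflow in iNdEx+msglen, rejecting a
+			// crafted length prefix before it can index past the buffer.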
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanonicalVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Height = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Round = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockID == nil { + m.BlockID = &CanonicalBlockID{} + } + if 
err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCanonical(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCanonical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCanonical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCanonical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCanonical + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCanonical + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCanonical + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCanonical = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCanonical = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCanonical = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/types/v1beta1/events.pb.go 
b/api/cometbft/types/v1beta1/events.pb.go new file mode 100644 index 00000000000..592072b9d27 --- /dev/null +++ b/api/cometbft/types/v1beta1/events.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1beta1/events.proto + +package v1beta1 + +import ( + fmt "fmt" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventDataRoundState is emitted with each new round step. +type EventDataRoundState struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Step string `protobuf:"bytes,3,opt,name=step,proto3" json:"step,omitempty"` +} + +func (m *EventDataRoundState) Reset() { *m = EventDataRoundState{} } +func (m *EventDataRoundState) String() string { return proto.CompactTextString(m) } +func (*EventDataRoundState) ProtoMessage() {} +func (*EventDataRoundState) Descriptor() ([]byte, []int) { + return fileDescriptor_659f129cf60555df, []int{0} +} +func (m *EventDataRoundState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDataRoundState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDataRoundState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDataRoundState) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDataRoundState.Merge(m, src) +} +func (m *EventDataRoundState) XXX_Size() int { + return m.Size() +} +func (m *EventDataRoundState) XXX_DiscardUnknown() { + xxx_messageInfo_EventDataRoundState.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDataRoundState proto.InternalMessageInfo + +func (m *EventDataRoundState) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *EventDataRoundState) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *EventDataRoundState) GetStep() string { + if m != nil { + return m.Step + } + return "" +} + +func init() { + proto.RegisterType((*EventDataRoundState)(nil), "cometbft.types.v1beta1.EventDataRoundState") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta1/events.proto", fileDescriptor_659f129cf60555df) +} + +var fileDescriptor_659f129cf60555df = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd4, 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x83, 0x29, 0xd2, 0x03, 0x2b, 0xd2, 0x83, 0x2a, 0x52, 0x0a, 0xe7, 0x12, 0x76, 0x05, 0xa9, + 0x73, 0x49, 0x2c, 0x49, 0x0c, 0xca, 0x2f, 0xcd, 0x4b, 0x09, 0x2e, 0x49, 0x2c, 0x49, 0x15, 0x12, + 0xe3, 0x62, 0xcb, 0x48, 0xcd, 0x4c, 0xcf, 0x28, 0x91, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x82, + 0xf2, 0x84, 
0x44, 0xb8, 0x58, 0x8b, 0x40, 0xaa, 0x24, 0x98, 0x14, 0x18, 0x35, 0x58, 0x83, 0x20, + 0x1c, 0x21, 0x21, 0x2e, 0x96, 0xe2, 0x92, 0xd4, 0x02, 0x09, 0x66, 0x05, 0x46, 0x0d, 0xce, 0x20, + 0x30, 0xdb, 0x29, 0xf0, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, + 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xcc, 0xd3, + 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xe1, 0x4e, 0x87, 0x33, 0x12, 0x0b, + 0x32, 0xf5, 0xb1, 0x7b, 0x28, 0x89, 0x0d, 0xec, 0x15, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x75, 0x89, 0x8f, 0x3b, 0xf1, 0x00, 0x00, 0x00, +} + +func (m *EventDataRoundState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDataRoundState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDataRoundState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Step) > 0 { + i -= len(m.Step) + copy(dAtA[i:], m.Step) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Step))) + i-- + dAtA[i] = 0x1a + } + if m.Round != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventDataRoundState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovEvents(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovEvents(uint64(m.Round)) + } + l = len(m.Step) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDataRoundState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDataRoundState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Step = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/cometbft/types/v1beta1/evidence.pb.go b/api/cometbft/types/v1beta1/evidence.pb.go new file mode 100644 index 00000000000..41dd8b843f7 --- /dev/null +++ b/api/cometbft/types/v1beta1/evidence.pb.go @@ -0,0 +1,1400 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1beta1/evidence.proto + +package v1beta1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Evidence is a generic type for wrapping evidence of misbehavior by a validator. +type Evidence struct { + // The type of evidence. + // + // Types that are valid to be assigned to Sum: + // *Evidence_DuplicateVoteEvidence + // *Evidence_LightClientAttackEvidence + Sum isEvidence_Sum `protobuf_oneof:"sum"` +} + +func (m *Evidence) Reset() { *m = Evidence{} } +func (m *Evidence) String() string { return proto.CompactTextString(m) } +func (*Evidence) ProtoMessage() {} +func (*Evidence) Descriptor() ([]byte, []int) { + return fileDescriptor_ab3b8db02b56853b, []int{0} +} +func (m *Evidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Evidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Evidence.Merge(m, src) +} +func (m *Evidence) XXX_Size() int { + return m.Size() +} +func (m *Evidence) XXX_DiscardUnknown() { + xxx_messageInfo_Evidence.DiscardUnknown(m) +} + +var xxx_messageInfo_Evidence proto.InternalMessageInfo + +type isEvidence_Sum interface { + isEvidence_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Evidence_DuplicateVoteEvidence struct { + DuplicateVoteEvidence *DuplicateVoteEvidence `protobuf:"bytes,1,opt,name=duplicate_vote_evidence,json=duplicateVoteEvidence,proto3,oneof" json:"duplicate_vote_evidence,omitempty"` +} +type Evidence_LightClientAttackEvidence struct { + LightClientAttackEvidence *LightClientAttackEvidence `protobuf:"bytes,2,opt,name=light_client_attack_evidence,json=lightClientAttackEvidence,proto3,oneof" json:"light_client_attack_evidence,omitempty"` +} + +func (*Evidence_DuplicateVoteEvidence) isEvidence_Sum() {} +func (*Evidence_LightClientAttackEvidence) isEvidence_Sum() {} + +func (m *Evidence) GetSum() isEvidence_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Evidence) GetDuplicateVoteEvidence() *DuplicateVoteEvidence { + if x, ok := m.GetSum().(*Evidence_DuplicateVoteEvidence); ok { + return x.DuplicateVoteEvidence + } + return nil +} + +func (m *Evidence) GetLightClientAttackEvidence() *LightClientAttackEvidence { + if x, ok := m.GetSum().(*Evidence_LightClientAttackEvidence); ok { + return x.LightClientAttackEvidence + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Evidence) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Evidence_DuplicateVoteEvidence)(nil), + (*Evidence_LightClientAttackEvidence)(nil), + } +} + +// DuplicateVoteEvidence contains evidence that a validator signed two conflicting votes.
+type DuplicateVoteEvidence struct { + VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` + VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` + TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + ValidatorPower int64 `protobuf:"varint,4,opt,name=validator_power,json=validatorPower,proto3" json:"validator_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *DuplicateVoteEvidence) Reset() { *m = DuplicateVoteEvidence{} } +func (m *DuplicateVoteEvidence) String() string { return proto.CompactTextString(m) } +func (*DuplicateVoteEvidence) ProtoMessage() {} +func (*DuplicateVoteEvidence) Descriptor() ([]byte, []int) { + return fileDescriptor_ab3b8db02b56853b, []int{1} +} +func (m *DuplicateVoteEvidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DuplicateVoteEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DuplicateVoteEvidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DuplicateVoteEvidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_DuplicateVoteEvidence.Merge(m, src) +} +func (m *DuplicateVoteEvidence) XXX_Size() int { + return m.Size() +} +func (m *DuplicateVoteEvidence) XXX_DiscardUnknown() { + xxx_messageInfo_DuplicateVoteEvidence.DiscardUnknown(m) +} + +var xxx_messageInfo_DuplicateVoteEvidence proto.InternalMessageInfo + +func (m *DuplicateVoteEvidence) GetVoteA() *Vote { + if m != nil { + return m.VoteA + } + return nil +} + +func (m *DuplicateVoteEvidence) GetVoteB() *Vote { + if m != nil { + return m.VoteB + } + return nil +} + +func (m *DuplicateVoteEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetValidatorPower() int64 { + if m != nil { + return m.ValidatorPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. 
+type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` + CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` + ByzantineValidators []*Validator `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators,omitempty"` + TotalVotingPower int64 `protobuf:"varint,4,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *LightClientAttackEvidence) Reset() { *m = LightClientAttackEvidence{} } +func (m *LightClientAttackEvidence) String() string { return proto.CompactTextString(m) } +func (*LightClientAttackEvidence) ProtoMessage() {} +func (*LightClientAttackEvidence) Descriptor() ([]byte, []int) { + return fileDescriptor_ab3b8db02b56853b, []int{2} +} +func (m *LightClientAttackEvidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightClientAttackEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightClientAttackEvidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightClientAttackEvidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_LightClientAttackEvidence.Merge(m, src) +} +func (m *LightClientAttackEvidence) XXX_Size() int { + return m.Size() +} +func (m *LightClientAttackEvidence) XXX_DiscardUnknown() { + xxx_messageInfo_LightClientAttackEvidence.DiscardUnknown(m) +} + +var xxx_messageInfo_LightClientAttackEvidence proto.InternalMessageInfo + +func (m *LightClientAttackEvidence) GetConflictingBlock() *LightBlock { + if m != nil { + return m.ConflictingBlock + } + return nil +} + +func (m *LightClientAttackEvidence) GetCommonHeight() int64 { + if m != nil { + return m.CommonHeight + } + return 0 +} + +func (m *LightClientAttackEvidence) GetByzantineValidators() []*Validator { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +func (m *LightClientAttackEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *LightClientAttackEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +// EvidenceList is a list of evidence. 
+type EvidenceList struct { + Evidence []Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence"` +} + +func (m *EvidenceList) Reset() { *m = EvidenceList{} } +func (m *EvidenceList) String() string { return proto.CompactTextString(m) } +func (*EvidenceList) ProtoMessage() {} +func (*EvidenceList) Descriptor() ([]byte, []int) { + return fileDescriptor_ab3b8db02b56853b, []int{3} +} +func (m *EvidenceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvidenceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvidenceList.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvidenceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceList.Merge(m, src) +} +func (m *EvidenceList) XXX_Size() int { + return m.Size() +} +func (m *EvidenceList) XXX_DiscardUnknown() { + xxx_messageInfo_EvidenceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EvidenceList proto.InternalMessageInfo + +func (m *EvidenceList) GetEvidence() []Evidence { + if m != nil { + return m.Evidence + } + return nil +} + +func init() { + proto.RegisterType((*Evidence)(nil), "cometbft.types.v1beta1.Evidence") + proto.RegisterType((*DuplicateVoteEvidence)(nil), "cometbft.types.v1beta1.DuplicateVoteEvidence") + proto.RegisterType((*LightClientAttackEvidence)(nil), "cometbft.types.v1beta1.LightClientAttackEvidence") + proto.RegisterType((*EvidenceList)(nil), "cometbft.types.v1beta1.EvidenceList") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta1/evidence.proto", fileDescriptor_ab3b8db02b56853b) +} + +var fileDescriptor_ab3b8db02b56853b = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x3a, 0xa9, 0xc2, 0xb6, 0x40, 0x59, 0x5a, 0x48, 0xa3, 0xca, 0x09, 0x46, 0x40, + 0x0e, 0x60, 0x2b, 0xed, 0x81, 0x73, 0x0d, 0x48, 0x3d, 0x54, 0x02, 0xac, 0xaa, 0x07, 0x2e, 0xd6, + 0xda, 0xd9, 0x38, 0xab, 0xda, 0x5e, 0x2b, 0x9e, 0x04, 0x95, 0xa7, 0xe8, 0x03, 0xf0, 0x40, 0x3d, + 0xf6, 0x08, 0x17, 0x40, 0xc9, 0x0b, 0xf0, 0x08, 0x68, 0xd7, 0xf6, 0x36, 0x87, 0x2c, 0xe2, 0xc0, + 0xcd, 0x99, 0xfd, 0xfe, 0x9d, 0xf9, 0x67, 0x26, 0x8b, 0x9e, 0x45, 0x3c, 0xa5, 0x10, 0x8e, 0xc1, + 0x85, 0xcb, 0x9c, 0x16, 0xee, 0x7c, 0x18, 0x52, 0x20, 0x43, 0x97, 0xce, 0xd9, 0x88, 0x66, 0x11, + 0x75, 0xf2, 0x29, 0x07, 0x8e, 0x1f, 0xd5, 0x98, 0x23, 0x31, 0xa7, 0xc2, 0xba, 0xbb, 0x31, 0x8f, + 0xb9, 0x44, 0x5c, 0xf1, 0x55, 0xd2, 0xdd, 0x5e, 0xcc, 0x79, 0x9c, 0x50, 0x57, 0xfe, 0x0a, 0x67, + 0x63, 0x17, 0x58, 0x4a, 0x0b, 0x20, 0x69, 0x5e, 0x01, 0xb6, 0x26, 0x6b, 0x79, 0x79, 0xc9, 0x3c, + 0xd7, 0x30, 0x73, 0x92, 0xb0, 0x11, 0x01, 0x3e, 0x2d, 0x39, 0xfb, 0xb7, 0x81, 0xda, 0xef, 0xaa, + 0x6a, 0x71, 0x8c, 0x1e, 0x8f, 0x66, 0x79, 0xc2, 0x22, 0x02, 0x34, 0x98, 0x73, 0xa0, 0x41, 0x6d, + 0xa4, 0x63, 0xf4, 0x8d, 0xc1, 0xd6, 0xe1, 0x2b, 0x67, 0xbd, 0x13, 0xe7, 0x6d, 0x2d, 0x3b, 0xe7, + 0x40, 0xeb, 0xfb, 0x4e, 0x1a, 0xfe, 0xde, 0x68, 0xdd, 0x01, 0x06, 0x74, 0x90, 0xb0, 0x78, 0x02, + 0x41, 0x94, 0x30, 0x9a, 0x41, 0x40, 0x00, 0x48, 0x74, 0x71, 0x9b, 0x6d, 0x43, 0x66, 0x1b, 0xea, + 0xb2, 0x9d, 0x0a, 0xed, 0x1b, 0x29, 0x3d, 0x96, 0xca, 0x95, 0x8c, 0xfb, 0x89, 0xee, 0xd0, 0x6b, + 0x21, 0xb3, 0x98, 0xa5, 0xf6, 0xd7, 0x0d, 0xb4, 0xb7, 0xb6, 0x5e, 0x7c, 0x84, 0x36, 0xa5, 0x6b, + 0x52, 0xd9, 0x3d, 0xd0, 0x15, 0x20, 0x54, 0x7e, 0x4b, 0xb0, 0xc7, 
0x4a, 0x14, 0x56, 0x55, 0xff, + 0x83, 0xc8, 0xc3, 0x2f, 0x11, 0x06, 0x0e, 0x24, 0x11, 0x5d, 0x66, 0x59, 0x1c, 0xe4, 0xfc, 0x33, + 0x9d, 0x76, 0xcc, 0xbe, 0x31, 0x30, 0xfd, 0x1d, 0x79, 0x72, 0x2e, 0x0f, 0x3e, 0x88, 0x38, 0x7e, + 0x81, 0xee, 0xab, 0xb9, 0x55, 0x68, 0x53, 0xa2, 0xf7, 0x54, 0xb8, 0x04, 0x3d, 0x74, 0x47, 0x2d, + 0x4b, 0xa7, 0x25, 0xcb, 0xe9, 0x3a, 0xe5, 0x3a, 0x39, 0xf5, 0x3a, 0x39, 0x67, 0x35, 0xe1, 0xb5, + 0xaf, 0x7f, 0xf4, 0x1a, 0x57, 0x3f, 0x7b, 0x86, 0x7f, 0x2b, 0xb3, 0xbf, 0x6f, 0xa0, 0x7d, 0x6d, + 0x83, 0xf1, 0x7b, 0xf4, 0x20, 0xe2, 0xd9, 0x38, 0x61, 0x91, 0xac, 0x3b, 0x4c, 0x78, 0x74, 0x51, + 0x75, 0xcb, 0xfe, 0xeb, 0xb8, 0x3c, 0x41, 0xfa, 0x3b, 0x2b, 0x62, 0x19, 0xc1, 0x4f, 0xd1, 0xdd, + 0x88, 0xa7, 0x29, 0xcf, 0x82, 0x09, 0x15, 0x9c, 0xec, 0xa2, 0xe9, 0x6f, 0x97, 0xc1, 0x13, 0x19, + 0xc3, 0x67, 0x68, 0x37, 0xbc, 0xfc, 0x42, 0x32, 0x60, 0x19, 0x0d, 0x94, 0xe7, 0xa2, 0x63, 0xf6, + 0xcd, 0xc1, 0xd6, 0xe1, 0x13, 0x6d, 0xc7, 0x6b, 0xd2, 0x7f, 0xa8, 0xe4, 0x2a, 0x56, 0x68, 0x86, + 0xd0, 0xd4, 0x0c, 0xe1, 0x7f, 0xf4, 0xd6, 0x47, 0xdb, 0x75, 0x27, 0x4f, 0x59, 0x01, 0xd8, 0x43, + 0xed, 0x95, 0x7f, 0x98, 0xf0, 0xd2, 0xd7, 0x79, 0x51, 0x5b, 0xdc, 0x14, 0x17, 0xfb, 0x4a, 0xe7, + 0x7d, 0xbc, 0x5e, 0x58, 0xc6, 0xcd, 0xc2, 0x32, 0x7e, 0x2d, 0x2c, 0xe3, 0x6a, 0x69, 0x35, 0x6e, + 0x96, 0x56, 0xe3, 0xdb, 0xd2, 0x6a, 0x7c, 0x7a, 0x1d, 0x33, 0x98, 0xcc, 0x42, 0x71, 0xa3, 0xab, + 0x9e, 0x03, 0xf5, 0x41, 0x72, 0xe6, 0xae, 0x7f, 0x24, 0xc2, 0x4d, 0xe9, 0xe7, 0xe8, 0x4f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x50, 0xf2, 0xed, 0xb6, 0xdf, 0x04, 0x00, 0x00, +} + +func (m *Evidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DuplicateVoteEvidence != nil { + { + size, err := m.DuplicateVoteEvidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Evidence_LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LightClientAttackEvidence != nil { + { + size, err := m.LightClientAttackEvidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n3, err3 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintEvidence(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x2a + if m.ValidatorPower != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.ValidatorPower)) + i-- + dAtA[i] = 0x20 + } + if m.TotalVotingPower != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.VoteB != nil { + { + size, err := m.VoteB.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.VoteA != nil { + { + size, err := m.VoteA.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n6, err6 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i = encodeVarintEvidence(dAtA, i, uint64(n6)) + i-- + dAtA[i] = 0x2a + if m.TotalVotingPower != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x20 + } + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.CommonHeight != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.CommonHeight)) + i-- + dAtA[i] = 0x10 + } + if m.ConflictingBlock != nil { + { + size, err := m.ConflictingBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvidenceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvidenceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvidenceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Evidence) > 0 { + for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Evidence[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + 
} + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintEvidence(dAtA []byte, offset int, v uint64) int { + offset -= sovEvidence(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Evidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Evidence_DuplicateVoteEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DuplicateVoteEvidence != nil { + l = m.DuplicateVoteEvidence.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + return n +} +func (m *Evidence_LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightClientAttackEvidence != nil { + l = m.LightClientAttackEvidence.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + return n +} +func (m *DuplicateVoteEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteA != nil { + l = m.VoteA.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + if m.VoteB != nil { + l = m.VoteB.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + if m.TotalVotingPower != 0 { + n += 1 + sovEvidence(uint64(m.TotalVotingPower)) + } + if m.ValidatorPower != 0 { + n += 1 + sovEvidence(uint64(m.ValidatorPower)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovEvidence(uint64(l)) + return n +} + +func (m *LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConflictingBlock != nil { + l = m.ConflictingBlock.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + if m.CommonHeight != 0 { + n += 1 + sovEvidence(uint64(m.CommonHeight)) + } + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + } + if m.TotalVotingPower != 0 { + n += 1 + sovEvidence(uint64(m.TotalVotingPower)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovEvidence(uint64(l)) + return n +} + +func (m *EvidenceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Evidence) > 0 { + for _, e := range m.Evidence { + l = e.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + } + return n +} + +func sovEvidence(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvidence(x uint64) (n int) { + return sovEvidence(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Evidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DuplicateVoteEvidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DuplicateVoteEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Evidence_DuplicateVoteEvidence{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LightClientAttackEvidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &LightClientAttackEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Evidence_LightClientAttackEvidence{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DuplicateVoteEvidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DuplicateVoteEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteA", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteA == nil { + m.VoteA = &Vote{} + } + if err := m.VoteA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteB", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteB == nil { + 
m.VoteB = &Vote{} + } + if err := m.VoteB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorPower", wireType) + } + m.ValidatorPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValidatorPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightClientAttackEvidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightClientAttackEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConflictingBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConflictingBlock == nil { + m.ConflictingBlock = &LightBlock{} + } + if err := m.ConflictingBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field CommonHeight", wireType) + } + m.CommonHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommonHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, &Validator{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvidenceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvidenceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvidenceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evidence = append(m.Evidence, Evidence{}) + if err := m.Evidence[len(m.Evidence)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvidence(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvidence + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvidence + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvidence + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvidence + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvidence + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvidence + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvidence = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvidence = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvidence = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/params.pb.go b/api/cometbft/types/v1beta1/params.pb.go similarity index 74% rename from proto/tendermint/types/params.pb.go rename to api/cometbft/types/v1beta1/params.pb.go index 3184fdd1a1c..b12f31e022e 100644 --- a/proto/tendermint/types/params.pb.go +++ b/api/cometbft/types/v1beta1/params.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/params.proto +// source: cometbft/types/v1beta1/params.proto -package types +package v1beta1 import ( fmt "fmt" @@ -30,18 +30,17 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // ConsensusParams contains consensus critical parameters that determine the // validity of blocks. 
type ConsensusParams struct { - Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` - Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` - Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - Abci *ABCIParams `protobuf:"bytes,5,opt,name=abci,proto3" json:"abci,omitempty"` + Block BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block"` + Evidence EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence"` + Validator ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator"` + Version VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{0} + return fileDescriptor_be5c3dceb37e69cf, []int{0} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -70,39 +69,32 @@ func (m *ConsensusParams) XXX_DiscardUnknown() { var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo -func (m *ConsensusParams) GetBlock() *BlockParams { +func (m *ConsensusParams) GetBlock() BlockParams { if m != nil { return m.Block } - return nil + return BlockParams{} } -func (m *ConsensusParams) GetEvidence() *EvidenceParams { +func (m *ConsensusParams) GetEvidence() EvidenceParams { if m != nil { return m.Evidence } - return nil + return EvidenceParams{} } -func (m *ConsensusParams) GetValidator() *ValidatorParams { +func (m *ConsensusParams) GetValidator() ValidatorParams { if m != nil { return m.Validator } - return nil + return ValidatorParams{} } -func (m *ConsensusParams) GetVersion() *VersionParams { +func (m *ConsensusParams) GetVersion() VersionParams { if m != nil { return m.Version } - return nil -} - -func (m *ConsensusParams) GetAbci() *ABCIParams { - if m != nil { - return m.Abci - } - return nil + return VersionParams{} } // BlockParams contains limits on the block size. @@ -113,13 +105,18 @@ type BlockParams struct { // Max gas per block. // Note: must be greater or equal to -1 MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` + // Minimum time increment between consecutive blocks (in milliseconds). If the + // block header timestamp is ahead of the system clock, decrease this value. + // + // Not exposed to the application. + TimeIotaMs int64 `protobuf:"varint,3,opt,name=time_iota_ms,json=timeIotaMs,proto3" json:"time_iota_ms,omitempty"` } func (m *BlockParams) Reset() { *m = BlockParams{} } func (m *BlockParams) String() string { return proto.CompactTextString(m) } func (*BlockParams) ProtoMessage() {} func (*BlockParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{1} + return fileDescriptor_be5c3dceb37e69cf, []int{1} } func (m *BlockParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,6 +159,13 @@ func (m *BlockParams) GetMaxGas() int64 { return 0 } +func (m *BlockParams) GetTimeIotaMs() int64 { + if m != nil { + return m.TimeIotaMs + } + return 0 +} + // EvidenceParams determine how we handle evidence of malfeasance. type EvidenceParams struct { // Max age of evidence, in blocks.
@@ -185,7 +189,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{2} + return fileDescriptor_be5c3dceb37e69cf, []int{2} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +249,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } func (*ValidatorParams) ProtoMessage() {} func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{3} + return fileDescriptor_be5c3dceb37e69cf, []int{3} } func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -283,6 +287,7 @@ func (m *ValidatorParams) GetPubKeyTypes() []string { // VersionParams contains the ABCI application version. type VersionParams struct { + // Was named app_version in Tendermint 0.34 App uint64 `protobuf:"varint,1,opt,name=app,proto3" json:"app,omitempty"` } @@ -290,7 +295,7 @@ func (m *VersionParams) Reset() { *m = VersionParams{} } func (m *VersionParams) String() string { return proto.CompactTextString(m) } func (*VersionParams) ProtoMessage() {} func (*VersionParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{4} + return fileDescriptor_be5c3dceb37e69cf, []int{4} } func (m *VersionParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -338,7 +343,7 @@ func (m *HashedParams) Reset() { *m = HashedParams{} } func (m *HashedParams) String() string { return proto.CompactTextString(m) } func (*HashedParams) ProtoMessage() {} func (*HashedParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{5} + return fileDescriptor_be5c3dceb37e69cf, []int{5} } func (m *HashedParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -381,110 +386,56 @@ func (m *HashedParams) GetBlockMaxGas() int64 { return 0 } -// ABCIParams configure functionality specific to the Application Blockchain Interface. -type ABCIParams struct { - // vote_extensions_enable_height configures the first height during which - // vote extensions will be enabled. During this specified height, and for all - // subsequent heights, precommit messages that do not contain valid extension data - // will be considered invalid. Prior to this height, vote extensions will not - // be used or accepted by validators on the network. - // - // Once enabled, vote extensions will be created by the application in ExtendVote, - // passed to the application for validation in VerifyVoteExtension and given - // to the application to use when proposing a block during PrepareProposal. 
- VoteExtensionsEnableHeight int64 `protobuf:"varint,1,opt,name=vote_extensions_enable_height,json=voteExtensionsEnableHeight,proto3" json:"vote_extensions_enable_height,omitempty"` -} - -func (m *ABCIParams) Reset() { *m = ABCIParams{} } -func (m *ABCIParams) String() string { return proto.CompactTextString(m) } -func (*ABCIParams) ProtoMessage() {} -func (*ABCIParams) Descriptor() ([]byte, []int) { - return fileDescriptor_e12598271a686f57, []int{6} -} -func (m *ABCIParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ABCIParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ABCIParams.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ABCIParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ABCIParams.Merge(m, src) -} -func (m *ABCIParams) XXX_Size() int { - return m.Size() -} -func (m *ABCIParams) XXX_DiscardUnknown() { - xxx_messageInfo_ABCIParams.DiscardUnknown(m) -} - -var xxx_messageInfo_ABCIParams proto.InternalMessageInfo - -func (m *ABCIParams) GetVoteExtensionsEnableHeight() int64 { - if m != nil { - return m.VoteExtensionsEnableHeight - } - return 0 +func init() { + proto.RegisterType((*ConsensusParams)(nil), "cometbft.types.v1beta1.ConsensusParams") + proto.RegisterType((*BlockParams)(nil), "cometbft.types.v1beta1.BlockParams") + proto.RegisterType((*EvidenceParams)(nil), "cometbft.types.v1beta1.EvidenceParams") + proto.RegisterType((*ValidatorParams)(nil), "cometbft.types.v1beta1.ValidatorParams") + proto.RegisterType((*VersionParams)(nil), "cometbft.types.v1beta1.VersionParams") + proto.RegisterType((*HashedParams)(nil), "cometbft.types.v1beta1.HashedParams") } func init() { - proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") - proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") - proto.RegisterType((*EvidenceParams)(nil), "tendermint.types.EvidenceParams") - proto.RegisterType((*ValidatorParams)(nil), "tendermint.types.ValidatorParams") - proto.RegisterType((*VersionParams)(nil), "tendermint.types.VersionParams") - proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams") - proto.RegisterType((*ABCIParams)(nil), "tendermint.types.ABCIParams") -} - -func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } - -var fileDescriptor_e12598271a686f57 = []byte{ - // 576 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0x3f, 0x6f, 0xd3, 0x4e, - 0x18, 0xc7, 0x73, 0x75, 0xda, 0xa6, 0x4f, 0x7e, 0x69, 0xa2, 0xd3, 0x4f, 0xc2, 0x14, 0xe2, 0x04, - 0x0f, 0xa8, 0x52, 0x25, 0x1b, 0x91, 0x09, 0x84, 0x54, 0x25, 0x25, 0x6a, 0x0b, 0x2a, 0x7f, 0x2c, - 0xc4, 0xd0, 0xc5, 0x3a, 0x27, 0x57, 0xc7, 0x6a, 0xec, 0xb3, 0x7c, 0xe7, 0x28, 0x79, 0x17, 0x8c, - 0x8c, 0x1d, 0x61, 0x65, 0xe2, 0x25, 0x74, 0xec, 0xc8, 0x04, 0x28, 0x59, 0x78, 0x19, 0xc8, 0x67, - 0xbb, 0x6e, 0x12, 0xb6, 0xbb, 0x7b, 0x3e, 0x9f, 0xfb, 0xf3, 0x7d, 0x74, 0xd0, 0x14, 0x34, 0x18, - 0xd2, 0xc8, 0xf7, 0x02, 0x61, 0x8a, 0x59, 0x48, 0xb9, 0x19, 0x92, 0x88, 0xf8, 0xdc, 0x08, 0x23, - 0x26, 0x18, 0x6e, 0x14, 0x65, 0x43, 0x96, 0xf7, 0xfe, 0x77, 0x99, 0xcb, 0x64, 0xd1, 0x4c, 0x46, - 0x29, 0xb7, 0xa7, 0xb9, 0x8c, 0xb9, 0x63, 0x6a, 0xca, 0x99, 0x13, 0x5f, 0x98, 0xc3, 0x38, 0x22, - 0xc2, 0x63, 0x41, 0x5a, 0xd7, 0xbf, 0x6d, 0x40, 0xfd, 0x88, 0x05, 0x9c, 0x06, 0x3c, 0xe6, 0xef, 
- 0xe4, 0x09, 0xb8, 0x03, 0x9b, 0xce, 0x98, 0x0d, 0x2e, 0x55, 0xd4, 0x46, 0xfb, 0xd5, 0xa7, 0x4d, - 0x63, 0xf5, 0x2c, 0xa3, 0x97, 0x94, 0x53, 0xda, 0x4a, 0x59, 0xfc, 0x02, 0x2a, 0x74, 0xe2, 0x0d, - 0x69, 0x30, 0xa0, 0xea, 0x86, 0xf4, 0xda, 0xeb, 0x5e, 0x3f, 0x23, 0x32, 0xf5, 0xd6, 0xc0, 0x87, - 0xb0, 0x33, 0x21, 0x63, 0x6f, 0x48, 0x04, 0x8b, 0x54, 0x45, 0xea, 0x8f, 0xd6, 0xf5, 0x8f, 0x39, - 0x92, 0xf9, 0x85, 0x83, 0x9f, 0xc1, 0xf6, 0x84, 0x46, 0xdc, 0x63, 0x81, 0x5a, 0x96, 0x7a, 0xeb, - 0x1f, 0x7a, 0x0a, 0x64, 0x72, 0xce, 0xe3, 0x27, 0x50, 0x26, 0xce, 0xc0, 0x53, 0x37, 0xa5, 0xf7, - 0x70, 0xdd, 0xeb, 0xf6, 0x8e, 0x4e, 0x33, 0x49, 0x92, 0xfa, 0x29, 0x54, 0xef, 0x24, 0x80, 0x1f, - 0xc0, 0x8e, 0x4f, 0xa6, 0xb6, 0x33, 0x13, 0x94, 0xcb, 0xcc, 0x14, 0xab, 0xe2, 0x93, 0x69, 0x2f, - 0x99, 0xe3, 0x7b, 0xb0, 0x9d, 0x14, 0x5d, 0xc2, 0x65, 0x2c, 0x8a, 0xb5, 0xe5, 0x93, 0xe9, 0x31, - 0xe1, 0xaf, 0xca, 0x15, 0xa5, 0x51, 0xd6, 0xbf, 0x22, 0xd8, 0x5d, 0x4e, 0x05, 0x1f, 0x00, 0x4e, - 0x0c, 0xe2, 0x52, 0x3b, 0x88, 0x7d, 0x5b, 0xc6, 0x9b, 0xef, 0x5b, 0xf7, 0xc9, 0xb4, 0xeb, 0xd2, - 0x37, 0xb1, 0x2f, 0x2f, 0xc0, 0xf1, 0x19, 0x34, 0x72, 0x38, 0xef, 0x6c, 0x16, 0xff, 0x7d, 0x23, - 0x6d, 0xbd, 0x91, 0xb7, 0xde, 0x78, 0x99, 0x01, 0xbd, 0xca, 0xf5, 0xcf, 0x56, 0xe9, 0xf3, 0xaf, - 0x16, 0xb2, 0x76, 0xd3, 0xfd, 0xf2, 0xca, 0xf2, 0x53, 0x94, 0xe5, 0xa7, 0xe8, 0x87, 0x50, 0x5f, - 0xe9, 0x00, 0xd6, 0xa1, 0x16, 0xc6, 0x8e, 0x7d, 0x49, 0x67, 0xb6, 0xcc, 0x4a, 0x45, 0x6d, 0x65, - 0x7f, 0xc7, 0xaa, 0x86, 0xb1, 0xf3, 0x9a, 0xce, 0x3e, 0x24, 0x4b, 0xcf, 0x2b, 0xdf, 0xaf, 0x5a, - 0xe8, 0xcf, 0x55, 0x0b, 0xe9, 0x07, 0x50, 0x5b, 0xea, 0x01, 0x6e, 0x80, 0x42, 0xc2, 0x50, 0xbe, - 0xad, 0x6c, 0x25, 0xc3, 0x3b, 0xf0, 0x39, 0xfc, 0x77, 0x42, 0xf8, 0x88, 0x0e, 0x33, 0xf6, 0x31, - 0xd4, 0x65, 0x14, 0xf6, 0x6a, 0xd6, 0x35, 0xb9, 0x7c, 0x96, 0x07, 0xae, 0x43, 0xad, 0xe0, 0x8a, - 0xd8, 0xab, 0x39, 0x75, 0x4c, 0xb8, 0xfe, 0x16, 0xa0, 0x68, 0x2a, 0xee, 0x42, 0x73, 0xc2, 0x04, - 0xb5, 0xe9, 0x54, 0xd0, 0x20, 0xb9, 0x1d, 0xb7, 0x69, 0x40, 0x9c, 0x31, 0xb5, 0x47, 0xd4, 0x73, - 0x47, 0x22, 0x3b, 0x67, 0x2f, 0x81, 0xfa, 0xb7, 0x4c, 0x5f, 0x22, 0x27, 0x92, 0xe8, 0xbd, 0xff, - 0x32, 0xd7, 0xd0, 0xf5, 0x5c, 0x43, 0x37, 0x73, 0x0d, 0xfd, 0x9e, 0x6b, 0xe8, 0xd3, 0x42, 0x2b, - 0xdd, 0x2c, 0xb4, 0xd2, 0x8f, 0x85, 0x56, 0x3a, 0xef, 0xb8, 0x9e, 0x18, 0xc5, 0x8e, 0x31, 0x60, - 0xbe, 0x39, 0x60, 0x3e, 0x15, 0xce, 0x85, 0x28, 0x06, 0xe9, 0x9f, 0x5d, 0xfd, 0xee, 0xce, 0x96, - 0x5c, 0xef, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xe8, 0xce, 0x9a, 0x09, 0x04, 0x00, 0x00, + proto.RegisterFile("cometbft/types/v1beta1/params.proto", fileDescriptor_be5c3dceb37e69cf) +} + +var fileDescriptor_be5c3dceb37e69cf = []byte{ + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0xeb, 0xa5, 0x6c, 0xed, 0xeb, 0xba, 0x4e, 0x16, 0x82, 0x32, 0xa4, 0xb4, 0xca, 0xc4, + 0x98, 0x34, 0x29, 0xd1, 0xe0, 0x80, 0xc4, 0x65, 0xa2, 0x30, 0x6d, 0x68, 0x2a, 0x42, 0x11, 0x70, + 0xd8, 0x25, 0x72, 0x5a, 0x2f, 0x8b, 0xd6, 0xc4, 0x51, 0xed, 0x54, 0xed, 0xb7, 0xe0, 0xc8, 0x71, + 0x47, 0xb8, 0x71, 0xe4, 0x23, 0xec, 0xb8, 0x23, 0x27, 0x40, 0xed, 0x85, 0x8f, 0x31, 0xd9, 0x89, + 0xdb, 0x65, 0x5a, 0x6f, 0x8e, 0xfd, 0x7b, 0x7f, 0xe7, 0xff, 0x7f, 0xcf, 0xb0, 0xdd, 0x63, 0x11, + 0x15, 0xfe, 0x99, 0x70, 0xc4, 0x24, 0xa1, 0xdc, 0x19, 0xed, 0xfb, 0x54, 0x90, 0x7d, 0x27, 0x21, + 0x43, 0x12, 0x71, 0x3b, 0x19, 0x32, 0xc1, 0xf0, 0x23, 0x0d, 0xd9, 0x0a, 0xb2, 0x73, 0x68, 0xeb, + 0x61, 0xc0, 0x02, 0xa6, 
0x10, 0x47, 0xae, 0x32, 0x7a, 0xcb, 0x0c, 0x18, 0x0b, 0x06, 0xd4, 0x51, + 0x5f, 0x7e, 0x7a, 0xe6, 0xf4, 0xd3, 0x21, 0x11, 0x21, 0x8b, 0xb3, 0x73, 0xeb, 0xe7, 0x0a, 0x34, + 0xde, 0xb2, 0x98, 0xd3, 0x98, 0xa7, 0xfc, 0xa3, 0xba, 0x07, 0x1f, 0xc0, 0x03, 0x7f, 0xc0, 0x7a, + 0x17, 0x4d, 0xd4, 0x46, 0xbb, 0xb5, 0x17, 0xdb, 0xf6, 0xfd, 0x37, 0xda, 0x1d, 0x09, 0x65, 0x35, + 0x9d, 0xf2, 0xd5, 0x9f, 0x56, 0xc9, 0xcd, 0xea, 0xf0, 0x31, 0x54, 0xe8, 0x28, 0xec, 0xd3, 0xb8, + 0x47, 0x9b, 0x2b, 0x4a, 0x63, 0x67, 0x99, 0xc6, 0x61, 0xce, 0x15, 0x64, 0xe6, 0xd5, 0xf8, 0x04, + 0xaa, 0x23, 0x32, 0x08, 0xfb, 0x44, 0xb0, 0x61, 0xd3, 0x50, 0x52, 0xcf, 0x97, 0x49, 0x7d, 0xd1, + 0x60, 0x41, 0x6b, 0x51, 0x8f, 0x0f, 0x61, 0x6d, 0x44, 0x87, 0x3c, 0x64, 0x71, 0xb3, 0xac, 0xa4, + 0x9e, 0x2d, 0x95, 0xca, 0xb0, 0x82, 0x90, 0xae, 0xb5, 0x28, 0xd4, 0x6e, 0x39, 0xc7, 0x4f, 0xa1, + 0x1a, 0x91, 0xb1, 0xe7, 0x4f, 0x04, 0xe5, 0x2a, 0x31, 0xc3, 0xad, 0x44, 0x64, 0xdc, 0x91, 0xdf, + 0xf8, 0x31, 0xac, 0xc9, 0xc3, 0x80, 0x70, 0x15, 0x84, 0xe1, 0xae, 0x46, 0x64, 0x7c, 0x44, 0x38, + 0x6e, 0xc3, 0xba, 0x08, 0x23, 0xea, 0x85, 0x4c, 0x10, 0x2f, 0xe2, 0xca, 0x9b, 0xe1, 0x82, 0xdc, + 0x7b, 0xcf, 0x04, 0xe9, 0x72, 0xeb, 0x07, 0x82, 0x8d, 0x62, 0x3a, 0x78, 0x0f, 0xb0, 0x54, 0x23, + 0x01, 0xf5, 0xe2, 0x34, 0xf2, 0x54, 0xd8, 0xfa, 0xce, 0x46, 0x44, 0xc6, 0x6f, 0x02, 0xfa, 0x21, + 0x8d, 0xd4, 0xcf, 0x71, 0xdc, 0x85, 0x4d, 0x0d, 0xeb, 0x9e, 0xe7, 0xcd, 0x78, 0x62, 0x67, 0x43, + 0x61, 0xeb, 0xa1, 0xb0, 0xdf, 0xe5, 0x40, 0xa7, 0x22, 0xad, 0x7e, 0xfb, 0xdb, 0x42, 0xee, 0x46, + 0xa6, 0xa7, 0x4f, 0x8a, 0x36, 0x8d, 0xa2, 0x4d, 0xeb, 0x00, 0x1a, 0x77, 0xd2, 0xc7, 0x16, 0xd4, + 0x93, 0xd4, 0xf7, 0x2e, 0xe8, 0xc4, 0x53, 0xd9, 0x36, 0x51, 0xdb, 0xd8, 0xad, 0xba, 0xb5, 0x24, + 0xf5, 0x4f, 0xe8, 0xe4, 0x93, 0xdc, 0x7a, 0x5d, 0xf9, 0x75, 0xd9, 0x42, 0xff, 0x2f, 0x5b, 0xc8, + 0xda, 0x83, 0x7a, 0x21, 0x73, 0xbc, 0x09, 0x06, 0x49, 0x12, 0xe5, 0xad, 0xec, 0xca, 0xe5, 0x2d, + 0xf8, 0x14, 0xd6, 0x8f, 0x09, 0x3f, 0xa7, 0xfd, 0x9c, 0xdd, 0x81, 0x86, 0x8a, 0xc2, 0xbb, 0xdb, + 0x87, 0xba, 0xda, 0xee, 0xea, 0x66, 0x58, 0x50, 0x5f, 0x70, 0x8b, 0x96, 0xd4, 0x34, 0x75, 0x44, + 0x78, 0xe7, 0xf3, 0xf7, 0xa9, 0x89, 0xae, 0xa6, 0x26, 0xba, 0x9e, 0x9a, 0xe8, 0xdf, 0xd4, 0x44, + 0x5f, 0x67, 0x66, 0xe9, 0x7a, 0x66, 0x96, 0x7e, 0xcf, 0xcc, 0xd2, 0xe9, 0xab, 0x20, 0x14, 0xe7, + 0xa9, 0x2f, 0xc7, 0xc6, 0x99, 0xbf, 0xd5, 0xf9, 0x82, 0x24, 0xa1, 0x73, 0xff, 0x0b, 0xf6, 0x57, + 0x55, 0xd4, 0x2f, 0x6f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x5e, 0xbe, 0x2b, 0xe2, 0x03, 0x00, + 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -506,19 +457,16 @@ func (this *ConsensusParams) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Block.Equal(that1.Block) { - return false - } - if !this.Evidence.Equal(that1.Evidence) { + if !this.Block.Equal(&that1.Block) { return false } - if !this.Validator.Equal(that1.Validator) { + if !this.Evidence.Equal(&that1.Evidence) { return false } - if !this.Version.Equal(that1.Version) { + if !this.Validator.Equal(&that1.Validator) { return false } - if !this.Abci.Equal(that1.Abci) { + if !this.Version.Equal(&that1.Version) { return false } return true @@ -548,6 +496,9 @@ func (this *BlockParams) Equal(that interface{}) bool { if this.MaxGas != that1.MaxGas { return false } + if this.TimeIotaMs != that1.TimeIotaMs { + return false + } return true } func (this *EvidenceParams) Equal(that interface{}) bool { @@ -660,30 +611,6 @@ func (this *HashedParams) Equal(that interface{}) bool { } return true } -func (this 
*ABCIParams) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ABCIParams) - if !ok { - that2, ok := that.(ABCIParams) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.VoteExtensionsEnableHeight != that1.VoteExtensionsEnableHeight { - return false - } - return true -} func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -704,66 +631,46 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Abci != nil { - { - size, err := m.Abci.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Version != nil { - { - size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) } - if m.Validator != nil { - { - size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) } - if m.Evidence != nil { - { - size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) } - if m.Block != nil { - { - size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -787,6 +694,11 @@ func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TimeIotaMs != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.TimeIotaMs)) + i-- + dAtA[i] = 0x18 + } if m.MaxGas != 0 { i = encodeVarintParams(dAtA, i, uint64(m.MaxGas)) i-- @@ -825,12 +737,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - n6, err6 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err6 != nil { - return 0, err6 + n5, err5 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err5 != nil { + return 0, err5 } - i -= n6 - i = encodeVarintParams(dAtA, i, uint64(n6)) + i -= n5 + i = encodeVarintParams(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { @@ -934,34 +846,6 @@ func (m *HashedParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ABCIParams) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ABCIParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ABCIParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.VoteExtensionsEnableHeight != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.VoteExtensionsEnableHeight)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func encodeVarintParams(dAtA []byte, offset int, v uint64) int { offset -= sovParams(v) base := offset @@ -1071,26 +955,14 @@ func (m *ConsensusParams) Size() (n int) { } var l int _ = l - if m.Block != nil { - l = m.Block.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Evidence != nil { - l = m.Evidence.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Validator != nil { - l = m.Validator.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Version != nil { - l = m.Version.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Abci != nil { - l = m.Abci.Size() - n += 1 + l + sovParams(uint64(l)) - } + l = m.Block.Size() + n += 1 + l + sovParams(uint64(l)) + l = m.Evidence.Size() + n += 1 + l + sovParams(uint64(l)) + l = m.Validator.Size() + n += 1 + l + sovParams(uint64(l)) + l = m.Version.Size() + n += 1 + l + sovParams(uint64(l)) return n } @@ -1106,6 +978,9 @@ func (m *BlockParams) Size() (n int) { if m.MaxGas != 0 { n += 1 + sovParams(uint64(m.MaxGas)) } + if m.TimeIotaMs != 0 { + n += 1 + sovParams(uint64(m.TimeIotaMs)) + } return n } @@ -1168,18 +1043,6 @@ func (m *HashedParams) Size() (n int) { return n } -func (m *ABCIParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.VoteExtensionsEnableHeight != 0 { - n += 1 + sovParams(uint64(m.VoteExtensionsEnableHeight)) - } - return n -} - func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1244,9 +1107,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Block == nil { - m.Block = &BlockParams{} - } if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1280,9 +1140,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Evidence == nil { - m.Evidence = &EvidenceParams{} - } if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1316,9 +1173,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Validator == nil { - m.Validator = &ValidatorParams{} - } if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1352,49 +1206,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Version == nil { - m.Version = &VersionParams{} - } if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Abci", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Abci == nil { - m.Abci = &ABCIParams{} - } - if err := m.Abci.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) @@ -1483,6 +1298,25 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeIotaMs", wireType) + } + m.TimeIotaMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeIotaMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) @@ -1864,75 +1698,6 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *ABCIParams) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ABCIParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ABCIParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteExtensionsEnableHeight", wireType) - } - m.VoteExtensionsEnableHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VoteExtensionsEnableHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/api/cometbft/types/v1beta1/types.pb.go b/api/cometbft/types/v1beta1/types.pb.go new file mode 100644 index 00000000000..407738b779d --- /dev/null +++ b/api/cometbft/types/v1beta1/types.pb.go @@ -0,0 +1,4543 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1beta1/types.proto + +package v1beta1 + +import ( + fmt "fmt" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + v11 "github.com/cometbft/cometbft/api/cometbft/version/v1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SignedMsgType is a type of signed message in the consensus. +type SignedMsgType int32 + +const ( + // Unknown + UnknownType SignedMsgType = 0 + // Prevote + PrevoteType SignedMsgType = 1 + // Precommit + PrecommitType SignedMsgType = 2 + // Proposal + ProposalType SignedMsgType = 32 +) + +var SignedMsgType_name = map[int32]string{ + 0: "SIGNED_MSG_TYPE_UNKNOWN", + 1: "SIGNED_MSG_TYPE_PREVOTE", + 2: "SIGNED_MSG_TYPE_PRECOMMIT", + 32: "SIGNED_MSG_TYPE_PROPOSAL", +} + +var SignedMsgType_value = map[string]int32{ + "SIGNED_MSG_TYPE_UNKNOWN": 0, + "SIGNED_MSG_TYPE_PREVOTE": 1, + "SIGNED_MSG_TYPE_PRECOMMIT": 2, + "SIGNED_MSG_TYPE_PROPOSAL": 32, +} + +func (x SignedMsgType) String() string { + return proto.EnumName(SignedMsgType_name, int32(x)) +} + +func (SignedMsgType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{0} +} + +// Header of the parts set for a block. +type PartSetHeader struct { + Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{0} +} +func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(m, src) +} +func (m *PartSetHeader) XXX_Size() int { + return m.Size() +} +func (m *PartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PartSetHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo + +func (m *PartSetHeader) GetTotal() uint32 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *PartSetHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// Part of the block. 
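// A small usage sketch for the SignedMsgType enum above. String() resolves
// through the SignedMsgType_name map, so the Go constant for a prevote (1)
// prints as its proto identifier. Reduced standalone copy for illustration:
package main

import "fmt"

type SignedMsgType int32

var signedMsgTypeName = map[int32]string{
	0:  "SIGNED_MSG_TYPE_UNKNOWN",
	1:  "SIGNED_MSG_TYPE_PREVOTE",
	2:  "SIGNED_MSG_TYPE_PRECOMMIT",
	32: "SIGNED_MSG_TYPE_PROPOSAL",
}

func (x SignedMsgType) String() string { return signedMsgTypeName[int32(x)] }

func main() {
	fmt.Println(SignedMsgType(1))  // SIGNED_MSG_TYPE_PREVOTE
	fmt.Println(SignedMsgType(32)) // SIGNED_MSG_TYPE_PROPOSAL
}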
+type Part struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"` + Proof v1.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof"` +} + +func (m *Part) Reset() { *m = Part{} } +func (m *Part) String() string { return proto.CompactTextString(m) } +func (*Part) ProtoMessage() {} +func (*Part) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{1} +} +func (m *Part) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Part) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Part.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Part) XXX_Merge(src proto.Message) { + xxx_messageInfo_Part.Merge(m, src) +} +func (m *Part) XXX_Size() int { + return m.Size() +} +func (m *Part) XXX_DiscardUnknown() { + xxx_messageInfo_Part.DiscardUnknown(m) +} + +var xxx_messageInfo_Part proto.InternalMessageInfo + +func (m *Part) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Part) GetBytes() []byte { + if m != nil { + return m.Bytes + } + return nil +} + +func (m *Part) GetProof() v1.Proof { + if m != nil { + return m.Proof + } + return v1.Proof{} +} + +// BlockID defines the unique ID of a block as its hash and its `PartSetHeader`. +type BlockID struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + PartSetHeader PartSetHeader `protobuf:"bytes,2,opt,name=part_set_header,json=partSetHeader,proto3" json:"part_set_header"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{2} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(m, src) +} +func (m *BlockID) XXX_Size() int { + return m.Size() +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockID proto.InternalMessageInfo + +func (m *BlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *BlockID) GetPartSetHeader() PartSetHeader { + if m != nil { + return m.PartSetHeader + } + return PartSetHeader{} +} + +// Header defines the structure of a block header. 
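// An illustrative sketch of how the Part and BlockID types above fit
// together. A block is gossiped as Parts; PartSetHeader carries the part
// count and the Merkle root over the parts, and each Part's Proof ties its
// Bytes to that root. The field values below are made up.
package main

import "fmt"

type partSetHeader struct {
	Total uint32 // number of parts the block was split into
	Hash  []byte // Merkle root over the part hashes
}

type blockID struct {
	Hash          []byte // hash of the block header
	PartSetHeader partSetHeader
}

func main() {
	id := blockID{
		Hash:          []byte{0xde, 0xad},
		PartSetHeader: partSetHeader{Total: 4, Hash: []byte{0xbe, 0xef}},
	}
	fmt.Printf("header %x split into %d parts under root %x\n",
		id.Hash, id.PartSetHeader.Total, id.PartSetHeader.Hash)
}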
+type Header struct { + // basic block info + Version v11.Consensus `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + // prev block info + LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + // hashes of block data + LastCommitHash []byte `protobuf:"bytes,6,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` + DataHash []byte `protobuf:"bytes,7,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // hashes from the app output from the prev block + ValidatorsHash []byte `protobuf:"bytes,8,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,9,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,10,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` + AppHash []byte `protobuf:"bytes,11,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // consensus info + EvidenceHash []byte `protobuf:"bytes,13,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,14,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{3} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetVersion() v11.Consensus { + if m != nil { + return m.Version + } + return v11.Consensus{} +} + +func (m *Header) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *Header) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Header) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *Header) GetLastBlockId() BlockID { + if m != nil { + return m.LastBlockId + } + return BlockID{} +} + +func (m *Header) GetLastCommitHash() []byte { + if m != nil { + return m.LastCommitHash + } + return nil +} + +func (m *Header) GetDataHash() []byte { + if m != nil { + return m.DataHash + } + return nil +} + +func (m *Header) GetValidatorsHash() []byte { + if m != nil { + return m.ValidatorsHash + } + return nil +} + +func (m *Header) 
GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *Header) GetConsensusHash() []byte { + if m != nil { + return m.ConsensusHash + } + return nil +} + +func (m *Header) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func (m *Header) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *Header) GetEvidenceHash() []byte { + if m != nil { + return m.EvidenceHash + } + return nil +} + +func (m *Header) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// Data contains the set of transactions included in the block +type Data struct { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *Data) Reset() { *m = Data{} } +func (m *Data) String() string { return proto.CompactTextString(m) } +func (*Data) ProtoMessage() {} +func (*Data) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{4} +} +func (m *Data) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Data.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Data) XXX_Merge(src proto.Message) { + xxx_messageInfo_Data.Merge(m, src) +} +func (m *Data) XXX_Size() int { + return m.Size() +} +func (m *Data) XXX_DiscardUnknown() { + xxx_messageInfo_Data.DiscardUnknown(m) +} + +var xxx_messageInfo_Data proto.InternalMessageInfo + +func (m *Data) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +// Vote represents a prevote or precommit vote from validators for +// consensus. 
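// A toy sketch of the one-block lag called out in the Data.Txs comment
// above: block H's txs are executed only after H is agreed on, so the
// resulting app hash can first appear in the header of block H+1; this is
// why block.AppHash does not cover block H's own txs. The app type and
// apply method here are hypothetical, not the ABCI interface.
package main

import "fmt"

type app struct{ state int }

func (a *app) apply(txs [][]byte) []byte { // toy execution
	a.state += len(txs)
	return []byte{byte(a.state)}
}

func main() {
	a := &app{}
	blockHTxs := [][]byte{[]byte("tx1"), []byte("tx2")}
	appHash := a.apply(blockHTxs)                      // result of block H's txs...
	fmt.Printf("AppHash in header H+1: %x\n", appHash) // ...recorded one block later
}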
+type Vote struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ValidatorAddress []byte `protobuf:"bytes,6,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` + Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *Vote) Reset() { *m = Vote{} } +func (m *Vote) String() string { return proto.CompactTextString(m) } +func (*Vote) ProtoMessage() {} +func (*Vote) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{5} +} +func (m *Vote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Vote) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vote.Merge(m, src) +} +func (m *Vote) XXX_Size() int { + return m.Size() +} +func (m *Vote) XXX_DiscardUnknown() { + xxx_messageInfo_Vote.DiscardUnknown(m) +} + +var xxx_messageInfo_Vote proto.InternalMessageInfo + +func (m *Vote) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *Vote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Vote) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Vote) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *Vote) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *Vote) GetValidatorAddress() []byte { + if m != nil { + return m.ValidatorAddress + } + return nil +} + +func (m *Vote) GetValidatorIndex() int32 { + if m != nil { + return m.ValidatorIndex + } + return 0 +} + +func (m *Vote) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// Commit contains the evidence that a block was committed by a set of validators. 
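// A sketch of how per-validator Votes condense into the Commit/CommitSig
// pair defined next. A Commit stores Height/Round/BlockID once, plus one
// CommitSig per validator in validator-set order. Each CommitSig's
// BlockIdFlag records whether that validator signed the block, signed nil,
// or was absent; the flag constants below mirror the usual BlockIDFlag
// values, which are defined elsewhere in this package.
package main

import "fmt"

type blockIDFlag int32

const (
	flagAbsent blockIDFlag = 1 // validator did not vote
	flagCommit blockIDFlag = 2 // validator signed this BlockID
	flagNil    blockIDFlag = 3 // validator signed nil
)

func main() {
	sigs := []blockIDFlag{flagCommit, flagNil, flagCommit, flagAbsent}
	committed := 0
	for _, f := range sigs {
		if f == flagCommit {
			committed++
		}
	}
	fmt.Printf("%d of %d validators signed the block\n", committed, len(sigs))
}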
+type Commit struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Signatures []CommitSig `protobuf:"bytes,4,rep,name=signatures,proto3" json:"signatures"` +} + +func (m *Commit) Reset() { *m = Commit{} } +func (m *Commit) String() string { return proto.CompactTextString(m) } +func (*Commit) ProtoMessage() {} +func (*Commit) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{6} +} +func (m *Commit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Commit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Commit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Commit) XXX_Merge(src proto.Message) { + xxx_messageInfo_Commit.Merge(m, src) +} +func (m *Commit) XXX_Size() int { + return m.Size() +} +func (m *Commit) XXX_DiscardUnknown() { + xxx_messageInfo_Commit.DiscardUnknown(m) +} + +var xxx_messageInfo_Commit proto.InternalMessageInfo + +func (m *Commit) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Commit) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Commit) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *Commit) GetSignatures() []CommitSig { + if m != nil { + return m.Signatures + } + return nil +} + +// CommitSig is a part of the Vote included in a Commit. +type CommitSig struct { + BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=cometbft.types.v1beta1.BlockIDFlag" json:"block_id_flag,omitempty"` + ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *CommitSig) Reset() { *m = CommitSig{} } +func (m *CommitSig) String() string { return proto.CompactTextString(m) } +func (*CommitSig) ProtoMessage() {} +func (*CommitSig) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{7} +} +func (m *CommitSig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitSig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitSig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitSig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitSig.Merge(m, src) +} +func (m *CommitSig) XXX_Size() int { + return m.Size() +} +func (m *CommitSig) XXX_DiscardUnknown() { + xxx_messageInfo_CommitSig.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitSig proto.InternalMessageInfo + +func (m *CommitSig) GetBlockIdFlag() BlockIDFlag { + if m != nil { + return m.BlockIdFlag + } + return BlockIDFlagUnknown +} + +func (m *CommitSig) GetValidatorAddress() []byte { + if m != nil { + return m.ValidatorAddress + } + return nil +} + +func (m *CommitSig) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + 
return time.Time{} +} + +func (m *CommitSig) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// Block proposal. +type Proposal struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=cometbft.types.v1beta1.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + PolRound int32 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` + BlockID BlockID `protobuf:"bytes,5,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{8} +} +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return m.Size() +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *Proposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Proposal) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Proposal) GetPolRound() int32 { + if m != nil { + return m.PolRound + } + return 0 +} + +func (m *Proposal) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *Proposal) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *Proposal) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// SignedHeader contains a Header(H) and Commit(H+1) with signatures of validators who signed it. 
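// A brief sketch for the Proposal type above. PolRound is the
// proof-of-lock round: -1 when the proposer offers a fresh block, and
// >= 0 when it re-proposes a block that already gathered a polka (a 2/3
// prevote majority) in an earlier round. Illustration only:
package main

import "fmt"

type proposal struct {
	Height   int64
	Round    int32
	PolRound int32
}

func describe(p proposal) string {
	if p.PolRound < 0 {
		return "fresh proposal"
	}
	return fmt.Sprintf("re-proposal, polka from round %d", p.PolRound)
}

func main() {
	fmt.Println(describe(proposal{Height: 10, Round: 0, PolRound: -1}))
	fmt.Println(describe(proposal{Height: 10, Round: 2, PolRound: 1}))
}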
+type SignedHeader struct { + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Commit *Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` +} + +func (m *SignedHeader) Reset() { *m = SignedHeader{} } +func (m *SignedHeader) String() string { return proto.CompactTextString(m) } +func (*SignedHeader) ProtoMessage() {} +func (*SignedHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{9} +} +func (m *SignedHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedHeader.Merge(m, src) +} +func (m *SignedHeader) XXX_Size() int { + return m.Size() +} +func (m *SignedHeader) XXX_DiscardUnknown() { + xxx_messageInfo_SignedHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedHeader proto.InternalMessageInfo + +func (m *SignedHeader) GetHeader() *Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *SignedHeader) GetCommit() *Commit { + if m != nil { + return m.Commit + } + return nil +} + +// LightBlock is a combination of SignedHeader and ValidatorSet. It is used by light clients. +type LightBlock struct { + SignedHeader *SignedHeader `protobuf:"bytes,1,opt,name=signed_header,json=signedHeader,proto3" json:"signed_header,omitempty"` + ValidatorSet *ValidatorSet `protobuf:"bytes,2,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` +} + +func (m *LightBlock) Reset() { *m = LightBlock{} } +func (m *LightBlock) String() string { return proto.CompactTextString(m) } +func (*LightBlock) ProtoMessage() {} +func (*LightBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{10} +} +func (m *LightBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_LightBlock.Merge(m, src) +} +func (m *LightBlock) XXX_Size() int { + return m.Size() +} +func (m *LightBlock) XXX_DiscardUnknown() { + xxx_messageInfo_LightBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_LightBlock proto.InternalMessageInfo + +func (m *LightBlock) GetSignedHeader() *SignedHeader { + if m != nil { + return m.SignedHeader + } + return nil +} + +func (m *LightBlock) GetValidatorSet() *ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +// BlockMeta contains meta information about a block. 
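// A sketch of why LightBlock above pairs a SignedHeader with a
// ValidatorSet: a light client accepts a header only if the commit's
// signatures carry enough voting power from that validator set. The types
// and the 2/3 threshold check below are simplified stand-ins, not the real
// cometbft light-client API.
package main

import "fmt"

type lightBlock struct {
	signedPower int64 // voting power behind the commit's valid signatures
	totalPower  int64 // total voting power of the validator set
}

// committed reports whether more than 2/3 of the total power signed,
// the quorum consensus requires for a block to be committed.
func committed(lb lightBlock) bool {
	return 3*lb.signedPower > 2*lb.totalPower
}

func main() {
	fmt.Println(committed(lightBlock{signedPower: 70, totalPower: 100})) // true
	fmt.Println(committed(lightBlock{signedPower: 60, totalPower: 100})) // false
}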
+type BlockMeta struct { + BlockID BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id"` + BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` + Header Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header"` + NumTxs int64 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` +} + +func (m *BlockMeta) Reset() { *m = BlockMeta{} } +func (m *BlockMeta) String() string { return proto.CompactTextString(m) } +func (*BlockMeta) ProtoMessage() {} +func (*BlockMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{11} +} +func (m *BlockMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMeta.Merge(m, src) +} +func (m *BlockMeta) XXX_Size() int { + return m.Size() +} +func (m *BlockMeta) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMeta proto.InternalMessageInfo + +func (m *BlockMeta) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *BlockMeta) GetBlockSize() int64 { + if m != nil { + return m.BlockSize + } + return 0 +} + +func (m *BlockMeta) GetHeader() Header { + if m != nil { + return m.Header + } + return Header{} +} + +func (m *BlockMeta) GetNumTxs() int64 { + if m != nil { + return m.NumTxs + } + return 0 +} + +// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. 
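// A usage sketch for the TxProof message defined next. RootHash is the
// Merkle root the proof opens against and should match the DataHash
// committed in the block header; Data holds the raw tx bytes, and Proof
// carries the Merkle path. The path walk itself is elided; this only shows
// the root consistency check a verifier performs first.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	headerDataHash := []byte{0xaa, 0xbb} // from the block header
	proofRootHash := []byte{0xaa, 0xbb}  // TxProof.RootHash
	// Obligation 1: the proof must be anchored at the header's DataHash.
	fmt.Println(bytes.Equal(proofRootHash, headerDataHash)) // true
	// Obligation 2 (elided): Proof must open Data to proofRootHash.
}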
+type TxProof struct { + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Proof *v1.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *TxProof) Reset() { *m = TxProof{} } +func (m *TxProof) String() string { return proto.CompactTextString(m) } +func (*TxProof) ProtoMessage() {} +func (*TxProof) Descriptor() ([]byte, []int) { + return fileDescriptor_af6831286a0350e0, []int{12} +} +func (m *TxProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxProof.Merge(m, src) +} +func (m *TxProof) XXX_Size() int { + return m.Size() +} +func (m *TxProof) XXX_DiscardUnknown() { + xxx_messageInfo_TxProof.DiscardUnknown(m) +} + +var xxx_messageInfo_TxProof proto.InternalMessageInfo + +func (m *TxProof) GetRootHash() []byte { + if m != nil { + return m.RootHash + } + return nil +} + +func (m *TxProof) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *TxProof) GetProof() *v1.Proof { + if m != nil { + return m.Proof + } + return nil +} + +func init() { + proto.RegisterEnum("cometbft.types.v1beta1.SignedMsgType", SignedMsgType_name, SignedMsgType_value) + proto.RegisterType((*PartSetHeader)(nil), "cometbft.types.v1beta1.PartSetHeader") + proto.RegisterType((*Part)(nil), "cometbft.types.v1beta1.Part") + proto.RegisterType((*BlockID)(nil), "cometbft.types.v1beta1.BlockID") + proto.RegisterType((*Header)(nil), "cometbft.types.v1beta1.Header") + proto.RegisterType((*Data)(nil), "cometbft.types.v1beta1.Data") + proto.RegisterType((*Vote)(nil), "cometbft.types.v1beta1.Vote") + proto.RegisterType((*Commit)(nil), "cometbft.types.v1beta1.Commit") + proto.RegisterType((*CommitSig)(nil), "cometbft.types.v1beta1.CommitSig") + proto.RegisterType((*Proposal)(nil), "cometbft.types.v1beta1.Proposal") + proto.RegisterType((*SignedHeader)(nil), "cometbft.types.v1beta1.SignedHeader") + proto.RegisterType((*LightBlock)(nil), "cometbft.types.v1beta1.LightBlock") + proto.RegisterType((*BlockMeta)(nil), "cometbft.types.v1beta1.BlockMeta") + proto.RegisterType((*TxProof)(nil), "cometbft.types.v1beta1.TxProof") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta1/types.proto", fileDescriptor_af6831286a0350e0) +} + +var fileDescriptor_af6831286a0350e0 = []byte{ + // 1243 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x9b, 0xd8, 0x7e, 0xb6, 0x13, 0x77, 0x15, 0xb5, 0xae, 0x0b, 0x8e, 0x71, 0x69, + 0x09, 0x05, 0xd9, 0x4d, 0x10, 0x7f, 0x2a, 0x21, 0xa4, 0xe6, 0x0f, 0xa9, 0xd5, 0x26, 0x31, 0x6b, + 0xb7, 0x08, 0x2e, 0xab, 0xb1, 0x3d, 0xb1, 0x57, 0xb5, 0x77, 0x56, 0x3b, 0x63, 0x93, 0xf4, 0xc0, + 0x19, 0xf5, 0xd4, 0x2f, 0xd0, 0x0b, 0x70, 0xe0, 0x5b, 0x70, 0xed, 0x01, 0xa1, 0xde, 0xe0, 0x54, + 0x50, 0x72, 0xe6, 0xc6, 0x07, 0x40, 0xf3, 0x66, 0x76, 0x6d, 0xb7, 0x71, 0x89, 0x68, 0xc5, 0x6d, + 0xe7, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6, 0xf7, 0x7e, 0xb3, 0x6f, 0xa0, 0xdc, 0x66, 0x03, 0x2a, 0x5a, + 0x07, 0xa2, 0x2a, 0x8e, 0x7c, 0xca, 0xab, 0xa3, 0xb5, 0x16, 0x15, 0x64, 
0x4d, 0xad, 0x2a, 0x7e, + 0xc0, 0x04, 0xb3, 0xce, 0x87, 0x98, 0x8a, 0xb2, 0x6a, 0x4c, 0x61, 0xb9, 0xcb, 0xba, 0x0c, 0x21, + 0x55, 0xf9, 0xa5, 0xd0, 0x85, 0x95, 0x2e, 0x63, 0xdd, 0x3e, 0xad, 0xe2, 0xaa, 0x35, 0x3c, 0xa8, + 0x0a, 0x77, 0x40, 0xb9, 0x20, 0x03, 0x5f, 0x03, 0x8a, 0xd1, 0x96, 0xed, 0xe0, 0xc8, 0x17, 0xac, + 0x3a, 0x5a, 0x93, 0x60, 0x76, 0x10, 0x26, 0x88, 0xfc, 0x23, 0x1a, 0x70, 0x97, 0x79, 0x12, 0x30, + 0x51, 0x4f, 0xe1, 0xea, 0x8c, 0x9a, 0x47, 0xa4, 0xef, 0x76, 0x88, 0x60, 0x81, 0xc2, 0x95, 0x6f, + 0x40, 0xb6, 0x4e, 0x02, 0xd1, 0xa0, 0xe2, 0x16, 0x25, 0x1d, 0x1a, 0x58, 0xcb, 0x30, 0x2f, 0x98, + 0x20, 0xfd, 0xbc, 0x51, 0x32, 0x56, 0xb3, 0xb6, 0x5a, 0x58, 0x16, 0x98, 0x3d, 0xc2, 0x7b, 0xf9, + 0x58, 0xc9, 0x58, 0xcd, 0xd8, 0xf8, 0x5d, 0x76, 0xc1, 0x94, 0xa1, 0x32, 0xc2, 0xf5, 0x3a, 0xf4, + 0x30, 0x8c, 0xc0, 0x85, 0xb4, 0xb6, 0x8e, 0x04, 0xe5, 0x3a, 0x44, 0x2d, 0xac, 0x0f, 0x61, 0x1e, + 0x8f, 0x91, 0x8f, 0x97, 0x8c, 0xd5, 0xf4, 0xfa, 0xc5, 0x4a, 0x44, 0x9b, 0x3a, 0x67, 0x65, 0xb4, + 0x56, 0xa9, 0x4b, 0xc0, 0x86, 0xf9, 0xe4, 0xd9, 0xca, 0x9c, 0xad, 0xd0, 0xe5, 0x00, 0x12, 0x1b, + 0x7d, 0xd6, 0xbe, 0x5f, 0xdb, 0x8a, 0x2a, 0x31, 0xc6, 0x95, 0x58, 0x0d, 0x58, 0xf2, 0x49, 0x20, + 0x1c, 0x4e, 0x85, 0xd3, 0xc3, 0x63, 0xe0, 0xae, 0xe9, 0xf5, 0x2b, 0x95, 0xd3, 0xdb, 0x52, 0x99, + 0x3a, 0xb3, 0xde, 0x2b, 0xeb, 0x4f, 0x1a, 0xcb, 0x7f, 0x99, 0xb0, 0xa0, 0x39, 0xf9, 0x0c, 0x12, + 0x9a, 0x66, 0xdc, 0x36, 0xbd, 0x5e, 0x1c, 0xe7, 0xd5, 0x0e, 0x59, 0xf8, 0x26, 0xf3, 0x38, 0xf5, + 0xf8, 0x90, 0xeb, 0x84, 0x61, 0x90, 0x75, 0x15, 0x92, 0xed, 0x1e, 0x71, 0x3d, 0xc7, 0xed, 0x60, + 0x61, 0xa9, 0x8d, 0xf4, 0xf1, 0xb3, 0x95, 0xc4, 0xa6, 0xb4, 0xd5, 0xb6, 0xec, 0x04, 0x3a, 0x6b, + 0x1d, 0xeb, 0x3c, 0x2c, 0xf4, 0xa8, 0xdb, 0xed, 0x09, 0xa4, 0x27, 0x6e, 0xeb, 0x95, 0xf5, 0x09, + 0x98, 0x52, 0x20, 0x79, 0x13, 0x37, 0x2f, 0x54, 0x94, 0x7a, 0x2a, 0xa1, 0x7a, 0x2a, 0xcd, 0x50, + 0x3d, 0x1b, 0x49, 0xb9, 0xf1, 0xa3, 0x3f, 0x56, 0x0c, 0x1b, 0x23, 0xac, 0x1a, 0x64, 0xfb, 0x84, + 0x0b, 0xa7, 0x25, 0xd9, 0x93, 0xdb, 0xcf, 0x63, 0x8a, 0x95, 0x59, 0xbc, 0x68, 0x96, 0xf5, 0x01, + 0xd2, 0x32, 0x56, 0x99, 0x3a, 0xd6, 0x2a, 0xe4, 0x30, 0x55, 0x9b, 0x0d, 0x06, 0xae, 0x70, 0xb0, + 0x09, 0x0b, 0xd8, 0x84, 0x45, 0x69, 0xdf, 0x44, 0xf3, 0x2d, 0xd9, 0x8e, 0x4b, 0x90, 0xea, 0x10, + 0x41, 0x14, 0x24, 0x81, 0x90, 0xa4, 0x34, 0xa0, 0xf3, 0x1d, 0x58, 0x8a, 0x34, 0xc8, 0x15, 0x24, + 0xa9, 0xb2, 0x8c, 0xcd, 0x08, 0xbc, 0x0e, 0xcb, 0x1e, 0x3d, 0x14, 0xce, 0xf3, 0xe8, 0x14, 0xa2, + 0x2d, 0xe9, 0xbb, 0x37, 0x1d, 0x71, 0x05, 0x16, 0xdb, 0x61, 0x0b, 0x14, 0x16, 0x10, 0x9b, 0x8d, + 0xac, 0x08, 0xbb, 0x08, 0x49, 0xe2, 0xfb, 0x0a, 0x90, 0x46, 0x40, 0x82, 0xf8, 0x3e, 0xba, 0xae, + 0xc1, 0x39, 0x3c, 0x63, 0x40, 0xf9, 0xb0, 0x2f, 0x74, 0x92, 0x0c, 0x62, 0x96, 0xa4, 0xc3, 0x56, + 0x76, 0xc4, 0x5e, 0x86, 0x2c, 0x1d, 0xb9, 0x1d, 0xea, 0xb5, 0xa9, 0xc2, 0x65, 0x11, 0x97, 0x09, + 0x8d, 0x08, 0x7a, 0x17, 0x72, 0x7e, 0xc0, 0x7c, 0xc6, 0x69, 0xe0, 0x90, 0x4e, 0x27, 0xa0, 0x9c, + 0xe7, 0x17, 0x55, 0xbe, 0xd0, 0x7e, 0x53, 0x99, 0xcb, 0x79, 0x30, 0xb7, 0x88, 0x20, 0x56, 0x0e, + 0xe2, 0xe2, 0x90, 0xe7, 0x8d, 0x52, 0x7c, 0x35, 0x63, 0xcb, 0xcf, 0xf2, 0xdf, 0x31, 0x30, 0xef, + 0x31, 0x41, 0xad, 0x1b, 0x60, 0xca, 0x76, 0xa1, 0x08, 0x17, 0x67, 0x8b, 0xbb, 0xe1, 0x76, 0x3d, + 0xda, 0xd9, 0xe5, 0xdd, 0xe6, 0x91, 0x4f, 0x6d, 0x0c, 0x99, 0x90, 0x56, 0x6c, 0x4a, 0x5a, 0xcb, + 0x30, 0x1f, 0xb0, 0xa1, 0xd7, 0x41, 0xc5, 0xcd, 0xdb, 0x6a, 0x61, 0xdd, 0x86, 0x64, 0xa4, 0x18, + 0xf3, 0x6c, 0x8a, 0x59, 0x92, 0x8a, 0x91, 0xaa, 0xd6, 0x06, 0x3b, 0xd1, 0xd2, 0xc2, 0xd9, 0x80, + 
0x54, 0xf4, 0x7b, 0xd3, 0xfa, 0x3b, 0x9b, 0x84, 0xc7, 0x61, 0xd6, 0x7b, 0x70, 0x2e, 0xd2, 0x41, + 0x44, 0xa4, 0x52, 0x5f, 0x2e, 0x72, 0x68, 0x26, 0xa7, 0x24, 0xe6, 0xa8, 0x5f, 0x53, 0x02, 0x4f, + 0x37, 0x96, 0x58, 0x0d, 0xff, 0x51, 0x6f, 0x40, 0x8a, 0xbb, 0x5d, 0x8f, 0x88, 0x61, 0x40, 0xb5, + 0x0a, 0xc7, 0x86, 0xf2, 0x2f, 0x06, 0x2c, 0x28, 0x55, 0x4f, 0xb0, 0x67, 0x9c, 0xce, 0x5e, 0x6c, + 0x16, 0x7b, 0xf1, 0x57, 0x65, 0x6f, 0x07, 0x20, 0x2a, 0x89, 0xe7, 0xcd, 0x52, 0x7c, 0x35, 0xbd, + 0xfe, 0xd6, 0xac, 0x74, 0xaa, 0xdc, 0x86, 0xdb, 0xd5, 0x17, 0x78, 0x22, 0xb4, 0x7c, 0x62, 0x40, + 0x2a, 0xf2, 0x5b, 0x3b, 0x90, 0x0d, 0x6b, 0x74, 0x0e, 0xfa, 0xa4, 0xab, 0x35, 0x75, 0xf9, 0x5f, + 0x0a, 0xfd, 0xbc, 0x4f, 0xba, 0x76, 0x5a, 0xd7, 0x26, 0x17, 0xa7, 0x77, 0x26, 0x36, 0xa3, 0x33, + 0x53, 0x52, 0x88, 0xff, 0x37, 0x29, 0x4c, 0x35, 0xcd, 0x7c, 0xbe, 0x69, 0x3f, 0xc7, 0x20, 0x59, + 0xc7, 0x9b, 0x45, 0xfa, 0xff, 0xdf, 0x7d, 0xb9, 0x04, 0x29, 0x9f, 0xf5, 0x1d, 0xe5, 0x31, 0xd1, + 0x93, 0xf4, 0x59, 0xdf, 0x7e, 0x41, 0x0e, 0xf3, 0xaf, 0xf5, 0x32, 0x2d, 0xbc, 0x06, 0x06, 0x13, + 0xcf, 0x33, 0xf8, 0x2d, 0x64, 0x14, 0x21, 0x7a, 0xf8, 0x7d, 0x24, 0x99, 0xc0, 0x99, 0xfa, 0xc2, + 0xec, 0x9b, 0x2e, 0x5e, 0xe1, 0x6d, 0x8d, 0x96, 0x71, 0x6a, 0x54, 0xe8, 0x59, 0x5c, 0x7c, 0xb9, + 0x68, 0x6d, 0x8d, 0x2e, 0x7f, 0x6f, 0x00, 0xdc, 0x91, 0x5c, 0xe3, 0xd9, 0xe5, 0x04, 0xe3, 0x58, + 0x8e, 0x33, 0x55, 0xc5, 0xdb, 0x2f, 0x6f, 0xa6, 0xae, 0x25, 0xc3, 0x27, 0x4f, 0x52, 0x83, 0xec, + 0x58, 0xaa, 0x9c, 0x86, 0x85, 0xcd, 0x4c, 0x15, 0x8d, 0x97, 0x06, 0x15, 0x76, 0x66, 0x34, 0xb1, + 0x2a, 0xff, 0x6a, 0x40, 0x0a, 0xeb, 0xdb, 0xa5, 0x82, 0x4c, 0x75, 0xd8, 0x78, 0xd5, 0x0e, 0xbf, + 0x09, 0xa0, 0x92, 0x71, 0xf7, 0x01, 0xd5, 0xea, 0x4b, 0xa1, 0xa5, 0xe1, 0x3e, 0xa0, 0xd6, 0xa7, + 0x51, 0x3b, 0xe2, 0x67, 0x69, 0x87, 0xfe, 0x11, 0x84, 0x4d, 0xb9, 0x00, 0x09, 0x6f, 0x38, 0x70, + 0xe4, 0x80, 0x31, 0x95, 0xae, 0xbd, 0xe1, 0xa0, 0x79, 0xc8, 0xcb, 0xf7, 0x21, 0xd1, 0x3c, 0xc4, + 0x97, 0x97, 0x14, 0x73, 0xc0, 0x98, 0x9e, 0xf0, 0xea, 0x99, 0x95, 0x94, 0x06, 0x1c, 0x68, 0x16, + 0x98, 0x72, 0x94, 0x87, 0x0f, 0x41, 0xf9, 0x6d, 0x55, 0xcf, 0xfa, 0xa8, 0xd3, 0xcf, 0xb9, 0x6b, + 0xbf, 0x19, 0x90, 0x9d, 0xba, 0x74, 0xd6, 0xfb, 0x70, 0xa1, 0x51, 0xdb, 0xd9, 0xdb, 0xde, 0x72, + 0x76, 0x1b, 0x3b, 0x4e, 0xf3, 0xab, 0xfa, 0xb6, 0x73, 0x77, 0xef, 0xf6, 0xde, 0xfe, 0x97, 0x7b, + 0xb9, 0xb9, 0xc2, 0xd2, 0xc3, 0xc7, 0xa5, 0xf4, 0x5d, 0xef, 0xbe, 0xc7, 0xbe, 0xf1, 0x66, 0xa1, + 0xeb, 0xf6, 0xf6, 0xbd, 0xfd, 0xe6, 0x76, 0xce, 0x50, 0xe8, 0x7a, 0x40, 0x47, 0x4c, 0x50, 0x44, + 0x5f, 0x87, 0x8b, 0xa7, 0xa0, 0x37, 0xf7, 0x77, 0x77, 0x6b, 0xcd, 0x5c, 0xac, 0x70, 0xee, 0xe1, + 0xe3, 0x52, 0xb6, 0x1e, 0x50, 0x25, 0x3f, 0x8c, 0xa8, 0x40, 0xfe, 0xc5, 0x88, 0xfd, 0xfa, 0x7e, + 0xe3, 0xe6, 0x9d, 0x5c, 0xa9, 0x90, 0x7b, 0xf8, 0xb8, 0x94, 0x09, 0xff, 0x31, 0x12, 0x5f, 0x48, + 0x7e, 0xf7, 0x43, 0x71, 0xee, 0xa7, 0x1f, 0x8b, 0xc6, 0xc6, 0x17, 0x4f, 0x8e, 0x8b, 0xc6, 0xd3, + 0xe3, 0xa2, 0xf1, 0xe7, 0x71, 0xd1, 0x78, 0x74, 0x52, 0x9c, 0x7b, 0x7a, 0x52, 0x9c, 0xfb, 0xfd, + 0xa4, 0x38, 0xf7, 0xf5, 0xc7, 0x5d, 0x57, 0xf4, 0x86, 0x2d, 0xc9, 0x4d, 0x75, 0xfc, 0xb8, 0x0f, + 0x3f, 0x88, 0xef, 0x56, 0x4f, 0x7f, 0xb1, 0xb7, 0x16, 0xf0, 0x5a, 0x7f, 0xf0, 0x4f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xd3, 0xd1, 0x3a, 0x5b, 0x86, 0x0c, 0x00, 0x00, +} + +func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartSetHeader) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Total != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Part) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Part) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Part) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Bytes) > 0 { + i -= len(m.Bytes) + copy(dAtA[i:], m.Bytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Bytes))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlockID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x72 + } + if len(m.EvidenceHash) > 0 { + i -= len(m.EvidenceHash) + copy(dAtA[i:], m.EvidenceHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) + i-- + dAtA[i] = 0x6a + } + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- + dAtA[i] = 0x62 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x5a + } + if len(m.ConsensusHash) > 0 { + i -= len(m.ConsensusHash) + copy(dAtA[i:], m.ConsensusHash) + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.ConsensusHash))) + i-- + dAtA[i] = 0x52 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x4a + } + if len(m.ValidatorsHash) > 0 { + i -= len(m.ValidatorsHash) + copy(dAtA[i:], m.ValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) + i-- + dAtA[i] = 0x42 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x3a + } + if len(m.LastCommitHash) > 0 { + i -= len(m.LastCommitHash) + copy(dAtA[i:], m.LastCommitHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + i-- + dAtA[i] = 0x32 + } + { + size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintTypes(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Data) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Data) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Vote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x42 + } + if m.ValidatorIndex != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x32 + } + n6, err6 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i 
= encodeVarintTypes(dAtA, i, uint64(n6)) + i-- + dAtA[i] = 0x2a + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Commit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signatures) > 0 { + for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommitSig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x22 + } + n9, err9 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Proposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.Signature))) + i-- + dAtA[i] = 0x3a + } + n10, err10 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintTypes(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x32 + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.PolRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) + i-- + dAtA[i] = 0x20 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SignedHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SignedHeader != nil { + { + size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NumTxs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if 
m.BlockSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TxProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if len(m.RootHash) > 0 { + i -= len(m.RootHash) + copy(dAtA[i:], m.RootHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartSetHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovTypes(uint64(m.Total)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Part) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Bytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *BlockID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.PartSetHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = m.LastBlockId.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.LastCommitHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConsensusHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EvidenceHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Data) Size() (n int) { + if m == nil { + return 0 + } + 
var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorIndex != 0 { + n += 1 + sovTypes(uint64(m.ValidatorIndex)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Signatures) > 0 { + for _, e := range m.Signatures { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *CommitSig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.PolRound != 0 { + n += 1 + sovTypes(uint64(m.PolRound)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *LightBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedHeader != nil { + l = m.SignedHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *BlockMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockSize != 0 { + n += 1 + sovTypes(uint64(m.BlockSize)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.NumTxs != 0 { + n += 1 + sovTypes(uint64(m.NumTxs)) + } + return n +} + +func (m *TxProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RootHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Part) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Part: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Bytes == nil { + m.Bytes = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastCommitHash == nil { + m.LastCommitHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValidatorsHash == nil { + m.ValidatorsHash = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) + if m.ConsensusHash == nil { + m.ConsensusHash = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) + if m.EvidenceHash == nil { + m.EvidenceHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Data) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Data: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *Vote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) + } + m.ValidatorIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValidatorIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Commit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Commit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signatures = append(m.Signatures, CommitSig{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitSig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } + m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PolRound", wireType) + } + m.PolRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PolRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &Header{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedHeader == nil { + m.SignedHeader = &SignedHeader{} + } + if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", wireType) + } + m.BlockSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) + } + m.NumTxs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumTxs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) + if m.RootHash == nil { + m.RootHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &v1.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/validator.pb.go b/api/cometbft/types/v1beta1/validator.pb.go similarity index 79% rename from proto/tendermint/types/validator.pb.go rename to api/cometbft/types/v1beta1/validator.pb.go index 45faec7e6ea..fdcc6887193 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/api/cometbft/types/v1beta1/validator.pb.go @@ -1,11 +1,11 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
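The field loops and `skipTypes` above all share one primitive: unsigned base-128 varint decoding — seven payload bits per byte, high bit set while more bytes follow, with the `shift >= 64` check guarding against integer overflow. A standalone sketch of that primitive (plain Go, not the generated API):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inline loops in the generated Unmarshal code:
// 7 payload bits per byte, MSB set means "more bytes follow".
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 300 = 0b1_0010_1100, encoded as 0xAC 0x02.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```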
-// source: tendermint/types/validator.proto +// source: cometbft/types/v1beta1/validator.proto -package types +package v1beta1 import ( fmt "fmt" - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + v1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" io "io" @@ -28,10 +28,14 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type BlockIDFlag int32 const ( + // Indicates an error condition BlockIDFlagUnknown BlockIDFlag = 0 - BlockIDFlagAbsent BlockIDFlag = 1 - BlockIDFlagCommit BlockIDFlag = 2 - BlockIDFlagNil BlockIDFlag = 3 + // The vote was not received + BlockIDFlagAbsent BlockIDFlag = 1 + // Voted for the block that received the majority + BlockIDFlagCommit BlockIDFlag = 2 + // Voted for nil + BlockIDFlagNil BlockIDFlag = 3 ) var BlockIDFlag_name = map[int32]string{ @@ -53,9 +57,10 @@ func (x BlockIDFlag) String() string { } func (BlockIDFlag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4e92274df03d3088, []int{0} + return fileDescriptor_2e1661b4f555b138, []int{0} } +// ValidatorSet defines a set of validators. type ValidatorSet struct { Validators []*Validator `protobuf:"bytes,1,rep,name=validators,proto3" json:"validators,omitempty"` Proposer *Validator `protobuf:"bytes,2,opt,name=proposer,proto3" json:"proposer,omitempty"` @@ -66,7 +71,7 @@ func (m *ValidatorSet) Reset() { *m = ValidatorSet{} } func (m *ValidatorSet) String() string { return proto.CompactTextString(m) } func (*ValidatorSet) ProtoMessage() {} func (*ValidatorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_4e92274df03d3088, []int{0} + return fileDescriptor_2e1661b4f555b138, []int{0} } func (m *ValidatorSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -116,18 +121,19 @@ func (m *ValidatorSet) GetTotalVotingPower() int64 { return 0 } +// Validator represents a node participating in the consensus protocol. 
type Validator struct { - Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - PubKey crypto.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` - VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` - ProposerPriority int64 `protobuf:"varint,4,opt,name=proposer_priority,json=proposerPriority,proto3" json:"proposer_priority,omitempty"` + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + PubKey v1.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` + ProposerPriority int64 `protobuf:"varint,4,opt,name=proposer_priority,json=proposerPriority,proto3" json:"proposer_priority,omitempty"` } func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_4e92274df03d3088, []int{1} + return fileDescriptor_2e1661b4f555b138, []int{1} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,11 +169,11 @@ func (m *Validator) GetAddress() []byte { return nil } -func (m *Validator) GetPubKey() crypto.PublicKey { +func (m *Validator) GetPubKey() v1.PublicKey { if m != nil { return m.PubKey } - return crypto.PublicKey{} + return v1.PublicKey{} } func (m *Validator) GetVotingPower() int64 { @@ -184,16 +190,19 @@ func (m *Validator) GetProposerPriority() int64 { return 0 } +// SimpleValidator is a Validator, which is serialized and hashed in consensus. +// Address is removed because it's redundant with the pubkey. +// Proposer priority is removed because it changes every round. 
type SimpleValidator struct { - PubKey *crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - VotingPower int64 `protobuf:"varint,2,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` + PubKey *v1.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + VotingPower int64 `protobuf:"varint,2,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` } func (m *SimpleValidator) Reset() { *m = SimpleValidator{} } func (m *SimpleValidator) String() string { return proto.CompactTextString(m) } func (*SimpleValidator) ProtoMessage() {} func (*SimpleValidator) Descriptor() ([]byte, []int) { - return fileDescriptor_4e92274df03d3088, []int{2} + return fileDescriptor_2e1661b4f555b138, []int{2} } func (m *SimpleValidator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -222,7 +231,7 @@ func (m *SimpleValidator) XXX_DiscardUnknown() { var xxx_messageInfo_SimpleValidator proto.InternalMessageInfo -func (m *SimpleValidator) GetPubKey() *crypto.PublicKey { +func (m *SimpleValidator) GetPubKey() *v1.PublicKey { if m != nil { return m.PubKey } @@ -237,48 +246,51 @@ func (m *SimpleValidator) GetVotingPower() int64 { } func init() { - proto.RegisterEnum("tendermint.types.BlockIDFlag", BlockIDFlag_name, BlockIDFlag_value) - proto.RegisterType((*ValidatorSet)(nil), "tendermint.types.ValidatorSet") - proto.RegisterType((*Validator)(nil), "tendermint.types.Validator") - proto.RegisterType((*SimpleValidator)(nil), "tendermint.types.SimpleValidator") -} - -func init() { proto.RegisterFile("tendermint/types/validator.proto", fileDescriptor_4e92274df03d3088) } - -var fileDescriptor_4e92274df03d3088 = []byte{ - // 502 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcd, 0x6e, 0xd3, 0x40, - 0x14, 0x85, 0x3d, 0x4d, 0xd5, 0x96, 0x49, 0x04, 0xce, 0xa8, 0x45, 0x91, 0xa9, 0x8c, 0xe9, 0x2a, - 0xfc, 0xc8, 0x16, 0x54, 0x88, 0x45, 0x57, 0x49, 0x4a, 0x51, 0x94, 0xc4, 0x89, 0x92, 0xb6, 0x48, - 0x6c, 0xac, 0x38, 0x19, 0xcc, 0xc8, 0x3f, 0x33, 0x1a, 0x4f, 0x52, 0xf9, 0x0d, 0x50, 0x56, 0xbc, - 0x40, 0x56, 0xb0, 0x60, 0xcd, 0x53, 0x74, 0xd9, 0x1d, 0xac, 0x10, 0x4a, 0x5e, 0x04, 0xc5, 0x6e, - 0x62, 0xb7, 0x01, 0x75, 0x77, 0x7d, 0xcf, 0x39, 0xf7, 0x7e, 0x1e, 0xe9, 0x42, 0x4d, 0xe0, 0x60, - 0x88, 0xb9, 0x4f, 0x02, 0x61, 0x88, 0x88, 0xe1, 0xd0, 0x18, 0xf7, 0x3d, 0x32, 0xec, 0x0b, 0xca, - 0x75, 0xc6, 0xa9, 0xa0, 0x48, 0x4e, 0x1d, 0x7a, 0xec, 0x50, 0x76, 0x1d, 0xea, 0xd0, 0x58, 0x34, - 0x16, 0x55, 0xe2, 0x53, 0xf6, 0x33, 0x93, 0x06, 0x3c, 0x62, 0x82, 0x1a, 0x2e, 0x8e, 0xc2, 0x44, - 0x3d, 0xf8, 0x01, 0x60, 0xe1, 0x7c, 0x39, 0xb9, 0x87, 0x05, 0x3a, 0x82, 0x70, 0xb5, 0x29, 0x2c, - 0x01, 0x2d, 0x57, 0xce, 0xbf, 0x7a, 0xa4, 0xdf, 0xde, 0xa5, 0xaf, 0x32, 0xdd, 0x8c, 0x1d, 0xbd, - 0x81, 0x3b, 0x8c, 0x53, 0x46, 0x43, 0xcc, 0x4b, 0x1b, 0x1a, 0xb8, 0x2b, 0xba, 0x32, 0xa3, 0x17, - 0x10, 0x09, 0x2a, 0xfa, 0x9e, 0x35, 0xa6, 0x82, 0x04, 0x8e, 0xc5, 0xe8, 0x05, 0xe6, 0xa5, 0x9c, - 0x06, 0xca, 0xb9, 0xae, 0x1c, 0x2b, 0xe7, 0xb1, 0xd0, 0x59, 0xf4, 0x17, 0xd0, 0xf7, 0x56, 0x53, - 0x50, 0x09, 0x6e, 0xf7, 0x87, 0x43, 0x8e, 0xc3, 0x05, 0x2e, 0x28, 0x17, 0xba, 0xcb, 0x4f, 0x74, - 0x04, 0xb7, 0xd9, 0xc8, 0xb6, 0x5c, 0x1c, 0x5d, 0xd3, 0xec, 0x67, 0x69, 0x92, 0xc7, 0xd0, 0x3b, - 0x23, 0xdb, 0x23, 0x83, 0x06, 0x8e, 0xaa, 0x9b, 0x97, 0xbf, 0x1f, 0x4b, 0xdd, 0x2d, 0x36, 0xb2, - 0x1b, 0x38, 0x42, 0x4f, 0x60, 0xe1, 0x1f, 0x30, 0xf9, 0x71, 0xca, 0x81, 0x9e, 0xc3, 0xe2, 
0xf2, - 0x0f, 0x2c, 0xc6, 0x09, 0xe5, 0x44, 0x44, 0xa5, 0xcd, 0x04, 0x7a, 0x29, 0x74, 0xae, 0xfb, 0x07, - 0x2e, 0x7c, 0xd0, 0x23, 0x3e, 0xf3, 0x70, 0x4a, 0xfe, 0x3a, 0xe5, 0x03, 0x77, 0xf3, 0xfd, 0x97, - 0x6c, 0x63, 0x8d, 0xec, 0xd9, 0x4f, 0x00, 0xf3, 0x55, 0x8f, 0x0e, 0xdc, 0xfa, 0xf1, 0x89, 0xd7, - 0x77, 0xd0, 0x4b, 0xb8, 0x57, 0x6d, 0xb6, 0x6b, 0x0d, 0xab, 0x7e, 0x6c, 0x9d, 0x34, 0x2b, 0xef, - 0xac, 0x33, 0xb3, 0x61, 0xb6, 0xdf, 0x9b, 0xb2, 0xa4, 0x3c, 0x9c, 0x4c, 0x35, 0x94, 0xf1, 0x9e, - 0x05, 0x6e, 0x40, 0x2f, 0x02, 0x64, 0xc0, 0xdd, 0x9b, 0x91, 0x4a, 0xb5, 0xf7, 0xd6, 0x3c, 0x95, - 0x81, 0xb2, 0x37, 0x99, 0x6a, 0xc5, 0x4c, 0xa2, 0x62, 0x87, 0x38, 0x10, 0xeb, 0x81, 0x5a, 0xbb, - 0xd5, 0xaa, 0x9f, 0xca, 0x1b, 0x6b, 0x81, 0x1a, 0xf5, 0x7d, 0x22, 0xd0, 0x53, 0x58, 0xbc, 0x19, - 0x30, 0xeb, 0x4d, 0x39, 0xa7, 0xa0, 0xc9, 0x54, 0xbb, 0x9f, 0x71, 0x9b, 0xc4, 0x53, 0x76, 0x3e, - 0x7f, 0x55, 0xa5, 0xef, 0xdf, 0x54, 0x50, 0x6d, 0x5d, 0xce, 0x54, 0x70, 0x35, 0x53, 0xc1, 0x9f, - 0x99, 0x0a, 0xbe, 0xcc, 0x55, 0xe9, 0x6a, 0xae, 0x4a, 0xbf, 0xe6, 0xaa, 0xf4, 0xe1, 0xd0, 0x21, - 0xe2, 0xd3, 0xc8, 0xd6, 0x07, 0xd4, 0x37, 0x06, 0xd4, 0xc7, 0xc2, 0xfe, 0x28, 0xd2, 0x22, 0xb9, - 0x8b, 0xdb, 0x57, 0x65, 0x6f, 0xc5, 0xfd, 0xc3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x8c, - 0xe3, 0x20, 0x70, 0x03, 0x00, 0x00, + proto.RegisterEnum("cometbft.types.v1beta1.BlockIDFlag", BlockIDFlag_name, BlockIDFlag_value) + proto.RegisterType((*ValidatorSet)(nil), "cometbft.types.v1beta1.ValidatorSet") + proto.RegisterType((*Validator)(nil), "cometbft.types.v1beta1.Validator") + proto.RegisterType((*SimpleValidator)(nil), "cometbft.types.v1beta1.SimpleValidator") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta1/validator.proto", fileDescriptor_2e1661b4f555b138) +} + +var fileDescriptor_2e1661b4f555b138 = []byte{ + // 515 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x6f, 0xda, 0x40, + 0x18, 0x87, 0x7d, 0x10, 0x25, 0xe9, 0x81, 0x5a, 0x38, 0x25, 0x11, 0xb2, 0x14, 0xd7, 0x61, 0xa8, + 0xe8, 0x1f, 0xd9, 0xa2, 0x95, 0xda, 0xa5, 0x1d, 0x80, 0x34, 0x15, 0x82, 0x18, 0x0a, 0x49, 0x2a, + 0x75, 0xb1, 0x6c, 0xb8, 0xba, 0x27, 0x8c, 0xef, 0x64, 0x1f, 0x8e, 0xfc, 0x0d, 0x2a, 0xa6, 0x7e, + 0x01, 0xa6, 0x76, 0xe8, 0xde, 0xbd, 0x73, 0xc6, 0x6c, 0xed, 0x54, 0x55, 0xf0, 0x45, 0x2a, 0x0c, + 0xb6, 0x89, 0x20, 0x52, 0xb6, 0xf3, 0xbd, 0xbf, 0xe7, 0x7d, 0x1f, 0x9f, 0xf4, 0xc2, 0x47, 0x3d, + 0x3a, 0xc4, 0xdc, 0xfc, 0xc4, 0x55, 0x1e, 0x30, 0xec, 0xa9, 0x7e, 0xd9, 0xc4, 0xdc, 0x28, 0xab, + 0xbe, 0x61, 0x93, 0xbe, 0xc1, 0xa9, 0xab, 0x30, 0x97, 0x72, 0x8a, 0x0e, 0xa2, 0x9c, 0x12, 0xe6, + 0x94, 0x65, 0x4e, 0xdc, 0xb3, 0xa8, 0x45, 0xc3, 0x88, 0x3a, 0x3f, 0x2d, 0xd2, 0xe2, 0x61, 0xdc, + 0xb5, 0xe7, 0x06, 0x8c, 0x53, 0xd5, 0x2f, 0xab, 0x03, 0x1c, 0x78, 0x8b, 0x72, 0xf1, 0x17, 0x80, + 0xd9, 0x8b, 0x68, 0x40, 0x17, 0x73, 0x54, 0x81, 0x30, 0x1e, 0xe8, 0x15, 0x80, 0x9c, 0x2e, 0x65, + 0x9e, 0x1f, 0x29, 0x9b, 0x47, 0x2a, 0x31, 0xd9, 0x59, 0x81, 0xd0, 0x1b, 0xb8, 0xcb, 0x5c, 0xca, + 0xa8, 0x87, 0xdd, 0x42, 0x4a, 0x06, 0x77, 0x6b, 0x10, 0x23, 0xe8, 0x19, 0x44, 0x9c, 0x72, 0xc3, + 0xd6, 0x7d, 0xca, 0x89, 0x63, 0xe9, 0x8c, 0x5e, 0x62, 0xb7, 0x90, 0x96, 0x41, 0x29, 0xdd, 0xc9, + 0x85, 0x95, 0x8b, 0xb0, 0xd0, 0x9e, 0xdf, 0x17, 0x7f, 0x02, 0x78, 0x2f, 0xee, 0x82, 0x0a, 0x70, + 0xc7, 0xe8, 0xf7, 0x5d, 0xec, 0xcd, 0xd5, 0x41, 0x29, 0xdb, 0x89, 0x3e, 0xd1, 0x6b, 0xb8, 0xc3, + 0x46, 0xa6, 0x3e, 0xc0, 0xc1, 0xd2, 0xe9, 0x30, 0x71, 0x5a, 0xbc, 0x8c, 0xe2, 0x97, 0x95, 0xf6, + 0xc8, 
0xb4, 0x49, 0xaf, 0x81, 0x83, 0xea, 0xd6, 0xd5, 0xdf, 0x87, 0x42, 0x67, 0x9b, 0x8d, 0xcc, + 0x06, 0x0e, 0xd0, 0x11, 0xcc, 0x6e, 0xb0, 0xc9, 0xf8, 0x89, 0x08, 0x7a, 0x0a, 0xf3, 0xd1, 0x2f, + 0xe8, 0xcc, 0x25, 0xd4, 0x25, 0x3c, 0x28, 0x6c, 0x2d, 0xac, 0xa3, 0x42, 0x7b, 0x79, 0x5f, 0xb4, + 0xe1, 0x83, 0x2e, 0x19, 0x32, 0x1b, 0x27, 0xea, 0x2f, 0x13, 0x41, 0x70, 0x07, 0xc1, 0x5b, 0xd5, + 0x52, 0x6b, 0x6a, 0x4f, 0x7e, 0x03, 0x98, 0xa9, 0xda, 0xb4, 0x37, 0xa8, 0x1f, 0x9f, 0xd8, 0x86, + 0x85, 0xca, 0x70, 0xbf, 0xda, 0x6c, 0xd5, 0x1a, 0x7a, 0xfd, 0x58, 0x3f, 0x69, 0x56, 0xde, 0xe9, + 0xe7, 0x5a, 0x43, 0x6b, 0x7d, 0xd0, 0x72, 0x82, 0x78, 0x30, 0x9e, 0xc8, 0x68, 0x25, 0x7b, 0xee, + 0x0c, 0x1c, 0x7a, 0xe9, 0x20, 0x15, 0xee, 0xdd, 0x44, 0x2a, 0xd5, 0xee, 0x5b, 0xed, 0x2c, 0x07, + 0xc4, 0xfd, 0xf1, 0x44, 0xce, 0xaf, 0x10, 0x15, 0xd3, 0xc3, 0x0e, 0x5f, 0x07, 0x6a, 0xad, 0xd3, + 0xd3, 0xfa, 0x59, 0x2e, 0xb5, 0x06, 0xd4, 0xe8, 0x70, 0x48, 0x38, 0x7a, 0x0c, 0xf3, 0x37, 0x01, + 0xad, 0xde, 0xcc, 0xa5, 0x45, 0x34, 0x9e, 0xc8, 0xf7, 0x57, 0xd2, 0x1a, 0xb1, 0xc5, 0xdd, 0x2f, + 0xdf, 0x24, 0xe1, 0xc7, 0x77, 0x09, 0x54, 0xdf, 0x5f, 0x4d, 0x25, 0x70, 0x3d, 0x95, 0xc0, 0xbf, + 0xa9, 0x04, 0xbe, 0xce, 0x24, 0xe1, 0x7a, 0x26, 0x09, 0x7f, 0x66, 0x92, 0xf0, 0xf1, 0x95, 0x45, + 0xf8, 0xe7, 0x91, 0x39, 0x7f, 0x43, 0x35, 0x59, 0x81, 0xe8, 0x60, 0x30, 0xa2, 0x6e, 0x5e, 0x37, + 0x73, 0x3b, 0x5c, 0x8c, 0x17, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x49, 0x07, 0x50, 0x2d, 0x8f, + 0x03, 0x00, 0x00, } func (m *ValidatorSet) Marshal() (dAtA []byte, err error) { @@ -854,7 +866,7 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.PubKey == nil { - m.PubKey = &crypto.PublicKey{} + m.PubKey = &v1.PublicKey{} } if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/api/cometbft/types/v1beta2/params.pb.go b/api/cometbft/types/v1beta2/params.pb.go new file mode 100644 index 00000000000..a6213bb4ece --- /dev/null +++ b/api/cometbft/types/v1beta2/params.pb.go @@ -0,0 +1,774 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/types/v1beta2/params.proto + +package v1beta2 + +import ( + fmt "fmt" + v1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. 
+type ConsensusParams struct { + Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Evidence *v1beta1.EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` + Validator *v1beta1.ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` + Version *v1beta1.VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } +func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } +func (*ConsensusParams) ProtoMessage() {} +func (*ConsensusParams) Descriptor() ([]byte, []int) { + return fileDescriptor_530f7a66f5f74d85, []int{0} +} +func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParams.Merge(m, src) +} +func (m *ConsensusParams) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParams) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo + +func (m *ConsensusParams) GetBlock() *BlockParams { + if m != nil { + return m.Block + } + return nil +} + +func (m *ConsensusParams) GetEvidence() *v1beta1.EvidenceParams { + if m != nil { + return m.Evidence + } + return nil +} + +func (m *ConsensusParams) GetValidator() *v1beta1.ValidatorParams { + if m != nil { + return m.Validator + } + return nil +} + +func (m *ConsensusParams) GetVersion() *v1beta1.VersionParams { + if m != nil { + return m.Version + } + return nil +} + +// BlockParams contains limits on the block size. +type BlockParams struct { + // Max block size, in bytes. + // Note: must be greater than 0 + MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Max gas per block. 
+ // Note: must be greater or equal to -1 + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` +} + +func (m *BlockParams) Reset() { *m = BlockParams{} } +func (m *BlockParams) String() string { return proto.CompactTextString(m) } +func (*BlockParams) ProtoMessage() {} +func (*BlockParams) Descriptor() ([]byte, []int) { + return fileDescriptor_530f7a66f5f74d85, []int{1} +} +func (m *BlockParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockParams.Merge(m, src) +} +func (m *BlockParams) XXX_Size() int { + return m.Size() +} +func (m *BlockParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockParams.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockParams proto.InternalMessageInfo + +func (m *BlockParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} + +func (m *BlockParams) GetMaxGas() int64 { + if m != nil { + return m.MaxGas + } + return 0 +} + +func init() { + proto.RegisterType((*ConsensusParams)(nil), "cometbft.types.v1beta2.ConsensusParams") + proto.RegisterType((*BlockParams)(nil), "cometbft.types.v1beta2.BlockParams") +} + +func init() { + proto.RegisterFile("cometbft/types/v1beta2/params.proto", fileDescriptor_530f7a66f5f74d85) +} + +var fileDescriptor_530f7a66f5f74d85 = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd2, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x83, 0x29, 0xd2, 0x03, 0x2b, 0xd2, 0x83, 0x2a, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, + 0x2b, 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0xb0, 0x1b, 0x69, 0x88, 0x62, 0xa4, 0xd2, 0x0c, 0x26, + 0x2e, 0x7e, 0xe7, 0xfc, 0xbc, 0xe2, 0xd4, 0xbc, 0xe2, 0xd2, 0xe2, 0x00, 0xb0, 0x8c, 0x90, 0x25, + 0x17, 0x6b, 0x52, 0x4e, 0x7e, 0x72, 0xb6, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0xb2, 0x1e, + 0x76, 0x6b, 0xf5, 0x9c, 0x40, 0x8a, 0x20, 0x7a, 0x82, 0x20, 0x3a, 0x84, 0x9c, 0xb8, 0x38, 0x52, + 0xcb, 0x32, 0x53, 0x52, 0xf3, 0x92, 0x53, 0x25, 0x98, 0xc0, 0xba, 0xd5, 0xb0, 0xeb, 0x36, 0xd4, + 0x73, 0x85, 0xaa, 0x83, 0x1a, 0x00, 0xd7, 0x27, 0xe4, 0xca, 0xc5, 0x59, 0x96, 0x98, 0x93, 0x99, + 0x92, 0x58, 0x92, 0x5f, 0x24, 0xc1, 0x0c, 0x36, 0x44, 0x1d, 0x97, 0x21, 0x61, 0x30, 0x85, 0x50, + 0x53, 0x10, 0x3a, 0x85, 0xec, 0xb9, 0xd8, 0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x24, 0x58, + 0xc0, 0x86, 0xa8, 0xe2, 0x34, 0x04, 0xa2, 0x0c, 0x6a, 0x04, 0x4c, 0x97, 0x92, 0x27, 0x17, 0x37, + 0x92, 0x0f, 0x85, 0xa4, 0xb9, 0x38, 0x73, 0x13, 0x2b, 0xe2, 0x93, 0x2a, 0x4b, 0x52, 0x8b, 0xc1, + 0x21, 0xc3, 0x1c, 0xc4, 0x91, 0x9b, 0x58, 0xe1, 0x04, 0xe2, 0x0b, 0x89, 0x73, 0xb1, 0x83, 0x24, + 0xd3, 0x13, 0x8b, 0xc1, 0xde, 0x66, 0x0e, 0x62, 0xcb, 0x4d, 0xac, 0x70, 0x4f, 0x2c, 0xf6, 0x62, + 0xe1, 0x60, 0x16, 0x60, 0x71, 0x0a, 0x5d, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, + 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, + 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xcc, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 
0x93, 0x40, 0xce, 0xd3, 0x87, + 0xc7, 0x19, 0x9c, 0x91, 0x58, 0x90, 0xa9, 0x8f, 0x3d, 0x71, 0x24, 0xb1, 0x81, 0xe3, 0xd0, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0x28, 0xa8, 0xb8, 0xff, 0x3d, 0x02, 0x00, 0x00, +} + +func (this *ConsensusParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ConsensusParams) + if !ok { + that2, ok := that.(ConsensusParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Block.Equal(that1.Block) { + return false + } + if !this.Evidence.Equal(that1.Evidence) { + return false + } + if !this.Validator.Equal(that1.Validator) { + return false + } + if !this.Version.Equal(that1.Version) { + return false + } + return true +} +func (this *BlockParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockParams) + if !ok { + that2, ok := that.(BlockParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + if this.MaxGas != that1.MaxGas { + return false + } + return true +} +func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != nil { + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Validator != nil { + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Evidence != nil { + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxGas != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxGas)) + i-- + dAtA[i] = 0x10 + } + if m.MaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] 
= uint8(v) + return base +} +func (m *ConsensusParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func (m *BlockParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBytes != 0 { + n += 1 + sovParams(uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { + n += 1 + sovParams(uint64(m.MaxGas)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ConsensusParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &BlockParams{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Evidence == nil { + m.Evidence = &v1beta1.EvidenceParams{} + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validator == nil { + m.Validator = &v1beta1.ValidatorParams{} + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &v1beta1.VersionParams{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + } + m.MaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxGas |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/version/types.pb.go b/api/cometbft/version/v1/types.pb.go similarity index 86% rename from proto/tendermint/version/types.pb.go rename to api/cometbft/version/v1/types.pb.go index f7393bf9037..29b0fde5e55 100644 --- a/proto/tendermint/version/types.pb.go +++ b/api/cometbft/version/v1/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/version/types.proto +// source: cometbft/version/v1/types.proto -package version +package v1 import ( fmt "fmt" @@ -35,7 +35,7 @@ func (m *App) Reset() { *m = App{} } func (m *App) String() string { return proto.CompactTextString(m) } func (*App) ProtoMessage() {} func (*App) Descriptor() ([]byte, []int) { - return fileDescriptor_f9b42966edc5edad, []int{0} + return fileDescriptor_f76e29b77f2d731e, []int{0} } func (m *App) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -90,7 +90,7 @@ func (m *Consensus) Reset() { *m = Consensus{} } func (m *Consensus) String() string { return proto.CompactTextString(m) } func (*Consensus) ProtoMessage() {} func (*Consensus) Descriptor() ([]byte, []int) { - return fileDescriptor_f9b42966edc5edad, []int{1} + return fileDescriptor_f76e29b77f2d731e, []int{1} } func (m *Consensus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,28 +134,28 @@ func (m *Consensus) GetApp() uint64 { } func init() { - proto.RegisterType((*App)(nil), "tendermint.version.App") - proto.RegisterType((*Consensus)(nil), "tendermint.version.Consensus") + proto.RegisterType((*App)(nil), "cometbft.version.v1.App") + proto.RegisterType((*Consensus)(nil), "cometbft.version.v1.Consensus") } -func init() { proto.RegisterFile("tendermint/version/types.proto", fileDescriptor_f9b42966edc5edad) } +func init() { proto.RegisterFile("cometbft/version/v1/types.proto", fileDescriptor_f76e29b77f2d731e) } -var fileDescriptor_f9b42966edc5edad = []byte{ - // 223 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, - 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, - 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, 0x92, 0x2d, - 0x17, 0xb3, 0x63, 0x41, 0x81, 0x90, 0x14, 0x17, 0x07, 0x98, 0x9f, 0x9c, 
0x9f, 0x23, 0xc1, 0xa8, - 0xc0, 0xa8, 0xc1, 0x12, 0x04, 0xe7, 0x83, 0xe4, 0x8a, 0xf3, 0xd3, 0x4a, 0xca, 0x13, 0x8b, 0x52, - 0x25, 0x98, 0x14, 0x18, 0x35, 0x38, 0x83, 0xe0, 0x7c, 0x25, 0x4b, 0x2e, 0x4e, 0xe7, 0xfc, 0xbc, - 0xe2, 0xd4, 0xbc, 0xe2, 0xd2, 0x62, 0x21, 0x11, 0x2e, 0xd6, 0xa4, 0x9c, 0xfc, 0xe4, 0x6c, 0xa8, - 0x09, 0x10, 0x8e, 0x90, 0x00, 0x17, 0x73, 0x62, 0x41, 0x01, 0x58, 0x27, 0x4b, 0x10, 0x88, 0x69, - 0xc5, 0xf2, 0x62, 0x81, 0x3c, 0xa3, 0x93, 0xff, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, - 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, - 0x31, 0x44, 0x99, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, - 0xe7, 0xa6, 0x96, 0x24, 0xa5, 0x95, 0x20, 0x18, 0x10, 0x2f, 0x60, 0x06, 0x40, 0x12, 0x1b, 0x58, - 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc7, 0x18, 0x2b, 0x1d, 0x01, 0x00, 0x00, +var fileDescriptor_f76e29b77f2d731e = []byte{ + // 224 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xce, 0xcf, 0x4d, + 0x2d, 0x49, 0x4a, 0x2b, 0xd1, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0xd4, + 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, 0x29, 0xd0, + 0x83, 0x2a, 0xd0, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, 0x83, 0x58, + 0x10, 0xa5, 0x4a, 0xb6, 0x5c, 0xcc, 0x8e, 0x05, 0x05, 0x42, 0x52, 0x5c, 0x1c, 0x60, 0x7e, 0x72, + 0x7e, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x9c, 0x0f, 0x92, 0x2b, 0xce, 0x4f, 0x2b, + 0x29, 0x4f, 0x2c, 0x4a, 0x95, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0xf3, 0x95, 0x2c, 0xb9, + 0x38, 0x9d, 0xf3, 0xf3, 0x8a, 0x53, 0xf3, 0x8a, 0x4b, 0x8b, 0x85, 0x44, 0xb8, 0x58, 0x93, 0x72, + 0xf2, 0x93, 0xb3, 0xa1, 0x26, 0x40, 0x38, 0x42, 0x02, 0x5c, 0xcc, 0x89, 0x05, 0x05, 0x60, 0x9d, + 0x2c, 0x41, 0x20, 0xa6, 0x15, 0xcb, 0x8b, 0x05, 0xf2, 0x8c, 0x4e, 0x7e, 0x27, 0x1e, 0xc9, 0x31, + 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, + 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x92, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, + 0x9f, 0xab, 0x0f, 0xf7, 0x2a, 0x9c, 0x91, 0x58, 0x90, 0xa9, 0x8f, 0x25, 0x00, 0x92, 0xd8, 0xc0, + 0x0e, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x26, 0x83, 0x70, 0xc1, 0x1e, 0x01, 0x00, 0x00, } func (this *Consensus) Equal(that interface{}) bool { diff --git a/api/go.mod b/api/go.mod new file mode 100644 index 00000000000..165eafc9177 --- /dev/null +++ b/api/go.mod @@ -0,0 +1,19 @@ +module github.com/cometbft/cometbft/api + +go 1.23.1 + +require ( + github.com/cosmos/gogoproto v1.4.12 + github.com/golang/protobuf v1.5.3 + google.golang.org/grpc v1.62.1 +) + +require ( + github.com/google/go-cmp v0.6.0 // indirect + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/protobuf v1.33.0 // indirect +) diff --git a/api/go.sum b/api/go.sum new file mode 100644 index 00000000000..8ae60eefe81 --- /dev/null +++ b/api/go.sum @@ -0,0 +1,25 @@ +github.com/cosmos/gogoproto v1.4.12 h1:vB6Lbe/rtnYGjQuFxkPiPYiCybqFT8QvLipDZP8JpFE= +github.com/cosmos/gogoproto v1.4.12/go.mod h1:LnZob1bXRdUoqMMtwYlcR3wjiElmlC+FkjaZRv1/eLY= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf 
v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/blocksync/pool.go b/blocksync/pool.go deleted file mode 100644 index 919586693d9..00000000000 --- a/blocksync/pool.go +++ /dev/null @@ -1,683 +0,0 @@ -package blocksync - -import ( - "errors" - "fmt" - "math" - "sync/atomic" - "time" - - flow "github.com/cometbft/cometbft/libs/flowrate" - "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/libs/service" - cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/types" -) - -/* -eg, L = latency = 0.1s - P = num peers = 10 - FN = num full nodes - BS = 1kB block size - CB = 1 Mbit/s = 128 kB/s - CB/P = 12.8 kB - B/S = CB/P/BS = 12.8 blocks/s - - 12.8 * 0.1 = 1.28 blocks on conn -*/ - -const ( - requestIntervalMS = 2 - maxTotalRequesters = 600 - maxPendingRequests = maxTotalRequesters - maxPendingRequestsPerPeer = 20 - requestRetrySeconds = 30 - - // Minimum recv rate to ensure we're receiving blocks from a peer fast - // enough. If a peer is not sending us data at at least that rate, we - // consider them to have timedout and we disconnect. - // - // Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s, - // sending data across atlantic ~ 7.5 KB/s. - minRecvRate = 7680 - - // Maximum difference between current and new block's height. - maxDiffBetweenCurrentAndReceivedBlockHeight = 100 -) - -var peerTimeout = 15 * time.Second // not const so we can override with tests - -/* - Peers self report their heights when we join the block pool. 
- Starting from our latest pool.height, we request blocks - in sequence from peers that reported higher heights than ours. - Every so often we ask peers what height they're on so we can keep going. - - Requests are continuously made for blocks of higher heights until - the limit is reached. If most of the requests have no available peers, and we - are not at peer limits, we can probably switch to consensus reactor -*/ - -// BlockPool keeps track of the block sync peers, block requests and block responses. -type BlockPool struct { - service.BaseService - startTime time.Time - - mtx cmtsync.Mutex - // block requests - requesters map[int64]*bpRequester - height int64 // the lowest key in requesters. - // peers - peers map[p2p.ID]*bpPeer - maxPeerHeight int64 // the biggest reported height - - // atomic - numPending int32 // number of requests pending assignment or block response - - requestsCh chan<- BlockRequest - errorsCh chan<- peerError -} - -// NewBlockPool returns a new BlockPool with the height equal to start. Block -// requests and errors will be sent to requestsCh and errorsCh accordingly. -func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { - bp := &BlockPool{ - peers: make(map[p2p.ID]*bpPeer), - - requesters: make(map[int64]*bpRequester), - height: start, - numPending: 0, - - requestsCh: requestsCh, - errorsCh: errorsCh, - } - bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp) - return bp -} - -// OnStart implements service.Service by spawning requesters routine and recording -// pool's start time. -func (pool *BlockPool) OnStart() error { - go pool.makeRequestersRoutine() - pool.startTime = time.Now() - return nil -} - -// spawns requesters as needed -func (pool *BlockPool) makeRequestersRoutine() { - for { - if !pool.IsRunning() { - break - } - - _, numPending, lenRequesters := pool.GetStatus() - switch { - case numPending >= maxPendingRequests: - // sleep for a bit. - time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers - pool.removeTimedoutPeers() - case lenRequesters >= maxTotalRequesters: - // sleep for a bit. - time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers - pool.removeTimedoutPeers() - default: - // request for more blocks. - pool.makeNextRequester() - } - } -} - -func (pool *BlockPool) removeTimedoutPeers() { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - for _, peer := range pool.peers { - if !peer.didTimeout && peer.numPending > 0 { - curRate := peer.recvMonitor.Status().CurRate - // curRate can be 0 on start - if curRate != 0 && curRate < minRecvRate { - err := errors.New("peer is not sending us data fast enough") - pool.sendError(err, peer.id) - pool.Logger.Error("SendTimeout", "peer", peer.id, - "reason", err, - "curRate", fmt.Sprintf("%d KB/s", curRate/1024), - "minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024)) - peer.didTimeout = true - } - } - if peer.didTimeout { - pool.removePeer(peer.id) - } - } -} - -// GetStatus returns pool's height, numPending requests and the number of -// requesters. -func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters) -} - -// IsCaughtUp returns true if this node is caught up, false - otherwise. -// TODO: relax conditions, prevent abuse. 
-func (pool *BlockPool) IsCaughtUp() bool { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - // Need at least 1 peer to be considered caught up. - if len(pool.peers) == 0 { - pool.Logger.Debug("Blockpool has no peers") - return false - } - - // Some conditions to determine if we're caught up. - // Ensures we've either received a block or waited some amount of time, - // and that we're synced to the highest known height. - // Note we use maxPeerHeight - 1 because to sync block H requires block H+1 - // to verify the LastCommit. - receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second - ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1) - isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers - return isCaughtUp -} - -// PeekTwoBlocks returns blocks at pool.height and pool.height+1. We need to -// see the second block's Commit to validate the first block. So we peek two -// blocks at a time. We return an extended commit, containing vote extensions -// and their associated signatures, as this is critical to consensus in ABCI++ -// as we switch from block sync to consensus mode. -// -// The caller will verify the commit. -func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.ExtendedCommit) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - if r := pool.requesters[pool.height]; r != nil { - first = r.getBlock() - firstExtCommit = r.getExtendedCommit() - } - if r := pool.requesters[pool.height+1]; r != nil { - second = r.getBlock() - } - return -} - -// PopRequest pops the first block at pool.height. -// It must have been validated by the second Commit from PeekTwoBlocks. -// TODO(thane): (?) and its corresponding ExtendedCommit. -func (pool *BlockPool) PopRequest() { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - if r := pool.requesters[pool.height]; r != nil { - /* The block can disappear at any time, due to removePeer(). - if r := pool.requesters[pool.height]; r == nil || r.block == nil { - PanicSanity("PopRequest() requires a valid block") - } - */ - if err := r.Stop(); err != nil { - pool.Logger.Error("Error stopping requester", "err", err) - } - delete(pool.requesters, pool.height) - pool.height++ - } else { - panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height)) - } -} - -// RedoRequest invalidates the block at the given height, -// removes the peer, and redoes the request from other peers. -// Returns the ID of the removed peer. -func (pool *BlockPool) RedoRequest(height int64) p2p.ID { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - request := pool.requesters[height] - peerID := request.getPeerID() - if peerID != p2p.ID("") { - // RemovePeer will redo all requesters associated with this peer. - pool.removePeer(peerID) - } - return peerID -} - -// AddBlock validates that the block comes from the peer it was expected from -// and calls the requester to store it. -// -// This requires an extended commit at the same height as the supplied block - -// the block contains the last commit, but we need the latest commit in case we -// need to switch over from block sync to consensus at this height. If the -// height of the extended commit and the height of the block do not match, we -// do not add the block and return an error. -// TODO: ensure that blocks come in order for each peer. 
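PeekTwoBlocks and PopRequest above are meant to be driven together by a consumer loop: peek until two consecutive blocks are available (the second carries the commit that verifies the first), then pop. A minimal sketch of that pattern, assuming a *BlockPool named pool in scope and with the verification step elided:

	for pool.IsRunning() {
		first, second, extCommit := pool.PeekTwoBlocks()
		if first == nil || second == nil {
			time.Sleep(10 * time.Millisecond) // nothing verifiable yet
			continue
		}
		// verify first using second.LastCommit (and extCommit when vote
		// extensions are enabled), then advance the pool past it
		_ = extCommit
		pool.PopRequest()
	}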
-func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, extCommit *types.ExtendedCommit, blockSize int) error { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - if extCommit != nil && block.Height != extCommit.Height { - return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, extCommit.Height) - } - - requester := pool.requesters[block.Height] - if requester == nil { - pool.Logger.Info( - "peer sent us a block we didn't expect", - "peer", - peerID, - "curHeight", - pool.height, - "blockHeight", - block.Height) - diff := pool.height - block.Height - if diff < 0 { - diff *= -1 - } - if diff > maxDiffBetweenCurrentAndReceivedBlockHeight { - pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID) - } - return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height) - } - - if requester.setBlock(block, extCommit, peerID) { - atomic.AddInt32(&pool.numPending, -1) - peer := pool.peers[peerID] - if peer != nil { - peer.decrPending(blockSize) - } - } else { - err := errors.New("requester is different or block already exists") - pool.sendError(err, peerID) - return fmt.Errorf("%w (peer: %s, requester: %s, block height: %d)", err, peerID, requester.getPeerID(), block.Height) - } - - return nil -} - -// MaxPeerHeight returns the highest reported height. -func (pool *BlockPool) MaxPeerHeight() int64 { - pool.mtx.Lock() - defer pool.mtx.Unlock() - return pool.maxPeerHeight -} - -// SetPeerRange sets the peer's alleged blockchain base and height. -func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - peer := pool.peers[peerID] - if peer != nil { - peer.base = base - peer.height = height - } else { - peer = newBPPeer(pool, peerID, base, height) - peer.setLogger(pool.Logger.With("peer", peerID)) - pool.peers[peerID] = peer - } - - if height > pool.maxPeerHeight { - pool.maxPeerHeight = height - } -} - -// RemovePeer removes the peer with peerID from the pool. If there's no peer -// with peerID, the function is a no-op. -func (pool *BlockPool) RemovePeer(peerID p2p.ID) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - pool.removePeer(peerID) -} - -func (pool *BlockPool) removePeer(peerID p2p.ID) { - for _, requester := range pool.requesters { - if requester.getPeerID() == peerID { - requester.redo(peerID) - } - } - - peer, ok := pool.peers[peerID] - if ok { - if peer.timeout != nil { - peer.timeout.Stop() - } - - delete(pool.peers, peerID) - - // Find a new peer with the biggest height and update maxPeerHeight if the - // peer's height was the biggest. - if peer.height == pool.maxPeerHeight { - pool.updateMaxPeerHeight() - } - } -} - -// If no peers are left, maxPeerHeight is set to 0. -func (pool *BlockPool) updateMaxPeerHeight() { - var max int64 - for _, peer := range pool.peers { - if peer.height > max { - max = peer.height - } - } - pool.maxPeerHeight = max -} - -// Pick an available peer that has the given height. -// If no peers are available, returns nil. 
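On the input side, a peer first advertises its range via SetPeerRange, and only then will AddBlock accept its responses. A condensed sketch of that flow, mirroring what the test helpers later in this diff do (peerID, base, height, h and blockSize are assumed to be in scope):

	pool.SetPeerRange(peerID, base, height) // register what the peer claims to have

	// later, when a BlockResponse for a requested height h arrives:
	block := &types.Block{Header: types.Header{Height: h}}
	extCommit := &types.ExtendedCommit{Height: h}
	if err := pool.AddBlock(peerID, block, extCommit, blockSize); err != nil {
		// unexpected peer or height; the pool may also emit a peerError
	}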
-func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - for _, peer := range pool.peers { - if peer.didTimeout { - pool.removePeer(peer.id) - continue - } - if peer.numPending >= maxPendingRequestsPerPeer { - continue - } - if height < peer.base || height > peer.height { - continue - } - peer.incrPending() - return peer - } - return nil -} - -func (pool *BlockPool) makeNextRequester() { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - nextHeight := pool.height + pool.requestersLen() - if nextHeight > pool.maxPeerHeight { - return - } - - request := newBPRequester(pool, nextHeight) - - pool.requesters[nextHeight] = request - atomic.AddInt32(&pool.numPending, 1) - - err := request.Start() - if err != nil { - request.Logger.Error("Error starting request", "err", err) - } -} - -func (pool *BlockPool) requestersLen() int64 { - return int64(len(pool.requesters)) -} - -func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) { - if !pool.IsRunning() { - return - } - pool.requestsCh <- BlockRequest{height, peerID} -} - -func (pool *BlockPool) sendError(err error, peerID p2p.ID) { - if !pool.IsRunning() { - return - } - pool.errorsCh <- peerError{err, peerID} -} - -// for debugging purposes - // -//nolint:unused -func (pool *BlockPool) debug() string { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - str := "" - nextHeight := pool.height + pool.requestersLen() - for h := pool.height; h < nextHeight; h++ { - if pool.requesters[h] == nil { - str += fmt.Sprintf("H(%v):X ", h) - } else { - str += fmt.Sprintf("H(%v):", h) - str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil) - str += fmt.Sprintf("C?(%v) ", pool.requesters[h].extCommit != nil) - } - } - return str -} - -//------------------------------------- - -type bpPeer struct { - didTimeout bool - numPending int32 - height int64 - base int64 - pool *BlockPool - id p2p.ID - recvMonitor *flow.Monitor - - timeout *time.Timer - - logger log.Logger -} - -func newBPPeer(pool *BlockPool, peerID p2p.ID, base int64, height int64) *bpPeer { - peer := &bpPeer{ - pool: pool, - id: peerID, - base: base, - height: height, - numPending: 0, - logger: log.NewNopLogger(), - } - return peer -} - -func (peer *bpPeer) setLogger(l log.Logger) { - peer.logger = l -} - -func (peer *bpPeer) resetMonitor() { - peer.recvMonitor = flow.New(time.Second, time.Second*40) - initialValue := float64(minRecvRate) * math.E - peer.recvMonitor.SetREMA(initialValue) -} - -func (peer *bpPeer) resetTimeout() { - if peer.timeout == nil { - peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout) - } else { - peer.timeout.Reset(peerTimeout) - } -} - -func (peer *bpPeer) incrPending() { - if peer.numPending == 0 { - peer.resetMonitor() - peer.resetTimeout() - } - peer.numPending++ -} - -func (peer *bpPeer) decrPending(recvSize int) { - peer.numPending-- - if peer.numPending == 0 { - peer.timeout.Stop() - } else { - peer.recvMonitor.Update(recvSize) - peer.resetTimeout() - } -} - -func (peer *bpPeer) onTimeout() { - peer.pool.mtx.Lock() - defer peer.pool.mtx.Unlock() - - err := errors.New("peer did not send us anything") - peer.pool.sendError(err, peer.id) - peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout) - peer.didTimeout = true -} - -//------------------------------------- - -type bpRequester struct { - service.BaseService - pool *BlockPool - height int64 - gotBlockCh chan struct{} - redoCh chan p2p.ID // redo may be sent multiple times; the peer ID identifies duplicate requests - - mtx cmtsync.Mutex - peerID 
p2p.ID - block *types.Block - extCommit *types.ExtendedCommit -} - -func newBPRequester(pool *BlockPool, height int64) *bpRequester { - bpr := &bpRequester{ - pool: pool, - height: height, - gotBlockCh: make(chan struct{}, 1), - redoCh: make(chan p2p.ID, 1), - - peerID: "", - block: nil, - } - bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr) - return bpr -} - -func (bpr *bpRequester) OnStart() error { - go bpr.requestRoutine() - return nil -} - -// Returns true if the peer matches and block doesn't already exist. -func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID p2p.ID) bool { - bpr.mtx.Lock() - if bpr.block != nil || bpr.peerID != peerID { - bpr.mtx.Unlock() - return false - } - bpr.block = block - bpr.extCommit = extCommit - bpr.mtx.Unlock() - - select { - case bpr.gotBlockCh <- struct{}{}: - default: - } - return true -} - -func (bpr *bpRequester) getBlock() *types.Block { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - return bpr.block -} - -func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - return bpr.extCommit -} - -func (bpr *bpRequester) getPeerID() p2p.ID { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - return bpr.peerID -} - -// This is called from the requestRoutine, upon redo(). -func (bpr *bpRequester) reset() { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - - if bpr.block != nil { - atomic.AddInt32(&bpr.pool.numPending, 1) - } - - bpr.peerID = "" - bpr.block = nil - bpr.extCommit = nil -} - -// Tells bpRequester to pick another peer and try again. -// NOTE: Nonblocking, and does nothing if another redo -// was already requested. -func (bpr *bpRequester) redo(peerID p2p.ID) { - select { - case bpr.redoCh <- peerID: - default: - } -} - -// Responsible for making more requests as necessary -// Returns only when a block is found (e.g. AddBlock() is called) -func (bpr *bpRequester) requestRoutine() { -OUTER_LOOP: - for { - // Pick a peer to send request to. - var peer *bpPeer - PICK_PEER_LOOP: - for { - if !bpr.IsRunning() || !bpr.pool.IsRunning() { - return - } - peer = bpr.pool.pickIncrAvailablePeer(bpr.height) - if peer == nil { - bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height) - time.Sleep(requestIntervalMS * time.Millisecond) - continue PICK_PEER_LOOP - } - break PICK_PEER_LOOP - } - bpr.mtx.Lock() - bpr.peerID = peer.id - bpr.mtx.Unlock() - - to := time.NewTimer(requestRetrySeconds * time.Second) - // Send request and wait. - bpr.pool.sendRequest(bpr.height, peer.id) - WAIT_LOOP: - for { - select { - case <-bpr.pool.Quit(): - if err := bpr.Stop(); err != nil { - bpr.Logger.Error("Error stopping requester", "err", err) - } - return - case <-bpr.Quit(): - return - case <-to.C: - bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID) - // Simulate a redo - bpr.reset() - continue OUTER_LOOP - case peerID := <-bpr.redoCh: - if peerID == bpr.peerID { - bpr.reset() - continue OUTER_LOOP - } - continue WAIT_LOOP - case <-bpr.gotBlockCh: - // We got a block! - // Continue the for-loop and wait until Quit. 
- continue WAIT_LOOP - } - } - } -} - -// BlockRequest stores a block request identified by the block Height and the PeerID responsible for -// delivering the block -type BlockRequest struct { - Height int64 - PeerID p2p.ID -} diff --git a/blocksync/pool_test.go b/blocksync/pool_test.go deleted file mode 100644 index c5bfab46b5a..00000000000 --- a/blocksync/pool_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package blocksync - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/types" -) - -func init() { - peerTimeout = 2 * time.Second -} - -type testPeer struct { - id p2p.ID - base int64 - height int64 - inputChan chan inputData // make sure each peer's data is sequential -} - -type inputData struct { - t *testing.T - pool *BlockPool - request BlockRequest -} - -func (p testPeer) runInputRoutine() { - go func() { - for input := range p.inputChan { - p.simulateInput(input) - } - }() -} - -// Request desired, pretend like we got the block immediately. -func (p testPeer) simulateInput(input inputData) { - block := &types.Block{Header: types.Header{Height: input.request.Height}} - extCommit := &types.ExtendedCommit{ - Height: input.request.Height, - } - _ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123) - // TODO: uncommenting this creates a race which is detected by: - // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 - // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890 - // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) -} - -type testPeers map[p2p.ID]testPeer - -func (ps testPeers) start() { - for _, v := range ps { - v.runInputRoutine() - } -} - -func (ps testPeers) stop() { - for _, v := range ps { - close(v.inputChan) - } -} - -func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { - peers := make(testPeers, numPeers) - for i := 0; i < numPeers; i++ { - peerID := p2p.ID(cmtrand.Str(12)) - height := minHeight + cmtrand.Int63n(maxHeight-minHeight) - base := minHeight + int64(i) - if base > height { - base = height - } - peers[peerID] = testPeer{peerID, base, height, make(chan inputData, 10)} - } - return peers -} - -func TestBlockPoolBasic(t *testing.T) { - start := int64(42) - peers := makePeers(10, start+1, 1000) - errorsCh := make(chan peerError, 1000) - requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - - err := pool.Start() - if err != nil { - t.Error(err) - } - - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) - - peers.start() - defer peers.stop() - - // Introduce each peer. - go func() { - for _, peer := range peers { - pool.SetPeerRange(peer.id, peer.base, peer.height) - } - }() - - // Start a goroutine to pull blocks - go func() { - for { - if !pool.IsRunning() { - return - } - first, second, _ := pool.PeekTwoBlocks() - if first != nil && second != nil { - pool.PopRequest() - } else { - time.Sleep(1 * time.Second) - } - } - }() - - // Pull from channels - for { - select { - case err := <-errorsCh: - t.Error(err) - case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %v", request) - if request.Height == 300 { - return // Done! 
- } - - peers[request.PeerID].inputChan <- inputData{t, pool, request} - } - } -} - -func TestBlockPoolTimeout(t *testing.T) { - start := int64(42) - peers := makePeers(10, start+1, 1000) - errorsCh := make(chan peerError, 1000) - requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() - if err != nil { - t.Error(err) - } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) - - for _, peer := range peers { - t.Logf("Peer %v", peer.id) - } - - // Introduce each peer. - go func() { - for _, peer := range peers { - pool.SetPeerRange(peer.id, peer.base, peer.height) - } - }() - - // Start a goroutine to pull blocks - go func() { - for { - if !pool.IsRunning() { - return - } - first, second, _ := pool.PeekTwoBlocks() - if first != nil && second != nil { - pool.PopRequest() - } else { - time.Sleep(1 * time.Second) - } - } - }() - - // Pull from channels - counter := 0 - timedOut := map[p2p.ID]struct{}{} - for { - select { - case err := <-errorsCh: - t.Log(err) - // consider any error to be a timeout here - if _, ok := timedOut[err.peerID]; !ok { - counter++ - if counter == len(peers) { - return // Done! - } - } - case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %+v", request) - } - } -} - -func TestBlockPoolRemovePeer(t *testing.T) { - peers := make(testPeers, 10) - for i := 0; i < 10; i++ { - peerID := p2p.ID(fmt.Sprintf("%d", i+1)) - height := int64(i + 1) - peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)} - } - requestsCh := make(chan BlockRequest) - errorsCh := make(chan peerError) - - pool := NewBlockPool(1, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) - - // add peers - for peerID, peer := range peers { - pool.SetPeerRange(peerID, peer.base, peer.height) - } - assert.EqualValues(t, 10, pool.MaxPeerHeight()) - - // remove a non-existent peer - assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) }) - - // remove the peer with the biggest height - pool.RemovePeer(p2p.ID("10")) - assert.EqualValues(t, 9, pool.MaxPeerHeight()) - - // remove all peers - for peerID := range peers { - pool.RemovePeer(peerID) - } - - assert.EqualValues(t, 0, pool.MaxPeerHeight()) -} diff --git a/blocksync/reactor.go b/blocksync/reactor.go deleted file mode 100644 index 2eb74aacefa..00000000000 --- a/blocksync/reactor.go +++ /dev/null @@ -1,543 +0,0 @@ -package blocksync - -import ( - "fmt" - "reflect" - "time" - - "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/p2p" - bcproto "github.com/cometbft/cometbft/proto/tendermint/blocksync" - sm "github.com/cometbft/cometbft/state" - "github.com/cometbft/cometbft/store" - "github.com/cometbft/cometbft/types" -) - -const ( - // BlocksyncChannel is a channel for blocks and status updates (`BlockStore` height) - BlocksyncChannel = byte(0x40) - - trySyncIntervalMS = 10 - - // stop syncing when last block's time is - // within this much of the system time. 
- // stopSyncingDurationMinutes = 10 - - // ask for best height every 10s - statusUpdateIntervalSeconds = 10 - // check if we should switch to consensus reactor - switchToConsensusIntervalSeconds = 1 -) - -type consensusReactor interface { - // for when we switch from blocksync reactor and block sync to - // the consensus machine - SwitchToConsensus(state sm.State, skipWAL bool) -} - -type mempoolReactor interface { - // for when we finish doing block sync or state sync - EnableInOutTxs() -} - -type peerError struct { - err error - peerID p2p.ID -} - -func (e peerError) Error() string { - return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error()) -} - -// Reactor handles long-term catchup syncing. -type Reactor struct { - p2p.BaseReactor - - // immutable - initialState sm.State - - blockExec *sm.BlockExecutor - store sm.BlockStore - pool *BlockPool - blockSync bool - - requestsCh <-chan BlockRequest - errorsCh <-chan peerError - - switchToConsensusMs int - - metrics *Metrics -} - -// NewReactor returns new reactor instance. -func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, - blockSync bool, metrics *Metrics, offlineStateSyncHeight int64, -) *Reactor { - storeHeight := store.Height() - if storeHeight == 0 { - // If state sync was performed offline and the stores were bootstrapped to height H - // the state store's lastHeight will be H while blockstore's Height and Base are still 0 - // 1. This scenario should not lead to a panic in this case, which is indicated by - // having a OfflineStateSyncHeight > 0 - // 2. We need to instruct the blocksync reactor to start fetching blocks from H+1 - // instead of 0. - storeHeight = offlineStateSyncHeight - } - if state.LastBlockHeight != storeHeight { - panic(fmt.Sprintf("state (%v) and store (%v) height mismatch, stores were left in an inconsistent state", state.LastBlockHeight, - storeHeight)) - } - requestsCh := make(chan BlockRequest, maxTotalRequesters) - - const capacity = 1000 // must be bigger than peers count - errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock - - startHeight := storeHeight + 1 - if startHeight == 1 { - startHeight = state.InitialHeight - } - pool := NewBlockPool(startHeight, requestsCh, errorsCh) - - bcR := &Reactor{ - initialState: state, - blockExec: blockExec, - store: store, - pool: pool, - blockSync: blockSync, - requestsCh: requestsCh, - errorsCh: errorsCh, - metrics: metrics, - } - bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR) - return bcR -} - -// SetLogger implements service.Service by setting the logger on reactor and pool. -func (bcR *Reactor) SetLogger(l log.Logger) { - bcR.BaseService.Logger = l - bcR.pool.Logger = l -} - -// OnStart implements service.Service. -func (bcR *Reactor) OnStart() error { - if bcR.blockSync { - err := bcR.pool.Start() - if err != nil { - return err - } - go bcR.poolRoutine(false) - } - return nil -} - -// SwitchToBlockSync is called by the state sync reactor when switching to block sync. -func (bcR *Reactor) SwitchToBlockSync(state sm.State) error { - bcR.blockSync = true - bcR.initialState = state - - bcR.pool.height = state.LastBlockHeight + 1 - err := bcR.pool.Start() - if err != nil { - return err - } - go bcR.poolRoutine(true) - return nil -} - -// OnStop implements service.Service. 
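Constructing and wiring the reactor follows the usual reactor/switch pattern; a minimal sketch, assuming a prepared state, block executor, store, switch and logger are already in scope (these are the same calls the tests later in this diff make):

	reactor := NewReactor(state.Copy(), blockExec, blockStore,
		true /* blockSync */, NopMetrics(), 0 /* offlineStateSyncHeight */)
	reactor.SetLogger(logger.With("module", "blocksync"))
	sw.AddReactor("BLOCKSYNC", reactor)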
-func (bcR *Reactor) OnStop() { - if bcR.blockSync { - if err := bcR.pool.Stop(); err != nil { - bcR.Logger.Error("Error stopping pool", "err", err) - } - } -} - -// GetChannels implements Reactor -func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlocksyncChannel, - Priority: 5, - SendQueueCapacity: 1000, - RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: MaxMsgSize, - MessageType: &bcproto.Message{}, - }, - } -} - -// AddPeer implements Reactor by sending our state to peer. -func (bcR *Reactor) AddPeer(peer p2p.Peer) { - peer.Send(p2p.Envelope{ - ChannelID: BlocksyncChannel, - Message: &bcproto.StatusResponse{ - Base: bcR.store.Base(), - Height: bcR.store.Height(), - }, - }) - // it's OK if send fails. will try later in poolRoutine - - // peer is added to the pool once we receive the first - // bcStatusResponseMessage from the peer and call pool.SetPeerRange -} - -// RemovePeer implements Reactor by removing peer from the pool. -func (bcR *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { - bcR.pool.RemovePeer(peer.ID()) -} - -// respondToPeer loads a block and sends it to the requesting peer, -// if we have it. Otherwise, we'll respond saying we don't have it. -func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) { - block := bcR.store.LoadBlock(msg.Height) - if block == nil { - bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) - return src.TrySend(p2p.Envelope{ - ChannelID: BlocksyncChannel, - Message: &bcproto.NoBlockResponse{Height: msg.Height}, - }) - } - - state, err := bcR.blockExec.Store().Load() - if err != nil { - bcR.Logger.Error("loading state", "err", err) - return false - } - var extCommit *types.ExtendedCommit - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(msg.Height) { - extCommit = bcR.store.LoadBlockExtendedCommit(msg.Height) - if extCommit == nil { - bcR.Logger.Error("found block in store with no extended commit", "block", block) - return false - } - } - - bl, err := block.ToProto() - if err != nil { - bcR.Logger.Error("could not convert msg to protobuf", "err", err) - return false - } - - return src.TrySend(p2p.Envelope{ - ChannelID: BlocksyncChannel, - Message: &bcproto.BlockResponse{ - Block: bl, - ExtCommit: extCommit.ToProto(), - }, - }) -} - -// Receive implements Reactor by handling 4 types of messages (look below). -func (bcR *Reactor) Receive(e p2p.Envelope) { - if err := ValidateMsg(e.Message); err != nil { - bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) - bcR.Switch.StopPeerForError(e.Src, err) - return - } - - bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message) - - switch msg := e.Message.(type) { - case *bcproto.BlockRequest: - bcR.respondToPeer(msg, e.Src) - case *bcproto.BlockResponse: - bi, err := types.BlockFromProto(msg.Block) - if err != nil { - bcR.Logger.Error("Block content is invalid", "err", err) - return - } - var extCommit *types.ExtendedCommit - if msg.ExtCommit != nil { - var err error - extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit) - if err != nil { - bcR.Logger.Error("failed to convert extended commit from proto", - "peer", e.Src, - "err", err) - return - } - } - - if err := bcR.pool.AddBlock(e.Src.ID(), bi, extCommit, msg.Block.Size()); err != nil { - bcR.Logger.Error("failed to add block", "err", err) - } - case *bcproto.StatusRequest: - // Send peer our state. 
- e.Src.TrySend(p2p.Envelope{ - ChannelID: BlocksyncChannel, - Message: &bcproto.StatusResponse{ - Height: bcR.store.Height(), - Base: bcR.store.Base(), - }, - }) - case *bcproto.StatusResponse: - // Got a peer status. Unverified. - bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height) - case *bcproto.NoBlockResponse: - bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) - default: - bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) - } -} - -// Handle messages from the poolReactor telling the reactor what to do. -// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! -func (bcR *Reactor) poolRoutine(stateSynced bool) { - bcR.metrics.Syncing.Set(1) - defer bcR.metrics.Syncing.Set(0) - - trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) - defer trySyncTicker.Stop() - - statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) - defer statusUpdateTicker.Stop() - - if bcR.switchToConsensusMs == 0 { - bcR.switchToConsensusMs = switchToConsensusIntervalSeconds * 1000 - } - switchToConsensusTicker := time.NewTicker(time.Duration(bcR.switchToConsensusMs) * time.Millisecond) - defer switchToConsensusTicker.Stop() - - blocksSynced := uint64(0) - - chainID := bcR.initialState.ChainID - state := bcR.initialState - - lastHundred := time.Now() - lastRate := 0.0 - - didProcessCh := make(chan struct{}, 1) - - initialCommitHasExtensions := (bcR.initialState.LastBlockHeight > 0 && bcR.store.LoadBlockExtendedCommit(bcR.initialState.LastBlockHeight) != nil) - - go func() { - for { - select { - case <-bcR.Quit(): - return - case <-bcR.pool.Quit(): - return - case request := <-bcR.requestsCh: - peer := bcR.Switch.Peers().Get(request.PeerID) - if peer == nil { - continue - } - queued := peer.TrySend(p2p.Envelope{ - ChannelID: BlocksyncChannel, - Message: &bcproto.BlockRequest{Height: request.Height}, - }) - if !queued { - bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height) - } - case err := <-bcR.errorsCh: - peer := bcR.Switch.Peers().Get(err.peerID) - if peer != nil { - bcR.Switch.StopPeerForError(peer, err) - } - - case <-statusUpdateTicker.C: - // ask for status updates - go bcR.BroadcastStatusRequest() - - } - } - }() - -FOR_LOOP: - for { - select { - case <-switchToConsensusTicker.C: - height, numPending, lenRequesters := bcR.pool.GetStatus() - outbound, inbound, _ := bcR.Switch.NumPeers() - bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters, - "outbound", outbound, "inbound", inbound, "lastHeight", state.LastBlockHeight) - - // The "if" statement below is a bit confusing, so here is a breakdown - // of its logic and purpose: - // - // If we are at genesis (no block in the chain), we don't need VoteExtensions - // because the first block's LastCommit is empty anyway. - // - // If VoteExtensions were disabled for the previous height then we don't need - // VoteExtensions. - // - // If we have sync'd at least one block, then we are guaranteed to have extensions - // if we need them by the logic inside loop FOR_LOOP: it requires that the blocks - // it fetches have extensions if extensions were enabled during the height. - // - // If we already had extensions for the initial height (e.g. we are recovering), - // then we are guaranteed to have extensions for the last block (if required) even - // if we did not blocksync any block. 
- // - missingExtension := true - if state.LastBlockHeight == 0 || - !state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) || - blocksSynced > 0 || - initialCommitHasExtensions { - missingExtension = false - } - - // If extensions are required but we don't have them yet, we cannot switch to consensus yet. - if missingExtension { - bcR.Logger.Info( - "no extended commit yet", - "height", height, - "last_block_height", state.LastBlockHeight, - "initial_height", state.InitialHeight, - "max_peer_height", bcR.pool.MaxPeerHeight(), - ) - continue FOR_LOOP - } - if bcR.pool.IsCaughtUp() { - bcR.Logger.Info("Time to switch to consensus mode!", "height", height) - if err := bcR.pool.Stop(); err != nil { - bcR.Logger.Error("Error stopping pool", "err", err) - } - if memR, ok := bcR.Switch.Reactor("MEMPOOL").(mempoolReactor); ok { - memR.EnableInOutTxs() - } - if conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor); ok { - conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) - } - // else { - // should only happen during testing - // } - - break FOR_LOOP - } - - case <-trySyncTicker.C: // chan time - select { - case didProcessCh <- struct{}{}: - default: - } - - case <-didProcessCh: - // NOTE: It is a subtle mistake to process more than a single block - // at a time (e.g. 10) here, because we only TrySend 1 request per - // loop. The ratio mismatch can result in starving of blocks, a - // sudden burst of requests and responses, and repeat. - // Consequently, it is better to split these routines rather than - // coupling them as it's written here. TODO uncouple from request - // routine. - - // See if there are any blocks to sync. - first, second, extCommit := bcR.pool.PeekTwoBlocks() - if first == nil || second == nil { - // we need to have fetched two consecutive blocks in order to - // perform blocksync verification - continue FOR_LOOP - } - // Some sanity checks on heights - if state.LastBlockHeight > 0 && state.LastBlockHeight+1 != first.Height { - // Panicking because the block pool's height MUST keep consistent with the state; the block pool is totally under our control - panic(fmt.Errorf("peeked first block has unexpected height; expected %d, got %d", state.LastBlockHeight+1, first.Height)) - } - if first.Height+1 != second.Height { - // Panicking because this is an obvious bug in the block pool, which is totally under our control - panic(fmt.Errorf("heights of first and second block are not consecutive; expected %d, got %d", first.Height+1, second.Height)) - } - if extCommit == nil && state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631 - panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height)) - } - - // Try again quickly next loop. - didProcessCh <- struct{}{} - - firstParts, err := first.MakePartSet(types.BlockPartSizeBytes) - if err != nil { - bcR.Logger.Error("failed to make part set", - "height", first.Height, - "err", err.Error()) - break FOR_LOOP - } - firstPartSetHeader := firstParts.Header() - firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} - // Finally, verify the first block using the second's commit - // NOTE: we can probably make this more efficient, but note that calling - // first.Hash() doesn't verify the tx contents, so MakePartSet() is - // currently necessary. - // TODO(sergio): Should we also validate against the extended commit? 
- err = state.Validators.VerifyCommitLight( - chainID, firstID, first.Height, second.LastCommit) - - if err == nil { - // validate the block before we persist it - err = bcR.blockExec.ValidateBlock(state, first) - } - if err == nil { - // if vote extensions were required at this height, ensure they exist. - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - err = extCommit.EnsureExtensions(true) - } else { - if extCommit != nil { - err = fmt.Errorf("received non-nil extCommit for height %d (extensions disabled)", first.Height) - } - } - } - if err != nil { - bcR.Logger.Error("Error in validation", "err", err) - peerID := bcR.pool.RedoRequest(first.Height) - peer := bcR.Switch.Peers().Get(peerID) - if peer != nil { - // NOTE: we've already removed the peer's request, but we - // still need to clean up the rest. - bcR.Switch.StopPeerForError(peer, ErrReactorValidation{Err: err}) - } - peerID2 := bcR.pool.RedoRequest(second.Height) - peer2 := bcR.Switch.Peers().Get(peerID2) - if peer2 != nil && peer2 != peer { - // NOTE: we've already removed the peer's request, but we - // still need to clean up the rest. - bcR.Switch.StopPeerForError(peer2, ErrReactorValidation{Err: err}) - } - continue FOR_LOOP - } - - bcR.pool.PopRequest() - - // TODO: batch saves so we don't persist to disk every block - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) { - bcR.store.SaveBlockWithExtendedCommit(first, firstParts, extCommit) - } else { - // We use LastCommit here instead of extCommit. extCommit is not - // guaranteed to be populated by the peer if extensions are not enabled. - // Currently, the peer should provide an extCommit even if the vote extension data are absent - // but this may change so using second.LastCommit is safer. - bcR.store.SaveBlock(first, firstParts, second.LastCommit) - } - - // TODO: same thing for app - but we would need a way to - // get the hash without persisting the state - state, err = bcR.blockExec.ApplyBlock(state, firstID, first) - if err != nil { - // TODO This is bad, are we a zombie? - panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } - bcR.metrics.recordBlockMetrics(first) - blocksSynced++ - - if blocksSynced%100 == 0 { - lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - bcR.Logger.Info("Block Sync Rate", "height", bcR.pool.height, - "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate) - lastHundred = time.Now() - } - - continue FOR_LOOP - - case <-bcR.Quit(): - break FOR_LOOP - } - } -} - -// BroadcastStatusRequest broadcasts `BlockStore` base and height. 
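The blocks/s figure logged every 100 blocks above is an exponential moving average (0.9 of the previous value plus 0.1 of the latest window's rate). A toy illustration with invented window rates shows how it damps fluctuations:

	rate := 0.0
	for _, window := range []float64{120, 80, 100} { // blocks/s per 100-block window
		rate = 0.9*rate + 0.1*window
	}
	// 0 -> 12 -> 18.8 -> 26.92: the average ramps toward the true rate
	// gradually instead of jumping with every window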
-func (bcR *Reactor) BroadcastStatusRequest() { - bcR.Switch.Broadcast(p2p.Envelope{ - ChannelID: BlocksyncChannel, - Message: &bcproto.StatusRequest{}, - }) -} diff --git a/blocksync/reactor_test.go b/blocksync/reactor_test.go deleted file mode 100644 index cd6e640ab75..00000000000 --- a/blocksync/reactor_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package blocksync - -import ( - "fmt" - "os" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - dbm "github.com/cometbft/cometbft-db" - - abci "github.com/cometbft/cometbft/abci/types" - cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/internal/test" - "github.com/cometbft/cometbft/libs/log" - mpmocks "github.com/cometbft/cometbft/mempool/mocks" - "github.com/cometbft/cometbft/p2p" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/cometbft/cometbft/proxy" - sm "github.com/cometbft/cometbft/state" - "github.com/cometbft/cometbft/store" - "github.com/cometbft/cometbft/types" - cmttime "github.com/cometbft/cometbft/types/time" -) - -var config *cfg.Config - -func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) { - validators := make([]types.GenesisValidator, numValidators) - privValidators := make([]types.PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privVal := types.RandValidator(randPower, minPower) - validators[i] = types.GenesisValidator{ - PubKey: val.PubKey, - Power: val.VotingPower, - } - privValidators[i] = privVal - } - sort.Sort(types.PrivValidatorsByAddress(privValidators)) - - consPar := types.DefaultConsensusParams() - consPar.ABCI.VoteExtensionsEnableHeight = 1 - return &types.GenesisDoc{ - GenesisTime: cmttime.Now(), - ChainID: test.DefaultTestChainID, - Validators: validators, - ConsensusParams: consPar, - }, privValidators -} - -type ReactorPair struct { - reactor *Reactor - app proxy.AppConns -} - -func newReactor( - t *testing.T, - logger log.Logger, - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, - maxBlockHeight int64, -) ReactorPair { - if len(privVals) != 1 { - panic("only support one validator") - } - - app := abci.NewBaseApplication() - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - - blockDB := dbm.NewMemDB() - stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB, sm.StoreOptions{ - DiscardABCIResponses: false, - }) - blockStore := store.NewBlockStore(blockDB) - - state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) - if err != nil { - panic(fmt.Errorf("error constructing state from genesis file: %w", err)) - } - - mp := &mpmocks.Mempool{} - mp.On("Lock").Return() - mp.On("Unlock").Return() - mp.On("FlushAppConn", mock.Anything).Return(nil) - mp.On("Update", - mock.Anything, - mock.Anything, - mock.Anything, - mock.Anything, - mock.Anything, - mock.Anything).Return(nil) - - // Make the Reactor itself. - // NOTE we have to create and commit the blocks first because - // pool.height is determined from the store. 
- fastSync := true - db := dbm.NewMemDB() - stateStore = sm.NewStore(db, sm.StoreOptions{ - DiscardABCIResponses: false, - }) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mp, sm.EmptyEvidencePool{}, blockStore) - if err = stateStore.Save(state); err != nil { - panic(err) - } - - // The commit we are building for the current height. - seenExtCommit := &types.ExtendedCommit{} - - // let's add some blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastExtCommit := seenExtCommit.Clone() - - thisBlock := state.MakeBlock(blockHeight, nil, lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address) - - thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - // Simulate a commit for the current height - pubKey, err := privVals[0].GetPubKey() - if err != nil { - panic(err) - } - addr := pubKey.Address() - idx, _ := state.Validators.GetByAddress(addr) - vote, err := types.MakeVote( - privVals[0], - thisBlock.Header.ChainID, - idx, - thisBlock.Header.Height, - 0, - cmtproto.PrecommitType, - blockID, - time.Now(), - ) - if err != nil { - panic(err) - } - seenExtCommit = &types.ExtendedCommit{ - Height: vote.Height, - Round: vote.Round, - BlockID: blockID, - ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, - } - - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) - if err != nil { - panic(fmt.Errorf("error apply block: %w", err)) - } - - blockStore.SaveBlockWithExtendedCommit(thisBlock, thisParts, seenExtCommit) - } - - bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics(), 0) - bcReactor.SetLogger(logger.With("module", "blocksync")) - - return ReactorPair{bcReactor, proxyApp} -} - -func TestNoBlockResponse(t *testing.T) { - config = test.ResetTestRoot("blocksync_reactor_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := randGenesisDoc(1, false, 30) - - maxBlockHeight := int64(65) - - reactorPairs := make([]ReactorPair, 2) - - reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight) - reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) - - p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor) - return s - }, p2p.Connect2Switches) - - defer func() { - for _, r := range reactorPairs { - err := r.reactor.Stop() - require.NoError(t, err) - err = r.app.Stop() - require.NoError(t, err) - } - }() - - tests := []struct { - height int64 - existent bool - }{ - {maxBlockHeight + 2, false}, - {10, true}, - {1, true}, - {100, false}, - } - - for { - if reactorPairs[1].reactor.pool.IsCaughtUp() { - break - } - - time.Sleep(10 * time.Millisecond) - } - - assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height()) - - for _, tt := range tests { - block := reactorPairs[1].reactor.store.LoadBlock(tt.height) - if tt.existent { - assert.True(t, block != nil) - } else { - assert.True(t, block == nil) - } - } -} - -// NOTE: This is too hard to test without -// an easy way to add test peer to switch -// or without significant refactoring of the module. -// Alternatively we could actually dial a TCP conn but -// that seems extreme. 
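The reactor tests in this file share one wiring pattern: build reactor pairs where one side already holds blocks and the other must sync them, then join the pairs through in-memory switches. Condensed from TestNoBlockResponse above:

	genDoc, privVals := randGenesisDoc(1, false, 30)
	pairs := []ReactorPair{
		newReactor(t, log.TestingLogger(), genDoc, privVals, 65), // holds blocks
		newReactor(t, log.TestingLogger(), genDoc, privVals, 0),  // must sync
	}
	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKSYNC", pairs[i].reactor)
		return s
	}, p2p.Connect2Switches)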
-func TestBadBlockStopsPeer(t *testing.T) { - config = test.ResetTestRoot("blocksync_reactor_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := randGenesisDoc(1, false, 30) - - maxBlockHeight := int64(148) - - // Other chain needs a different validator set - otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30) - otherChain := newReactor(t, log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight) - - defer func() { - err := otherChain.reactor.Stop() - require.Error(t, err) - err = otherChain.app.Stop() - require.NoError(t, err) - }() - - reactorPairs := make([]ReactorPair, 4) - - reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight) - reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) - reactorPairs[2] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) - reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) - - switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor) - return s - }, p2p.Connect2Switches) - - defer func() { - for _, r := range reactorPairs { - err := r.reactor.Stop() - require.NoError(t, err) - - err = r.app.Stop() - require.NoError(t, err) - } - }() - - for { - time.Sleep(1 * time.Second) - caughtUp := true - for _, r := range reactorPairs { - if !r.reactor.pool.IsCaughtUp() { - caughtUp = false - } - } - if caughtUp { - break - } - } - - // at this time, reactors[0-3] is the newest - assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size()) - - // Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a mutex is a data - // race, but can't be easily avoided. - reactorPairs[3].reactor.store = otherChain.reactor.store - - lastReactorPair := newReactor(t, log.TestingLogger(), genDoc, privVals, 0) - reactorPairs = append(reactorPairs, lastReactorPair) - - switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor) - return s - }, p2p.Connect2Switches)...) - - for i := 0; i < len(reactorPairs)-1; i++ { - p2p.Connect2Switches(switches, i, len(reactorPairs)-1) - } - - for { - if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 { - break - } - - time.Sleep(1 * time.Second) - } - - assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1) -} - -func TestCheckSwitchToConsensusLastHeightZero(t *testing.T) { - const maxBlockHeight = int64(45) - - config = test.ResetTestRoot("blocksync_reactor_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := randGenesisDoc(1, false, 30) - - reactorPairs := make([]ReactorPair, 1, 2) - reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) - reactorPairs[0].reactor.switchToConsensusMs = 50 - defer func() { - for _, r := range reactorPairs { - err := r.reactor.Stop() - require.NoError(t, err) - err = r.app.Stop() - require.NoError(t, err) - } - }() - - reactorPairs = append(reactorPairs, newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)) - - var switches []*p2p.Switch - for _, r := range reactorPairs { - switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("BLOCKSYNC", r.reactor) - return s - }, p2p.Connect2Switches)...) 
- } - - time.Sleep(60 * time.Millisecond) - - // Connect both switches - p2p.Connect2Switches(switches, 0, 1) - - startTime := time.Now() - for { - time.Sleep(20 * time.Millisecond) - caughtUp := true - for _, r := range reactorPairs { - if !r.reactor.pool.IsCaughtUp() { - caughtUp = false - break - } - } - if caughtUp { - break - } - if time.Since(startTime) > 90*time.Second { - msg := "timeout: reactors didn't catch up;" - for i, r := range reactorPairs { - h, p, lr := r.reactor.pool.GetStatus() - c := r.reactor.pool.IsCaughtUp() - msg += fmt.Sprintf(" reactor#%d (h %d, p %d, lr %d, c %t);", i, h, p, lr, c) - } - require.Fail(t, msg) - } - } - - // -1 because of "-1" in IsCaughtUp - // -1 pool.height points to the _next_ height - // -1 because we measure height of block store - const maxDiff = 3 - for _, r := range reactorPairs { - assert.GreaterOrEqual(t, r.reactor.store.Height(), maxBlockHeight-maxDiff) - } -} diff --git a/buf.gen.yaml b/buf.gen.yaml index a36032410ce..4581fa7e320 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -1,9 +1,10 @@ version: v1 plugins: - - name: gogofaster - out: ./proto/ + - name: gocosmos + out: ./api/ opt: - Mgoogle/protobuf/timestamp.proto=github.com/cosmos/gogoproto/types - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration + - Mgoogle/protobuf/wrappers.proto=github.com/cosmos/gogoproto/types - plugins=grpc - paths=source_relative diff --git a/cmd/cometbft/commands/compact.go b/cmd/cometbft/commands/compact.go index 327e1dbca68..8ccb58b7ec5 100644 --- a/cmd/cometbft/commands/compact.go +++ b/cmd/cometbft/commands/compact.go @@ -25,7 +25,7 @@ the planned refactor to the storage engine. Currently, only GoLevelDB is supported. `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if config.DBBackend != "goleveldb" { return errors.New("compaction is currently only supported with goleveldb") } @@ -43,11 +43,10 @@ func compactGoLevelDBs(rootDir string, logger log.Logger) { wg := sync.WaitGroup{} for _, dbName := range dbNames { - dbName := dbName wg.Add(1) - go func() { + go func(name string) { defer wg.Done() - dbPath := filepath.Join(rootDir, "data", dbName+".db") + dbPath := filepath.Join(rootDir, "data", name+".db") store, err := leveldb.OpenFile(dbPath, o) if err != nil { logger.Error("failed to initialize cometbft db", "path", dbPath, "err", err) @@ -61,7 +60,7 @@ func compactGoLevelDBs(rootDir string, logger log.Logger) { if err != nil { logger.Error("failed to compact cometbft db", "path", dbPath, "err", err) } - }() + }(dbName) } wg.Wait() } diff --git a/cmd/cometbft/commands/config/common.go b/cmd/cometbft/commands/config/common.go new file mode 100644 index 00000000000..48fd09dfe13 --- /dev/null +++ b/cmd/cometbft/commands/config/common.go @@ -0,0 +1,18 @@ +package config + +import ( + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/cometbft/cometbft/cmd/cometbft/commands" + cfg "github.com/cometbft/cometbft/config" +) + +func defaultConfigPath(cmd *cobra.Command) string { + home, err := commands.ConfigHome(cmd) + if err != nil { + return "" + } + return filepath.Join(home, cfg.DefaultConfigDir, cfg.DefaultConfigFileName) +} diff --git a/cmd/cometbft/commands/config/config.go b/cmd/cometbft/commands/config/config.go new file mode 100644 index 00000000000..32765cb2463 --- /dev/null +++ b/cmd/cometbft/commands/config/config.go @@ -0,0 +1,24 @@ +package config + +import ( + "github.com/spf13/cobra" +) + +// Command contains all the confix commands +// These 
commands can be used to interactively update a config value. +func Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "Utilities for managing configuration", + } + + cmd.AddCommand( + MigrateCommand(), + DiffCommand(), + GetCommand(), + SetCommand(), + ViewCommand(), + ) + + return cmd +} diff --git a/cmd/cometbft/commands/config/diff.go b/cmd/cometbft/commands/config/diff.go new file mode 100644 index 00000000000..17660c73782 --- /dev/null +++ b/cmd/cometbft/commands/config/diff.go @@ -0,0 +1,56 @@ +package config + +import ( + "fmt" + + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + "github.com/cometbft/cometbft/internal/confix" +) + +// DiffCommand creates a new command for comparing configuration files. +func DiffCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "diff [target-version] ", + Short: "Outputs all config values that are different from the default.", + Long: "This command compares the configuration file with the defaults and outputs any differences.", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var configPath string + if len(args) > 1 { + configPath = args[1] + } else { + configPath = defaultConfigPath(cmd) + } + + targetVersion := args[0] + if _, ok := confix.Migrations[targetVersion]; !ok { + return fmt.Errorf("unknown version %q, supported versions are: %q", targetVersion, maps.Keys(confix.Migrations)) + } + + targetVersionFile, err := confix.LoadLocalConfig(targetVersion + ".toml") + if err != nil { + return fmt.Errorf("failed to load internal config: %w", err) + } + + rawFile, err := confix.LoadConfig(configPath) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + diff := confix.DiffValues(rawFile, targetVersionFile) + if len(diff) == 0 { + fmt.Print("All config values are the same as the defaults.\n") + return nil + } + + fmt.Print("The following config values are different from the defaults:\n") + + confix.PrintDiff(cmd.OutOrStdout(), diff) + return nil + }, + } + + return cmd +} diff --git a/cmd/cometbft/commands/config/migrate.go b/cmd/cometbft/commands/config/migrate.go new file mode 100644 index 00000000000..8e88c9fe43e --- /dev/null +++ b/cmd/cometbft/commands/config/migrate.go @@ -0,0 +1,69 @@ +package config + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + "github.com/cometbft/cometbft/internal/confix" +) + +var ( + FlagStdOut bool + FlagVerbose bool + FlagSkipValidate bool +) + +func MigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate [target-version] ", + Short: "Migrate configuration file to the specified version", + Long: `Migrate the contents of the configuration to the specified version. +The output is written in-place unless --stdout is provided. 
+In case of any error in updating the file, no output is written.`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var configPath string + if len(args) > 1 { + configPath = args[1] + } else { + configPath = defaultConfigPath(cmd) + } + + targetVersion := args[0] + plan, ok := confix.Migrations[targetVersion] + if !ok { + return fmt.Errorf("unknown version %q, supported versions are: %q", targetVersion, maps.Keys(confix.Migrations)) + } + + rawFile, err := confix.LoadConfig(configPath) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + ctx := context.Background() + if FlagVerbose { + ctx = confix.WithLogWriter(ctx, cmd.ErrOrStderr()) + } + + outputPath := configPath + if FlagStdOut { + outputPath = "" + } + + if err := confix.Upgrade(ctx, plan(rawFile, targetVersion), configPath, outputPath, FlagSkipValidate); err != nil { + return fmt.Errorf("failed to migrate config: %w", err) + } + + return nil + }, + } + + cmd.Flags().BoolVar(&FlagStdOut, "stdout", false, "print the updated config to stdout") + cmd.Flags().BoolVar(&FlagVerbose, "verbose", false, "log changes to stderr") + cmd.Flags().BoolVar(&FlagSkipValidate, "skip-validate", false, "skip configuration validation (allows to migrate unknown configurations)") + + return cmd +} diff --git a/cmd/cometbft/commands/config/mutate.go b/cmd/cometbft/commands/config/mutate.go new file mode 100644 index 00000000000..c86d56c44ff --- /dev/null +++ b/cmd/cometbft/commands/config/mutate.go @@ -0,0 +1,144 @@ +package config + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" + "github.com/spf13/cobra" + + "github.com/cometbft/cometbft/internal/confix" +) + +// SetCommand returns a CLI command to interactively update an application config value. +func SetCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "set [config] [key] [value]", + Short: "Set a config value", + Long: "Set a config value. The [config] is an optional absolute path to the config file (default: `~/.cometbft/config/config.toml`)", + Args: cobra.MinimumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + var ( + filename, inputValue string + key []string + ) + switch len(args) { + case 2: + { + filename = defaultConfigPath(cmd) + // parse key e.g mempool.size -> [mempool, size] + key = strings.Split(args[0], ".") + inputValue = args[1] + } + case 3: + { + filename, inputValue = args[0], args[2] + key = strings.Split(args[1], ".") + } + default: + return errors.New("expected 2 or 3 arguments") + } + + plan := transform.Plan{ + { + Desc: fmt.Sprintf("update %q=%q in %s", key, inputValue, filename), + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + results := doc.Find(key...) 
+ if len(results) == 0 { + return fmt.Errorf("key %q not found", key) + } else if len(results) > 1 { + return fmt.Errorf("key %q is ambiguous", key) + } + + value, err := parser.ParseValue(inputValue) + if err != nil { + value = parser.MustValue(`"` + inputValue + `"`) + } + + if ok := transform.InsertMapping(results[0].Section, &parser.KeyValue{ + Block: results[0].Block, + Name: results[0].Name, + Value: value, + }, true); !ok { + return errors.New("failed to set value") + } + + return nil + }), + }, + } + + outputPath := filename + if FlagStdOut { + outputPath = "" + } + + ctx := cmd.Context() + if FlagVerbose { + ctx = confix.WithLogWriter(ctx, cmd.ErrOrStderr()) + } + + return confix.Upgrade(ctx, plan, filename, outputPath, FlagSkipValidate) + }, + } + + cmd.Flags().BoolVar(&FlagStdOut, "stdout", false, "print the updated config to stdout") + cmd.Flags().BoolVarP(&FlagVerbose, "verbose", "v", false, "log changes to stderr") + cmd.Flags().BoolVarP(&FlagSkipValidate, "skip-validate", "s", false, "skip configuration validation (allows to mutate unknown configurations)") + + return cmd +} + +// GetCommand returns a CLI command to interactively get an application config value. +func GetCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "get [config] [key]", + Short: "Get a config value", + Long: "Get a config value. The [config] is an optional absolute path to the config file (default: `~/.cometbft/config/config.toml`)", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var ( + filename, key string + keys []string + ) + switch len(args) { + case 1: + { + filename = defaultConfigPath(cmd) + // parse key e.g mempool.size -> [mempool, size] + key = args[0] + keys = strings.Split(key, ".") + } + case 2: + { + filename = args[0] + key = args[1] + keys = strings.Split(key, ".") + } + default: + return errors.New("expected 1 or 2 arguments") + } + + doc, err := confix.LoadConfig(filename) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + results := doc.Find(keys...) + if len(results) == 0 { + return fmt.Errorf("key %q not found", key) + } else if len(results) > 1 { + return fmt.Errorf("key %q is ambiguous", key) + } + + fmt.Printf("%s\n", results[0].Value.String()) + return nil + }, + } + + return cmd +} diff --git a/cmd/cometbft/commands/config/view.go b/cmd/cometbft/commands/config/view.go new file mode 100644 index 00000000000..3940f5e8bb7 --- /dev/null +++ b/cmd/cometbft/commands/config/view.go @@ -0,0 +1,52 @@ +package config + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/pelletier/go-toml/v2" + "github.com/spf13/cobra" +) + +func ViewCommand() *cobra.Command { + flagOutputFormat := "output-format" + + cmd := &cobra.Command{ + Use: "view [config]", + Short: "View the config file", + Long: "View the config file. 
The [config] is an optional absolute path to the config file (default: `~/.cometbft/config/config.toml`)", + RunE: func(cmd *cobra.Command, args []string) error { + var filename string + if len(args) > 0 { + filename = args[0] + } else { + filename = defaultConfigPath(cmd) + } + + file, err := os.ReadFile(filename) + if err != nil { + return err + } + + if format, _ := cmd.Flags().GetString(flagOutputFormat); format == "toml" { + cmd.Println(string(file)) + return nil + } + + var v any + if err := toml.Unmarshal(file, &v); err != nil { + return fmt.Errorf("failed to decode config file: %w", err) + } + + e := json.NewEncoder(cmd.OutOrStdout()) + e.SetIndent("", " ") + return e.Encode(v) + }, + } + + // output flag + cmd.Flags().String(flagOutputFormat, "toml", "Output format (json|toml)") + + return cmd +} diff --git a/cmd/cometbft/commands/debug/debug.go b/cmd/cometbft/commands/debug/debug.go index 2476968048b..c722777347e 100644 --- a/cmd/cometbft/commands/debug/debug.go +++ b/cmd/cometbft/commands/debug/debug.go @@ -17,7 +17,7 @@ var ( flagProfAddr = "pprof-laddr" flagFrequency = "frequency" - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewLogger(os.Stdout) ) // DebugCmd defines the root command containing subcommands that assist in diff --git a/cmd/cometbft/commands/debug/dump.go b/cmd/cometbft/commands/debug/dump.go index e38462933dd..e0bb4331ce9 100644 --- a/cmd/cometbft/commands/debug/dump.go +++ b/cmd/cometbft/commands/debug/dump.go @@ -13,6 +13,7 @@ import ( cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/cli" rpchttp "github.com/cometbft/cometbft/rpc/client/http" + cmttime "github.com/cometbft/cometbft/types/time" ) var dumpCmd = &cobra.Command{ @@ -79,7 +80,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { } func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { - start := time.Now().UTC() + start := cmttime.Now() tmpDir, err := os.MkdirTemp(outDir, "cometbft_debug_tmp") if err != nil { @@ -126,7 +127,7 @@ func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { } } - outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339))) + outFile := filepath.Join(outDir, start.Format(time.RFC3339)+".zip") if err := zipDir(tmpDir, outFile); err != nil { logger.Error("failed to create and compress archive", "file", outFile, "error", err) } diff --git a/cmd/cometbft/commands/debug/io.go b/cmd/cometbft/commands/debug/io.go index 01a14ea710f..759001d39e5 100644 --- a/cmd/cometbft/commands/debug/io.go +++ b/cmd/cometbft/commands/debug/io.go @@ -67,7 +67,6 @@ func zipDir(src, dest string) error { _, err = io.Copy(headerWriter, file) return err }) - } // copyFile copies a file from src to dest and returns an error upon failure. The @@ -101,14 +100,14 @@ func copyFile(src, dest string) error { return os.Chmod(dest, srcInfo.Mode()) } -// writeStateToFile pretty JSON encodes an object and writes it to file composed +// writeStateJSONToFile pretty JSON encodes an object and writes it to file composed // of dir and filename. It returns an error upon failure to encode or write to // file. 
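// The dump is written with 0o600 permissions, i.e. readable and writable by
// the file owner only.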
-func writeStateJSONToFile(state interface{}, dir, filename string) error { +func writeStateJSONToFile(state any, dir, filename string) error { stateJSON, err := json.MarshalIndent(state, "", " ") if err != nil { return fmt.Errorf("failed to encode state dump: %w", err) } - return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, 0o600) } diff --git a/cmd/cometbft/commands/debug/util.go b/cmd/cometbft/commands/debug/util.go index 0972a03a1da..ea24a8fc3c9 100644 --- a/cmd/cometbft/commands/debug/util.go +++ b/cmd/cometbft/commands/debug/util.go @@ -6,7 +6,6 @@ import ( "io" "net/http" "os" - "path" "path/filepath" cfg "github.com/cometbft/cometbft/config" @@ -67,7 +66,7 @@ func copyConfig(home, dir string) error { func dumpProfile(dir, addr, profile string, debug int) error { endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug) - //nolint:gosec,nolintlint + //nolint:gosec,nolintlint,noctx resp, err := http.Get(endpoint) if err != nil { return fmt.Errorf("failed to query for %s profile: %w", profile, err) @@ -79,5 +78,5 @@ func dumpProfile(dir, addr, profile string, debug int) error { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(filepath.Join(dir, profile+".out"), body, 0o600) } diff --git a/cmd/cometbft/commands/gen_node_key.go b/cmd/cometbft/commands/gen_node_key.go index 7954257c9e6..6b20796ca38 100644 --- a/cmd/cometbft/commands/gen_node_key.go +++ b/cmd/cometbft/commands/gen_node_key.go @@ -5,8 +5,8 @@ import ( "github.com/spf13/cobra" - cmtos "github.com/cometbft/cometbft/libs/os" - "github.com/cometbft/cometbft/p2p" + cmtos "github.com/cometbft/cometbft/internal/os" + "github.com/cometbft/cometbft/p2p/nodekey" ) // GenNodeKeyCmd allows the generation of a node key. 
It prints node's ID to @@ -24,10 +24,10 @@ func genNodeKey(*cobra.Command, []string) error { return fmt.Errorf("node key at %s already exists", nodeKeyFile) } - nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile) + nk, err := nodekey.LoadOrGen(nodeKeyFile) if err != nil { return err } - fmt.Println(nodeKey.ID()) + fmt.Println(nk.ID()) return nil } diff --git a/cmd/cometbft/commands/gen_validator.go b/cmd/cometbft/commands/gen_validator.go index 072b26576dd..198a3d440ee 100644 --- a/cmd/cometbft/commands/gen_validator.go +++ b/cmd/cometbft/commands/gen_validator.go @@ -5,6 +5,8 @@ import ( "github.com/spf13/cobra" + "github.com/cometbft/cometbft/crypto/ed25519" + kt "github.com/cometbft/cometbft/internal/keytypes" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/privval" ) @@ -15,15 +17,24 @@ var GenValidatorCmd = &cobra.Command{ Use: "gen-validator", Aliases: []string{"gen_validator"}, Short: "Generate new validator keypair", - Run: genValidator, + Long: `Generate new validator keypair using an optional key-type (default: "ed25519").`, + RunE: genValidator, } -func genValidator(*cobra.Command, []string) { - pv := privval.GenFilePV("", "") +func init() { + GenValidatorCmd.Flags().StringVarP(&keyType, "key-type", "k", ed25519.KeyType, fmt.Sprintf("private key type (one of %s)", kt.SupportedKeyTypesStr())) +} + +func genValidator(*cobra.Command, []string) error { + pv, err := privval.GenFilePV("", "", genPrivKeyFromFlag) + if err != nil { + return fmt.Errorf("cannot generate file pv: %w", err) + } jsbz, err := cmtjson.Marshal(pv) if err != nil { - panic(err) + return fmt.Errorf("failed to marshal private validator: %w", err) } fmt.Printf(`%v `, string(jsbz)) + return nil } diff --git a/cmd/cometbft/commands/init.go b/cmd/cometbft/commands/init.go index 8bb572d3303..f6efff121ea 100644 --- a/cmd/cometbft/commands/init.go +++ b/cmd/cometbft/commands/init.go @@ -6,9 +6,11 @@ import ( "github.com/spf13/cobra" cfg "github.com/cometbft/cometbft/config" - cmtos "github.com/cometbft/cometbft/libs/os" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/crypto/ed25519" + kt "github.com/cometbft/cometbft/internal/keytypes" + cmtos "github.com/cometbft/cometbft/internal/os" + cmtrand "github.com/cometbft/cometbft/internal/rand" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/privval" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" @@ -21,6 +23,10 @@ var InitFilesCmd = &cobra.Command{ RunE: initFiles, } +func init() { + InitFilesCmd.Flags().StringVarP(&keyType, "key-type", "k", ed25519.KeyType, fmt.Sprintf("private key type (one of %s)", kt.SupportedKeyTypesStr())) +} + func initFiles(*cobra.Command, []string) error { return initFilesWithConfig(config) } @@ -35,7 +41,11 @@ func initFilesWithConfig(config *cfg.Config) error { logger.Info("Found private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) } else { - pv = privval.GenFilePV(privValKeyFile, privValStateFile) + var err error + pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, genPrivKeyFromFlag) + if err != nil { + return fmt.Errorf("can't generate file pv: %w", err) + } pv.Save() logger.Info("Generated private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) @@ -45,7 +55,7 @@ func initFilesWithConfig(config *cfg.Config) error { if cmtos.FileExists(nodeKeyFile) { logger.Info("Found node key", "path", nodeKeyFile) } else { - if _, err := 
p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
+		if _, err := nodekey.LoadOrGen(nodeKeyFile); err != nil {
			return err
		}
		logger.Info("Generated node key", "path", nodeKeyFile)
diff --git a/cmd/cometbft/commands/inspect.go b/cmd/cometbft/commands/inspect.go
index 2d4c5948094..b47c62ee2f6 100644
--- a/cmd/cometbft/commands/inspect.go
+++ b/cmd/cometbft/commands/inspect.go
@@ -1,15 +1,13 @@
package commands

import (
-	"context"
-	"os"
	"os/signal"
	"syscall"

	"github.com/spf13/cobra"

	cfg "github.com/cometbft/cometbft/config"
-	"github.com/cometbft/cometbft/inspect"
+	"github.com/cometbft/cometbft/internal/inspect"
	"github.com/cometbft/cometbft/state"
	"github.com/cometbft/cometbft/state/indexer/block"
	"github.com/cometbft/cometbft/store"
@@ -38,28 +36,24 @@ func init() {
		String("rpc.laddr", config.RPC.ListenAddress, "RPC listener address. Port required")

	InspectCmd.Flags().
-		String("db-backend",
-			config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
+		String(
+			"db-backend",
+			config.DBBackend,
+			"database backend: goleveldb | rocksdb | badgerdb | pebbledb",
+		)

	InspectCmd.Flags().
		String("db-dir", config.DBPath, "database directory")
}

func runInspect(cmd *cobra.Command, _ []string) error {
-	ctx, cancel := context.WithCancel(cmd.Context())
+	ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM, syscall.SIGINT)
	defer cancel()

-	c := make(chan os.Signal, 1)
-	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
-	go func() {
-		<-c
-		cancel()
-	}()
-
	blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
	if err != nil {
		return err
	}
-	blockStore := store.NewBlockStore(blockStoreDB)
+	blockStore := store.NewBlockStore(blockStoreDB, store.WithDBKeyLayout(config.Storage.ExperimentalKeyLayout))
	defer blockStore.Close()

	stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
@@ -73,7 +67,7 @@ func runInspect(cmd *cobra.Command, _ []string) error {
	if err != nil {
		return err
	}
-	txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
+	txIndexer, blockIndexer, _, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
	if err != nil {
		return err
	}
diff --git a/cmd/cometbft/commands/light.go b/cmd/cometbft/commands/light.go
index 490075f486b..16b12e567db 100644
--- a/cmd/cometbft/commands/light.go
+++ b/cmd/cometbft/commands/light.go
@@ -14,10 +14,9 @@ import (
	"github.com/spf13/cobra"

	dbm "github.com/cometbft/cometbft-db"
-
+	cmtos "github.com/cometbft/cometbft/internal/os"
	"github.com/cometbft/cometbft/libs/log"
	cmtmath "github.com/cometbft/cometbft/libs/math"
-	cmtos "github.com/cometbft/cometbft/libs/os"
	"github.com/cometbft/cometbft/light"
	lproxy "github.com/cometbft/cometbft/light/proxy"
	lrpc "github.com/cometbft/cometbft/light/rpc"
@@ -25,7 +24,7 @@ import (
	rpcserver "github.com/cometbft/cometbft/rpc/jsonrpc/server"
)

-// LightCmd represents the base command when called without any subcommands
+// LightCmd represents the base command when called without any subcommands.
var LightCmd = &cobra.Command{
	Use:   "light [chainID]",
	Short: "Run a light client proxy server, verifying CometBFT rpc",
@@ -102,7 +101,7 @@ func init() {

func runProxy(_ *cobra.Command, args []string) error {
	// Initialize logger.
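	// log.NewLogger logs to the writer it is given; when --verbose is set,
	// the level filter built below via log.AllowLevel is relaxed to "debug".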
-	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+	logger := log.NewLogger(os.Stdout)
	var option log.Option
	if verbose {
		option, _ = log.AllowLevel("debug")
@@ -119,7 +118,7 @@ func runProxy(_ *cobra.Command, args []string) error {
		witnessesAddrs = strings.Split(witnessAddrsJoined, ",")
	}

-	db, err := dbm.NewGoLevelDB("light-client-db", home)
+	db, err := dbm.NewPebbleDB("light-client-db", home)
	if err != nil {
		return fmt.Errorf("can't create a db: %w", err)
	}
@@ -196,6 +195,9 @@ func runProxy(_ *cobra.Command, args []string) error {
			dbs.New(db, chainID),
			options...,
		)
+		if errors.Is(err, light.ErrEmptyTrustedStore) {
+			logger.Error("Cannot start the light client from an empty trusted store. Please provide either an initialized trusted store, using the `--home-dir` flag, or trusted information to bootstrap the trusted store, via `--hash` and `--height` flags.")
+		}
	}
	if err != nil {
		return err
diff --git a/cmd/cometbft/commands/reindex_event.go b/cmd/cometbft/commands/reindex_event.go
index e59e60bd30e..5982bfe75a7 100644
--- a/cmd/cometbft/commands/reindex_event.go
+++ b/cmd/cometbft/commands/reindex_event.go
@@ -8,10 +8,9 @@ import (
	"github.com/spf13/cobra"

	dbm "github.com/cometbft/cometbft-db"
-
	abcitypes "github.com/cometbft/cometbft/abci/types"
	cmtcfg "github.com/cometbft/cometbft/config"
-	"github.com/cometbft/cometbft/libs/progressbar"
+	"github.com/cometbft/cometbft/internal/progressbar"
	"github.com/cometbft/cometbft/state"
	"github.com/cometbft/cometbft/state/indexer"
	blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv"
@@ -37,9 +36,9 @@ var ReIndexEventCmd = &cobra.Command{
	Short: "reindex events to the event store backends",
	Long: `
reindex-event is an offline tool to re-index block and tx events to the eventsinks,
-you can run this command when the event store backend dropped/disconnected or you want to
-replace the backend. The default start-height is 0, meaning the tooling will start
-reindex from the base block height(inclusive); and the default end-height is 0, meaning
+you can run this command when the event store backend was dropped/disconnected or you want to
+replace the backend. The default start-height is 0, meaning the tool will start
+reindexing from the base block height (inclusive); and the default end-height is 0, meaning
the tool will reindex until the latest block height (inclusive). The user can omit
either or both arguments.
@@ -52,7 +51,7 @@ want to use this command.
cometbft reindex-event --end-height 10 cometbft reindex-event --start-height 2 --end-height 10 `, - Run: func(cmd *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, _ []string) { bs, ss, err := loadStateAndBlockStore(config) if err != nil { fmt.Println(reindexFailed, err) @@ -150,7 +149,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { case <-cmd.Context().Done(): return fmt.Errorf("event re-index terminated at height %d: %w", height, cmd.Context().Err()) default: - block := args.blockStore.LoadBlock(height) + block, _ := args.blockStore.LoadBlock(height) if block == nil { return fmt.Errorf("not able to load block at height %d from the blockstore", height) } diff --git a/cmd/cometbft/commands/reindex_event_test.go b/cmd/cometbft/commands/reindex_event_test.go index 2fc684ef579..350d7f22a33 100644 --- a/cmd/cometbft/commands/reindex_event_test.go +++ b/cmd/cometbft/commands/reindex_event_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abcitypes "github.com/cometbft/cometbft/abci/types" cmtcfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/internal/test" @@ -28,7 +27,7 @@ const ( func setupReIndexEventCmd() *cobra.Command { reIndexEventCmd := &cobra.Command{ Use: ReIndexEventCmd.Use, - Run: func(cmd *cobra.Command, args []string) {}, + Run: func(_ *cobra.Command, _ []string) {}, } _ = reIndexEventCmd.ExecuteContext(context.Background()) @@ -113,11 +112,11 @@ func TestLoadBlockStore(t *testing.T) { _, _, err := loadStateAndBlockStore(cfg) require.Error(t, err) - _, err = dbm.NewDB("blockstore", dbm.GoLevelDBBackend, cfg.DBDir()) + _, err = dbm.NewDB("blockstore", dbm.PebbleDBBackend, cfg.DBDir()) require.NoError(t, err) // Get StateStore - _, err = dbm.NewDB("state", dbm.GoLevelDBBackend, cfg.DBDir()) + _, err = dbm.NewDB("state", dbm.PebbleDBBackend, cfg.DBDir()) require.NoError(t, err) bs, ss, err := loadStateAndBlockStore(cfg) @@ -135,11 +134,11 @@ func TestReIndexEvent(t *testing.T) { mockBlockStore. On("Base").Return(base). On("Height").Return(height). - On("LoadBlock", base).Return(nil).Once(). - On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}). - On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}) + On("LoadBlock", base).Return(nil, nil).Once(). + On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}, &types.BlockMeta{}). 
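// LoadBlock on the mocked block store now returns (block, blockMeta): the
// first stubbed call yields (nil, nil) to exercise the missing-block error
// path, while the later stubs return populated values so re-indexing proceeds.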
+ On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}, &types.BlockMeta{}) - abciResp := &abcitypes.ResponseFinalizeBlock{ + abciResp := &abcitypes.FinalizeBlockResponse{ TxResults: []*abcitypes.ExecTxResult{ {Code: 1}, }, diff --git a/cmd/cometbft/commands/reset.go b/cmd/cometbft/commands/reset.go index bfd11821046..042db87c173 100644 --- a/cmd/cometbft/commands/reset.go +++ b/cmd/cometbft/commands/reset.go @@ -1,13 +1,16 @@ package commands import ( + "fmt" "os" "path/filepath" "github.com/spf13/cobra" + "github.com/cometbft/cometbft/crypto/ed25519" + kt "github.com/cometbft/cometbft/internal/keytypes" + cmtos "github.com/cometbft/cometbft/internal/os" "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/privval" ) @@ -20,6 +23,12 @@ var ResetAllCmd = &cobra.Command{ RunE: resetAllCmd, } +func init() { + ResetAllCmd.Flags().StringVarP(&keyType, "key-type", "k", ed25519.KeyType, fmt.Sprintf("private key type (one of %s)", kt.SupportedKeyTypesStr())) + ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") + ResetPrivValidatorCmd.Flags().StringVarP(&keyType, "key-type", "k", ed25519.KeyType, fmt.Sprintf("private key type (one of %s)", kt.SupportedKeyTypesStr())) +} + var keepAddrBook bool // ResetStateCmd removes the database of the specified CometBFT core instance. @@ -27,7 +36,7 @@ var ResetStateCmd = &cobra.Command{ Use: "reset-state", Aliases: []string{"reset_state"}, Short: "Remove all the data and WAL", - RunE: func(cmd *cobra.Command, args []string) (err error) { + RunE: func(cmd *cobra.Command, _ []string) (err error) { config, err = ParseConfig(cmd) if err != nil { return err @@ -37,10 +46,6 @@ var ResetStateCmd = &cobra.Command{ }, } -func init() { - ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") -} - // ResetPrivValidatorCmd resets the private validator files. var ResetPrivValidatorCmd = &cobra.Command{ Use: "unsafe-reset-priv-validator", @@ -74,8 +79,7 @@ func resetPrivValidator(cmd *cobra.Command, _ []string) (err error) { return err } - resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger) - return nil + return resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger) } // resetAll removes address book files plus all data, and resets the privValdiator data. @@ -97,8 +101,7 @@ func resetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg } // recreate the dbDir since the privVal state needs to live there - resetFilePV(privValKeyFile, privValStateFile, logger) - return nil + return resetFilePV(privValKeyFile, privValStateFile, logger) } // resetState removes address book files plus all databases. 
@@ -155,7 +158,7 @@ func resetState(dbDir string, logger log.Logger) error { return nil } -func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) { +func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) error { if _, err := os.Stat(privValKeyFile); err == nil { pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile) pv.Reset() @@ -165,7 +168,10 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) { "stateFile", privValStateFile, ) } else { - pv := privval.GenFilePV(privValKeyFile, privValStateFile) + pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, genPrivKeyFromFlag) + if err != nil { + return err + } pv.Save() logger.Info( "Generated private validator file", @@ -173,6 +179,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) { "stateFile", privValStateFile, ) } + return nil } func removeAddrBook(addrBookFile string, logger log.Logger) { diff --git a/cmd/cometbft/commands/rollback.go b/cmd/cometbft/commands/rollback.go index 0471a0a0597..d7a2b62fd92 100644 --- a/cmd/cometbft/commands/rollback.go +++ b/cmd/cometbft/commands/rollback.go @@ -7,9 +7,8 @@ import ( "github.com/spf13/cobra" dbm "github.com/cometbft/cometbft-db" - cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/libs/os" + "github.com/cometbft/cometbft/internal/os" "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/store" ) @@ -27,12 +26,12 @@ var RollbackStateCmd = &cobra.Command{ A state rollback is performed to recover from an incorrect application state transition, when CometBFT has persisted an incorrect app hash and is thus unable to make progress. Rollback overwrites a state at height n with the state at height n - 1. -The application should also roll back to height n - 1. If the --hard flag is not used, -no blocks will be removed so upon restarting CometBFT the transactions in block n will be +The application should also roll back to height n - 1. If the --hard flag is not used, +no blocks will be removed so upon restarting CometBFT the transactions in block n will be re-executed against the application. Using --hard will also remove block n. This can be done multiple times. 
`, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { height, hash, err := RollbackState(config, removeBlock) if err != nil { return fmt.Errorf("failed to rollback state: %w", err) @@ -78,7 +77,7 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, if err != nil { return nil, nil, err } - blockStore := store.NewBlockStore(blockStoreDB) + blockStore := store.NewBlockStore(blockStoreDB, store.WithDBKeyLayout(config.Storage.ExperimentalKeyLayout)) if !os.FileExists(filepath.Join(config.DBDir(), "state.db")) { return nil, nil, fmt.Errorf("no statestore found in %v", config.DBDir()) diff --git a/cmd/cometbft/commands/root.go b/cmd/cometbft/commands/root.go index c21b415758d..9ae6bd9519e 100644 --- a/cmd/cometbft/commands/root.go +++ b/cmd/cometbft/commands/root.go @@ -15,7 +15,7 @@ import ( var ( config = cfg.DefaultConfig() - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewLogger(os.Stdout) ) func init() { @@ -26,8 +26,28 @@ func registerFlagsRootCmd(cmd *cobra.Command) { cmd.PersistentFlags().String("log_level", config.LogLevel, "log level") } +func ConfigHome(cmd *cobra.Command) (string, error) { + var home string + switch { + case os.Getenv("CMTHOME") != "": + home = os.Getenv("CMTHOME") + case os.Getenv("TMHOME") != "": + // XXX: Deprecated. + home = os.Getenv("TMHOME") + default: + var err error + // Default: $HOME/.cometbft + home, err = cmd.Flags().GetString(cli.HomeFlag) + if err != nil { + return "", err + } + } + + return home, nil +} + // ParseConfig retrieves the default environment configuration, -// sets up the CometBFT root and ensures that the root exists +// sets up the CometBFT root and ensures that the root exists. func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { conf := cfg.DefaultConfig() err := viper.Unmarshal(conf) @@ -35,20 +55,13 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { return nil, err } - var home string - if os.Getenv("CMTHOME") != "" { - home = os.Getenv("CMTHOME") - } else if os.Getenv("TMHOME") != "" { - // XXX: Deprecated. - home = os.Getenv("TMHOME") + if os.Getenv("TMHOME") != "" { logger.Error("Deprecated environment variable TMHOME identified. 
CMTHOME should be used instead.")
-	} else {
-		home, err = cmd.Flags().GetString(cli.HomeFlag)
-		if err != nil {
-			return nil, err
-		}
	}
-
+	home, err := ConfigHome(cmd)
+	if err != nil {
+		return nil, err
+	}
	conf.RootDir = home
	conf.SetRoot(conf.RootDir)

@@ -58,7 +71,7 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) {
	}
	if warnings := conf.CheckDeprecated(); len(warnings) > 0 {
		for _, warning := range warnings {
-			logger.Info("deprecated usage found in configuration file", "usage", warning)
+			logger.Warn("deprecated usage found in configuration file", "usage", warning)
		}
	}
	return conf, nil
@@ -68,7 +81,7 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) {
var RootCmd = &cobra.Command{
	Use:   "cometbft",
	Short: "BFT state machine replication for applications in any programming language",
-	PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
+	PersistentPreRunE: func(cmd *cobra.Command, _ []string) (err error) {
		if cmd.Name() == VersionCmd.Name() {
			return nil
		}
@@ -78,8 +91,14 @@ var RootCmd = &cobra.Command{
			return err
		}

+		for _, possibleMisconfiguration := range config.PossibleMisconfigurations() {
+			logger.Info(possibleMisconfiguration)
+		}
+
		if config.LogFormat == cfg.LogFormatJSON {
-			logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
+			logger = log.NewJSONLogger(os.Stdout)
+		} else if !config.LogColors {
+			logger = log.NewLoggerWithColor(os.Stdout, false)
		}

		logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
@@ -91,7 +110,6 @@ var RootCmd = &cobra.Command{
			logger = log.NewTracingLogger(logger)
		}

-		logger = logger.With("module", "main")
		return nil
	},
}
diff --git a/cmd/cometbft/commands/root_test.go b/cmd/cometbft/commands/root_test.go
index 5213d940c84..ee7c386a548 100644
--- a/cmd/cometbft/commands/root_test.go
+++ b/cmd/cometbft/commands/root_test.go
@@ -13,12 +13,13 @@ import (
	"github.com/stretchr/testify/require"

	cfg "github.com/cometbft/cometbft/config"
+	cmtos "github.com/cometbft/cometbft/internal/os"
	"github.com/cometbft/cometbft/libs/cli"
-	cmtos "github.com/cometbft/cometbft/libs/os"
)

// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(t *testing.T, dir string) {
+	t.Helper()
	os.Clearenv()
	err := os.RemoveAll(dir)
	require.NoError(t, err)
@@ -27,12 +28,12 @@ func clearConfig(t *testing.T, dir string) {
	config = cfg.DefaultConfig()
}

-// prepare new rootCmd
+// prepare new rootCmd.
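// It reuses RootCmd's PersistentPreRunE so that config parsing and logger
// setup run exactly as in the real command, while Run itself is a no-op.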
func testRootCmd() *cobra.Command { rootCmd := &cobra.Command{ Use: RootCmd.Use, PersistentPreRunE: RootCmd.PersistentPreRunE, - Run: func(cmd *cobra.Command, args []string) {}, + Run: func(_ *cobra.Command, _ []string) {}, } registerFlagsRootCmd(rootCmd) var l string @@ -41,6 +42,7 @@ func testRootCmd() *cobra.Command { } func testSetup(t *testing.T, root string, args []string, env map[string]string) error { + t.Helper() clearConfig(t, root) rootCmd := testRootCmd() @@ -73,7 +75,7 @@ func TestRootHome(t *testing.T) { idxString := "idx: " + strconv.Itoa(i) err := testSetup(t, root, tc.args, tc.env) - require.Nil(t, err, idxString) + require.NoError(t, err, idxString) assert.Equal(t, tc.root, config.RootDir, idxString) assert.Equal(t, tc.root, config.P2P.RootDir, idxString) @@ -108,7 +110,7 @@ func TestRootFlagsEnv(t *testing.T) { idxString = "idx: " + idxString defer clearConfig(t, root) err := testSetup(t, root, tc.args, tc.env) - require.Nil(t, err, idxString) + require.NoError(t, err, idxString) assert.Equal(t, tc.logLevel, config.LogLevel, idxString) } @@ -141,12 +143,12 @@ func TestRootConfig(t *testing.T) { // XXX: path must match cfg.defaultConfigPath configFilePath := filepath.Join(root, "config") err := cmtos.EnsureDir(configFilePath, 0o700) - require.Nil(t, err) + require.NoError(t, err) // write the non-defaults to a different path // TODO: support writing sub configs so we can test that too err = WriteConfigVals(configFilePath, cvals) - require.Nil(t, err) + require.NoError(t, err) rootCmd := testRootCmd() cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) @@ -154,7 +156,7 @@ func TestRootConfig(t *testing.T) { // run with the args and env tc.args = append([]string{rootCmd.Use}, tc.args...) err = cli.RunWithArgs(cmd, tc.args, tc.env) - require.Nil(t, err, idxString) + require.NoError(t, err, idxString) assert.Equal(t, tc.logLvl, config.LogLevel, idxString) } diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index 2e96cf04c0f..f11c3131f38 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -1,19 +1,28 @@ package commands import ( - "encoding/hex" "fmt" "github.com/spf13/cobra" - cmtos "github.com/cometbft/cometbft/libs/os" + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/ed25519" + kt "github.com/cometbft/cometbft/internal/keytypes" + cmtos "github.com/cometbft/cometbft/internal/os" nm "github.com/cometbft/cometbft/node" ) -var genesisHash []byte +var ( + cliParams nm.CliParams + keyType string +) + +func genPrivKeyFromFlag() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) +} // AddNodeFlags exposes some common configuration options on the command-line -// These are exposed for convenience of commands embedding a CometBFT node +// These are exposed for convenience of commands embedding a CometBFT node. func AddNodeFlags(cmd *cobra.Command) { // bind flags cmd.Flags().String("moniker", config.Moniker, "node name") @@ -26,7 +35,7 @@ func AddNodeFlags(cmd *cobra.Command) { // node flags cmd.Flags().BytesHexVar( - &genesisHash, + &cliParams.GenesisHash, "genesis_hash", []byte{}, "optional SHA-256 hash of the genesis file") @@ -52,7 +61,7 @@ func AddNodeFlags(cmd *cobra.Command) { "p2p.laddr", config.P2P.ListenAddress, "node listen address. 
(0.0.0.0:0 means any interface, any port)") - cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial") + cmd.Flags().String("p2p.external_address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") cmd.Flags().String("p2p.unconditional_peer_ids", @@ -75,11 +84,12 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String( "db_backend", config.DBBackend, - "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") + "database backend: goleveldb | rocksdb | badgerdb | pebbledb") cmd.Flags().String( "db_dir", config.DBPath, "database directory") + cmd.Flags().StringVarP(&keyType, "key-type", "k", ed25519.KeyType, fmt.Sprintf("private key type (one of %s)", kt.SupportedKeyTypesStr())) } // NewRunNodeCmd returns the command that allows the CLI to start a node. @@ -89,12 +99,8 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command { Use: "start", Aliases: []string{"node", "run"}, Short: "Run the CometBFT node", - RunE: func(cmd *cobra.Command, args []string) error { - if len(genesisHash) != 0 { - config.Storage.GenesisHash = hex.EncodeToString(genesisHash) - } - - n, err := nodeProvider(config, logger) + RunE: func(_ *cobra.Command, _ []string) error { + n, err := nodeProvider(config, logger, cliParams, genPrivKeyFromFlag) if err != nil { return fmt.Errorf("failed to create node: %w", err) } diff --git a/cmd/cometbft/commands/show_node_id.go b/cmd/cometbft/commands/show_node_id.go index 17bc0ed20c4..bcd47841dd0 100644 --- a/cmd/cometbft/commands/show_node_id.go +++ b/cmd/cometbft/commands/show_node_id.go @@ -5,7 +5,7 @@ import ( "github.com/spf13/cobra" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" ) // ShowNodeIDCmd dumps node's ID to the standard output. 
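The node-key helpers now live in p2p/nodekey under shorter names. A minimal sketch of the shared pattern, assuming config is already parsed (nodekey.LoadOrGen creates the key file on first use, while nodekey.Load only reads an existing one):

	nk, err := nodekey.LoadOrGen(config.NodeKeyFile())
	if err != nil {
		return err
	}
	fmt.Println(nk.ID()) // the peer ID derived from the node key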
@@ -17,11 +17,11 @@ var ShowNodeIDCmd = &cobra.Command{ } func showNodeID(*cobra.Command, []string) error { - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) + nk, err := nodekey.Load(config.NodeKeyFile()) if err != nil { return err } - fmt.Println(nodeKey.ID()) + fmt.Println(nk.ID()) return nil } diff --git a/cmd/cometbft/commands/show_validator.go b/cmd/cometbft/commands/show_validator.go index 5dc4af83ec9..4d8d97f4881 100644 --- a/cmd/cometbft/commands/show_validator.go +++ b/cmd/cometbft/commands/show_validator.go @@ -5,8 +5,8 @@ import ( "github.com/spf13/cobra" + cmtos "github.com/cometbft/cometbft/internal/os" cmtjson "github.com/cometbft/cometbft/libs/json" - cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/privval" ) diff --git a/cmd/cometbft/commands/testnet.go b/cmd/cometbft/commands/testnet.go index 6870876d101..caddcc6d65f 100644 --- a/cmd/cometbft/commands/testnet.go +++ b/cmd/cometbft/commands/testnet.go @@ -11,9 +11,10 @@ import ( "github.com/spf13/viper" cfg "github.com/cometbft/cometbft/config" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/bytes" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/privval" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" @@ -251,11 +252,11 @@ func persistentPeersString(config *cfg.Config) (string, error) { for i := 0; i < nValidators+nNonValidators; i++ { nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) + nk, err := nodekey.Load(config.NodeKeyFile()) if err != nil { return "", err } - persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) + persistentPeers[i] = na.IDAddrString(nk.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) } return strings.Join(persistentPeers, ","), nil } diff --git a/cmd/cometbft/commands/version.go b/cmd/cometbft/commands/version.go index 020b69da034..a4176702740 100644 --- a/cmd/cometbft/commands/version.go +++ b/cmd/cometbft/commands/version.go @@ -13,14 +13,14 @@ import ( var VersionCmd = &cobra.Command{ Use: "version", Short: "Show version info", - Run: func(cmd *cobra.Command, args []string) { - cmtVersion := version.TMCoreSemVer - if version.TMGitCommitHash != "" { - cmtVersion += "+" + version.TMGitCommitHash + Run: func(_ *cobra.Command, _ []string) { + cmtVersion := version.CMTSemVer + if version.CMTGitCommitHash != "" { + cmtVersion += "+" + version.CMTGitCommitHash } if verbose { - values, _ := json.MarshalIndent(struct { + values, err := json.MarshalIndent(struct { CometBFT string `json:"cometbft"` ABCI string `json:"abci"` BlockProtocol uint64 `json:"block_protocol"` @@ -31,6 +31,9 @@ var VersionCmd = &cobra.Command{ BlockProtocol: version.BlockProtocol, P2PProtocol: version.P2PProtocol, }, "", " ") + if err != nil { + panic(fmt.Sprintf("failed to marshal version info: %v", err)) + } fmt.Println(string(values)) } else { fmt.Println(cmtVersion) diff --git a/cmd/cometbft/main.go b/cmd/cometbft/main.go index 0ca1ce8d24d..5892c8251a5 100644 --- a/cmd/cometbft/main.go +++ b/cmd/cometbft/main.go @@ -5,6 +5,7 @@ import ( "path/filepath" cmd "github.com/cometbft/cometbft/cmd/cometbft/commands" + "github.com/cometbft/cometbft/cmd/cometbft/commands/config" 
"github.com/cometbft/cometbft/cmd/cometbft/commands/debug" cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/cli" @@ -29,6 +30,7 @@ func main() { cmd.CompactGoLevelDBCmd, cmd.InspectCmd, debug.DebugCmd, + config.Command(), cli.NewCompletionCmd(rootCmd, true), ) diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index fa2edc9342f..7a3fddc3f67 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -6,10 +6,9 @@ import ( "time" "github.com/cometbft/cometbft/crypto/ed25519" + cmtnet "github.com/cometbft/cometbft/internal/net" + cmtos "github.com/cometbft/cometbft/internal/os" "github.com/cometbft/cometbft/libs/log" - cmtnet "github.com/cometbft/cometbft/libs/net" - cmtos "github.com/cometbft/cometbft/libs/os" - "github.com/cometbft/cometbft/privval" ) @@ -20,8 +19,8 @@ func main() { privValKeyPath = flag.String("priv-key", "", "priv val key file path") privValStatePath = flag.String("priv-state", "", "priv val state file path") - logger = log.NewTMLogger( - log.NewSyncWriter(os.Stdout), + logger = log.NewLogger( + os.Stdout, ).With("module", "priv_val") ) flag.Parse() diff --git a/codecov.yml b/codecov.yml index 57c4bb16036..94448646568 100644 --- a/codecov.yml +++ b/codecov.yml @@ -19,7 +19,6 @@ ignore: - "DOCKER" - "scripts" - "**/*.pb.go" - - "libs/pubsub/query/query.peg.go" - "*.md" - "*.rst" - "*.yml" diff --git a/common.mk b/common.mk index b999f2d4ea2..fa8df3a75dc 100644 --- a/common.mk +++ b/common.mk @@ -3,7 +3,7 @@ BUILD_TAGS ?= cometbft COMMIT_HASH := $(shell git rev-parse --short HEAD) -LD_FLAGS = -X github.com/cometbft/cometbft/version.TMGitCommitHash=$(COMMIT_HASH) +LD_FLAGS = -X github.com/cometbft/cometbft/version.CMTGitCommitHash=$(COMMIT_HASH) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" # allow users to pass additional flags via the conventional LDFLAGS variable LD_FLAGS += $(LDFLAGS) @@ -23,10 +23,10 @@ ifeq (race,$(findstring race,$(COMETBFT_BUILD_OPTIONS))) BUILD_FLAGS += -race endif -# handle cleveldb -ifeq (cleveldb,$(findstring cleveldb,$(COMETBFT_BUILD_OPTIONS))) +# handle clock_skew +ifeq (clock_skew,$(findstring clock_skew,$(COMETBFT_BUILD_OPTIONS))) CGO_ENABLED=1 - BUILD_TAGS += cleveldb + BUILD_TAGS += clock_skew endif # handle badgerdb @@ -40,7 +40,14 @@ ifeq (rocksdb,$(findstring rocksdb,$(COMETBFT_BUILD_OPTIONS))) BUILD_TAGS += rocksdb endif -# handle boltdb -ifeq (boltdb,$(findstring boltdb,$(COMETBFT_BUILD_OPTIONS))) - BUILD_TAGS += boltdb +# handle bls12381 +ifeq (bls12381,$(findstring bls12381,$(COMETBFT_BUILD_OPTIONS))) + CGO_ENABLED=1 + BUILD_TAGS += bls12381 +endif + +# handle nodebug +ifeq (nodebug,$(findstring nodebug,$(COMETBFT_BUILD_OPTIONS))) + CGO_ENABLED=1 + BUILD_TAGS += nodebug endif diff --git a/config/config.go b/config/config.go index d631ed388f9..78dc19bc636 100644 --- a/config/config.go +++ b/config/config.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "errors" "fmt" + "net" "net/http" "os" "path/filepath" @@ -12,19 +13,18 @@ import ( "time" cmterrors "github.com/cometbft/cometbft/types/errors" - "github.com/cometbft/cometbft/version" ) const ( - // FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep + // FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep. FuzzModeDrop = iota - // FuzzModeDelay is a mode in which we randomly sleep + // FuzzModeDelay is a mode in which we randomly sleep. FuzzModeDelay - // LogFormatPlain is a format for colored text + // LogFormatPlain is a format for colored text. 
LogFormatPlain = "plain" - // LogFormatJSON is a format for json output + // LogFormatJSON is a format for json output. LogFormatJSON = "json" // DefaultLogLevel defines a default log level as INFO. @@ -48,6 +48,9 @@ const ( v0 = "v0" v1 = "v1" v2 = "v2" + + MempoolTypeFlood = "flood" + MempoolTypeNop = "nop" ) // NOTE: Most of the structs & relevant comments + the @@ -70,9 +73,23 @@ var ( // taken from https://semver.org/ semverRegexp = regexp.MustCompile(`^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) + + // Don't forget to change proxy.DefaultClientCreator if you add new options here. + proxyAppList = []string{ + "kvstore", + "kvstore_connsync", + "kvstore_unsync", + "persistent_kvstore", + "persistent_kvstore_connsync", + "persistent_kvstore_unsync", + "e2e", + "e2e_connsync", + "e2e_unsync", + "noop", + } ) -// Config defines the top level configuration for a CometBFT node +// Config defines the top level configuration for a CometBFT node. type Config struct { // Top level options use an anonymous struct BaseConfig `mapstructure:",squash"` @@ -90,7 +107,7 @@ type Config struct { Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` } -// DefaultConfig returns a default configuration for a CometBFT node +// DefaultConfig returns a default configuration for a CometBFT node. func DefaultConfig() *Config { return &Config{ BaseConfig: DefaultBaseConfig(), @@ -107,7 +124,7 @@ func DefaultConfig() *Config { } } -// TestConfig returns a configuration that can be used for testing +// TestConfig returns a configuration that can be used for testing. func TestConfig() *Config { return &Config{ BaseConfig: TestBaseConfig(), @@ -124,7 +141,7 @@ func TestConfig() *Config { } } -// SetRoot sets the RootDir for all Config structs +// SetRoot sets the RootDir for all Config structs. func (cfg *Config) SetRoot(root string) *Config { cfg.BaseConfig.RootDir = root cfg.RPC.RootDir = root @@ -167,21 +184,36 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.Instrumentation.ValidateBasic(); err != nil { return ErrInSection{Section: "instrumentation", Err: err} } + if !cfg.Consensus.CreateEmptyBlocks && cfg.Mempool.Type == MempoolTypeNop { + return errors.New("`nop` mempool does not support create_empty_blocks = false") + } return nil } -// CheckDeprecated returns any deprecation warnings. These are printed to the operator on startup +// CheckDeprecated returns any deprecation warnings. These are printed to the operator on startup. func (cfg *Config) CheckDeprecated() []string { var warnings []string + if cfg.Consensus.TimeoutCommit != 0 { + warnings = append(warnings, "[consensus.timeout_commit] is deprecated. Use `next_block_delay` in the ABCI `FinalizeBlockResponse`.") + } return warnings } -//----------------------------------------------------------------------------- -// BaseConfig +// PossibleMisconfigurations returns a list of possible conflicting entries that +// may lead to unexpected behavior. 
+func (cfg *Config) PossibleMisconfigurations() []string { + res := []string{} + for _, elem := range cfg.StateSync.PossibleMisconfigurations() { + res = append(res, "[statesync] section: "+elem) + } + return res +} -// BaseConfig defines the base configuration for a CometBFT node -type BaseConfig struct { //nolint: maligned +// ----------------------------------------------------------------------------- +// BaseConfig +// BaseConfig defines the base configuration for a CometBFT node. +type BaseConfig struct { // The version of the CometBFT binary that created // or last modified the config file Version string `mapstructure:"version"` @@ -197,25 +229,21 @@ type BaseConfig struct { //nolint: maligned // A custom human readable name for this node Moniker string `mapstructure:"moniker"` - // Database backend: goleveldb | cleveldb | boltdb | rocksdb - // * goleveldb (github.com/syndtr/goleveldb - most popular implementation) + // Database backend: badgerdb | goleveldb | pebbledb | rocksdb + // * badgerdb (uses github.com/dgraph-io/badger) + // - stable + // - pure go + // - use badgerdb build tag (go build -tags badgerdb) + // * goleveldb (github.com/syndtr/goleveldb) + // - UNMAINTAINED + // - stable // - pure go + // * pebbledb (uses github.com/cockroachdb/pebble) // - stable - // * cleveldb (uses levigo wrapper) - // - fast - // - requires gcc - // - use cleveldb build tag (go build -tags cleveldb) - // * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) - // - EXPERIMENTAL - // - may be faster is some use-cases (random reads - indexer) - // - use boltdb build tag (go build -tags boltdb) - // * rocksdb (uses github.com/tecbot/gorocksdb) - // - EXPERIMENTAL + // - pure go + // * rocksdb (uses github.com/linxGnu/grocksdb) // - requires gcc // - use rocksdb build tag (go build -tags rocksdb) - // * badgerdb (uses github.com/dgraph-io/badger) - // - EXPERIMENTAL - // - use badgerdb build tag (go build -tags badgerdb) DBBackend string `mapstructure:"db_backend"` // Database directory @@ -224,9 +252,12 @@ type BaseConfig struct { //nolint: maligned // Output level for logging LogLevel string `mapstructure:"log_level"` - // Output format: 'plain' (colored text) or 'json' + // Output format: 'plain' or 'json' LogFormat string `mapstructure:"log_format"` + // Colored log output. Considered only when `log_format = plain`. + LogColors bool `mapstructure:"log_colors"` + // Path to the JSON file containing the initial validator set and other meta data Genesis string `mapstructure:"genesis_file"` @@ -251,10 +282,10 @@ type BaseConfig struct { //nolint: maligned FilterPeers bool `mapstructure:"filter_peers"` // false } -// DefaultBaseConfig returns a default base configuration for a CometBFT node +// DefaultBaseConfig returns a default base configuration for a CometBFT node. func DefaultBaseConfig() BaseConfig { return BaseConfig{ - Version: version.TMCoreSemVer, + Version: version.CMTSemVer, Genesis: defaultGenesisJSONPath, PrivValidatorKey: defaultPrivValKeyPath, PrivValidatorState: defaultPrivValStatePath, @@ -264,13 +295,14 @@ func DefaultBaseConfig() BaseConfig { ABCI: "socket", LogLevel: DefaultLogLevel, LogFormat: LogFormatPlain, + LogColors: true, FilterPeers: false, - DBBackend: "goleveldb", + DBBackend: "pebbledb", DBPath: DefaultDataDir, } } -// TestBaseConfig returns a base configuration for testing a CometBFT node +// TestBaseConfig returns a base configuration for testing a CometBFT node. 
func TestBaseConfig() BaseConfig { cfg := DefaultBaseConfig() cfg.ProxyApp = "kvstore" @@ -278,27 +310,27 @@ func TestBaseConfig() BaseConfig { return cfg } -// GenesisFile returns the full path to the genesis.json file +// GenesisFile returns the full path to the genesis.json file. func (cfg BaseConfig) GenesisFile() string { return rootify(cfg.Genesis, cfg.RootDir) } -// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file +// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file. func (cfg BaseConfig) PrivValidatorKeyFile() string { return rootify(cfg.PrivValidatorKey, cfg.RootDir) } -// PrivValidatorFile returns the full path to the priv_validator_state.json file +// PrivValidatorStateFile returns the full path to the priv_validator_state.json file. func (cfg BaseConfig) PrivValidatorStateFile() string { return rootify(cfg.PrivValidatorState, cfg.RootDir) } -// NodeKeyFile returns the full path to the node_key.json file +// NodeKeyFile returns the full path to the node_key.json file. func (cfg BaseConfig) NodeKeyFile() string { return rootify(cfg.NodeKey, cfg.RootDir) } -// DBDir returns the full path to the database directory +// DBDir returns the full path to the database directory. func (cfg BaseConfig) DBDir() string { return rootify(cfg.DBPath, cfg.RootDir) } @@ -317,13 +349,65 @@ func (cfg BaseConfig) ValidateBasic() error { default: return errors.New("unknown log_format (must be 'plain' or 'json')") } + + return cfg.validateProxyApp() +} + +func (cfg BaseConfig) validateProxyApp() error { + if cfg.ProxyApp == "" { + return errors.New("proxy_app cannot be empty") + } + + // proxy is a static application. + for _, proxyApp := range proxyAppList { + if cfg.ProxyApp == proxyApp { + return nil + } + } + + // proxy is a network address. + parts := strings.SplitN(cfg.ProxyApp, "://", 2) + if len(parts) != 2 { // TCP address + _, err := net.ResolveTCPAddr("tcp", cfg.ProxyApp) + if err != nil { + return fmt.Errorf("failed to resolve TCP proxy_app %s: %w", cfg.ProxyApp, err) + } + } else { // other protocol + proto := parts[0] + address := parts[1] + switch proto { + case "tcp", "tcp4", "tcp6": + _, err := net.ResolveTCPAddr(proto, address) + if err != nil { + return fmt.Errorf("failed to resolve TCP proxy_app %s: %w", cfg.ProxyApp, err) + } + case "udp", "udp4", "udp6": + _, err := net.ResolveUDPAddr(proto, address) + if err != nil { + return fmt.Errorf("failed to resolve UDP proxy_app %s: %w", cfg.ProxyApp, err) + } + case "ip", "ip4", "ip6": + _, err := net.ResolveIPAddr(proto, address) + if err != nil { + return fmt.Errorf("failed to resolve IP proxy_app %s: %w", cfg.ProxyApp, err) + } + case "unix", "unixgram", "unixpacket": + _, err := net.ResolveUnixAddr(proto, address) + if err != nil { + return fmt.Errorf("failed to resolve UNIX proxy_app %s: %w", cfg.ProxyApp, err) + } + default: + return fmt.Errorf("invalid protocol in proxy_app: %s (expected one supported by net.Dial)", cfg.ProxyApp) + } + } + return nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // RPCConfig -// RPCConfig defines the configuration options for the CometBFT RPC server +// RPCConfig defines the configuration options for the CometBFT RPC server. 
type RPCConfig struct { RootDir string `mapstructure:"home"` @@ -392,6 +476,10 @@ type RPCConfig struct { // See https://github.com/tendermint/tendermint/issues/3435 TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"` + // Maximum number of requests that can be sent in a batch + // https://www.jsonrpc.org/specification#batch + MaxRequestBatchSize int `mapstructure:"max_request_batch_size"` + // Maximum size of request body, in bytes MaxBodyBytes int64 `mapstructure:"max_body_bytes"` @@ -421,7 +509,7 @@ type RPCConfig struct { PprofListenAddress string `mapstructure:"pprof_laddr"` } -// DefaultRPCConfig returns a default configuration for the RPC server +// DefaultRPCConfig returns a default configuration for the RPC server. func DefaultRPCConfig() *RPCConfig { return &RPCConfig{ ListenAddress: "tcp://127.0.0.1:26657", @@ -438,15 +526,16 @@ func DefaultRPCConfig() *RPCConfig { TimeoutBroadcastTxCommit: 10 * time.Second, WebSocketWriteBufferSize: defaultSubscriptionBufferSize, - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default + MaxRequestBatchSize: 10, // maximum requests in a JSON-RPC batch request + MaxBodyBytes: int64(1000000), // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default TLSCertFile: "", TLSKeyFile: "", } } -// TestRPCConfig returns a configuration for testing the RPC server +// TestRPCConfig returns a configuration for testing the RPC server. func TestRPCConfig() *RPCConfig { cfg := DefaultRPCConfig() cfg.ListenAddress = "tcp://127.0.0.1:36657" @@ -478,6 +567,9 @@ func (cfg *RPCConfig) ValidateBasic() error { if cfg.TimeoutBroadcastTxCommit < 0 { return cmterrors.ErrNegativeField{Field: "timeout_broadcast_tx_commit"} } + if cfg.MaxRequestBatchSize < 0 { + return cmterrors.ErrNegativeField{Field: "max_request_batch_size"} + } if cfg.MaxBodyBytes < 0 { return cmterrors.ErrNegativeField{Field: "max_body_bytes"} } @@ -516,7 +608,7 @@ func (cfg RPCConfig) IsTLSEnabled() bool { return cfg.TLSCertFile != "" && cfg.TLSKeyFile != "" } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // GRPCConfig // GRPCConfig defines the configuration for the CometBFT gRPC server. @@ -616,7 +708,7 @@ func TestGRPCBlockServiceConfig() *GRPCBlockServiceConfig { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // GRPCPrivilegedConfig // GRPCPrivilegedConfig defines the configuration for the CometBFT gRPC server @@ -661,10 +753,10 @@ func TestGRPCPruningServiceConfig() *GRPCPruningServiceConfig { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // P2PConfig -// P2PConfig defines the configuration options for the CometBFT peer-to-peer networking layer +// P2PConfig defines the configuration options for the CometBFT peer-to-peer networking layer. type P2PConfig struct { //nolint: maligned RootDir string `mapstructure:"home"` @@ -740,7 +832,7 @@ type P2PConfig struct { //nolint: maligned TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"` } -// DefaultP2PConfig returns a default configuration for the peer-to-peer layer +// DefaultP2PConfig returns a default configuration for the peer-to-peer layer. 
func DefaultP2PConfig() *P2PConfig { return &P2PConfig{ ListenAddress: "tcp://0.0.0.0:26656", @@ -750,7 +842,7 @@ func DefaultP2PConfig() *P2PConfig { MaxNumInboundPeers: 40, MaxNumOutboundPeers: 10, PersistentPeersMaxDialPeriod: 0 * time.Second, - FlushThrottleTimeout: 100 * time.Millisecond, + FlushThrottleTimeout: 10 * time.Millisecond, MaxPacketMsgPayloadSize: 1024, // 1 kB SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s @@ -765,16 +857,15 @@ func DefaultP2PConfig() *P2PConfig { } } -// TestP2PConfig returns a configuration for testing the peer-to-peer layer +// TestP2PConfig returns a configuration for testing the peer-to-peer layer. func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.FlushThrottleTimeout = 10 * time.Millisecond cfg.AllowDuplicateIP = true return cfg } -// AddrBookFile returns the full path to the address book +// AddrBookFile returns the full path to the address book. func (cfg *P2PConfig) AddrBookFile() string { return rootify(cfg.AddrBook, cfg.RootDir) } @@ -826,7 +917,7 @@ func DefaultFuzzConnConfig() *FuzzConnConfig { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // MempoolConfig // MempoolConfig defines the configuration options for the CometBFT mempool @@ -836,6 +927,15 @@ func DefaultFuzzConnConfig() *FuzzConnConfig { // implementation (previously called v0), and a prioritized mempool (v1), which // was removed (see https://github.com/cometbft/cometbft/issues/260). type MempoolConfig struct { + // The type of mempool for this node to use. + // + // Possible types: + // - "flood" : concurrent linked list mempool with flooding gossip protocol + // (default) + // - "nop" : nop-mempool (short for no operation; the ABCI app is + // responsible for storing, disseminating and proposing txs). + // "create_empty_blocks=false" is not supported. + Type string `mapstructure:"type"` // RootDir is the root directory for all data. This should be configured via // the $CMTHOME env variable or --home cmd flag rather than overriding this // struct field. @@ -846,73 +946,87 @@ type MempoolConfig struct { // mempool may become invalid. If this does not apply to your application, // you can disable rechecking. Recheck bool `mapstructure:"recheck"` + // RecheckTimeout is the time the application has during the rechecking process + // to return CheckTx responses, once all requests have been sent. Responses that + // arrive after the timeout expires are discarded. It only applies to + // non-local ABCI clients and when recheck is enabled. + RecheckTimeout time.Duration `mapstructure:"recheck_timeout"` // Broadcast (default: true) defines whether the mempool should relay // transactions to other peers. Setting this to false will stop the mempool // from relaying transactions to other peers until they are included in a // block. In other words, if Broadcast is disabled, only the peer you send // the tx to will see it until it is included in a block. Broadcast bool `mapstructure:"broadcast"` - // WalPath (default: "") configures the location of the Write Ahead Log - // (WAL) for the mempool. The WAL is disabled by default. To enable, set - // WalPath to where you want the WAL to be written (e.g. - // "data/mempool.wal"). - WalPath string `mapstructure:"wal_dir"` // Maximum number of transactions in the mempool Size int `mapstructure:"size"` - // Limit the total size of all txs in the mempool. 
- // This only accounts for raw transactions (e.g. given 1MB transactions and - // max_txs_bytes=5MB, mempool will only accept 5 transactions). + // Maximum size in bytes of a single transaction accepted into the mempool. + MaxTxBytes int `mapstructure:"max_tx_bytes"` + // The maximum size in bytes of all transactions stored in the mempool. + // This is the raw, total transaction size. For example, given 1MB + // transactions and a 5MB maximum mempool byte size, the mempool will + // only accept five transactions. MaxTxsBytes int64 `mapstructure:"max_txs_bytes"` - // Size of the cache (used to filter transactions we saw earlier) in transactions + // Size of the cache (used to filter transactions we saw earlier) in transactions. CacheSize int `mapstructure:"cache_size"` // Do not remove invalid transactions from the cache (default: false) // Set to true if it's not possible for any invalid transaction to become // valid again in the future. KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"` - // Maximum size of a single transaction - // NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. - MaxTxBytes int `mapstructure:"max_tx_bytes"` - // Maximum size of a batch of transactions to send to a peer - // Including space needed by encoding (one varint per transaction). - // XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 - MaxBatchBytes int `mapstructure:"max_batch_bytes"` -} - -// DefaultMempoolConfig returns a default configuration for the CometBFT mempool + // Experimental parameters to limit gossiping txs to up to the specified number of peers. + // We use two independent upper values for persistent and non-persistent peers. + // Unconditional peers are not affected by this feature. + // If we are connected to more than the specified number of persistent peers, only send txs to + // ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those + // persistent peers disconnects, activate another persistent peer. + // Similarly for non-persistent peers, with an upper limit of + // ExperimentalMaxGossipConnectionsToNonPersistentPeers. + // If set to 0, the feature is disabled for the corresponding group of peers, that is, the + // number of active connections to that group of peers is not bounded. + // For non-persistent peers, if enabled, a value of 10 is recommended based on experimental + // performance results using the default P2P configuration. + ExperimentalMaxGossipConnectionsToPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_persistent_peers"` + ExperimentalMaxGossipConnectionsToNonPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_non_persistent_peers"` + + // ExperimentalPublishEventPendingTx enables publishing a `PendingTx` event when a new transaction is added to the mempool. + // Note: Enabling this feature may introduce potential delays in transaction processing due to blocking behavior. + // Use this feature with caution and consider the impact on transaction processing performance. + ExperimentalPublishEventPendingTx bool `mapstructure:"experimental_publish_event_pending_tx"` +} + +// DefaultMempoolConfig returns a default configuration for the CometBFT mempool. 
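// Sanity check on the defaults below: 64 MiB of queued raw transactions is
// enough to fill sixteen 4 MiB blocks (16 * 4 MiB = 64 MiB).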
func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Recheck: true, - Broadcast: true, - WalPath: "", + Type: MempoolTypeFlood, + Recheck: true, + RecheckTimeout: 1000 * time.Millisecond, + Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck Size: 5000, - MaxTxsBytes: 1024 * 1024 * 1024, // 1GB + MaxTxBytes: 1024 * 1024, // 1MiB + MaxTxsBytes: 64 * 1024 * 1024, // 64MiB, enough to fill 16 blocks of 4 MiB CacheSize: 10000, - MaxTxBytes: 1024 * 1024, // 1MB + ExperimentalMaxGossipConnectionsToNonPersistentPeers: 0, + ExperimentalMaxGossipConnectionsToPersistentPeers: 0, } } -// TestMempoolConfig returns a configuration for testing the CometBFT mempool +// TestMempoolConfig returns a configuration for testing the CometBFT mempool. func TestMempoolConfig() *MempoolConfig { cfg := DefaultMempoolConfig() cfg.CacheSize = 1000 return cfg } -// WalDir returns the full path to the mempool's write-ahead log -func (cfg *MempoolConfig) WalDir() string { - return rootify(cfg.WalPath, cfg.RootDir) -} - -// WalEnabled returns true if the WAL is enabled. -func (cfg *MempoolConfig) WalEnabled() bool { - return cfg.WalPath != "" -} - // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. func (cfg *MempoolConfig) ValidateBasic() error { + switch cfg.Type { + case MempoolTypeFlood, MempoolTypeNop: + case "": // allow empty string to be backwards compatible + default: + return fmt.Errorf("unknown mempool type: %q", cfg.Type) + } if cfg.Size < 0 { return cmterrors.ErrNegativeField{Field: "size"} } @@ -925,13 +1039,33 @@ func (cfg *MempoolConfig) ValidateBasic() error { if cfg.MaxTxBytes < 0 { return cmterrors.ErrNegativeField{Field: "max_tx_bytes"} } + if cfg.ExperimentalMaxGossipConnectionsToPersistentPeers < 0 { + return cmterrors.ErrNegativeField{Field: "experimental_max_gossip_connections_to_persistent_peers"} + } + if cfg.ExperimentalMaxGossipConnectionsToNonPersistentPeers < 0 { + return cmterrors.ErrNegativeField{Field: "experimental_max_gossip_connections_to_non_persistent_peers"} + } + + // Flood mempool with zero capacity is not allowed. + if cfg.Type != MempoolTypeNop { + if cfg.Size == 0 { + return cmterrors.ErrNegativeOrZeroField{Field: "size"} + } + if cfg.MaxTxsBytes == 0 { + return cmterrors.ErrNegativeOrZeroField{Field: "max_txs_bytes"} + } + if cfg.MaxTxBytes == 0 { + return cmterrors.ErrNegativeOrZeroField{Field: "max_tx_bytes"} + } + } + return nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // StateSyncConfig -// StateSyncConfig defines the configuration for the CometBFT state sync service +// StateSyncConfig defines the configuration for the CometBFT state sync service. 
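Before moving on to state sync: to make the new mempool validation rules concrete, here is a minimal, self-contained sketch (assuming the exported `github.com/cometbft/cometbft/config` package API shown in this diff) exercising `ValidateBasic` for both mempool types:

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/config"
)

func main() {
	// Defaults use the "flood" mempool with non-zero capacities.
	cfg := config.DefaultMempoolConfig()
	fmt.Println(cfg.Type, cfg.ValidateBasic()) // flood <nil>

	// The nop mempool tolerates zero capacities, since it stores nothing.
	cfg.Type = config.MempoolTypeNop
	cfg.Size, cfg.MaxTxsBytes, cfg.MaxTxBytes = 0, 0, 0
	fmt.Println(cfg.ValidateBasic()) // <nil>

	// A flood mempool with zero capacity is rejected.
	cfg.Type = config.MempoolTypeFlood
	fmt.Println(cfg.ValidateBasic() != nil) // true
}
```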
type StateSyncConfig struct { Enable bool `mapstructure:"enable"` TempDir string `mapstructure:"temp_dir"` @@ -939,7 +1073,7 @@ type StateSyncConfig struct { TrustPeriod time.Duration `mapstructure:"trust_period"` TrustHeight int64 `mapstructure:"trust_height"` TrustHash string `mapstructure:"trust_hash"` - DiscoveryTime time.Duration `mapstructure:"discovery_time"` + MaxDiscoveryTime time.Duration `mapstructure:"max_discovery_time"` ChunkRequestTimeout time.Duration `mapstructure:"chunk_request_timeout"` ChunkFetchers int32 `mapstructure:"chunk_fetchers"` } @@ -953,17 +1087,17 @@ func (cfg *StateSyncConfig) TrustHashBytes() []byte { return bytes } -// DefaultStateSyncConfig returns a default configuration for the state sync service +// DefaultStateSyncConfig returns a default configuration for the state sync service. func DefaultStateSyncConfig() *StateSyncConfig { return &StateSyncConfig{ TrustPeriod: 168 * time.Hour, - DiscoveryTime: 15 * time.Second, + MaxDiscoveryTime: 2 * time.Minute, ChunkRequestTimeout: 10 * time.Second, ChunkFetchers: 4, } } -// TestStateSyncConfig returns a default configuration for the state sync service +// TestStateSyncConfig returns a default configuration for the state sync service. func TestStateSyncConfig() *StateSyncConfig { return DefaultStateSyncConfig() } @@ -985,8 +1119,8 @@ func (cfg *StateSyncConfig) ValidateBasic() error { } } - if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second { - return ErrInsufficientDiscoveryTime + if cfg.MaxDiscoveryTime < 0 { + return cmterrors.ErrNegativeField{Field: "max_discovery_time"} } if cfg.TrustPeriod <= 0 { @@ -1018,15 +1152,24 @@ func (cfg *StateSyncConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- +// PossibleMisconfigurations returns a list of possible conflicting entries that +// may lead to unexpected behavior. +func (cfg *StateSyncConfig) PossibleMisconfigurations() []string { + if !cfg.Enable && len(cfg.RPCServers) != 0 { + return []string{"rpc_servers specified but enable = false"} + } + return []string{} +} + +// ----------------------------------------------------------------------------- // BlockSyncConfig -// BlockSyncConfig (formerly known as FastSync) defines the configuration for the CometBFT block sync service +// BlockSyncConfig (formerly known as FastSync) defines the configuration for the CometBFT block sync service. type BlockSyncConfig struct { Version string `mapstructure:"version"` } -// DefaultBlockSyncConfig returns a default configuration for the block sync service +// DefaultBlockSyncConfig returns a default configuration for the block sync service. func DefaultBlockSyncConfig() *BlockSyncConfig { return &BlockSyncConfig{ Version: "v0", @@ -1050,7 +1193,7 @@ func (cfg *BlockSyncConfig) ValidateBasic() error { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // ConsensusConfig // ConsensusConfig defines the configuration for the Tendermint consensus algorithm, adopted by CometBFT, @@ -1064,23 +1207,13 @@ type ConsensusConfig struct { TimeoutPropose time.Duration `mapstructure:"timeout_propose"` // How much timeout_propose increases with each round TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` - // How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) - TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` - // How much the timeout_prevote increases with each round - TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` - // How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) - TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` - // How much the timeout_precommit increases with each round - TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` - // How long we wait after committing a block, before starting on the new - // height (this gives us a chance to receive some more precommits, even - // though we already have +2/3). - // NOTE: when modifying, make sure to update time_iota_ms genesis parameter + // How long we wait after receiving +2/3 prevotes/precommits for “anything” (ie. not a single block or nil) + TimeoutVote time.Duration `mapstructure:"timeout_vote"` + // How much the timeout_vote increases with each round + TimeoutVoteDelta time.Duration `mapstructure:"timeout_vote_delta"` + // Deprecated: use `next_block_delay` in the ABCI application's `FinalizeBlockResponse`. TimeoutCommit time.Duration `mapstructure:"timeout_commit"` - // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) - SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` - // EmptyBlocks mode and possible interval between empty blocks CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"` @@ -1093,18 +1226,15 @@ type ConsensusConfig struct { DoubleSignCheckHeight int64 `mapstructure:"double_sign_check_height"` } -// DefaultConsensusConfig returns a default configuration for the consensus service +// DefaultConsensusConfig returns a default configuration for the consensus service. func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ WalPath: filepath.Join(DefaultDataDir, "cs.wal", "wal"), TimeoutPropose: 3000 * time.Millisecond, TimeoutProposeDelta: 500 * time.Millisecond, - TimeoutPrevote: 1000 * time.Millisecond, - TimeoutPrevoteDelta: 500 * time.Millisecond, - TimeoutPrecommit: 1000 * time.Millisecond, - TimeoutPrecommitDelta: 500 * time.Millisecond, - TimeoutCommit: 1000 * time.Millisecond, - SkipTimeoutCommit: false, + TimeoutVote: 1000 * time.Millisecond, + TimeoutVoteDelta: 500 * time.Millisecond, + TimeoutCommit: 0 * time.Millisecond, CreateEmptyBlocks: true, CreateEmptyBlocksInterval: 0 * time.Second, PeerGossipSleepDuration: 100 * time.Millisecond, @@ -1114,57 +1244,53 @@ func DefaultConsensusConfig() *ConsensusConfig { } } -// TestConsensusConfig returns a configuration for testing the consensus service +// TestConsensusConfig returns a configuration for testing the consensus service. 
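An aside on the `PossibleMisconfigurations` helper added in the state-sync hunk above: a short sketch of the intended behavior (the expected strings mirror the new tests in config_test.go further down), again assuming the exported `config` package API:

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/config"
)

func main() {
	cfg := config.DefaultStateSyncConfig()

	// rpc_servers with enable = false is flagged as a likely mistake.
	cfg.RPCServers = []string{"https://rpc.example.com:443"} // hypothetical endpoint
	fmt.Println(cfg.PossibleMisconfigurations()) // [rpc_servers specified but enable = false]

	// Enabling state sync clears the warning.
	cfg.Enable = true
	fmt.Println(cfg.PossibleMisconfigurations()) // []
}
```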
func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() cfg.TimeoutPropose = 40 * time.Millisecond cfg.TimeoutProposeDelta = 1 * time.Millisecond - cfg.TimeoutPrevote = 10 * time.Millisecond - cfg.TimeoutPrevoteDelta = 1 * time.Millisecond - cfg.TimeoutPrecommit = 10 * time.Millisecond - cfg.TimeoutPrecommitDelta = 1 * time.Millisecond - // NOTE: when modifying, make sure to update time_iota_ms (testGenesisFmt) in toml.go - cfg.TimeoutCommit = 10 * time.Millisecond - cfg.SkipTimeoutCommit = true + cfg.TimeoutVote = 10 * time.Millisecond + cfg.TimeoutVoteDelta = 1 * time.Millisecond + cfg.TimeoutCommit = 0 cfg.PeerGossipSleepDuration = 5 * time.Millisecond cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond cfg.DoubleSignCheckHeight = int64(0) return cfg } -// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step +// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step. func (cfg *ConsensusConfig) WaitForTxs() bool { return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 } -// Propose returns the amount of time to wait for a proposal +func timeoutTime(baseTimeout, timeoutDelta time.Duration, round int32) time.Duration { + timeout := baseTimeout.Nanoseconds() + timeoutDelta.Nanoseconds()*int64(round) + return time.Duration(timeout) * time.Nanosecond +} + +// Propose returns the amount of time to wait for a proposal. func (cfg *ConsensusConfig) Propose(round int32) time.Duration { - return time.Duration( - cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round), - ) * time.Nanosecond + return timeoutTime(cfg.TimeoutPropose, cfg.TimeoutProposeDelta, round) } -// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes +// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes. func (cfg *ConsensusConfig) Prevote(round int32) time.Duration { - return time.Duration( - cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round), - ) * time.Nanosecond + return timeoutTime(cfg.TimeoutVote, cfg.TimeoutVoteDelta, round) } -// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits +// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits. func (cfg *ConsensusConfig) Precommit(round int32) time.Duration { - return time.Duration( - cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round), - ) * time.Nanosecond + return timeoutTime(cfg.TimeoutVote, cfg.TimeoutVoteDelta, round) } // Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits // for a single block (ie. a commit). +// Deprecated: use `next_block_delay` in the ABCI application's `FinalizeBlockResponse`. func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { return t.Add(cfg.TimeoutCommit) } -// WalFile returns the full path to the write-ahead log file +// WalFile returns the full path to the write-ahead log file. func (cfg *ConsensusConfig) WalFile() string { if cfg.walFile != "" { return cfg.walFile @@ -1172,7 +1298,7 @@ func (cfg *ConsensusConfig) WalFile() string { return rootify(cfg.WalPath, cfg.RootDir) } -// SetWalFile sets the path to the write-ahead log file +// SetWalFile sets the path to the write-ahead log file. 
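A quick worked example of the consolidated `timeoutTime` arithmetic above, using the defaults just shown (`timeout_vote` = 1s, `timeout_vote_delta` = 500ms); note that `Prevote` and `Precommit` are now driven by the same pair of parameters:

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/config"
)

func main() {
	cfg := config.DefaultConsensusConfig()

	// Round 0 waits 1s; round 3 waits 1s + 3*500ms = 2.5s.
	fmt.Println(cfg.Prevote(0), cfg.Prevote(3)) // 1s 2.5s

	// Prevote and Precommit share timeout_vote / timeout_vote_delta.
	fmt.Println(cfg.Precommit(3) == cfg.Prevote(3)) // true
}
```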
func (cfg *ConsensusConfig) SetWalFile(walFile string) { cfg.walFile = walFile } @@ -1186,17 +1312,11 @@ func (cfg *ConsensusConfig) ValidateBasic() error { if cfg.TimeoutProposeDelta < 0 { return cmterrors.ErrNegativeField{Field: "timeout_propose_delta"} } - if cfg.TimeoutPrevote < 0 { - return cmterrors.ErrNegativeField{Field: "timeout_prevote"} - } - if cfg.TimeoutPrevoteDelta < 0 { - return cmterrors.ErrNegativeField{Field: "timeout_prevote_delta"} - } - if cfg.TimeoutPrecommit < 0 { - return cmterrors.ErrNegativeField{Field: "timeout_precommit"} + if cfg.TimeoutVote < 0 { + return cmterrors.ErrNegativeField{Field: "timeout_vote"} } - if cfg.TimeoutPrecommitDelta < 0 { - return cmterrors.ErrNegativeField{Field: "timeout_precommit_delta"} + if cfg.TimeoutVoteDelta < 0 { + return cmterrors.ErrNegativeField{Field: "timeout_vote_delta"} } if cfg.TimeoutCommit < 0 { return cmterrors.ErrNegativeField{Field: "timeout_commit"} @@ -1216,7 +1336,7 @@ func (cfg *ConsensusConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // StorageConfig // StorageConfig allows more fine-grained control over certain storage-related @@ -1228,23 +1348,39 @@ type StorageConfig struct { DiscardABCIResponses bool `mapstructure:"discard_abci_responses"` // Configuration related to storage pruning. Pruning *PruningConfig `mapstructure:"pruning"` - - // Hex representation of the hash of the genesis file. - // This is an optional parameter set when an operator provides - // a hash via the command line. - // It is used to verify the hash of the actual genesis file. - // Note that if the provided has does not match the hash of the genesis file - // the node will report an error and not boot. - GenesisHash string `mapstructure:"genesis_hash"` + // Compaction on pruning - enable or disable in-process compaction. + // If the DB backend supports it, this will force the DB to compact + // the database levels and save on storage space. Setting this to true + // is most beneficial when used in combination with pruning as it will + // physically delete the entries marked for deletion. + // false by default (forcing compaction is disabled). + Compact bool `mapstructure:"compact"` + // Compaction interval - number of blocks to try explicit compaction on. + // This parameter should be tuned depending on the number of items + // you expect to delete between two calls to forced compaction. + // If your retain height is 1 block, it is too much of an overhead + // to try compaction every block. But it should also not be a very + // large multiple of your retain height, as it might incur larger overheads. + // 1000 by default. + CompactionInterval int64 `mapstructure:"compaction_interval"` + + // The representation of keys in the database. + // The current representation of keys in Comet's stores is considered to be v1. + // Users can experiment with a different layout by setting this field to v2. + // Note that this is an experimental feature and switching back from v2 to v1 + // is not supported by CometBFT. + ExperimentalKeyLayout string `mapstructure:"experimental_db_key_layout"` } // DefaultStorageConfig returns the default configuration options relating to // CometBFT storage optimization.
func DefaultStorageConfig() *StorageConfig { return &StorageConfig{ - DiscardABCIResponses: false, - Pruning: DefaultPruningConfig(), - GenesisHash: "", + DiscardABCIResponses: false, + Pruning: DefaultPruningConfig(), + Compact: false, + CompactionInterval: 1000, + ExperimentalKeyLayout: "v1", } } @@ -1254,7 +1390,6 @@ func TestStorageConfig() *StorageConfig { return &StorageConfig{ DiscardABCIResponses: false, Pruning: TestPruningConfig(), - GenesisHash: "", } } @@ -1262,6 +1397,9 @@ func (cfg *StorageConfig) ValidateBasic() error { if err := cfg.Pruning.ValidateBasic(); err != nil { return fmt.Errorf("error in [pruning] section: %w", err) } + if cfg.ExperimentalKeyLayout != "v1" && cfg.ExperimentalKeyLayout != "v2" { + return fmt.Errorf("unsupported version of DB Key layout, expected v1 or v2, got %s", cfg.ExperimentalKeyLayout) + } return nil } @@ -1291,6 +1429,15 @@ type TxIndexConfig struct { // The PostgreSQL connection configuration, the connection format: // postgresql://:@:/? PsqlConn string `mapstructure:"psql-conn"` + + // The PostgreSQL table that stores indexed blocks. + TableBlocks string `mapstructure:"table_blocks"` + // The PostgreSQL table that stores indexed transaction results. + TableTxResults string `mapstructure:"table_tx_results"` + // The PostgreSQL table that stores indexed events. + TableEvents string `mapstructure:"table_events"` + // The PostgreSQL table that stores indexed attributes. + TableAttributes string `mapstructure:"table_attributes"` } // DefaultTxIndexConfig returns a default configuration for the transaction indexer. @@ -1305,7 +1452,7 @@ func TestTxIndexConfig() *TxIndexConfig { return DefaultTxIndexConfig() } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // InstrumentationConfig // InstrumentationConfig defines the configuration for metrics reporting. @@ -1358,10 +1505,10 @@ func (cfg *InstrumentationConfig) IsPrometheusEnabled() bool { return cfg.Prometheus && cfg.PrometheusListenAddr != "" } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Utils -// helper function to make config creation independent of root dir +// helper function to make config creation independent of root dir. 
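A small sketch of the new storage knobs and their validation, using the field names defined above (the `"v3"` value is deliberately invalid to trigger the key-layout check):

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/config"
)

func main() {
	cfg := config.DefaultStorageConfig()

	// Opt into forced compaction; the default interval of 1000 blocks keeps
	// the compaction overhead low when the retain height is small.
	cfg.Compact = true
	cfg.CompactionInterval = 1000
	fmt.Println(cfg.ValidateBasic()) // <nil>

	// Only the "v1" and "v2" key layouts are accepted.
	cfg.ExperimentalKeyLayout = "v3"
	fmt.Println(cfg.ValidateBasic() != nil) // true
}
```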
func rootify(path, root string) string { if filepath.IsAbs(path) { return path @@ -1369,7 +1516,7 @@ func rootify(path, root string) string { return filepath.Join(root, path) } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Moniker var defaultMoniker = getDefaultMoniker() @@ -1384,7 +1531,7 @@ func getDefaultMoniker() string { return moniker } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // PruningConfig type PruningConfig struct { @@ -1418,7 +1565,7 @@ func (cfg *PruningConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // DataCompanionPruningConfig type DataCompanionPruningConfig struct { diff --git a/config/config.toml.tpl b/config/config.toml.tpl new file mode 100644 index 00000000000..745d58979c0 --- /dev/null +++ b/config/config.toml.tpl @@ -0,0 +1,579 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. Do not modify this. +version = "{{ .BaseConfig.Version }}" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "{{ .BaseConfig.ProxyApp }}" + +# A custom human readable name for this node +moniker = "{{ .BaseConfig.Moniker }}" + +# Database backend: badgerdb | goleveldb | pebbledb | rocksdb +# * badgerdb (uses github.com/dgraph-io/badger) +# - stable +# - pure go +# - use badgerdb build tag (go build -tags badgerdb) +# * goleveldb (github.com/syndtr/goleveldb) +# - UNMAINTAINED +# - stable +# - pure go +# * pebbledb (uses github.com/cockroachdb/pebble) +# - stable +# - pure go +# * rocksdb (uses github.com/linxGnu/grocksdb) +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +db_backend = "{{ .BaseConfig.DBBackend }}" + +# Database directory +db_dir = "{{ js .BaseConfig.DBPath }}" + +# Output level for logging, including package level options +log_level = "{{ .BaseConfig.LogLevel }}" + +# Output format: 'plain' or 'json' +log_format = "{{ .BaseConfig.LogFormat }}" + +# Colored log output when 'log_format' is 'plain'. Default is 'true'. 
+log_colors = {{ .BaseConfig.LogColors }} + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other metadata +genesis_file = "{{ js .BaseConfig.Genesis }}" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "{{ js .BaseConfig.PrivValidatorKey }}" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "{{ js .BaseConfig.PrivValidatorState }}" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "{{ js .BaseConfig.NodeKey }}" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "{{ .BaseConfig.ABCI }}" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = {{ .BaseConfig.FilterPeers }} + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "{{ .RPC.ListenAddress }}" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}] + +# A list of non-simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}] + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = {{ .RPC.Unsafe }} + +# Maximum number of simultaneous connections (including WebSocket). +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = {{ .RPC.MaxOpenConnections }} + +# Maximum number of unique clientIDs that can /subscribe. +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = {{ .RPC.MaxSubscriptionClients }} + +# Maximum number of unique queries a given client can /subscribe to. +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory).
+experimental_subscription_buffer_size = {{ .RPC.SubscriptionBufferSize }} + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = {{ .RPC.WebSocketWriteBufferSize }} + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = {{ .RPC.CloseOnSlowClient }} + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" + +# Maximum number of requests that can be sent in a batch. +# If the value is set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. +max_request_batch_size = {{ .RPC.MaxRequestBatchSize }} + +# Maximum size of request body, in bytes +max_body_bytes = {{ .RPC.MaxBodyBytes }} + +# Maximum size of request header, in bytes +max_header_bytes = {{ .RPC.MaxHeaderBytes }} + +# The path to a file containing the certificate that is used to create the HTTPS server. +# Might be either an absolute path or a path relative to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "{{ .RPC.TLSCertFile }}" + +# The path to a file containing the matching private key that is used to create the HTTPS server. +# Might be either an absolute path or a path relative to CometBFT's config directory. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "{{ .RPC.TLSKeyFile }}" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "{{ .RPC.PprofListenAddress }}" + +####################################################### +### gRPC Server Configuration Options ### +####################################################### + +# +# Note that the gRPC server is exposed unauthenticated. It is critical that +# this server not be exposed directly to the public internet. If this service +# must be accessed via the public internet, please ensure that appropriate +# precautions are taken (e.g. fronting with a reverse proxy like nginx with TLS +# termination and authentication, using DDoS protection services like +# Cloudflare, etc.).
+# + +[grpc] + +# TCP or UNIX socket address for the gRPC server to listen on. If not specified, +# the gRPC server will be disabled. +laddr = "{{ .GRPC.ListenAddress }}" + +# +# Each gRPC service can be turned on/off, and in some cases configured, +# individually. If the gRPC server is not enabled, all individual services' +# configurations are ignored. +# + +# The gRPC version service provides version information about the node and the +# protocols it uses. +[grpc.version_service] +enabled = {{ .GRPC.VersionService.Enabled }} + +# The gRPC block service returns block information. +[grpc.block_service] +enabled = {{ .GRPC.BlockService.Enabled }} + +# The gRPC block results service returns block results for a given height. If no height +# is given, it will return the block results from the latest height. +[grpc.block_results_service] +enabled = {{ .GRPC.BlockResultsService.Enabled }} + +# +# Configuration for privileged gRPC endpoints, which should **never** be exposed +# to the public internet. +# +[grpc.privileged] +# The host/port on which to expose privileged gRPC endpoints. +laddr = "{{ .GRPC.Privileged.ListenAddress }}" + +# +# Configuration specifically for the gRPC pruning service, which is considered a +# privileged service. +# +[grpc.privileged.pruning_service] + +# Only controls whether the pruning service is accessible via the gRPC API - not +# whether a previously set pruning service retain height is honored by the +# node. See the [storage.pruning] section for control over pruning. +# +# Disabled by default. +enabled = {{ .GRPC.Privileged.PruningService.Enabled }} + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "{{ .P2P.ListenAddress }}" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required.
Example: 159.89.10.97:26656 +external_address = "{{ .P2P.ExternalAddress }}" + +# Comma-separated list of seed nodes to connect to +seeds = "{{ .P2P.Seeds }}" + +# Comma-separated list of nodes to keep persistent connections to +persistent_peers = "{{ .P2P.PersistentPeers }}" + +# Path to address book +addr_book_file = "{{ js .P2P.AddrBook }}" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = {{ .P2P.AddrBookStrict }} + +# Maximum number of inbound peers +max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }} + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }} + +# List of node IDs to which a connection will be (re)established, ignoring any existing limits +unconditional_peer_ids = "{{ .P2P.UnconditionalPeerIDs }}" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} + +# Rate at which packets can be sent, in bytes/second +send_rate = {{ .P2P.SendRate }} + +# Rate at which packets can be received, in bytes/second +recv_rate = {{ .P2P.RecvRate }} + +# Set true to enable the peer-exchange reactor +pex = {{ .P2P.PexReactor }} + +# Seed mode, in which the node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = {{ .P2P.SeedMode }} + +# Comma-separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "{{ .P2P.PrivatePeerIDs }}" + +# Toggle to disable the guard against peers connecting from the same IP. +allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} + +# Peer connection configuration. +handshake_timeout = "{{ .P2P.HandshakeTimeout }}" +dial_timeout = "{{ .P2P.DialTimeout }}" + +####################################################### +### Mempool Configuration Options ### +####################################################### +[mempool] + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "{{ .Mempool.Type }}" + +# recheck (default: true) defines whether CometBFT should recheck the +# validity of all remaining transactions in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = {{ .Mempool.Recheck }} + +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +recheck_timeout = "{{ .Mempool.RecheckTimeout }}" + +# broadcast (default: true) defines whether the mempool should relay +# transactions to other peers.
Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = {{ .Mempool.Broadcast }} + +# Maximum number of transactions in the mempool +size = {{ .Mempool.Size }} + +# Maximum size in bytes of a single transaction accepted into the mempool. +max_tx_bytes = {{ .Mempool.MaxTxBytes }} + +# The maximum size in bytes of all transactions stored in the mempool. +# This is the raw, total transaction size. For example, given 1MB +# transactions and a 5MB maximum mempool byte size, the mempool will +# only accept five transactions. +max_txs_bytes = {{ .Mempool.MaxTxsBytes }} + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = {{ .Mempool.CacheSize }} + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = {{ .Mempool.KeepInvalidTxsInCache }} + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers }} +experimental_max_gossip_connections_to_non_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers }} + +# ExperimentalPublishEventPendingTx enables publishing a `PendingTx` event when a new transaction is added to the mempool. +# Note: Enabling this feature may introduce potential delays in transaction processing due to blocking behavior. +# Use this feature with caution and consider the impact on transaction processing performance. +experimental_publish_event_pending_tx = {{ .Mempool.ExperimentalPublishEventPendingTx }} + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = {{ .StateSync.Enable }} + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. 
Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "{{ StringsJoin .StateSync.RPCServers "," }}" +trust_height = {{ .StateSync.TrustHeight }} +trust_hash = "{{ .StateSync.TrustHash }}" +trust_period = "{{ .StateSync.TrustPeriod }}" + +# Time to spend discovering snapshots before switching to blocksync. If set to +# 0, state sync will keep trying indefinitely. +max_discovery_time = "{{ .StateSync.MaxDiscoveryTime }}" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "{{ .StateSync.TempDir }}" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 10 seconds). +chunk_request_timeout = "{{ .StateSync.ChunkRequestTimeout }}" + +# The number of concurrent chunk fetchers to run (default: 4). +chunk_fetchers = "{{ .StateSync.ChunkFetchers }}" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "{{ .BlockSync.Version }}" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "{{ js .Consensus.WalPath }}" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "{{ .Consensus.TimeoutPropose }}" +# How much timeout_propose increases with each round +timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" +# How long we wait after receiving +2/3 prevotes/precommits for “anything” (ie. not a single block or nil) +timeout_vote = "{{ .Consensus.TimeoutVote }}" +# How much the timeout_vote increases with each round +timeout_vote_delta = "{{ .Consensus.TimeoutVoteDelta }}" +# Deprecated: use `next_block_delay` in the ABCI application's `FinalizeBlockResponse`. +timeout_commit = "{{ .Consensus.TimeoutCommit }}" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign the last {double_sign_check_height} blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
+double_sign_check_height = {{ .Consensus.DoubleSignCheckHeight }} + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} +create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" +peer_gossip_intraloop_sleep_duration = "{{ .Consensus.PeerGossipIntraloopSleepDuration }}" +peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = {{ .Storage.DiscardABCIResponses }} + +# The representation of keys in the database. +# The current representation of keys in Comet's stores is considered to be v1. +# Users can experiment with a different layout by setting this field to v2. +# Note that this is an experimental feature and switching back from v2 to v1 +# is not supported by CometBFT. +# If the database was initially created with v1, it is necessary to migrate the DB +# before switching to v2. The migration is not done automatically. +# v1 - the legacy layout used by CometBFT prior to v1. +# v2 - Order preserving representation ordering entries by height. +experimental_db_key_layout = "{{ .Storage.ExperimentalKeyLayout }}" + +# If set to true, CometBFT will force compaction to happen for databases that support this feature, +# saving on storage space. Setting this to true is most beneficial when used in combination +# with pruning, as it will physically delete the entries marked for deletion. +# false by default (forcing compaction is disabled). +compact = {{ .Storage.Compact }} + +# To avoid forcing compaction every time, this parameter instructs CometBFT to wait +# the given number of blocks to be pruned before triggering compaction. +# It should be tuned depending on the number of items you expect to delete between two +# compactions. If your retain height is 1 block, +# it is too much of an overhead to try compaction every block. But it should also not be a very +# large multiple of your retain height, as it might incur larger overheads. +compaction_interval = "{{ .Storage.CompactionInterval }}" + +[storage.pruning] + +# The time period between automated background pruning operations. +interval = "{{ .Storage.Pruning.Interval }}" + +# +# Storage pruning configuration relating only to the data companion. +# +[storage.pruning.data_companion] + +# Whether automatic pruning respects values set by the data companion. Disabled +# by default. All other parameters in this section are ignored when this is +# disabled. +# +# If disabled, only the application retain height will influence block pruning +# (but not block results pruning). Only enabling this at a later stage will +# potentially mean that blocks below the application-set retain height at the +# time will not be available to the data companion. +enabled = {{ .Storage.Pruning.DataCompanion.Enabled }} + +# The initial value for the data companion block retain height if the data +# companion has not yet explicitly set one. If the data companion has already +# set a block retain height, this is ignored.
+initial_block_retain_height = {{ .Storage.Pruning.DataCompanion.InitialBlockRetainHeight }} + +# The initial value for the data companion block results retain height if the +# data companion has not yet explicitly set one. If the data companion has +# already set a block results retain height, this is ignored. +initial_block_results_retain_height = {{ .Storage.Pruning.DataCompanion.InitialBlockResultsRetainHeight }} + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# 3) "psql" - the indexer service backed by PostgreSQL. +# When "kv" or "psql" is chosen, "tx.height" and "tx.hash" will always be indexed. +indexer = "{{ .TxIndex.Indexer }}" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "{{ .TxIndex.PsqlConn }}" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = {{ .Instrumentation.Prometheus }} + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited.
+max_open_connections = {{ .Instrumentation.MaxOpenConnections }} + +# Instrumentation namespace +namespace = "{{ .Instrumentation.Namespace }}" diff --git a/config/config_test.go b/config/config_test.go index 9ada51873de..4ca95c2225a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -24,20 +24,23 @@ func TestDefaultConfig(t *testing.T) { cfg.SetRoot("/foo") cfg.Genesis = "bar" cfg.DBPath = "/opt/data" - cfg.Mempool.WalPath = "wal/mem/" assert.Equal("/foo/bar", cfg.GenesisFile()) assert.Equal("/opt/data", cfg.DBDir()) - assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) } func TestConfigValidateBasic(t *testing.T) { cfg := config.DefaultConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) // tamper with timeout_propose cfg.Consensus.TimeoutPropose = -10 * time.Second - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) + cfg.Consensus.TimeoutPropose = 3 * time.Second + + cfg.Consensus.CreateEmptyBlocks = false + cfg.Mempool.Type = config.MempoolTypeNop + require.Error(t, cfg.ValidateBasic()) } func TestTLSConfiguration(t *testing.T) { @@ -58,16 +61,45 @@ func TestTLSConfiguration(t *testing.T) { func TestBaseConfigValidateBasic(t *testing.T) { cfg := config.TestBaseConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) // tamper with log format cfg.LogFormat = "invalid" - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) +} + +func TestBaseConfigProxyApp_ValidateBasic(t *testing.T) { + testcases := map[string]struct { + proxyApp string + expectErr bool + }{ + "empty": {"", true}, + "valid": {"kvstore", false}, + "invalid static": {"kvstore1", true}, + "invalid tcp": {"127.0.0.1", true}, + "invalid tcp with proto": {"tcp://127.0.0.1", true}, + "valid tcp": {"tcp://127.0.0.1:80", false}, + "invalid proto": {"unix1://local", true}, + "valid unix": {"unix://local", false}, + } + for desc, tc := range testcases { + t.Run(desc, func(t *testing.T) { + cfg := config.DefaultBaseConfig() + cfg.ProxyApp = tc.proxyApp + + err := cfg.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } } func TestRPCConfigValidateBasic(t *testing.T) { cfg := config.TestRPCConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) fieldsToTest := []string{ "MaxOpenConnections", @@ -76,18 +108,19 @@ func TestRPCConfigValidateBasic(t *testing.T) { "TimeoutBroadcastTxCommit", "MaxBodyBytes", "MaxHeaderBytes", + "MaxRequestBatchSize", } for _, fieldName := range fieldsToTest { reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) } } func TestP2PConfigValidateBasic(t *testing.T) { cfg := config.TestP2PConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) fieldsToTest := []string{ "MaxNumInboundPeers", @@ -100,26 +133,62 @@ func TestP2PConfigValidateBasic(t *testing.T) { for _, fieldName := range fieldsToTest { reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) } } func TestMempoolConfigValidateBasic(t *testing.T) { cfg := config.TestMempoolConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) - fieldsToTest := []string{ + // 
tamper with type + reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString("invalid") + require.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString(config.MempoolTypeFlood) + + setFieldTo := func(fieldName string, value int64) { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(value) + } + + // tamper with numbers + fields2values := []struct { + Name string + AllowedValues []int64 + DisallowedValues []int64 + }{ + {"Size", []int64{1}, []int64{-1, 0}}, + {"MaxTxsBytes", []int64{1}, []int64{-1, 0}}, + {"CacheSize", []int64{0, 1}, []int64{-1}}, + {"MaxTxBytes", []int64{1}, []int64{-1, 0}}, + {"ExperimentalMaxGossipConnectionsToPersistentPeers", []int64{0, 1}, []int64{-1}}, + {"ExperimentalMaxGossipConnectionsToNonPersistentPeers", []int64{0, 1}, []int64{-1}}, + } + for _, field := range fields2values { + for _, value := range field.AllowedValues { + setFieldTo(field.Name, value) + require.NoError(t, cfg.ValidateBasic()) + setFieldTo(field.Name, 1) // reset + } + + for _, value := range field.DisallowedValues { + setFieldTo(field.Name, value) + require.Error(t, cfg.ValidateBasic()) + setFieldTo(field.Name, 1) // reset + } + } + + // with noop mempool, zero values are allowed for the fields below + reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString(config.MempoolTypeNop) + fieldNames := []string{ "Size", "MaxTxsBytes", - "CacheSize", "MaxTxBytes", } - - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + for _, name := range fieldNames { + setFieldTo(name, 0) + require.NoError(t, cfg.ValidateBasic()) + setFieldTo(name, 1) // reset } } @@ -130,14 +199,14 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { func TestBlockSyncConfigValidateBasic(t *testing.T) { cfg := config.TestBlockSyncConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) // tamper with version cfg.Version = "v1" - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) cfg.Version = "invalid" - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) } func TestConsensusConfig_ValidateBasic(t *testing.T) { @@ -150,14 +219,10 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) { "TimeoutPropose negative": {func(c *config.ConsensusConfig) { c.TimeoutPropose = -1 }, true}, "TimeoutProposeDelta": {func(c *config.ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false}, "TimeoutProposeDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true}, - "TimeoutPrevote": {func(c *config.ConsensusConfig) { c.TimeoutPrevote = time.Second }, false}, - "TimeoutPrevote negative": {func(c *config.ConsensusConfig) { c.TimeoutPrevote = -1 }, true}, - "TimeoutPrevoteDelta": {func(c *config.ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false}, - "TimeoutPrevoteDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true}, - "TimeoutPrecommit": {func(c *config.ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false}, - "TimeoutPrecommit negative": {func(c *config.ConsensusConfig) { c.TimeoutPrecommit = -1 }, true}, - "TimeoutPrecommitDelta": {func(c *config.ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false}, - "TimeoutPrecommitDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true}, + "TimeoutVote": {func(c 
*config.ConsensusConfig) { c.TimeoutVote = time.Second }, false}, + "TimeoutVote negative": {func(c *config.ConsensusConfig) { c.TimeoutVote = -1 }, true}, + "TimeoutVoteDelta": {func(c *config.ConsensusConfig) { c.TimeoutVoteDelta = time.Second }, false}, + "TimeoutVoteDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutVoteDelta = -1 }, true}, "TimeoutCommit": {func(c *config.ConsensusConfig) { c.TimeoutCommit = time.Second }, false}, "TimeoutCommit negative": {func(c *config.ConsensusConfig) { c.TimeoutCommit = -1 }, true}, "PeerGossipSleepDuration": {func(c *config.ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false}, @@ -167,16 +232,15 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) { "DoubleSignCheckHeight negative": {func(c *config.ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true}, } for desc, tc := range testcases { - tc := tc // appease linter t.Run(desc, func(t *testing.T) { cfg := config.DefaultConsensusConfig() tc.modify(cfg) err := cfg.ValidateBasic() if tc.expectErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } @@ -184,9 +248,31 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) { func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg := config.TestInstrumentationConfig() - assert.NoError(t, cfg.ValidateBasic()) + require.NoError(t, cfg.ValidateBasic()) // tamper with maximum open connections cfg.MaxOpenConnections = -1 - assert.Error(t, cfg.ValidateBasic()) + require.Error(t, cfg.ValidateBasic()) +} + +func TestConfigPossibleMisconfigurations(t *testing.T) { + cfg := config.DefaultConfig() + require.Len(t, cfg.PossibleMisconfigurations(), 0) + // providing rpc_servers while enable = false is a possible misconfiguration + cfg.StateSync.RPCServers = []string{"first_rpc"} + require.Equal(t, []string{"[statesync] section: rpc_servers specified but enable = false"}, cfg.PossibleMisconfigurations()) + // enabling statesync deletes possible misconfiguration + cfg.StateSync.Enable = true + require.Len(t, cfg.PossibleMisconfigurations(), 0) +} + +func TestStateSyncPossibleMisconfigurations(t *testing.T) { + cfg := config.DefaultStateSyncConfig() + require.Len(t, cfg.PossibleMisconfigurations(), 0) + // providing rpc_servers while enable = false is a possible misconfiguration + cfg.RPCServers = []string{"first_rpc"} + require.Equal(t, []string{"rpc_servers specified but enable = false"}, cfg.PossibleMisconfigurations()) + // enabling statesync deletes possible misconfiguration + cfg.Enable = true + require.Len(t, cfg.PossibleMisconfigurations(), 0) } diff --git a/config/db.go b/config/db.go index 2f0235fd827..95a72ed04f6 100644 --- a/config/db.go +++ b/config/db.go @@ -4,7 +4,6 @@ import ( "context" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/service" ) diff --git a/config/errors.go b/config/errors.go index 010030343db..1712328db83 100644 --- a/config/errors.go +++ b/config/errors.go @@ -8,7 +8,6 @@ import ( var ( ErrEmptyRPCServerEntry = errors.New("found empty rpc_servers entry") ErrNotEnoughRPCServers = errors.New("at least two rpc_servers entries are required") - ErrInsufficientDiscoveryTime = errors.New("snapshot discovery time must be at least five seconds") ErrInsufficientChunkRequestTimeout = errors.New("timeout for re-requesting a chunk (chunk_request_timeout) is less than 5 seconds") ErrUnknownLogFormat = errors.New("unknown log_format (must be 'plain' or 'json')") 
ErrSubscriptionBufferSizeInvalid = fmt.Errorf("experimental_subscription_buffer_size must be >= %d", minSubscriptionBufferSize) @@ -42,5 +41,5 @@ type ErrUnknownBlocksyncVersion struct { } func (e ErrUnknownBlocksyncVersion) Error() string { - return fmt.Sprintf("unknown blocksync version %s", e.Version) + return "unknown blocksync version " + e.Version } diff --git a/config/toml.go b/config/toml.go index be51bcbecaa..5892aaa03f9 100644 --- a/config/toml.go +++ b/config/toml.go @@ -6,7 +6,9 @@ import ( "strings" "text/template" - cmtos "github.com/cometbft/cometbft/libs/os" + _ "embed" + + cmtos "github.com/cometbft/cometbft/internal/os" ) // DefaultDirPerm is the default permissions used when creating directories. @@ -24,7 +26,7 @@ func init() { } } -/****** these are for production settings ***********/ +// ****** these are for production settings *********** // // EnsureRoot creates the root, config, and data directories if they don't exist, // and panics if it fails. @@ -48,7 +50,7 @@ func EnsureRoot(rootDir string) { } // XXX: this func should probably be called by cmd/cometbft/commands/init.go -// alongside the writing of the genesis.json and priv_validator.json +// alongside the writing of the genesis.json and priv_validator.json. func writeDefaultConfigFile(configFilePath string) { WriteConfigFile(configFilePath, DefaultConfig()) } @@ -65,545 +67,7 @@ func WriteConfigFile(configFilePath string, config *Config) { } // Note: any changes to the comments/variables/mapstructure -// must be reflected in the appropriate struct in config/config.go -const defaultConfigTemplate = `# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable -# or --home cmd flag. - -# The version of the CometBFT binary that created or -# last modified the config file. Do not modify this. 
-version = "{{ .BaseConfig.Version }}" - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the CometBFT binary -proxy_app = "{{ .BaseConfig.ProxyApp }}" - -# A custom human readable name for this node -moniker = "{{ .BaseConfig.Moniker }}" - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "{{ .BaseConfig.DBBackend }}" - -# Database directory -db_dir = "{{ js .BaseConfig.DBPath }}" - -# Output level for logging, including package level options -log_level = "{{ .BaseConfig.LogLevel }}" - -# Output format: 'plain' (colored text) or 'json' -log_format = "{{ .BaseConfig.LogFormat }}" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "{{ js .BaseConfig.Genesis }}" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "{{ js .BaseConfig.PrivValidatorKey }}" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "{{ js .BaseConfig.PrivValidatorState }}" - -# TCP or UNIX socket address for CometBFT to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "{{ js .BaseConfig.NodeKey }}" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "{{ .BaseConfig.ABCI }}" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = {{ .BaseConfig.FilterPeers }} - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "{{ .RPC.ListenAddress }}" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . 
}}{{end}}] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}] - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = {{ .RPC.Unsafe }} - -# Maximum number of simultaneous connections (including WebSocket). -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = {{ .RPC.MaxOpenConnections }} - -# Maximum number of unique clientIDs that can /subscribe. -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = {{ .RPC.MaxSubscriptionClients }} - -# Maximum number of unique queries a given client can /subscribe to. -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = {{ .RPC.SubscriptionBufferSize }} - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = {{ .RPC.WebSocketWriteBufferSize }} - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behavior. -experimental_close_on_slow_client = {{ .RPC.CloseOnSlowClient }} - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" - -# Maximum size of request body, in bytes -max_body_bytes = {{ .RPC.MaxBodyBytes }} - -# Maximum size of request header, in bytes -max_header_bytes = {{ .RPC.MaxHeaderBytes }} - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to CometBFT's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "{{ .RPC.TLSCertFile }}" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to CometBFT's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "{{ .RPC.TLSKeyFile }}" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "{{ .RPC.PprofListenAddress }}" - -####################################################### -### gRPC Server Configuration Options ### -####################################################### - -# -# Note that the gRPC server is exposed unauthenticated. It is critical that -# this server not be exposed directly to the public internet. If this service -# must be accessed via the public internet, please ensure that appropriate -# precautions are taken (e.g. fronting with a reverse proxy like nginx with TLS -# termination and authentication, using DDoS protection services like -# CloudFlare, etc.). -# - -[grpc] - -# TCP or UNIX socket address for the RPC server to listen on. If not specified, -# the gRPC server will be disabled. -laddr = "{{ .GRPC.ListenAddress }}" - -# -# Each gRPC service can be turned on/off, and in some cases configured, -# individually. If the gRPC server is not enabled, all individual services' -# configurations are ignored. -# - -# The gRPC version service provides version information about the node and the -# protocols it uses. -[grpc.version_service] -enabled = {{ .GRPC.VersionService.Enabled }} - -# The gRPC block service returns block information -[grpc.block_service] -enabled = {{ .GRPC.BlockService.Enabled }} - -# The gRPC block results service returns block results for a given height. If no height -# is given, it will return the block results from the latest height. -[grpc.block_results_service] -enabled = {{ .GRPC.BlockResultsService.Enabled }} - -# -# Configuration for privileged gRPC endpoints, which should **never** be exposed -# to the public internet. -# -[grpc.privileged] -# The host/port on which to expose privileged gRPC endpoints. -laddr = "{{ .GRPC.Privileged.ListenAddress }}" - -# -# Configuration specifically for the gRPC pruning service, which is considered a -# privileged service. -# -[grpc.privileged.pruning_service] - -# Only controls whether the pruning service is accessible via the gRPC API - not -# whether a previously set pruning service retain height is honored by the -# node. See the [storage.pruning] section for control over pruning. -# -# Disabled by default. -enabled = {{ .GRPC.Privileged.PruningService.Enabled }} - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "{{ .P2P.ListenAddress }}" - -# Address to advertise to peers for them to dial. If empty, will use the same -# port as the laddr, and will introspect on the listener to figure out the -# address. IP and port are required. 
Example: 159.89.10.97:26656 -external_address = "{{ .P2P.ExternalAddress }}" - -# Comma separated list of seed nodes to connect to -seeds = "{{ .P2P.Seeds }}" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "{{ .P2P.PersistentPeers }}" - -# Path to address book -addr_book_file = "{{ js .P2P.AddrBook }}" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = {{ .P2P.AddrBookStrict }} - -# Maximum number of inbound peers -max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }} - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }} - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "{{ .P2P.UnconditionalPeerIDs }}" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} - -# Rate at which packets can be sent, in bytes/second -send_rate = {{ .P2P.SendRate }} - -# Rate at which packets can be received, in bytes/second -recv_rate = {{ .P2P.RecvRate }} - -# Set true to enable the peer-exchange reactor -pex = {{ .P2P.PexReactor }} - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = {{ .P2P.SeedMode }} - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "{{ .P2P.PrivatePeerIDs }}" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} - -# Peer connection configuration. -handshake_timeout = "{{ .P2P.HandshakeTimeout }}" -dial_timeout = "{{ .P2P.DialTimeout }}" - -####################################################### -### Mempool Configuration Options ### -####################################################### -[mempool] - -# recheck (default: true) defines whether CometBFT should recheck the -# validity for all remaining transaction in the mempool after a block. -# Since a block affects the application state, some transactions in the -# mempool may become invalid. If this does not apply to your application, -# you can disable rechecking. -recheck = {{ .Mempool.Recheck }} - -# broadcast (default: true) defines whether the mempool should relay -# transactions to other peers. Setting this to false will stop the mempool -# from relaying transactions to other peers until they are included in a -# block. In other words, if Broadcast is disabled, only the peer you send -# the tx to will see it until it is included in a block. -broadcast = {{ .Mempool.Broadcast }} - -# wal_dir (default: "") configures the location of the Write Ahead Log -# (WAL) for the mempool. The WAL is disabled by default. To enable, set -# wal_dir to where you want the WAL to be written (e.g. -# "data/mempool.wal"). -wal_dir = "{{ js .Mempool.WalPath }}" - -# Maximum number of transactions in the mempool -size = {{ .Mempool.Size }} - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. 
given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = {{ .Mempool.MaxTxsBytes }} - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = {{ .Mempool.CacheSize }} - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = {{ .Mempool.KeepInvalidTxsInCache }} - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = {{ .Mempool.MaxTxBytes }} - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = {{ .Mempool.MaxBatchBytes }} - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = {{ .StateSync.Enable }} - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "{{ StringsJoin .StateSync.RPCServers "," }}" -trust_height = {{ .StateSync.TrustHeight }} -trust_hash = "{{ .StateSync.TrustHash }}" -trust_period = "{{ .StateSync.TrustPeriod }}" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "{{ .StateSync.DiscoveryTime }}" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "{{ .StateSync.TempDir }}" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "{{ .StateSync.ChunkRequestTimeout }}" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "{{ .StateSync.ChunkFetchers }}" - -####################################################### -### Block Sync Configuration Options ### -####################################################### -[blocksync] - -# Block Sync version to use: -# -# In v0.37, v1 and v2 of the block sync protocols were deprecated. -# Please use v0 instead. 
-# -# 1) "v0" - the default block sync implementation -version = "{{ .BlockSync.Version }}" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "{{ js .Consensus.WalPath }}" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "{{ .Consensus.TimeoutPropose }}" -# How much timeout_propose increases with each round -timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "{{ .Consensus.TimeoutPrevote }}" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "{{ .Consensus.TimeoutCommit }}" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = {{ .Consensus.DoubleSignCheckHeight }} - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} -create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" -peer_gossip_intraloop_sleep_duration = "{{ .Consensus.PeerGossipIntraloopSleepDuration }}" -peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" - -####################################################### -### Storage Configuration Options ### -####################################################### -[storage] - -# Set to true to discard ABCI responses from the state store, which can save a -# considerable amount of disk space. Set to false to ensure ABCI responses are -# persisted. ABCI responses are required for /block_results RPC queries, and to -# reindex events in the command-line tool. -discard_abci_responses = {{ .Storage.DiscardABCIResponses}} - -[storage.pruning] - -# The time period between automated background pruning operations. -interval = "{{ .Storage.Pruning.Interval }}" - -# -# Storage pruning configuration relating only to the data companion. -# -[storage.pruning.data_companion] - -# Whether automatic pruning respects values set by the data companion. Disabled -# by default. All other parameters in this section are ignored when this is -# disabled. -# -# If disabled, only the application retain height will influence block pruning -# (but not block results pruning). 
Only enabling this at a later stage will -# potentially mean that blocks below the application-set retain height at the -# time will not be available to the data companion. -enabled = {{ .Storage.Pruning.DataCompanion.Enabled }} - -# The initial value for the data companion block retain height if the data -# companion has not yet explicitly set one. If the data companion has already -# set a block retain height, this is ignored. -initial_block_retain_height = {{ .Storage.Pruning.DataCompanion.InitialBlockRetainHeight }} - -# The initial value for the data companion block results retain height if the -# data companion has not yet explicitly set one. If the data companion has -# already set a block results retain height, this is ignored. -initial_block_results_retain_height = {{ .Storage.Pruning.DataCompanion.InitialBlockResultsRetainHeight }} - - -# Hash of the Genesis file (as hex string), passed to CometBFT via the command line. -# If this hash mismatches the hash that CometBFT computes on the genesis file, -# the node is not able to boot. -genesis_hash = "{{ .Storage.GenesisHash }}" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -# 3) "psql" - the indexer services backed by PostgreSQL. -# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "{{ .TxIndex.Indexer }}" - -# The PostgreSQL connection configuration, the connection format: -# postgresql://:@:/? -psql-conn = "{{ .TxIndex.PsqlConn }}" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = {{ .Instrumentation.Prometheus }} - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = {{ .Instrumentation.MaxOpenConnections }} - -# Instrumentation namespace -namespace = "{{ .Instrumentation.Namespace }}" -` +// must be reflected in the appropriate struct in config/config.go. 
+// +//go:embed config.toml.tpl +var defaultConfigTemplate string diff --git a/config/toml_test.go b/config/toml_test.go index 58a103e9371..7434dd61028 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -13,10 +13,11 @@ import ( ) func ensureFiles(t *testing.T, rootDir string, files ...string) { + t.Helper() for _, f := range files { p := filepath.Join(rootDir, f) _, err := os.Stat(p) - assert.NoError(t, err, p) + require.NoError(t, err, p) } } @@ -25,7 +26,7 @@ func TestEnsureRoot(t *testing.T) { // setup temp dir for test tmpDir, err := os.MkdirTemp("", "config-test") - require.Nil(err) + require.NoError(err) defer os.RemoveAll(tmpDir) // create root dir @@ -33,7 +34,7 @@ func TestEnsureRoot(t *testing.T) { // make sure config is set properly data, err := os.ReadFile(filepath.Join(tmpDir, config.DefaultConfigDir, config.DefaultConfigFileName)) - require.Nil(err) + require.NoError(err) assertValidConfig(t, string(data)) @@ -50,7 +51,7 @@ func TestEnsureTestRoot(t *testing.T) { // make sure config is set properly data, err := os.ReadFile(filepath.Join(rootDir, config.DefaultConfigDir, config.DefaultConfigFileName)) - require.Nil(err) + require.NoError(err) assertValidConfig(t, string(data)) @@ -62,7 +63,7 @@ func TestEnsureTestRoot(t *testing.T) { func assertValidConfig(t *testing.T, configFile string) { t.Helper() // list of words we expect in the config - var elems = []string{ + elems := []string{ "moniker", "seeds", "proxy_app", diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go deleted file mode 100644 index 3f614a94374..00000000000 --- a/consensus/replay_stubs.go +++ /dev/null @@ -1,82 +0,0 @@ -package consensus - -import ( - "context" - - abcicli "github.com/cometbft/cometbft/abci/client" - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/libs/clist" - mempl "github.com/cometbft/cometbft/mempool" - "github.com/cometbft/cometbft/proxy" - "github.com/cometbft/cometbft/types" -) - -//----------------------------------------------------------------------------- - -type emptyMempool struct{} - -var _ mempl.Mempool = emptyMempool{} - -func (emptyMempool) Lock() {} -func (emptyMempool) Unlock() {} -func (emptyMempool) Size() int { return 0 } -func (emptyMempool) SizeBytes() int64 { return 0 } -func (emptyMempool) CheckTx(types.Tx) (*abcicli.ReqRes, error) { - return nil, nil -} - -func (txmp emptyMempool) RemoveTxByKey(types.TxKey) error { - return nil -} - -func (emptyMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return types.Txs{} } -func (emptyMempool) ReapMaxTxs(int) types.Txs { return types.Txs{} } -func (emptyMempool) Update( - int64, - types.Txs, - []*abci.ExecTxResult, - mempl.PreCheckFunc, - mempl.PostCheckFunc, -) error { - return nil -} -func (emptyMempool) Flush() {} -func (emptyMempool) FlushAppConn() error { return nil } -func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) SetTxRemovedCallback(func(types.TxKey)) {} -func (emptyMempool) TxsBytes() int64 { return 0 } -func (emptyMempool) InMempool(types.TxKey) bool { return false } - -func (emptyMempool) TxsFront() *clist.CElement { return nil } -func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } - -func (emptyMempool) InitWAL() error { return nil } -func (emptyMempool) CloseWAL() {} - -//----------------------------------------------------------------------------- -// mockProxyApp uses ABCIResponses to give the right results. 
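With the template now embedded, config/toml.go only needs the //go:embed directive plus a blank `_ "embed"` import, as the hunk above shows. A minimal self-contained sketch of the same pattern, using a hypothetical greeting.tpl file (the file and variable names are assumptions, not part of this diff):

package main

import (
	_ "embed" // blank import activates the //go:embed directive below
	"fmt"
)

// greeting.tpl must exist next to this source file at build time;
// its contents are compiled into the binary as a plain string.
//
//go:embed greeting.tpl
var greetingTemplate string

func main() {
	fmt.Print(greetingTemplate)
}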
-// -// Useful because we don't want to call Commit() twice for the same block on -// the real app. - -func newMockProxyApp(finalizeBlockResponse *abci.ResponseFinalizeBlock) proxy.AppConnConsensus { - clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ - finalizeBlockResponse: finalizeBlockResponse, - }) - cli, _ := clientCreator.NewABCIConsensusClient() - err := cli.Start() - if err != nil { - panic(err) - } - return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()) -} - -type mockProxyApp struct { - abci.BaseApplication - finalizeBlockResponse *abci.ResponseFinalizeBlock -} - -func (mock *mockProxyApp) FinalizeBlock(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - return mock.finalizeBlockResponse, nil -} diff --git a/crypto/README.md b/crypto/README.md index 507239a4d8c..ed701e0f4f0 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -23,8 +23,6 @@ Example JSON encodings: ed25519.PrivKey - {"type":"tendermint/PrivKeyEd25519","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="} ed25519.PubKey - {"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="} -sr25519.PrivKeySr25519 - {"type":"tendermint/PrivKeySr25519","value":"xtYVH8UCIqfrY8FIFc0QEpAEBShSG4NT0zlEOVSZ2w4="} -sr25519.PubKeySr25519 - {"type":"tendermint/PubKeySr25519","value":"8sKBLKQ/OoXMcAJVxBqz1U7TyxRFQ5cmliuHy4MrF0s="} crypto.PrivKeySecp256k1 - {"type":"tendermint/PrivKeySecp256k1","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="} crypto.PubKeySecp256k1 - {"type":"tendermint/PubKeySecp256k1","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"} ``` diff --git a/crypto/armor/armor_test.go b/crypto/armor/armor_test.go index 7f319a02bb0..051d285297f 100644 --- a/crypto/armor/armor_test.go +++ b/crypto/armor/armor_test.go @@ -11,11 +11,11 @@ func TestArmor(t *testing.T) { blockType := "MINT TEST" data := []byte("somedata") armorStr, err := EncodeArmor(blockType, nil, data) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) // Decode armorStr and test for equivalence. blockType2, _, data2, err := DecodeArmor(armorStr) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) assert.Equal(t, blockType, blockType2) assert.Equal(t, data, data2) } diff --git a/crypto/batch/batch.go b/crypto/batch/batch.go index 7587bc711ab..6cf381db2e6 100644 --- a/crypto/batch/batch.go +++ b/crypto/batch/batch.go @@ -3,30 +3,30 @@ package batch import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" - "github.com/cometbft/cometbft/crypto/sr25519" ) // CreateBatchVerifier checks if a key type implements the batch verifier interface. -// Currently only ed25519 & sr25519 supports batch verification. +// Currently only ed25519 supports batch verification. func CreateBatchVerifier(pk crypto.PubKey) (crypto.BatchVerifier, bool) { switch pk.Type() { case ed25519.KeyType: return ed25519.NewBatchVerifier(), true - case sr25519.KeyType: - return sr25519.NewBatchVerifier(), true + default: + return nil, false } - - // case where the key does not support batch verification - return nil, false } // SupportsBatchVerifier checks if a key type implements the batch verifier // interface. 
func SupportsBatchVerifier(pk crypto.PubKey) bool { + if pk == nil { + return false + } + switch pk.Type() { - case ed25519.KeyType, sr25519.KeyType: + case ed25519.KeyType: return true + default: + return false } - - return false } diff --git a/crypto/bls12381/const.go b/crypto/bls12381/const.go new file mode 100644 index 00000000000..439a3d55b3e --- /dev/null +++ b/crypto/bls12381/const.go @@ -0,0 +1,16 @@ +package bls12381 + +const ( + // PrivKeySize defines the length of the PrivKey byte array. + PrivKeySize = 32 + // PubKeySize defines the length of the PubKey byte array. + PubKeySize = 96 + // SignatureLength defines the byte length of a BLS signature. + SignatureLength = 96 + // KeyType is the string constant for the BLS12-381 algorithm. + KeyType = "bls12_381" + // BLS12-381 private key name. + PrivKeyName = "cometbft/PrivKeyBls12_381" + // BLS12-381 public key name. + PubKeyName = "cometbft/PubKeyBls12_381" +) diff --git a/crypto/bls12381/doc.go b/crypto/bls12381/doc.go new file mode 100644 index 00000000000..77331365d34 --- /dev/null +++ b/crypto/bls12381/doc.go @@ -0,0 +1 @@ +package bls12381 diff --git a/crypto/bls12381/key.go b/crypto/bls12381/key.go new file mode 100644 index 00000000000..efe583cd0cd --- /dev/null +++ b/crypto/bls12381/key.go @@ -0,0 +1,110 @@ +//go:build !bls12381 + +package bls12381 + +import ( + "errors" + + "github.com/cometbft/cometbft/crypto" +) + +const ( + // Enabled indicates if this curve is enabled. + Enabled = false ) + +// ErrDisabled is returned if the caller didn't use the `bls12381` build tag or has an incompatible OS. +var ErrDisabled = errors.New("bls12_381 is disabled") + +// =============================================================================================== +// Private Key +// =============================================================================================== + +// PrivKey is a wrapper around the Ethereum BLS12-381 private key type. This +// wrapper conforms to crypto.PrivKey to allow for the use of the Ethereum +// BLS12-381 private key type. + +// Compile-time type assertion. +var _ crypto.PrivKey = &PrivKey{} + +// PrivKey represents a no-op BLS private key, used when the bls12381 build tag is not set or cgo is disabled. +type PrivKey []byte + +// GenPrivKeyFromSecret returns ErrDisabled. +func GenPrivKeyFromSecret([]byte) (PrivKey, error) { + return nil, ErrDisabled +} + +// NewPrivateKeyFromBytes returns ErrDisabled. +func NewPrivateKeyFromBytes([]byte) (PrivKey, error) { + return nil, ErrDisabled +} + +// GenPrivKey returns ErrDisabled. +func GenPrivKey() (PrivKey, error) { + return nil, ErrDisabled +} + +// Bytes returns the byte representation of the Key. +func (privKey PrivKey) Bytes() []byte { + return privKey +} + +// PubKey always panics. +func (PrivKey) PubKey() crypto.PubKey { + panic("bls12_381 is disabled") +} + +// Type returns the key's type. +func (PrivKey) Type() string { + return KeyType +} + +// Sign always panics. +func (PrivKey) Sign([]byte) ([]byte, error) { + panic("bls12_381 is disabled") +} + +// Zeroize always panics. +func (PrivKey) Zeroize() { + panic("bls12_381 is disabled") +} + +// =============================================================================================== +// Public Key +// =============================================================================================== + +// PubKey is a wrapper around the Ethereum BLS12-381 public key type. This +// wrapper conforms to crypto.PubKey to allow for the use of the Ethereum +// BLS12-381 public key type.
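The stub above is compiled in when the bls12381 build tag is absent, so callers are expected to gate on bls12381.Enabled (or handle ErrDisabled) before touching the curve. A minimal sketch of that guard, assuming only the exported names shown in this diff; it compiles in both build modes and never reaches the stub's panics:

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/crypto/bls12381"
)

func main() {
	// Without the bls12381 build tag, Enabled is false and the no-op
	// stub's constructors return ErrDisabled instead of a usable key.
	if !bls12381.Enabled {
		fmt.Println("bls12_381 support not compiled in; skipping")
		return
	}

	privKey, err := bls12381.GenPrivKey()
	if err != nil {
		panic(err)
	}
	defer privKey.Zeroize() // wipe the key material when done

	msg := []byte("example message")
	sig, err := privKey.Sign(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(privKey.PubKey().VerifySignature(msg, sig)) // true
}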
+ +// Compile-time type assertion. +var _ crypto.PubKey = &PubKey{} + +// PubKey represents a no-op BLS public key, used when the bls12381 build tag is not set or cgo is disabled. +type PubKey []byte + +// NewPublicKeyFromBytes returns ErrDisabled. +func NewPublicKeyFromBytes([]byte) (*PubKey, error) { + return nil, ErrDisabled +} + +// Address always panics. +func (PubKey) Address() crypto.Address { + panic("bls12_381 is disabled") +} + +// VerifySignature always panics. +func (PubKey) VerifySignature([]byte, []byte) bool { + panic("bls12_381 is disabled") +} + +// Bytes always panics. +func (PubKey) Bytes() []byte { + panic("bls12_381 is disabled") +} + +// Type returns the key's type. +func (PubKey) Type() string { + return KeyType +} diff --git a/crypto/bls12381/key_bls12381.go b/crypto/bls12381/key_bls12381.go new file mode 100644 index 00000000000..28727cf7af6 --- /dev/null +++ b/crypto/bls12381/key_bls12381.go @@ -0,0 +1,218 @@ +//go:build bls12381 + +package bls12381 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/json" + "errors" + + blst "github.com/supranational/blst/bindings/go" + + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/tmhash" + cmtjson "github.com/cometbft/cometbft/libs/json" +) + +const ( + // Enabled indicates if this curve is enabled. + Enabled = true +) + +var ( + // ErrDeserialization is returned when deserialization fails. + ErrDeserialization = errors.New("bls12381: deserialization error") + // ErrInfinitePubKey is returned when the public key is infinite. It is part + // of a more comprehensive subgroup check on the key. + ErrInfinitePubKey = errors.New("bls12381: pubkey is infinite") + + dstMinSig = []byte("BLS_SIG_BLS12381G1_XMD:SHA-256_SSWU_RO_NUL_") +) + +// For minimal-pubkey-size operations. +// +// Changing this to 'minimal-signature-size' would render CometBFT not Ethereum +// compatible. +type ( + blstPublicKey = blst.P1Affine + blstSignature = blst.P2Affine + blstAggregateSignature = blst.P1Aggregate + blstAggregatePublicKey = blst.P2Aggregate +) + +// -------------------------------------. + +func init() { + cmtjson.RegisterType(PubKey{}, PubKeyName) + cmtjson.RegisterType(PrivKey{}, PrivKeyName) } + +// =============================================================================================== +// Private Key +// =============================================================================================== + +// PrivKey is a wrapper around the Ethereum BLS12-381 private key type. This +// wrapper conforms to crypto.PrivKey to allow for the use of the Ethereum +// BLS12-381 private key type. + +var _ crypto.PrivKey = &PrivKey{} + +type PrivKey struct { + sk *blst.SecretKey +} + +// GenPrivKeyFromSecret deterministically generates a new key, using `secret` for the seed. +func GenPrivKeyFromSecret(secret []byte) (*PrivKey, error) { + if len(secret) != 32 { + seed := sha256.Sum256(secret) // We need 32 bytes + secret = seed[:] + } + + sk := blst.KeyGen(secret) + return &PrivKey{sk: sk}, nil +} + +// NewPrivateKeyFromBytes builds a new key from the given bytes. +func NewPrivateKeyFromBytes(bz []byte) (*PrivKey, error) { + sk := new(blst.SecretKey).Deserialize(bz) + if sk == nil { + return nil, ErrDeserialization + } + return &PrivKey{sk: sk}, nil +} + +// GenPrivKey generates a new key. +func GenPrivKey() (*PrivKey, error) { + var ikm [32]byte + _, err := rand.Read(ikm[:]) + if err != nil { + return nil, err + } + return GenPrivKeyFromSecret(ikm[:]) +} + +// Bytes returns the byte representation of the Key.
+func (privKey PrivKey) Bytes() []byte { + return privKey.sk.Serialize() +} + +// PubKey returns the private key's public key. If the privkey is not valid +// it returns a nil value. +func (privKey PrivKey) PubKey() crypto.PubKey { + return PubKey{pk: new(blstPublicKey).From(privKey.sk)} +} + +// Type returns the type. +func (PrivKey) Type() string { + return KeyType +} + +// Sign signs the given byte array. +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + signature := new(blstSignature).Sign(privKey.sk, msg, dstMinSig) + return signature.Compress(), nil +} + +// Zeroize clears the private key. +func (privKey *PrivKey) Zeroize() { + privKey.sk.Zeroize() +} + +// MarshalJSON marshals the private key to JSON. +func (privKey *PrivKey) MarshalJSON() ([]byte, error) { + return json.Marshal(privKey.Bytes()) +} + +// UnmarshalJSON unmarshals the private key from JSON. +func (privKey *PrivKey) UnmarshalJSON(bz []byte) error { + var rawBytes []byte + if err := json.Unmarshal(bz, &rawBytes); err != nil { + return err + } + pk, err := NewPrivateKeyFromBytes(rawBytes) + if err != nil { + return err + } + privKey.sk = pk.sk + return nil +} + +// =============================================================================================== +// Public Key +// =============================================================================================== + +// PubKey is a wrapper around the Ethereum BLS12-381 public key type. This +// wrapper conforms to crypto.PubKey to allow for the use of the Ethereum +// BLS12-381 public key type. + +var _ crypto.PubKey = &PubKey{} + +type PubKey struct { + pk *blstPublicKey +} + +// NewPublicKeyFromBytes returns a new public key from the given bytes. +func NewPublicKeyFromBytes(bz []byte) (*PubKey, error) { + pk := new(blstPublicKey).Deserialize(bz) + if pk == nil { + return nil, ErrDeserialization + } + // Subgroup and infinity check + if !pk.KeyValidate() { + return nil, ErrInfinitePubKey + } + return &PubKey{pk: pk}, nil +} + +// Address returns the address of the key. +// +// The function will panic if the public key is invalid. +func (pubKey PubKey) Address() crypto.Address { + return crypto.Address(tmhash.SumTruncated(pubKey.pk.Serialize())) +} + +// VerifySignature verifies the given signature. +func (pubKey PubKey) VerifySignature(msg, sig []byte) bool { + signature := new(blstSignature).Uncompress(sig) + if signature == nil { + return false + } + + // Group check signature. Do not check for infinity since an aggregated signature + // could be infinite. + if !signature.SigValidate(false) { + return false + } + + return signature.Verify(false, pubKey.pk, false, msg, dstMinSig) +} + +// Bytes returns the byte format. +func (pubKey PubKey) Bytes() []byte { + return pubKey.pk.Serialize() +} + +// Type returns the key's type. +func (PubKey) Type() string { + return KeyType +} + +// MarshalJSON marshals the public key to JSON. +func (pubkey PubKey) MarshalJSON() ([]byte, error) { + return json.Marshal(pubkey.Bytes()) +} + +// UnmarshalJSON unmarshals the public key from JSON.
+func (pubkey *PubKey) UnmarshalJSON(bz []byte) error { + var rawBytes []byte + if err := json.Unmarshal(bz, &rawBytes); err != nil { + return err + } + pk, err := NewPublicKeyFromBytes(rawBytes) + if err != nil { + return err + } + pubkey.pk = pk.pk + return nil +} diff --git a/crypto/bls12381/key_test.go b/crypto/bls12381/key_test.go new file mode 100644 index 00000000000..bb66be1d866 --- /dev/null +++ b/crypto/bls12381/key_test.go @@ -0,0 +1,216 @@ +//go:build bls12381 + +package bls12381_test + +import ( + "encoding/hex" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + blst "github.com/supranational/blst/bindings/go" + + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/bls12381" +) + +func TestNewPrivateKeyFromBytes(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + + privKeyBytes := privKey.Bytes() + privKey2, err := bls12381.NewPrivateKeyFromBytes(privKeyBytes) + require.NoError(t, err) + defer privKey2.Zeroize() + + assert.Equal(t, privKey, privKey2) + + _, err = bls12381.NewPrivateKeyFromBytes(crypto.CRandBytes(31)) + assert.Error(t, err) +} + +func TestGenPrivateKey(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + assert.NotNil(t, privKey) +} + +func TestGenPrivKeyFromSecret(t *testing.T) { + secret := []byte("this is my secret") + privKey, err := bls12381.GenPrivKeyFromSecret(secret) + require.NoError(t, err) + assert.NotNil(t, privKey) +} + +func TestGenPrivKeyFromSecret_SignVerify(t *testing.T) { + secret := []byte("this is my secret for priv key") + priv, err := bls12381.GenPrivKeyFromSecret(secret) + require.NoError(t, err) + msg := []byte("this is my message to sign") + sig, err := priv.Sign(msg) + require.NoError(t, err) + + pub := priv.PubKey() + assert.True(t, pub.VerifySignature(msg, sig), "Signature did not verify") +} + +func TestPrivKeyBytes(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + + privKeyBytes := privKey.Bytes() + privKey2, err := bls12381.NewPrivateKeyFromBytes(privKeyBytes) + require.NoError(t, err) + defer privKey2.Zeroize() + + assert.Equal(t, privKey, privKey2) +} + +func TestPrivKeyPubKey(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + pubKey := privKey.PubKey() + assert.NotNil(t, pubKey) +} + +func TestPrivKeyType(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + + assert.Equal(t, "bls12_381", privKey.Type()) +} + +func TestPrivKeySignAndPubKeyVerifySignature(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + pubKey := privKey.PubKey() + + msg := crypto.CRandBytes(32) + sig, err := privKey.Sign(msg) + require.NoError(t, err) + + // Test the signature + assert.True(t, pubKey.VerifySignature(msg, sig)) + + // Mutate the signature, just one bit. 
+ // TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 + sig[7] ^= byte(0x01) + + assert.False(t, pubKey.VerifySignature(msg, sig)) + + msg = crypto.CRandBytes(192) + sig, err = privKey.Sign(msg) + require.NoError(t, err) + + // Test the signature + assert.True(t, pubKey.VerifySignature(msg, sig)) +} + +func TestPubKey(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + pubKey := privKey.PubKey() + assert.NotNil(t, pubKey) +} + +func TestPubKeyType(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + pubKey := privKey.PubKey() + + assert.Equal(t, "bls12_381", pubKey.Type()) +} + +func TestConst(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + assert.Equal(t, bls12381.PrivKeySize, len(privKey.Bytes())) + + pubKey := privKey.PubKey() + assert.Equal(t, bls12381.PubKeySize, len(pubKey.Bytes())) + + msg := crypto.CRandBytes(32) + sig, err := privKey.Sign(msg) + require.NoError(t, err) + assert.Equal(t, bls12381.SignatureLength, len(sig)) +} + +func TestPrivKey_MarshalJSON(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + + jsonBytes, err := privKey.MarshalJSON() + require.NoError(t, err) + + privKey2 := new(bls12381.PrivKey) + err = privKey2.UnmarshalJSON(jsonBytes) + require.NoError(t, err) +} + +func TestPubKey_MarshalJSON(t *testing.T) { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + pubKey, _ := privKey.PubKey().(bls12381.PubKey) + + jsonBytes, err := pubKey.MarshalJSON() + require.NoError(t, err) + + pubKey2 := new(bls12381.PubKey) + err = pubKey2.UnmarshalJSON(jsonBytes) + require.NoError(t, err) +} + +func TestPubKey_NewPublicKeyFromInvalidBytes(t *testing.T) { + unmarshal := func(s string) ([]byte, error) { + type blstPublicKey = blst.P1Affine + + bz, err := hex.DecodeString(s) + if err != nil { + return nil, err + } + pk := new(blstPublicKey).Uncompress(bz) + if pk == nil { + return nil, bls12381.ErrDeserialization + } + pkc := pk.Serialize() + if pkc == nil { + return nil, errors.New("could not serialize pubkey") + } + return pkc, nil + } + + testCases := []struct { + desc string + pkStr string + expectedErr error + }{ + {"NotInG1", "8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", bls12381.ErrInfinitePubKey}, + {"InfFalseB", "800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", bls12381.ErrDeserialization}, + {"InfTrueB", "c01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", bls12381.ErrDeserialization}, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + bz, err := unmarshal(tc.pkStr) + if err != nil { + t.Log(tc.desc, "unmarshal error", err) + require.Equal(t, tc.expectedErr, err) + } + + _, err = bls12381.NewPublicKeyFromBytes(bz) + require.Equal(t, tc.expectedErr, err) + t.Log(tc.desc, "NewPrivateKeyFromBytes error", err) + }) + } +} diff --git a/crypto/crypto.go b/crypto/crypto.go index e4825b13259..9f26ee51664 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -19,11 +19,11 @@ func AddressHash(bz []byte) Address { return Address(tmhash.SumTruncated(bz)) } +//go:generate ../scripts/mockery_generate.sh PubKey type PubKey interface { Address() Address Bytes() []byte VerifySignature(msg []byte, sig []byte) 
bool - Equals(PubKey) bool Type() string } @@ -31,7 +31,6 @@ type PrivKey interface { Bytes() []byte Sign(msg []byte) ([]byte, error) PubKey() PubKey - Equals(PrivKey) bool Type() string } @@ -42,7 +41,9 @@ type Symmetric interface { } // If a new key type implements batch verification, -// the key type must be registered in github.com/cometbft/cometbft/crypto/batch +// the key type must be registered in github.com/cometbft/cometbft/crypto/batch. +// +//go:generate ../scripts/mockery_generate.sh BatchVerifier type BatchVerifier interface { // Add appends an entry into the BatchVerifier. Add(key PubKey, message, signature []byte) error diff --git a/crypto/doc.go b/crypto/doc.go index 6b58cd8b308..2b822f7fb4a 100644 --- a/crypto/doc.go +++ b/crypto/doc.go @@ -1,42 +1,34 @@ -// crypto is a customized/convenience cryptography package for supporting -// CometBFT. - +// crypto is a customized/convenience cryptography package for CometBFT. +// // It wraps select functionality of equivalent functions in the // Go standard library, for easy usage with our libraries. - +// // Keys: - +// // All key generation functions return an instance of the PrivKey interface -// which implements methods - -// AssertIsPrivKeyInner() -// Bytes() []byte -// Sign(msg []byte) Signature -// PubKey() PubKey -// Equals(PrivKey) bool -// Wrap() PrivKey - -// From the above method we can: -// a) Retrieve the public key if needed - -// pubKey := key.PubKey() - -// For example: -// privKey, err := ed25519.GenPrivKey() -// if err != nil { -// ... -// } -// pubKey := privKey.PubKey() -// ... -// // And then you can use the private and public key -// doSomething(privKey, pubKey) - -// We also provide hashing wrappers around algorithms: - -// Sha256 -// sum := crypto.Sha256([]byte("This is CometBFT")) -// fmt.Printf("%x\n", sum) - +// which implements methods: +// +// type PrivKey interface { +// Bytes() []byte +// Sign(msg []byte) ([]byte, error) +// PubKey() PubKey +// Type() string +// } +// +// From the above method we can retrieve the public key if needed: +// +// privKey, err := ed25519.GenPrivKey() +// if err != nil { +// panic(err) +// } +// pubKey := privKey.PubKey() +// +// The resulting public key is an instance of the PubKey interface: +// +// type PubKey interface { +// Address() Address +// Bytes() []byte +// VerifySignature(msg []byte, sig []byte) bool +// Type() string +// } package crypto - -// TODO: Add more docs in here diff --git a/crypto/ed25519/bench_test.go b/crypto/ed25519/bench_test.go index 114e872735b..e78b7170b65 100644 --- a/crypto/ed25519/bench_test.go +++ b/crypto/ed25519/bench_test.go @@ -32,7 +32,6 @@ func BenchmarkVerifyBatch(b *testing.B) { msg := []byte("BatchVerifyTest") for _, sigsCount := range []int{1, 8, 64, 1024} { - sigsCount := sigsCount b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) { // Pre-generate all of the keys, and signatures, but do not // benchmark key-generation and signing. diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index 544cfc87dd5..bff611712b7 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -1,8 +1,7 @@ package ed25519 import ( - "bytes" - "crypto/subtle" + "crypto/sha256" "errors" "fmt" "io" @@ -47,7 +46,7 @@ var ( const ( PrivKeyName = "tendermint/PrivKeyEd25519" PubKeyName = "tendermint/PubKeyEd25519" - // PubKeySize is is the size, in bytes, of public keys as used in this package. + // PubKeySize is the size, in bytes, of public keys as used in this package. 
PubKeySize = 32 // PrivateKeySize is the size, in bytes, of private keys as used in this package. PrivateKeySize = 64 @@ -117,17 +116,7 @@ func (privKey PrivKey) PubKey() crypto.PubKey { return PubKey(pubkeyBytes) } -// Equals - you probably don't need to use this. -// Runs in constant time based on length of the keys. -func (privKey PrivKey) Equals(other crypto.PrivKey) bool { - if otherEd, ok := other.(PrivKey); ok { - return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 - } - - return false -} - -func (privKey PrivKey) Type() string { +func (PrivKey) Type() string { return KeyType } @@ -153,12 +142,12 @@ func genPrivKey(rand io.Reader) PrivKey { // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. func GenPrivKeyFromSecret(secret []byte) PrivKey { - seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. + seed := sha256.Sum256(secret) // Not Ripemd160 because we want 32 bytes. - return PrivKey(ed25519.NewKeyFromSeed(seed)) + return PrivKey(ed25519.NewKeyFromSeed(seed[:])) } -//------------------------------------- +// ------------------------------------- var _ crypto.PubKey = PubKey{} @@ -191,19 +180,11 @@ func (pubKey PubKey) String() string { return fmt.Sprintf("PubKeyEd25519{%X}", []byte(pubKey)) } -func (pubKey PubKey) Type() string { +func (PubKey) Type() string { return KeyType } -func (pubKey PubKey) Equals(other crypto.PubKey) bool { - if otherEd, ok := other.(PubKey); ok { - return bytes.Equal(pubKey[:], otherEd[:]) - } - - return false -} - -//------------------------------------- +// ------------------------------------- // BatchVerifier implements batch verification for ed25519. type BatchVerifier struct { diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go index 65696290929..3405165c314 100644 --- a/crypto/ed25519/ed25519_test.go +++ b/crypto/ed25519/ed25519_test.go @@ -16,7 +16,7 @@ func TestSignAndValidateEd25519(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.Sign(msg) - require.Nil(t, err) + require.NoError(t, err) // Test the signature assert.True(t, pubKey.VerifySignature(msg, sig)) diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go index 1831aa6a75c..e3fdfb4e3aa 100644 --- a/crypto/encoding/codec.go +++ b/crypto/encoding/codec.go @@ -2,25 +2,27 @@ package encoding import ( "fmt" + "reflect" + pc "github.com/cometbft/cometbft/api/cometbft/crypto/v1" "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/bls12381" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/secp256k1" "github.com/cometbft/cometbft/libs/json" - pc "github.com/cometbft/cometbft/proto/tendermint/crypto" ) // ErrUnsupportedKey describes an error resulting from the use of an // unsupported key in [PubKeyToProto] or [PubKeyFromProto]. type ErrUnsupportedKey struct { - Key any + KeyType string } func (e ErrUnsupportedKey) Error() string { - return fmt.Sprintf("encoding: unsupported key %v", e.Key) + return "encoding: unsupported key " + e.KeyType } -// InvalidKeyLen describes an error resulting from the use of a key with +// ErrInvalidKeyLen describes an error resulting from the use of a key with // an invalid length in [PubKeyFromProto]. 
type ErrInvalidKeyLen struct { Key any @@ -35,31 +37,52 @@ func init() { json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey") json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") + if bls12381.Enabled { + json.RegisterType((*pc.PublicKey_Bls12381)(nil), "tendermint.crypto.PublicKey_Bls12381") + } } -// PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey +// PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey. It +// returns ErrUnsupportedKey if the pubkey type is unsupported. func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { var kp pc.PublicKey - switch k := k.(type) { - case ed25519.PubKey: + + if k == nil { + return kp, ErrUnsupportedKey{KeyType: ""} + } + + switch k.Type() { + case ed25519.KeyType: kp = pc.PublicKey{ Sum: &pc.PublicKey_Ed25519{ - Ed25519: k, + Ed25519: k.Bytes(), }, } - case secp256k1.PubKey: + case secp256k1.KeyType: kp = pc.PublicKey{ Sum: &pc.PublicKey_Secp256K1{ - Secp256K1: k, + Secp256K1: k.Bytes(), + }, + } + case bls12381.KeyType: + if !bls12381.Enabled { + return kp, ErrUnsupportedKey{KeyType: bls12381.KeyType} + } + + kp = pc.PublicKey{ + Sum: &pc.PublicKey_Bls12381{ + Bls12381: k.Bytes(), }, } default: - return kp, ErrUnsupportedKey{Key: k} + return kp, ErrUnsupportedKey{KeyType: k.Type()} } return kp, nil } -// PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey +// PubKeyFromProto takes a protobuf Pubkey and transforms it to a +// crypto.Pubkey. It returns ErrUnsupportedKey if the pubkey type is +// unsupported or ErrInvalidKeyLen if the key length is invalid. func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { switch k := k.Sum.(type) { case *pc.PublicKey_Ed25519: @@ -84,7 +107,75 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(secp256k1.PubKey, secp256k1.PubKeySize) copy(pk, k.Secp256K1) return pk, nil + case *pc.PublicKey_Bls12381: + if !bls12381.Enabled { + return nil, ErrUnsupportedKey{KeyType: bls12381.KeyType} + } + + if len(k.Bls12381) != bls12381.PubKeySize { + return nil, ErrInvalidKeyLen{ + Key: k, + Got: len(k.Bls12381), + Want: bls12381.PubKeySize, + } + } + return bls12381.NewPublicKeyFromBytes(k.Bls12381) + default: + kt := reflect.TypeOf(k) + if kt == nil { + return nil, ErrUnsupportedKey{KeyType: ""} + } else { + return nil, ErrUnsupportedKey{KeyType: kt.String()} + } + } +} + +// PubKeyFromTypeAndBytes builds a crypto.PubKey from the given type and bytes. +// It returns ErrUnsupportedKey if the pubkey type is unsupported or +// ErrInvalidKeyLen if the key length is invalid. 
+func PubKeyFromTypeAndBytes(pkType string, bytes []byte) (crypto.PubKey, error) { + var pubKey crypto.PubKey + switch pkType { + case ed25519.KeyType: + if len(bytes) != ed25519.PubKeySize { + return nil, ErrInvalidKeyLen{ + Key: pkType, + Got: len(bytes), + Want: ed25519.PubKeySize, + } + } + + pk := make(ed25519.PubKey, ed25519.PubKeySize) + copy(pk, bytes) + pubKey = pk + case secp256k1.KeyType: + if len(bytes) != secp256k1.PubKeySize { + return nil, ErrInvalidKeyLen{ + Key: pkType, + Got: len(bytes), + Want: secp256k1.PubKeySize, + } + } + + pk := make(secp256k1.PubKey, secp256k1.PubKeySize) + copy(pk, bytes) + pubKey = pk + case bls12381.KeyType: + if !bls12381.Enabled { + return nil, ErrUnsupportedKey{KeyType: pkType} + } + + if len(bytes) != bls12381.PubKeySize { + return nil, ErrInvalidKeyLen{ + Key: pkType, + Got: len(bytes), + Want: bls12381.PubKeySize, + } + } + + return bls12381.NewPublicKeyFromBytes(bytes) default: - return nil, ErrUnsupportedKey{Key: k} + return nil, ErrUnsupportedKey{KeyType: pkType} } + return pubKey, nil } diff --git a/crypto/encoding/codec_test.go b/crypto/encoding/codec_test.go new file mode 100644 index 00000000000..66dac79f45c --- /dev/null +++ b/crypto/encoding/codec_test.go @@ -0,0 +1,119 @@ +package encoding + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/bls12381" + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/crypto/secp256k1" +) + +type unsupportedPubKey struct{} + +func (unsupportedPubKey) Address() crypto.Address { return nil } +func (unsupportedPubKey) Bytes() []byte { return nil } +func (unsupportedPubKey) VerifySignature([]byte, []byte) bool { return false } +func (unsupportedPubKey) Type() string { return "unsupportedPubKey" } + +func TestPubKeyToFromProto(t *testing.T) { + // ed25519 + pk := ed25519.GenPrivKey().PubKey() + proto, err := PubKeyToProto(pk) + require.NoError(t, err) + + pubkey, err := PubKeyFromProto(proto) + require.NoError(t, err) + assert.Equal(t, pk.Type(), pubkey.Type()) + assert.Equal(t, pk.Bytes(), pubkey.Bytes()) + assert.Equal(t, pk.Address(), pubkey.Address()) + assert.Equal(t, pk.VerifySignature([]byte("msg"), []byte("sig")), pubkey.VerifySignature([]byte("msg"), []byte("sig"))) + + // secp256k1 + pk = secp256k1.GenPrivKey().PubKey() + proto, err = PubKeyToProto(pk) + require.NoError(t, err) + + pubkey, err = PubKeyFromProto(proto) + require.NoError(t, err) + assert.Equal(t, pk.Type(), pubkey.Type()) + assert.Equal(t, pk.Bytes(), pubkey.Bytes()) + assert.Equal(t, pk.Address(), pubkey.Address()) + assert.Equal(t, pk.VerifySignature([]byte("msg"), []byte("sig")), pubkey.VerifySignature([]byte("msg"), []byte("sig"))) + + // bls12381 + if bls12381.Enabled { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + defer privKey.Zeroize() + pk = privKey.PubKey() + proto, err := PubKeyToProto(pk) + require.NoError(t, err) + + pubkey, err := PubKeyFromProto(proto) + require.NoError(t, err) + assert.Equal(t, pk.Type(), pubkey.Type()) + assert.Equal(t, pk.Bytes(), pubkey.Bytes()) + assert.Equal(t, pk.Address(), pubkey.Address()) + assert.Equal(t, pk.VerifySignature([]byte("msg"), []byte("sig")), pubkey.VerifySignature([]byte("msg"), []byte("sig"))) + } else { + _, err = PubKeyToProto(bls12381.PubKey{}) + assert.Error(t, err) + } + + // unsupported key type + _, err = PubKeyToProto(unsupportedPubKey{}) + require.Error(t, err) + assert.Equal(t, 
ErrUnsupportedKey{KeyType: unsupportedPubKey{}.Type()}, err) +} + +func TestPubKeyFromTypeAndBytes(t *testing.T) { + // ed25519 + pk := ed25519.GenPrivKey().PubKey() + pubkey, err := PubKeyFromTypeAndBytes(pk.Type(), pk.Bytes()) + assert.NoError(t, err) + assert.Equal(t, pk.Type(), pubkey.Type()) + assert.Equal(t, pk.Bytes(), pubkey.Bytes()) + assert.Equal(t, pk.Address(), pubkey.Address()) + assert.Equal(t, pk.VerifySignature([]byte("msg"), []byte("sig")), pubkey.VerifySignature([]byte("msg"), []byte("sig"))) + + // ed25519 invalid size + _, err = PubKeyFromTypeAndBytes(pk.Type(), pk.Bytes()[:10]) + assert.Error(t, err) + + // secp256k1 + pk = secp256k1.GenPrivKey().PubKey() + pubkey, err = PubKeyFromTypeAndBytes(pk.Type(), pk.Bytes()) + assert.NoError(t, err) + assert.Equal(t, pk.Type(), pubkey.Type()) + assert.Equal(t, pk.Bytes(), pubkey.Bytes()) + assert.Equal(t, pk.Address(), pubkey.Address()) + assert.Equal(t, pk.VerifySignature([]byte("msg"), []byte("sig")), pubkey.VerifySignature([]byte("msg"), []byte("sig"))) + + // secp256k1 invalid size + _, err = PubKeyFromTypeAndBytes(pk.Type(), pk.Bytes()[:10]) + assert.Error(t, err) + + // bls12381 + if bls12381.Enabled { + privKey, err := bls12381.GenPrivKey() + require.NoError(t, err) + pk := privKey.PubKey() + pubkey, err = PubKeyFromTypeAndBytes(pk.Type(), pk.Bytes()) + assert.NoError(t, err) + assert.Equal(t, pk.Type(), pubkey.Type()) + assert.Equal(t, pk.Bytes(), pubkey.Bytes()) + assert.Equal(t, pk.Address(), pubkey.Address()) + assert.Equal(t, pk.VerifySignature([]byte("msg"), []byte("sig")), pubkey.VerifySignature([]byte("msg"), []byte("sig"))) + + // bls12381 invalid size + _, err = PubKeyFromTypeAndBytes(pk.Type(), pk.Bytes()[:10]) + assert.Error(t, err) + } else { + _, err = PubKeyFromTypeAndBytes(bls12381.KeyType, []byte{}) + assert.Error(t, err) + } +} diff --git a/crypto/example_test.go b/crypto/example_test.go deleted file mode 100644 index 6100727782d..00000000000 --- a/crypto/example_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package crypto_test - -import ( - "fmt" - - "github.com/cometbft/cometbft/crypto" -) - -func ExampleSha256() { - sum := crypto.Sha256([]byte("This is CometBFT")) - fmt.Printf("%x\n", sum) - // Output: - // ea186526b041852d923b02c91aa04b00c0df258b3d69cb688eaba577f5562758 -} diff --git a/crypto/hash.go b/crypto/hash.go deleted file mode 100644 index e1d22523f27..00000000000 --- a/crypto/hash.go +++ /dev/null @@ -1,11 +0,0 @@ -package crypto - -import ( - "crypto/sha256" -) - -func Sha256(bytes []byte) []byte { - hasher := sha256.New() - hasher.Write(bytes) - return hasher.Sum(nil) -} diff --git a/crypto/internal/benchmarking/bench.go b/crypto/internal/benchmarking/bench.go index de1c97974bb..20b5964b920 100644 --- a/crypto/internal/benchmarking/bench.go +++ b/crypto/internal/benchmarking/bench.go @@ -38,7 +38,6 @@ func BenchmarkSigning(b *testing.B, priv crypto.PrivKey) { b.ResetTimer() for i := 0; i < b.N; i++ { _, err := priv.Sign(message) - if err != nil { b.FailNow() } diff --git a/crypto/merkle/bench_test.go b/crypto/merkle/bench_test.go new file mode 100644 index 00000000000..c6566c9da61 --- /dev/null +++ b/crypto/merkle/bench_test.go @@ -0,0 +1,64 @@ +package merkle + +import ( + "crypto/sha256" + "strings" + "testing" +) + +var sink any + +type innerHashTest struct { + left, right string +} + +var innerHashTests = []*innerHashTest{ + {"aaaaaaaaaaaaaaa", " "}, + {"", ""}, + {" ", "a ff b f1 a"}, + {"ffff122fff", "ffff122fff"}, + 
{"😎💡✅alalalalalalalalalallalallaallalaallalalalalalalalaallalalalalalala", "😎💡✅alalalalalalalalalallalallaallalaallalalalalalalalaallalalalalalalaffff122fff"}, + {strings.Repeat("ff", 1<<10), strings.Repeat("00af", 4<<10)}, + {strings.Repeat("f", sha256.Size), strings.Repeat("00af", 10<<10)}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaffff122fffaaaaaaaaa", "aaaaaaaaaffff1aaaaaaaaaaaaaaaaaa22fffaaaaaaaaa"}, +} + +func BenchmarkInnerHash(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tt := range innerHashTests { + got := innerHash([]byte(tt.left), []byte(tt.right)) + if g, w := len(got), sha256.Size; g != w { + b.Fatalf("size discrepancy: got %d, want %d", g, w) + } + sink = got + } + } + + if sink == nil { + b.Fatal("Benchmark did not run!") + } +} + +// Benchmark the time it takes to hash a 64kb leaf, which is the size of +// a block part. +// This helps determine whether its worth parallelizing this hash for the proposer. +func BenchmarkLeafHash64kb(b *testing.B) { + b.ReportAllocs() + leaf := make([]byte, 64*1024) + hash := sha256.New() + + for i := 0; i < b.N; i++ { + leaf[0] = byte(i) + got := leafHashOpt(hash, leaf) + if g, w := len(got), sha256.Size; g != w { + b.Fatalf("size discrepancy: got %d, want %d", g, w) + } + sink = got + } + + if sink == nil { + b.Fatal("Benchmark did not run!") + } +} diff --git a/crypto/merkle/hash.go b/crypto/merkle/hash.go index be2010aefcc..dc527cac067 100644 --- a/crypto/merkle/hash.go +++ b/crypto/merkle/hash.go @@ -6,23 +6,23 @@ import ( "github.com/cometbft/cometbft/crypto/tmhash" ) -// TODO: make these have a large predefined capacity +// TODO: make these have a large predefined capacity. var ( leafPrefix = []byte{0} innerPrefix = []byte{1} ) -// returns tmhash() +// returns tmhash(). func emptyHash() []byte { return tmhash.Sum([]byte{}) } -// returns tmhash(0x00 || leaf) +// returns tmhash(0x00 || leaf). func leafHash(leaf []byte) []byte { return tmhash.Sum(append(leafPrefix, leaf...)) } -// returns tmhash(0x00 || leaf) +// returns tmhash(0x00 || leaf). func leafHashOpt(s hash.Hash, leaf []byte) []byte { s.Reset() s.Write(leafPrefix) @@ -30,13 +30,9 @@ func leafHashOpt(s hash.Hash, leaf []byte) []byte { return s.Sum(nil) } -// returns tmhash(0x01 || left || right) +// returns tmhash(0x01 || left || right). func innerHash(left []byte, right []byte) []byte { - data := make([]byte, len(innerPrefix)+len(left)+len(right)) - n := copy(data, innerPrefix) - n += copy(data[n:], left) - copy(data[n:], right) - return tmhash.Sum(data) + return tmhash.SumMany(innerPrefix, left, right) } func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte { diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 94eab083195..4414d718846 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -4,15 +4,16 @@ import ( "bytes" "errors" "fmt" + "hash" + cmtcrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" "github.com/cometbft/cometbft/crypto/tmhash" - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" ) const ( // MaxAunts is the maximum number of aunts that can be included in a Proof. // This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes. - // This maximum helps prevent Denial-of-Service attacks by limitting the size of the proofs. + // This maximum helps prevent Denial-of-Service attacks by limiting the size of the proofs. MaxAunts = 100 ) @@ -50,10 +51,10 @@ func (e ErrInvalidProof) Unwrap() error { // everything. 
This also affects the generalized proof system as // well. type Proof struct { - Total int64 `json:"total"` // Total number of items. - Index int64 `json:"index"` // Index of item to prove. - LeafHash []byte `json:"leaf_hash"` // Hash of item value. - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int64 `json:"total"` // Total number of items. + Index int64 `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts,omitempty"` // Hashes from leaf's sibling to a root's child. } // ProofsFromByteSlices computes inclusion proof for given items. @@ -70,11 +71,11 @@ func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) { Aunts: trail.FlattenAunts(), } } - return + return rootHash, proofs } // Verify that the Proof proves the root hash. -// Check sp.Index/sp.Total manually if needed +// Check sp.Index/sp.Total manually if needed. func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { if rootHash == nil { return ErrInvalidHash{ @@ -91,13 +92,14 @@ func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { Err: errors.New("negative proof index"), } } - leafHash := leafHash(leaf) + hash := tmhash.New() + leafHash := leafHashOpt(hash, leaf) if !bytes.Equal(sp.LeafHash, leafHash) { return ErrInvalidHash{ Err: fmt.Errorf("leaf %x, want %x", sp.LeafHash, leafHash), } } - computedHash, err := sp.computeRootHash() + computedHash, err := sp.computeRootHash(hash) if err != nil { return ErrInvalidHash{ Err: fmt.Errorf("compute root hash: %w", err), @@ -112,8 +114,9 @@ func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { } // Compute the root hash given a leaf hash. -func (sp *Proof) computeRootHash() ([]byte, error) { +func (sp *Proof) computeRootHash(hash hash.Hash) ([]byte, error) { return computeHashFromAunts( + hash, sp.Index, sp.Total, sp.LeafHash, @@ -168,6 +171,7 @@ func (sp *Proof) ValidateBasic() error { return nil } +// ToProto converts the Proof structure into its corresponding protobuf representation. func (sp *Proof) ToProto() *cmtcrypto.Proof { if sp == nil { return nil @@ -182,6 +186,7 @@ func (sp *Proof) ToProto() *cmtcrypto.Proof { return pb } +// ProofFromProto converts a protobuf cmtcrypto.Proof object back into the Proof structure. func ProofFromProto(pb *cmtcrypto.Proof) (*Proof, error) { if pb == nil { return nil, ErrInvalidProof{Err: errors.New("nil proof")} @@ -200,7 +205,7 @@ func ProofFromProto(pb *cmtcrypto.Proof) (*Proof, error) { // Use the leafHash and innerHashes to get the root merkle hash. // If the length of the innerHashes slice isn't exactly correct, the result is nil. // Recursive impl. 
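+// For example (an illustrative walk-through): with total=8 and index=5 the
+// proof carries three aunts, consumed from the end of the slice downwards:
+// innerHashes[2] is the root of leaves 0-3, innerHashes[1] covers leaves 6-7,
+// and innerHashes[0] is the hash of sibling leaf 4.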
-func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) ([]byte, error) { +func computeHashFromAunts(hash hash.Hash, index, total int64, leafHash []byte, innerHashes [][]byte) ([]byte, error) { if index >= total || index < 0 || total <= 0 { return nil, fmt.Errorf("invalid index %d and/or total %d", index, total) } @@ -209,27 +214,27 @@ func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]b panic("Cannot call computeHashFromAunts() with 0 total") case 1: if len(innerHashes) != 0 { - return nil, fmt.Errorf("unexpected inner hashes") + return nil, errors.New("unexpected inner hashes") } return leafHash, nil default: if len(innerHashes) == 0 { - return nil, fmt.Errorf("expected at least one inner hash") + return nil, errors.New("expected at least one inner hash") } numLeft := getSplitPoint(total) if index < numLeft { - leftHash, err := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + leftHash, err := computeHashFromAunts(hash, index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) if err != nil { return nil, err } - return innerHash(leftHash, innerHashes[len(innerHashes)-1]), nil + return innerHashOpt(hash, leftHash, innerHashes[len(innerHashes)-1]), nil } - rightHash, err := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + rightHash, err := computeHashFromAunts(hash, index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) if err != nil { return nil, err } - return innerHash(innerHashes[len(innerHashes)-1], rightHash), nil + return innerHashOpt(hash, innerHashes[len(innerHashes)-1], rightHash), nil } } @@ -250,6 +255,7 @@ type ProofNode struct { func (spn *ProofNode) FlattenAunts() [][]byte { // Nonrecursive impl. innerHashes := [][]byte{} +FOR_LOOP: for spn != nil { switch { case spn.Left != nil: @@ -257,7 +263,7 @@ func (spn *ProofNode) FlattenAunts() [][]byte { case spn.Right != nil: innerHashes = append(innerHashes, spn.Right.Hash) default: - break + break FOR_LOOP } spn = spn.Parent } @@ -267,18 +273,23 @@ func (spn *ProofNode) FlattenAunts() [][]byte { // trails[0].Hash is the leaf hash for items[0]. // trails[i].Parent.Parent....Parent == root for all i. func trailsFromByteSlices(items [][]byte) (trails []*ProofNode, root *ProofNode) { + return trailsFromByteSlicesInternal(tmhash.New(), items) +} + +// trailsFromByteSlicesInternal computes the Merkle tree trails and root for a given set of byte slices. +func trailsFromByteSlicesInternal(hash hash.Hash, items [][]byte) (trails []*ProofNode, root *ProofNode) { // Recursive impl. 
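+	// (Threading a single hash.Hash through the recursion is an allocation
+	// optimization: the *Opt helpers reset the shared state before each use
+	// instead of allocating a fresh SHA-256 hasher per node.)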
switch len(items) { case 0: return []*ProofNode{}, &ProofNode{emptyHash(), nil, nil, nil} case 1: - trail := &ProofNode{leafHash(items[0]), nil, nil, nil} + trail := &ProofNode{leafHashOpt(hash, items[0]), nil, nil, nil} return []*ProofNode{trail}, trail default: k := getSplitPoint(int64(len(items))) - lefts, leftRoot := trailsFromByteSlices(items[:k]) - rights, rightRoot := trailsFromByteSlices(items[k:]) - rootHash := innerHash(leftRoot.Hash, rightRoot.Hash) + lefts, leftRoot := trailsFromByteSlicesInternal(hash, items[:k]) + rights, rightRoot := trailsFromByteSlicesInternal(hash, items[k:]) + rootHash := innerHashOpt(hash, leftRoot.Hash, rightRoot.Hash) root := &ProofNode{rootHash, nil, nil, nil} leftRoot.Parent = root leftRoot.Right = rightRoot diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go index bff53a9b68f..c8db64c5298 100644 --- a/crypto/merkle/proof_key_path.go +++ b/crypto/merkle/proof_key_path.go @@ -8,45 +8,41 @@ import ( "strings" ) -/* - - For generalized Merkle proofs, each layer of the proof may require an - optional key. The key may be encoded either by URL-encoding or - (upper-case) hex-encoding. - TODO: In the future, more encodings may be supported, like base32 (e.g. - /32:) - - For example, for a Cosmos-SDK application where the first two proof layers - are ValueOps, and the third proof layer is an IAVLValueOp, the keys - might look like: - - 0: []byte("App") - 1: []byte("IBC") - 2: []byte{0x01, 0x02, 0x03} - - Assuming that we know that the first two layers are always ASCII texts, we - probably want to use URLEncoding for those, whereas the third layer will - require HEX encoding for efficient representation. - - kp := new(KeyPath) - kp.AppendKey([]byte("App"), KeyEncodingURL) - kp.AppendKey([]byte("IBC"), KeyEncodingURL) - kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) - kp.String() // Should return "/App/IBC/x:010203" - - NOTE: Key paths must begin with a `/`. - - NOTE: All encodings *MUST* work compatibly, such that you can choose to use - whatever encoding, and the decoded keys will always be the same. In other - words, it's just as good to encode all three keys using URL encoding or HEX - encoding... it just wouldn't be optimal in terms of readability or space - efficiency. - - NOTE: Punycode will never be supported here, because not all values can be - decoded. For example, no string decodes to the string "xn--blah" in - Punycode. - -*/ +// For generalized Merkle proofs, each layer of the proof may require an +// optional key. The key may be encoded either by URL-encoding or +// (upper-case) hex-encoding. +// TODO: In the future, more encodings may be supported, like base32 (e.g. +// /32:) + +// For example, for a Cosmos-SDK application where the first two proof layers +// are ValueOps, and the third proof layer is an IAVLValueOp, the keys +// might look like: + +// 0: []byte("App") +// 1: []byte("IBC") +// 2: []byte{0x01, 0x02, 0x03} + +// Assuming that we know that the first two layers are always ASCII texts, we +// probably want to use URLEncoding for those, whereas the third layer will +// require HEX encoding for efficient representation. + +// kp := new(KeyPath) +// kp.AppendKey([]byte("App"), KeyEncodingURL) +// kp.AppendKey([]byte("IBC"), KeyEncodingURL) +// kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) +// kp.String() // Should return "/App/IBC/x:010203" + +// NOTE: Key paths must begin with a `/`. 
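+
+// The decode side mirrors the example above (an illustrative sketch):
+//
+//	keys, err := KeyPathToKeys("/App/IBC/x:010203")
+//	// keys == [][]byte{[]byte("App"), []byte("IBC"), []byte{0x01, 0x02, 0x03}}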
+ +// NOTE: All encodings *MUST* work compatibly, such that you can choose to use +// whatever encoding, and the decoded keys will always be the same. In other +// words, it's just as good to encode all three keys using URL encoding or HEX +// encoding... it just wouldn't be optimal in terms of readability or space +// efficiency. + +// NOTE: Punycode will never be supported here, because not all values can be +// decoded. For example, no string decodes to the string "xn--blah" in +// Punycode. type keyEncoding int @@ -90,7 +86,7 @@ func (e ErrInvalidKey) Error() string { return fmt.Sprintf("merkle: invalid key error: %v", e.Err) } -// Decode a path to a list of keys. Path must begin with `/`. +// KeyPathToKeys decodes a path to a list of keys. Path must begin with `/`. // Each key must use a known encoding. func KeyPathToKeys(path string) (keys [][]byte, err error) { if path == "" || path[0] != '/' { diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go index 0d6d3354d33..aadb3f76c80 100644 --- a/crypto/merkle/proof_key_path_test.go +++ b/crypto/merkle/proof_key_path_test.go @@ -2,7 +2,7 @@ package merkle import ( // it is ok to use math/rand here: we do not need a cryptographically secure random - // number generator here and we can run the tests a bit faster + // number generator here and we can run the tests a bit faster. crand "crypto/rand" "math/rand" "testing" @@ -35,7 +35,7 @@ func TestKeyPath(t *testing.T) { } res, err := KeyPathToKeys(path.String()) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, len(keys), len(res)) for i, key := range keys { diff --git a/crypto/merkle/proof_op.go b/crypto/merkle/proof_op.go index bb6250b2c9a..579fae27397 100644 --- a/crypto/merkle/proof_op.go +++ b/crypto/merkle/proof_op.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + cmtcrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" ) var ErrKeyPathNotConsumed = errors.New("merkle: keypath not consumed") -//---------------------------------------- +// ---------------------------------------- // ProofOp gets converted to an instance of ProofOperator: // ProofOperator is a layer for calculating intermediate Merkle roots @@ -21,23 +21,24 @@ var ErrKeyPathNotConsumed = errors.New("merkle: keypath not consumed") // ProofOp() encodes the ProofOperator in a generic way so it can later be // decoded with OpDecoder. type ProofOperator interface { - Run([][]byte) ([][]byte, error) + Run(leaves [][]byte) ([][]byte, error) GetKey() []byte ProofOp() cmtcrypto.ProofOp } -//---------------------------------------- +// ---------------------------------------- // Operations on a list of ProofOperators // ProofOperators is a slice of ProofOperator(s). // Each operator will be applied to the input value sequentially -// and the last Merkle root will be verified with already known data +// and the last Merkle root will be verified with already known data. type ProofOperators []ProofOperator func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) { return poz.Verify(root, keypath, [][]byte{value}) } +// Verify applies a series of ProofOperators to verify the provided args (byte slices). 
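+// Keys are consumed from the end of the keypath (deepest layer first): each
+// operator's output feeds the next one, and the final output must equal root.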
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) error { keys, err := KeyPathToKeys(keypath) if err != nil { @@ -76,7 +77,7 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) err return nil } -//---------------------------------------- +// ---------------------------------------- // ProofRuntime - main entrypoint type OpDecoder func(cmtcrypto.ProofOp) (ProofOperator, error) @@ -91,6 +92,7 @@ func NewProofRuntime() *ProofRuntime { } } +// RegisterOpDecoder registers a new OpDecoder for a specific proof operation type. func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { _, ok := prt.decoders[typ] if ok { @@ -99,6 +101,7 @@ func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { prt.decoders[typ] = dec } +// Decode decodes a cmtcrypto.ProofOp into a ProofOperator using the appropriate decoder. func (prt *ProofRuntime) Decode(pop cmtcrypto.ProofOp) (ProofOperator, error) { decoder := prt.decoders[pop.Type] if decoder == nil { @@ -109,6 +112,7 @@ func (prt *ProofRuntime) Decode(pop cmtcrypto.ProofOp) (ProofOperator, error) { return decoder(pop) } +// DecodeProof decodes a list of cmtcrypto.ProofOps into a ProofOperators slice. func (prt *ProofRuntime) DecodeProof(proof *cmtcrypto.ProofOps) (ProofOperators, error) { poz := make(ProofOperators, 0, len(proof.Ops)) for _, pop := range proof.Ops { @@ -127,12 +131,13 @@ func (prt *ProofRuntime) VerifyValue(proof *cmtcrypto.ProofOps, root []byte, key return prt.Verify(proof, root, keypath, [][]byte{value}) } -// TODO In the long run we'll need a method of classifcation of ops, +// TODO In the long run we'll need a method of classification of ops, // whether existence or absence or perhaps a third? func (prt *ProofRuntime) VerifyAbsence(proof *cmtcrypto.ProofOps, root []byte, keypath string) (err error) { return prt.Verify(proof, root, keypath, nil) } +// Verify verifies a proof by decoding it into ProofOperators. 
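+//
+// A typical call site (illustrative; assumes a simple value proof):
+//
+//	prt := DefaultProofRuntime()
+//	err := prt.VerifyValue(proofOps, rootHash, "/mykey", value)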
func (prt *ProofRuntime) Verify(proof *cmtcrypto.ProofOps, root []byte, keypath string, args [][]byte) (err error) { poz, err := prt.DecodeProof(proof) if err != nil { @@ -149,5 +154,5 @@ func (prt *ProofRuntime) Verify(proof *cmtcrypto.ProofOps, root []byte, keypath func DefaultProofRuntime() (prt *ProofRuntime) { prt = NewProofRuntime() prt.RegisterOpDecoder(ProofOpValue, ValueOpDecoder) - return + return prt } diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index 1d02b951e8a..390d7b50826 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtcrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" "github.com/cometbft/cometbft/crypto/tmhash" - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" ) const ProofOpDomino = "test:domino" @@ -64,7 +64,7 @@ func (dop DominoOp) GetKey() []byte { return []byte(dop.key) } -//---------------------------------------- +// ---------------------------------------- func TestProofOperators(t *testing.T) { var err error @@ -81,58 +81,58 @@ func TestProofOperators(t *testing.T) { // Good popz := ProofOperators([]ProofOperator{op1, op2, op3, op4}) err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.Nil(t, err) + require.NoError(t, err) err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1")) - assert.Nil(t, err) + require.NoError(t, err) // BAD INPUT err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")}) - assert.NotNil(t, err) + require.Error(t, err) err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG")) - assert.NotNil(t, err) + require.Error(t, err) // BAD KEY 1 err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD KEY 2 err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD KEY 3 err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD KEY 4 err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD KEY 5 err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD OUTPUT 1 err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD OUTPUT 2 err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD POPZ 1 popz = []ProofOperator{op1, op2, op4} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD POPZ 2 popz = []ProofOperator{op4, op3, op2, op1} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) // BAD POPZ 3 popz = []ProofOperator{} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + require.Error(t, err) } func bz(s string) []byte { @@ -145,7 +145,7 @@ func TestProofValidateBasic(t *testing.T) { malleateProof func(*Proof) errStr string }{ - {"Good", func(sp *Proof) {}, ""}, + {"Good", func(_ *Proof) {}, ""}, {"Negative Total", func(sp *Proof) { sp.Total = -1 }, "negative proof total"}, {"Negative 
Index", func(sp *Proof) { sp.Index = -1 }, "negative proof index"}, { @@ -163,7 +163,6 @@ func TestProofValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { _, proofs := ProofsFromByteSlices([][]byte{ []byte("apple"), @@ -228,5 +227,5 @@ func TestVsa2022_100(t *testing.T) { // the nil root var root []byte - assert.NotNil(t, ProofOperators{op}.Verify(root, "/"+string(key), [][]byte{value})) + require.Error(t, ProofOperators{op}.Verify(root, "/"+string(key), [][]byte{value})) } diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go index 4624157c0b0..dde8f3aeb30 100644 --- a/crypto/merkle/proof_value.go +++ b/crypto/merkle/proof_value.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" + cmtcrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" "github.com/cometbft/cometbft/crypto/tmhash" - cmtcrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" ) const ProofOpValue = "simple:v" @@ -38,6 +38,7 @@ func NewValueOp(key []byte, proof *Proof) ValueOp { } } +// ValueOpDecoder decodes a cmtcrypto.ProofOp into a ValueOp instance. func ValueOpDecoder(pop cmtcrypto.ProofOp) (ProofOperator, error) { if pop.Type != ProofOpValue { return nil, ErrInvalidProof{ @@ -59,6 +60,7 @@ func ValueOpDecoder(pop cmtcrypto.ProofOp) (ProofOperator, error) { return NewValueOp(pop.Key, sp), nil } +// ProofOp encodes the ValueOp as a cmtcrypto.ProofOp, which can later be decoded. func (op ValueOp) ProofOp() cmtcrypto.ProofOp { pbval := cmtcrypto.ValueOp{ Key: op.key, @@ -83,6 +85,7 @@ func (op ValueOp) String() string { // exceeding 1. var ErrTooManyArgs = errors.New("merkle: len(args) != 1") +// Run computes the Merkle root using the ValueOp. func (op ValueOp) Run(args [][]byte) ([][]byte, error) { if len(args) != 1 { return nil, ErrTooManyArgs @@ -104,7 +107,7 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) { } } - rootHash, err := op.Proof.computeRootHash() + rootHash, err := op.Proof.computeRootHash(tmhash.New()) if err != nil { return nil, err } diff --git a/crypto/merkle/rfc6962_test.go b/crypto/merkle/rfc6962_test.go index dd0c817cc9c..f88b151a730 100644 --- a/crypto/merkle/rfc6962_test.go +++ b/crypto/merkle/rfc6962_test.go @@ -63,7 +63,6 @@ func TestRFC6962Hasher(t *testing.T) { got: innerHash([]byte("N123"), []byte("N456")), }, } { - tc := tc t.Run(tc.desc, func(t *testing.T) { wantBytes, err := hex.DecodeString(tc.want) if err != nil { diff --git a/crypto/merkle/tree.go b/crypto/merkle/tree.go index 896b67c5952..9f73fd3f58a 100644 --- a/crypto/merkle/tree.go +++ b/crypto/merkle/tree.go @@ -26,7 +26,7 @@ func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte { } } -// HashFromByteSliceIterative is an iterative alternative to +// HashFromByteSlicesIterative is an iterative alternative to // HashFromByteSlice motivated by potential performance improvements. // (#2611) had suggested that an iterative version of // HashFromByteSlice would be faster, presumably because @@ -97,7 +97,7 @@ func HashFromByteSlicesIterative(input [][]byte) []byte { } } -// getSplitPoint returns the largest power of 2 less than length +// getSplitPoint returns the largest power of 2 less than length. 
func getSplitPoint(length int64) int64 { if length < 1 { panic("Trying to split a tree with size < 1") diff --git a/crypto/merkle/tree_test.go b/crypto/merkle/tree_test.go index df76360efc0..94282dcfa39 100644 --- a/crypto/merkle/tree_test.go +++ b/crypto/merkle/tree_test.go @@ -7,10 +7,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/libs/test" - "github.com/cometbft/cometbft/crypto/tmhash" + cmtrand "github.com/cometbft/cometbft/internal/rand" + "github.com/cometbft/cometbft/libs/test" ) type testItem []byte @@ -35,7 +34,6 @@ func TestHashFromByteSlices(t *testing.T) { }, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { hash := HashFromByteSlices(tc.slices) assert.Equal(t, tc.expectHash, hex.EncodeToString(hash)) @@ -44,7 +42,6 @@ func TestHashFromByteSlices(t *testing.T) { } func TestProof(t *testing.T) { - // Try an empty proof first rootHash, proofs := ProofsFromByteSlices([][]byte{}) require.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(rootHash)) @@ -68,7 +65,7 @@ func TestProof(t *testing.T) { proof := proofs[i] // Check total/index - require.EqualValues(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) + require.EqualValues(t, proof.Index, i, "Unmatched indices: %d vs %d", proof.Index, i) require.EqualValues(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) @@ -102,7 +99,6 @@ func TestProof(t *testing.T) { } func TestHashAlternatives(t *testing.T) { - total := 100 items := make([][]byte, total) diff --git a/crypto/merkle/types.go b/crypto/merkle/types.go index 6a5c7e6a362..f95b96d8a5f 100644 --- a/crypto/merkle/types.go +++ b/crypto/merkle/types.go @@ -20,20 +20,20 @@ type Tree interface { Save() (hash []byte) Load(hash []byte) Copy() Tree - Iterate(func(key []byte, value []byte) (stop bool)) (stopped bool) + Iterate(fx func(key []byte, value []byte) (stop bool)) (stopped bool) IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) } -//----------------------------------------------------------------------- +// ----------------------------------------------------------------------- -// Uvarint length prefixed byteslice +// Uvarint length prefixed byteslice. func encodeByteSlice(w io.Writer, bz []byte) (err error) { var buf [binary.MaxVarintLen64]byte n := binary.PutUvarint(buf[:], uint64(len(bz))) _, err = w.Write(buf[0:n]) if err != nil { - return + return err } _, err = w.Write(bz) - return + return err } diff --git a/crypto/mocks/batch_verifier.go b/crypto/mocks/batch_verifier.go new file mode 100644 index 00000000000..784f5e9bcd9 --- /dev/null +++ b/crypto/mocks/batch_verifier.go @@ -0,0 +1,75 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + crypto "github.com/cometbft/cometbft/crypto" + mock "github.com/stretchr/testify/mock" +) + +// BatchVerifier is an autogenerated mock type for the BatchVerifier type +type BatchVerifier struct { + mock.Mock +} + +// Add provides a mock function with given fields: key, message, signature +func (_m *BatchVerifier) Add(key crypto.PubKey, message []byte, signature []byte) error { + ret := _m.Called(key, message, signature) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(crypto.PubKey, []byte, []byte) error); ok { + r0 = rf(key, message, signature) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Verify provides a mock function with given fields: +func (_m *BatchVerifier) Verify() (bool, []bool) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Verify") + } + + var r0 bool + var r1 []bool + if rf, ok := ret.Get(0).(func() (bool, []bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() []bool); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]bool) + } + } + + return r0, r1 +} + +// NewBatchVerifier creates a new instance of BatchVerifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBatchVerifier(t interface { + mock.TestingT + Cleanup(func()) +}) *BatchVerifier { + mock := &BatchVerifier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/crypto/mocks/pub_key.go b/crypto/mocks/pub_key.go new file mode 100644 index 00000000000..f5b31610b41 --- /dev/null +++ b/crypto/mocks/pub_key.go @@ -0,0 +1,103 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + crypto "github.com/cometbft/cometbft/crypto" + mock "github.com/stretchr/testify/mock" +) + +// PubKey is an autogenerated mock type for the PubKey type +type PubKey struct { + mock.Mock +} + +// Address provides a mock function with given fields: +func (_m *PubKey) Address() crypto.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 crypto.Address + if rf, ok := ret.Get(0).(func() crypto.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.Address) + } + } + + return r0 +} + +// Bytes provides a mock function with given fields: +func (_m *PubKey) Bytes() []byte { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Bytes") + } + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// Type provides a mock function with given fields: +func (_m *PubKey) Type() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Type") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// VerifySignature provides a mock function with given fields: msg, sig +func (_m *PubKey) VerifySignature(msg []byte, sig []byte) bool { + ret := _m.Called(msg, sig) + + if len(ret) == 0 { + panic("no return value specified for VerifySignature") + } + + var r0 bool + if rf, ok := ret.Get(0).(func([]byte, []byte) bool); ok { + r0 = rf(msg, sig) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewPubKey creates a new instance of PubKey. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPubKey(t interface { + mock.TestingT + Cleanup(func()) +}) *PubKey { + mock := &PubKey{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/crypto/random.go b/crypto/random.go index 275fb1044f2..543b7c91133 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -6,7 +6,7 @@ import ( "io" ) -// This only uses the OS's randomness +// This only uses the OS's randomness. func randBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) @@ -16,7 +16,7 @@ func randBytes(numBytes int) []byte { return b } -// This only uses the OS's randomness +// This only uses the OS's randomness. func CRandBytes(numBytes int) []byte { return randBytes(numBytes) } diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index c6e31f1e794..7f9a893c346 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -1,22 +1,20 @@ package secp256k1 import ( - "bytes" "crypto/sha256" - "crypto/subtle" "fmt" "io" "math/big" - secp256k1 "github.com/btcsuite/btcd/btcec/v2" - "github.com/btcsuite/btcd/btcec/v2/ecdsa" - "golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" + "golang.org/x/crypto/ripemd160" //nolint: gosec,staticcheck // necessary for Bitcoin address format "github.com/cometbft/cometbft/crypto" cmtjson "github.com/cometbft/cometbft/libs/json" ) -// ------------------------------------- +// -------------------------------------. 
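+// NOTE: this package now wraps github.com/decred/dcrd/dcrec/secp256k1/v4
+// instead of btcec. Among the visible differences: PrivKeyFromBytes returns
+// a single *secp256k1.PrivateKey (no paired public key), and SignCompact no
+// longer returns an error.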
const ( PrivKeyName = "tendermint/PrivKeySecp256k1" PubKeyName = "tendermint/PubKeySecp256k1" @@ -35,41 +33,36 @@ var _ crypto.PrivKey = PrivKey{} // PrivKey implements PrivKey. type PrivKey []byte -// Bytes marshalls the private key using amino encoding. +// Bytes returns the privkey as bytes. func (privKey PrivKey) Bytes() []byte { return []byte(privKey) } // PubKey performs the point-scalar multiplication from the privKey on the // generator point to get the pubkey. +// +// See secp256k1.PrivKeyFromBytes. func (privKey PrivKey) PubKey() crypto.PubKey { - _, pubkeyObject := secp256k1.PrivKeyFromBytes(privKey) + secpPrivKey := secp256k1.PrivKeyFromBytes(privKey) - pk := pubkeyObject.SerializeCompressed() + pk := secpPrivKey.PubKey().SerializeCompressed() return PubKey(pk) } -// Equals - you probably don't need to use this. -// Runs in constant time based on length of the keys. -func (privKey PrivKey) Equals(other crypto.PrivKey) bool { - if otherSecp, ok := other.(PrivKey); ok { - return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1 - } - return false -} - -func (privKey PrivKey) Type() string { +// Type returns the key type. +func (PrivKey) Type() string { return KeyType } // GenPrivKey generates a new ECDSA private key on curve secp256k1 private key. // It uses OS randomness to generate the private key. +// +// See crypto.CReader. func GenPrivKey() PrivKey { return genPrivKey(crypto.CReader()) } -// genPrivKey generates a new secp256k1 private key using the provided reader. func genPrivKey(rand io.Reader) PrivKey { var privKeyBytes [PrivKeySize]byte d := new(big.Int) @@ -126,18 +119,16 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey { // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. // The returned signature will be of the form R || S (in lower-S form). func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { - priv, _ := secp256k1.PrivKeyFromBytes(privKey) + priv := secp256k1.PrivKeyFromBytes(privKey) - sig, err := ecdsa.SignCompact(priv, crypto.Sha256(msg), false) - if err != nil { - return nil, err - } + sum := sha256.Sum256(msg) + sig := ecdsa.SignCompact(priv, sum[:], false) // remove the first byte which is compactSigRecoveryCode return sig[1:], nil } -//------------------------------------- +// ------------------------------------- var _ crypto.PubKey = PubKey{} @@ -152,22 +143,33 @@ const PubKeySize = 33 // This prefix is followed with the x-coordinate. type PubKey []byte -// Address returns a Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) +// Address returns a Bitcoin style address: RIPEMD160(SHA256(pubkey)). func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { panic("length of pubkey is incorrect") } hasherSHA256 := sha256.New() - _, _ = hasherSHA256.Write(pubKey) // does not error + _, err := hasherSHA256.Write(pubKey) + if err != nil { + panic(err) + } sha := hasherSHA256.Sum(nil) - hasherRIPEMD160 := ripemd160.New() - _, _ = hasherRIPEMD160.Write(sha) // does not error + // Check if the size of the hash is what we expect. + if ripemd160.Size != crypto.AddressSize { + panic("ripemd160.Size != crypto.AddressSize") + } + + hasherRIPEMD160 := ripemd160.New() // #nosec G406 // necessary for Bitcoin address format + _, err = hasherRIPEMD160.Write(sha) + if err != nil { + panic(err) + } return crypto.Address(hasherRIPEMD160.Sum(nil)) } -// Bytes returns the pubkey marshaled with amino encoding. +// Bytes returns the pubkey as bytes. 
func (pubKey PubKey) Bytes() []byte { return []byte(pubKey) } @@ -176,14 +178,8 @@ func (pubKey PubKey) String() string { return fmt.Sprintf("PubKeySecp256k1{%X}", []byte(pubKey)) } -func (pubKey PubKey) Equals(other crypto.PubKey) bool { - if otherSecp, ok := other.(PubKey); ok { - return bytes.Equal(pubKey[:], otherSecp[:]) - } - return false -} - -func (pubKey PubKey) Type() string { +// Type returns the key type. +func (PubKey) Type() string { return KeyType } @@ -201,11 +197,11 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool { // parse the signature: signature := signatureFromBytes(sigStr) - // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't. + // Reject malleable signatures. libsecp256k1 does this check but decred doesn't. // see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 // Serialize() would negate S value if it is over half order. // Hence, if the signature is different after Serialize() if should be rejected. - var modifiedSignature, parseErr = ecdsa.ParseDERSignature(signature.Serialize()) + modifiedSignature, parseErr := ecdsa.ParseDERSignature(signature.Serialize()) if parseErr != nil { return false } @@ -213,7 +209,8 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool { return false } - return signature.Verify(crypto.Sha256(msg), pub) + sum := sha256.Sum256(msg) + return signature.Verify(sum[:], pub) } // Read Signature struct from R || S. Caller needs to ensure diff --git a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go index ae1f55e4926..b6639c0fd18 100644 --- a/crypto/secp256k1/secp256k1_internal_test.go +++ b/crypto/secp256k1/secp256k1_internal_test.go @@ -5,14 +5,12 @@ import ( "math/big" "testing" + "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/stretchr/testify/require" - - secp256k1 "github.com/btcsuite/btcd/btcec/v2" ) func Test_genPrivKey(t *testing.T) { - - empty := make([]byte, 32) + empty := make([]byte, 0, 32) oneB := big.NewInt(1).Bytes() onePadded := make([]byte, 32) copy(onePadded[32-len(oneB):32], oneB) @@ -29,7 +27,6 @@ func Test_genPrivKey(t *testing.T) { {"valid because 0 < 1 < N", validOne, false}, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if tt.shouldPanic { require.Panics(t, func() { @@ -39,8 +36,8 @@ func Test_genPrivKey(t *testing.T) { } got := genPrivKey(bytes.NewReader(tt.notSoRand)) fe := new(big.Int).SetBytes(got[:]) - require.True(t, fe.Cmp(secp256k1.S256().N) < 0) - require.True(t, fe.Sign() > 0) + require.Less(t, fe.Cmp(secp256k1.S256().N), 0, "expected %v to be less than %v", fe, secp256k1.S256().N) + require.Greater(t, fe.Sign(), 0) }) } } @@ -64,9 +61,9 @@ func TestSignatureVerificationAndRejectUpperS(t *testing.T) { require.True(t, pub.VerifySignature(msg, sigStr)) // malleate: - var S256 secp256k1.ModNScalar - S256.SetByteSlice(secp256k1.S256().N.Bytes()) - s.Negate().Add(&S256) + var s256 secp256k1.ModNScalar + s256.SetByteSlice(secp256k1.S256().N.Bytes()) + s.Negate().Add(&s256) require.True(t, s.IsOverHalfOrder()) rBytes := r.Bytes() diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index 195d9dde709..cca891a4cf5 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -6,13 +6,12 @@ import ( "testing" "github.com/btcsuite/btcd/btcutil/base58" + underlyingsecp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/secp256k1" - - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec/v2" ) type keyData struct { @@ -29,6 +28,17 @@ var secpDataTable = []keyData{ }, } +func TestPrivKey_Size(t *testing.T) { + privKey := secp256k1.GenPrivKey() + assert.Equal(t, secp256k1.PrivKeySize, len(privKey.Bytes())) +} + +func TestPubKey_Size(t *testing.T) { + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + assert.Equal(t, secp256k1.PubKeySize, len(pubKey.Bytes())) +} + func TestPubKeySecp256k1Address(t *testing.T) { for _, d := range secpDataTable { privB, _ := hex.DecodeString(d.priv) @@ -54,7 +64,7 @@ func TestSignAndValidateSecp256k1(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.Sign(msg) - require.Nil(t, err) + require.NoError(t, err) assert.True(t, pubKey.VerifySignature(msg, sig)) @@ -75,7 +85,7 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) { // This function creates a private and public key in the underlying libraries format. // The private key is basically calling new(big.Int).SetBytes(pk), which removes leading zero bytes - priv, _ := underlyingSecp256k1.PrivKeyFromBytes(privKeyBytes[:]) + priv := underlyingsecp256k1.PrivKeyFromBytes(privKeyBytes[:]) // this takes the bytes returned by `(big int).Bytes()`, and if the length is less than 32 bytes, // pads the bytes from the left with zero bytes. Therefore these two functions composed // result in the identity function on privKeyBytes, hence the following equality check @@ -86,8 +96,8 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) { } func TestGenPrivKeySecp256k1(t *testing.T) { - // curve oder N - N := underlyingSecp256k1.S256().N + // curve order N + n := underlyingsecp256k1.S256().N tests := []struct { name string secret []byte @@ -103,14 +113,13 @@ func TestGenPrivKeySecp256k1(t *testing.T) { {"another seed used in cosmos tests #3", []byte("")}, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret) require.NotNil(t, gotPrivKey) // interpret as a big.Int and make sure it is a valid field element: fe := new(big.Int).SetBytes(gotPrivKey[:]) - require.True(t, fe.Cmp(N) < 0) - require.True(t, fe.Sign() > 0) + require.Less(t, fe.Cmp(n), 0) + require.Greater(t, fe.Sign(), 0) }) } } diff --git a/crypto/sr25519/batch.go b/crypto/sr25519/batch.go deleted file mode 100644 index 05b71dc6334..00000000000 --- a/crypto/sr25519/batch.go +++ /dev/null @@ -1,77 +0,0 @@ -package sr25519 - -import ( - "errors" - "fmt" - - "github.com/oasisprotocol/curve25519-voi/primitives/sr25519" - - "github.com/cometbft/cometbft/crypto" -) - -var _ crypto.BatchVerifier = &BatchVerifier{} - -// ErrInvalidKey represents an error that could occur as a result of -// using an invalid private or public key. It wraps errors that could -// arise due to failures in serialization or the use of an incorrect -// key, i.e., uninitialised or not sr25519. -type ErrInvalidKey struct { - Err error -} - -func (e ErrInvalidKey) Error() string { - return fmt.Sprintf("sr25519: invalid public key: %v", e.Err) -} - -func (e ErrInvalidKey) Unwrap() error { - return e.Err -} - -// ErrInvalidSignature wraps an error that could occur as a result of -// generating an invalid signature. 
-type ErrInvalidSignature struct { - Err error -} - -func (e ErrInvalidSignature) Error() string { - return fmt.Sprintf("sr25519: invalid signature: %v", e.Err) -} - -func (e ErrInvalidSignature) Unwrap() error { - return e.Err -} - -// BatchVerifier implements batch verification for sr25519. -type BatchVerifier struct { - *sr25519.BatchVerifier -} - -func NewBatchVerifier() crypto.BatchVerifier { - return &BatchVerifier{sr25519.NewBatchVerifier()} -} - -func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error { - pk, ok := key.(PubKey) - if !ok { - return ErrInvalidKey{Err: errors.New("sr25519: pubkey is not sr25519")} - } - - var srpk sr25519.PublicKey - if err := srpk.UnmarshalBinary(pk); err != nil { - return ErrInvalidKey{Err: err} - } - - var sig sr25519.Signature - if err := sig.UnmarshalBinary(signature); err != nil { - return ErrInvalidSignature{Err: err} - } - - st := signingCtx.NewTranscriptBytes(msg) - b.BatchVerifier.Add(&srpk, st, &sig) - - return nil -} - -func (b *BatchVerifier) Verify() (bool, []bool) { - return b.BatchVerifier.Verify(crypto.CReader()) -} diff --git a/crypto/sr25519/bench_test.go b/crypto/sr25519/bench_test.go deleted file mode 100644 index bee3f4f2470..00000000000 --- a/crypto/sr25519/bench_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package sr25519 - -import ( - "fmt" - "io" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/cometbft/cometbft/crypto" - "github.com/cometbft/cometbft/crypto/internal/benchmarking" -) - -func BenchmarkKeyGeneration(b *testing.B) { - benchmarkKeygenWrapper := func(reader io.Reader) crypto.PrivKey { - return genPrivKey(reader) - } - benchmarking.BenchmarkKeyGeneration(b, benchmarkKeygenWrapper) -} - -func BenchmarkSigning(b *testing.B) { - priv := GenPrivKey() - benchmarking.BenchmarkSigning(b, priv) -} - -func BenchmarkVerification(b *testing.B) { - priv := GenPrivKey() - benchmarking.BenchmarkVerification(b, priv) -} - -func BenchmarkVerifyBatch(b *testing.B) { - msg := []byte("BatchVerifyTest") - - for _, sigsCount := range []int{1, 8, 64, 1024} { - sigsCount := sigsCount - b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) { - // Pre-generate all of the keys, and signatures, but do not - // benchmark key-generation and signing. - pubs := make([]crypto.PubKey, 0, sigsCount) - sigs := make([][]byte, 0, sigsCount) - for i := 0; i < sigsCount; i++ { - priv := GenPrivKey() - sig, _ := priv.Sign(msg) - pubs = append(pubs, priv.PubKey().(PubKey)) - sigs = append(sigs, sig) - } - b.ResetTimer() - - b.ReportAllocs() - // NOTE: dividing by n so that metrics are per-signature - for i := 0; i < b.N/sigsCount; i++ { - // The benchmark could just benchmark the Verify() - // routine, but there is non-trivial overhead associated - // with BatchVerifier.Add(), which should be included - // in the benchmark. 
- v := NewBatchVerifier() - for i := 0; i < sigsCount; i++ { - err := v.Add(pubs[i], msg, sigs[i]) - require.NoError(b, err) - } - - if ok, _ := v.Verify(); !ok { - b.Fatal("signature set failed batch verification") - } - } - }) - } -} diff --git a/crypto/sr25519/encoding.go b/crypto/sr25519/encoding.go deleted file mode 100644 index ab4ad54ffe6..00000000000 --- a/crypto/sr25519/encoding.go +++ /dev/null @@ -1,13 +0,0 @@ -package sr25519 - -import cmtjson "github.com/cometbft/cometbft/libs/json" - -const ( - PrivKeyName = "tendermint/PrivKeySr25519" - PubKeyName = "tendermint/PubKeySr25519" -) - -func init() { - cmtjson.RegisterType(PubKey{}, PubKeyName) - cmtjson.RegisterType(PrivKey{}, PrivKeyName) -} diff --git a/crypto/sr25519/privkey.go b/crypto/sr25519/privkey.go deleted file mode 100644 index bc1c6820101..00000000000 --- a/crypto/sr25519/privkey.go +++ /dev/null @@ -1,172 +0,0 @@ -package sr25519 - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/oasisprotocol/curve25519-voi/primitives/sr25519" - - "github.com/cometbft/cometbft/crypto" -) - -var ( - _ crypto.PrivKey = PrivKey{} - - signingCtx = sr25519.NewSigningContext([]byte{}) -) - -const ( - // PrivKeySize is the number of bytes in an Sr25519 private key. - PrivKeySize = 32 - - KeyType = "sr25519" -) - -// PrivKey implements crypto.PrivKey. -type PrivKey struct { - msk sr25519.MiniSecretKey - kp *sr25519.KeyPair -} - -// Bytes returns the byte representation of the PrivKey. -func (privKey PrivKey) Bytes() []byte { - if privKey.kp == nil { - return nil - } - return privKey.msk[:] -} - -// Sign produces a signature on the provided message. -func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { - if privKey.kp == nil { - return nil, ErrInvalidKey{ - Err: fmt.Errorf("sr25519: uninitialized private key"), - } - } - - st := signingCtx.NewTranscriptBytes(msg) - - sig, err := privKey.kp.Sign(crypto.CReader(), st) - if err != nil { - return nil, ErrInvalidSignature{ - Err: fmt.Errorf("sr25519: failed to sign message: %w", err), - } - } - - sigBytes, err := sig.MarshalBinary() - if err != nil { - return nil, ErrInvalidSignature{ - Err: fmt.Errorf("sr25519: failed to serialize signature: %w", err), - } - } - - return sigBytes, nil -} - -// PubKey gets the corresponding public key from the private key. -func (privKey PrivKey) PubKey() crypto.PubKey { - if privKey.kp == nil { - panic("sr25519: uninitialized private key") - } - - b, err := privKey.kp.PublicKey().MarshalBinary() - if err != nil { - panic("sr25519: failed to serialize public key: " + err.Error()) - } - - return PubKey(b) -} - -// Equals - you probably don't need to use this. -// Runs in constant time based on length of the keys. -func (privKey PrivKey) Equals(other crypto.PrivKey) bool { - if otherSr, ok := other.(PrivKey); ok { - return privKey.msk.Equal(&otherSr.msk) - } - return false -} - -func (privKey PrivKey) Type() string { - return KeyType -} - -func (privKey PrivKey) MarshalJSON() ([]byte, error) { - var b []byte - - // Handle uninitialized private keys gracefully. 
- if privKey.kp != nil { - b = privKey.Bytes() - } - - return json.Marshal(b) -} - -func (privKey *PrivKey) UnmarshalJSON(data []byte) error { - for i := range privKey.msk { - privKey.msk[i] = 0 - } - privKey.kp = nil - - var b []byte - if err := json.Unmarshal(data, &b); err != nil { - return ErrInvalidKey{ - Err: fmt.Errorf("sr25519: failed to deserialize JSON: %w", err), - } - } - if len(b) == 0 { - return nil - } - - msk, err := sr25519.NewMiniSecretKeyFromBytes(b) - if err != nil { - return err - } - - sk := msk.ExpandEd25519() - - privKey.msk = *msk - privKey.kp = sk.KeyPair() - - return nil -} - -// GenPrivKey generates a new sr25519 private key. -// It uses OS randomness in conjunction with the current global random seed -// in cometbft/libs/rand to generate the private key. -func GenPrivKey() PrivKey { - return genPrivKey(crypto.CReader()) -} - -// genPrivKey generates a new sr25519 private key using the provided reader. -func genPrivKey(rng io.Reader) PrivKey { - msk, err := sr25519.GenerateMiniSecretKey(rng) - if err != nil { - panic("sr25519: failed to generate MiniSecretKey: " + err.Error()) - } - - sk := msk.ExpandEd25519() - - return PrivKey{ - msk: *msk, - kp: sk.KeyPair(), - } -} - -// GenPrivKeyFromSecret hashes the secret with SHA2, and uses -// that 32 byte output to create the private key. -// NOTE: secret should be the output of a KDF like bcrypt, -// if it's derived from user input. -func GenPrivKeyFromSecret(secret []byte) PrivKey { - seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - - var privKey PrivKey - if err := privKey.msk.UnmarshalBinary(seed); err != nil { - panic("sr25519: failed to deserialize MiniSecretKey: " + err.Error()) - } - - sk := privKey.msk.ExpandEd25519() - privKey.kp = sk.KeyPair() - - return privKey -} diff --git a/crypto/sr25519/pubkey.go b/crypto/sr25519/pubkey.go deleted file mode 100644 index b25718f9a43..00000000000 --- a/crypto/sr25519/pubkey.go +++ /dev/null @@ -1,70 +0,0 @@ -package sr25519 - -import ( - "bytes" - "fmt" - - "github.com/oasisprotocol/curve25519-voi/primitives/sr25519" - - "github.com/cometbft/cometbft/crypto" - "github.com/cometbft/cometbft/crypto/tmhash" -) - -var _ crypto.PubKey = PubKey{} - -const ( - // PubKeySize is the number of bytes in an Sr25519 public key. - PubKeySize = 32 - - // SignatureSize is the size of a Sr25519 signature in bytes. - SignatureSize = 64 -) - -// PubKey implements crypto.PubKey for the Sr25519 signature scheme. -type PubKey []byte - -// Address is the SHA256-20 of the raw pubkey bytes. -func (pubKey PubKey) Address() crypto.Address { - if len(pubKey) != PubKeySize { - panic("pubkey is incorrect size") - } - return crypto.Address(tmhash.SumTruncated(pubKey[:])) -} - -// Bytes returns the byte representation of the PubKey. -func (pubKey PubKey) Bytes() []byte { - return []byte(pubKey) -} - -// Equals - checks that two public keys are the same time -// Runs in constant time based on length of the keys. 
-func (pubKey PubKey) Equals(other crypto.PubKey) bool { - if otherSr, ok := other.(PubKey); ok { - return bytes.Equal(pubKey[:], otherSr[:]) - } - - return false -} - -func (pubKey PubKey) VerifySignature(msg []byte, sigBytes []byte) bool { - var srpk sr25519.PublicKey - if err := srpk.UnmarshalBinary(pubKey); err != nil { - return false - } - - var sig sr25519.Signature - if err := sig.UnmarshalBinary(sigBytes); err != nil { - return false - } - - st := signingCtx.NewTranscriptBytes(msg) - return srpk.Verify(st, &sig) -} - -func (pubKey PubKey) String() string { - return fmt.Sprintf("PubKeySr25519{%X}", []byte(pubKey)) -} - -func (pubKey PubKey) Type() string { - return KeyType -} diff --git a/crypto/sr25519/sr25519_test.go b/crypto/sr25519/sr25519_test.go deleted file mode 100644 index b2437be797f..00000000000 --- a/crypto/sr25519/sr25519_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package sr25519_test - -import ( - "encoding/base64" - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/cometbft/cometbft/crypto" - "github.com/cometbft/cometbft/crypto/sr25519" -) - -func TestSignAndValidateSr25519(t *testing.T) { - privKey := sr25519.GenPrivKey() - pubKey := privKey.PubKey() - - msg := crypto.CRandBytes(128) - sig, err := privKey.Sign(msg) - require.Nil(t, err) - - // Test the signature - assert.True(t, pubKey.VerifySignature(msg, sig)) - assert.True(t, pubKey.VerifySignature(msg, sig)) - - // Mutate the signature, just one bit. - // TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 - sig[7] ^= byte(0x01) - - assert.False(t, pubKey.VerifySignature(msg, sig)) -} - -func TestBatchSafe(t *testing.T) { - v := sr25519.NewBatchVerifier() - vFail := sr25519.NewBatchVerifier() - for i := 0; i <= 38; i++ { - priv := sr25519.GenPrivKey() - pub := priv.PubKey() - - var msg []byte - if i%2 == 0 { - msg = []byte("easter") - } else { - msg = []byte("egg") - } - - sig, err := priv.Sign(msg) - require.NoError(t, err) - - err = v.Add(pub, msg, sig) - require.NoError(t, err) - - switch i % 2 { - case 0: - err = vFail.Add(pub, msg, sig) - case 1: - msg[2] ^= byte(0x01) - err = vFail.Add(pub, msg, sig) - } - require.NoError(t, err) - } - - ok, valid := v.Verify() - require.True(t, ok, "failed batch verification") - for i, ok := range valid { - require.Truef(t, ok, "sig[%d] should be marked valid", i) - } - - ok, valid = vFail.Verify() - require.False(t, ok, "succeeded batch verification (invalid batch)") - for i, ok := range valid { - expected := (i % 2) == 0 - require.Equalf(t, expected, ok, "sig[%d] should be %v", i, expected) - } -} - -func TestJSON(t *testing.T) { - privKey := sr25519.GenPrivKey() - - t.Run("PrivKey", func(t *testing.T) { - b, err := json.Marshal(privKey) - require.NoError(t, err) - - // b should be the base64 encoded MiniSecretKey, enclosed by doublequotes. - b64 := base64.StdEncoding.EncodeToString(privKey.Bytes()) - b64 = "\"" + b64 + "\"" - require.Equal(t, []byte(b64), b) - - var privKey2 sr25519.PrivKey - err = json.Unmarshal(b, &privKey2) - require.NoError(t, err) - require.Len(t, privKey2.Bytes(), sr25519.PrivKeySize) - require.EqualValues(t, privKey.Bytes(), privKey2.Bytes()) - }) - - // PubKeys are just []byte, so there is no special handling. 
-}
diff --git a/crypto/tmhash/bench_test.go b/crypto/tmhash/bench_test.go
new file mode 100644
index 00000000000..1165ec9fdd7
--- /dev/null
+++ b/crypto/tmhash/bench_test.go
@@ -0,0 +1,52 @@
+package tmhash
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"strings"
+	"testing"
+)
+
+var sink any
+
+var manySlices = []struct {
+	name string
+	in   [][]byte
+	want [32]byte
+}{
+	{
+		name: "all empty",
+		in:   [][]byte{[]byte(""), []byte("")},
+		want: sha256.Sum256(nil),
+	},
+	{
+		name: "ax6",
+		in:   [][]byte{[]byte("aaaa"), []byte("😎"), []byte("aaaa")},
+		want: sha256.Sum256([]byte("aaaa😎aaaa")),
+	},
+	{
+		name: "composite joined",
+		in:   [][]byte{bytes.Repeat([]byte("a"), 1<<10), []byte("AA"), bytes.Repeat([]byte("z"), 100)},
+		want: sha256.Sum256([]byte(strings.Repeat("a", 1<<10) + "AA" + strings.Repeat("z", 100))),
+	},
+}
+
+func BenchmarkSHA256Many(b *testing.B) {
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		for _, tt := range manySlices {
+			got := SumMany(tt.in[0], tt.in[1:]...)
+			if !bytes.Equal(got, tt.want[:]) {
+				b.Fatalf("Outward checksum mismatch for %q\n\tGot: %x\n\tWant: %x", tt.name, got, tt.want)
+			}
+			sink = got
+		}
+	}
+
+	if sink == nil {
+		b.Fatal("Benchmark did not run!")
+	}
+
+	sink = nil
+}
diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go
index f9b9582420d..c983450cf72 100644
--- a/crypto/tmhash/hash.go
+++ b/crypto/tmhash/hash.go
@@ -2,7 +2,10 @@ package tmhash
 
 import (
 	"crypto/sha256"
+	"errors"
+	"fmt"
 	"hash"
+	"regexp"
 )
 
 const (
@@ -21,7 +24,43 @@ func Sum(bz []byte) []byte {
 	return h[:]
 }
 
-//-------------------------------------------------------------
+// SumMany takes at least one byte slice, along with a variadic number
+// of additional byte slices, and produces the SHA256 sum obtained by
+// hashing them as if they were a single joined slice.
+func SumMany(data []byte, rest ...[]byte) []byte {
+	h := sha256.New()
+	h.Write(data)
+	for _, data := range rest {
+		h.Write(data)
+	}
+	return h.Sum(nil)
+}
+
+// ValidateSHA256 checks whether the given string is a syntactically valid
+// SHA256 hash, i.e. a hex-encoded 64-character string.
+// If the hash isn't valid, it returns an error explaining why.
+func ValidateSHA256(hashStr string) error {
+	const sha256Pattern = `^[a-fA-F0-9]{64}$`
+
+	if len(hashStr) != 64 {
+		return fmt.Errorf("expected 64 characters, but have %d", len(hashStr))
+	}
+
+	match, err := regexp.MatchString(sha256Pattern, hashStr)
+	if err != nil {
+		// If this happens, the pattern itself is broken or an internal
+		// regexp package error occurred.
+ return fmt.Errorf("can't run regex %q: %s", sha256Pattern, err) + } + + if !match { + return errors.New("contains non-hexadecimal characters") + } + + return nil +} + +// ------------------------------------------------------------- const ( TruncatedSize = 20 @@ -34,6 +73,7 @@ type sha256trunc struct { func (h sha256trunc) Write(p []byte) (n int, err error) { return h.sha256.Write(p) } + func (h sha256trunc) Sum(b []byte) []byte { shasum := h.sha256.Sum(b) return shasum[:TruncatedSize] @@ -43,7 +83,7 @@ func (h sha256trunc) Reset() { h.sha256.Reset() } -func (h sha256trunc) Size() int { +func (sha256trunc) Size() int { return TruncatedSize } diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go index 0849391e576..63990567847 100644 --- a/crypto/tmhash/hash_test.go +++ b/crypto/tmhash/hash_test.go @@ -46,3 +46,53 @@ func TestHashTruncated(t *testing.T) { assert.Equal(t, bz, bz2) assert.Equal(t, bz, bz3) } + +func TestValidSHA256String(t *testing.T) { + tests := []struct { + name string + hash string + wantErr string + }{ + { + "ValidLowercase", + "9e107d9d372bb6826bd81d3542a419d6e9c2e15d35b3d5d6b889def626eb8f23", + "", + }, + { + "ValidUppercase", + "9E107D9D372BB6826BD81D3542A419D6E9C2E15D35B3D5D6B889DEF626EB8F23", + "", + }, + { + "TooShort", + "9e107d9d372bb6826bd81d3542a419d6e9c2e15d35b3d5d6b889def626eb8f2", + "expected 64 characters, but have 63", + }, + { + "TooLong", + "9e107d9d372bb6826bd81d3542a419d6e9c2e15d35b3d5d6b889def626eb8f23a", + "expected 64 characters, but have 65", + }, + { + "InvalidChar", + "9e107d9d372bb6826bd81d3542a419d6e9c2e15d35b3d5d6b889def626eb8f2g", + "contains non-hexadecimal characters", + }, + } + + // success cases + for i, tt := range tests[:2] { + t.Run(tt.name, func(t *testing.T) { + err := tmhash.ValidateSHA256(tt.hash) + assert.NoError(t, err, "test %d", i) + }) + } + + // failure cases + for i, tt := range tests[2:] { + t.Run(tt.name, func(t *testing.T) { + err := tmhash.ValidateSHA256(tt.hash) + assert.EqualError(t, err, tt.wantErr, "test %d", i) + }) + } +} diff --git a/crypto/xchacha20poly1305/vector_test.go b/crypto/xchacha20poly1305/vector_test.go deleted file mode 100644 index c6ca9d8d23c..00000000000 --- a/crypto/xchacha20poly1305/vector_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package xchacha20poly1305 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -func toHex(bits []byte) string { - return hex.EncodeToString(bits) -} - -func fromHex(bits string) []byte { - b, err := hex.DecodeString(bits) - if err != nil { - panic(err) - } - return b -} - -func TestHChaCha20(t *testing.T) { - for i, v := range hChaCha20Vectors { - var key [32]byte - var nonce [16]byte - copy(key[:], v.key) - copy(nonce[:], v.nonce) - - HChaCha20(&key, &nonce, &key) - if !bytes.Equal(key[:], v.keystream) { - t.Errorf("test %d: keystream mismatch:\n \t got: %s\n \t want: %s", i, toHex(key[:]), toHex(v.keystream)) - } - } -} - -var hChaCha20Vectors = []struct { - key, nonce, keystream []byte -}{ - { - fromHex("0000000000000000000000000000000000000000000000000000000000000000"), - fromHex("000000000000000000000000000000000000000000000000"), - fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"), - }, - { - fromHex("8000000000000000000000000000000000000000000000000000000000000000"), - fromHex("000000000000000000000000000000000000000000000000"), - fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"), - }, - { - fromHex("0000000000000000000000000000000000000000000000000000000000000001"), - 
fromHex("000000000000000000000000000000000000000000000002"), - fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"), - }, - { - fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"), - fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"), - fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"), - }, - { - fromHex("24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"), - fromHex("d9660c5900ae19ddad28d6e06e45fe5e"), - fromHex("5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"), - }, -} - -func TestVectors(t *testing.T) { - for i, v := range vectors { - if len(v.plaintext) == 0 { - v.plaintext = make([]byte, len(v.ciphertext)) - } - - var nonce [24]byte - copy(nonce[:], v.nonce) - - aead, err := New(v.key) - if err != nil { - t.Error(err) - } - - dst := aead.Seal(nil, nonce[:], v.plaintext, v.ad) - if !bytes.Equal(dst, v.ciphertext) { - t.Errorf("test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext)) - } - open, err := aead.Open(nil, nonce[:], dst, v.ad) - if err != nil { - t.Error(err) - } - if !bytes.Equal(open, v.plaintext) { - t.Errorf("test %d: plaintext mismatch:\n \t got: %s\n \t want: %s", i, string(open), string(v.plaintext)) - } - } -} - -var vectors = []struct { - key, nonce, ad, plaintext, ciphertext []byte -}{ - { - []byte{ - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, - 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, - 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - }, - []byte{0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b}, - []byte{0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7}, - []byte( - "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.", - ), - []byte{ - 0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56, - 0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49, - 0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8, - 0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2, - 0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6, - 0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5, - 0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32, - 0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14, - 0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62, - 0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b, - 0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab, - 0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea, - }, - }, -} diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go deleted file mode 100644 index 6ae8e6ce9d3..00000000000 --- a/crypto/xchacha20poly1305/xchachapoly.go +++ /dev/null @@ -1,264 +0,0 @@ -// Package xchacha20poly1305 creates an AEAD using hchacha, chacha, and poly1305 -// This allows for randomized nonces to be used in conjunction with chacha. -package xchacha20poly1305 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - - "golang.org/x/crypto/chacha20poly1305" -) - -// Implements crypto.AEAD -type xchacha20poly1305 struct { - key [KeySize]byte -} - -const ( - // KeySize is the size of the key used by this AEAD, in bytes. - KeySize = 32 - // NonceSize is the size of the nonce used with this AEAD, in bytes. 
- NonceSize = 24 - // TagSize is the size added from poly1305 - TagSize = 16 - // MaxPlaintextSize is the max size that can be passed into a single call of Seal - MaxPlaintextSize = (1 << 38) - 64 - // MaxCiphertextSize is the max size that can be passed into a single call of Open, - // this differs from plaintext size due to the tag - MaxCiphertextSize = (1 << 38) - 48 - - // sigma are constants used in xchacha. - // Unrolled from a slice so that they can be inlined, as slices can't be constants. - sigma0 = uint32(0x61707865) - sigma1 = uint32(0x3320646e) - sigma2 = uint32(0x79622d32) - sigma3 = uint32(0x6b206574) -) - -var ( - ErrInvalidKeyLen = errors.New("xchacha20poly1305: bad key length") - ErrInvalidNonceLen = errors.New("xchacha20poly1305: bad nonce length") - ErrInvalidCipherTextLen = errors.New("xchacha20poly1305: ciphertext too large") -) - -// New returns a new xchachapoly1305 AEAD -func New(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, ErrInvalidKeyLen - } - ret := new(xchacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (c *xchacha20poly1305) NonceSize() int { - return NonceSize -} - -func (c *xchacha20poly1305) Overhead() int { - return TagSize -} - -func (c *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSize { - panic("xchacha20poly1305: bad nonce length passed to Seal") - } - - if uint64(len(plaintext)) > MaxPlaintextSize { - panic("xchacha20poly1305: plaintext too large") - } - - var subKey [KeySize]byte - var hNonce [16]byte - var subNonce [chacha20poly1305.NonceSize]byte - copy(hNonce[:], nonce[:16]) - - HChaCha20(&subKey, &hNonce, &c.key) - - // This can't error because we always provide a correctly sized key - chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) - - copy(subNonce[4:], nonce[16:]) - - return chacha20poly1305.Seal(dst, subNonce[:], plaintext, additionalData) -} - -func (c *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, ErrInvalidNonceLen - } - if uint64(len(ciphertext)) > MaxCiphertextSize { - return nil, ErrInvalidCipherTextLen - } - var subKey [KeySize]byte - var hNonce [16]byte - var subNonce [chacha20poly1305.NonceSize]byte - copy(hNonce[:], nonce[:16]) - - HChaCha20(&subKey, &hNonce, &c.key) - - // This can't error because we always provide a correctly sized key - chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) - - copy(subNonce[4:], nonce[16:]) - - return chacha20poly1305.Open(dst, subNonce[:], ciphertext, additionalData) -} - -// HChaCha exported from -// https://github.com/aead/chacha20/blob/8b13a72661dae6e9e5dea04f344f0dc95ea29547/chacha/chacha_generic.go#L194 -// TODO: Add support for the different assembly instructions used there. - -// The MIT License (MIT) - -// Copyright (c) 2016 Andreas Auernhammer - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
- -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key. -// It can be used as a key-derivation-function (KDF). -func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20Generic(out, nonce, key) } - -func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) { - v00 := sigma0 - v01 := sigma1 - v02 := sigma2 - v03 := sigma3 - v04 := binary.LittleEndian.Uint32(key[0:]) - v05 := binary.LittleEndian.Uint32(key[4:]) - v06 := binary.LittleEndian.Uint32(key[8:]) - v07 := binary.LittleEndian.Uint32(key[12:]) - v08 := binary.LittleEndian.Uint32(key[16:]) - v09 := binary.LittleEndian.Uint32(key[20:]) - v10 := binary.LittleEndian.Uint32(key[24:]) - v11 := binary.LittleEndian.Uint32(key[28:]) - v12 := binary.LittleEndian.Uint32(nonce[0:]) - v13 := binary.LittleEndian.Uint32(nonce[4:]) - v14 := binary.LittleEndian.Uint32(nonce[8:]) - v15 := binary.LittleEndian.Uint32(nonce[12:]) - - for i := 0; i < 20; i += 2 { - v00 += v04 - v12 ^= v00 - v12 = (v12 << 16) | (v12 >> 16) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 12) | (v04 >> 20) - v00 += v04 - v12 ^= v00 - v12 = (v12 << 8) | (v12 >> 24) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 7) | (v04 >> 25) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 16) | (v13 >> 16) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 12) | (v05 >> 20) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 8) | (v13 >> 24) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 7) | (v05 >> 25) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 16) | (v14 >> 16) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 12) | (v06 >> 20) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 8) | (v14 >> 24) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 7) | (v06 >> 25) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 16) | (v15 >> 16) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 12) | (v07 >> 20) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 8) | (v15 >> 24) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 7) | (v07 >> 25) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 16) | (v15 >> 16) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 12) | (v05 >> 20) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 8) | (v15 >> 24) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 7) | (v05 >> 25) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 16) | (v12 >> 16) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 12) | (v06 >> 20) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 8) | (v12 >> 24) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 7) | (v06 >> 25) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 16) | (v13 >> 16) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 12) | (v07 >> 20) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 8) | (v13 >> 24) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 7) | (v07 >> 25) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 16) | (v14 >> 16) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 12) | (v04 >> 20) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 8) | (v14 >> 24) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 7) | (v04 >> 25) - } - - binary.LittleEndian.PutUint32(out[0:], v00) - binary.LittleEndian.PutUint32(out[4:], v01) - binary.LittleEndian.PutUint32(out[8:], v02) - 
binary.LittleEndian.PutUint32(out[12:], v03) - binary.LittleEndian.PutUint32(out[16:], v12) - binary.LittleEndian.PutUint32(out[20:], v13) - binary.LittleEndian.PutUint32(out[24:], v14) - binary.LittleEndian.PutUint32(out[28:], v15) -} diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go deleted file mode 100644 index 6844f74106e..00000000000 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package xchacha20poly1305 - -import ( - "bytes" - cr "crypto/rand" - mr "math/rand" - "testing" -) - -// The following test is taken from -// https://github.com/golang/crypto/blob/master/chacha20poly1305/chacha20poly1305_test.go#L69 -// It requires the below copyright notice, where "this source code" refers to the following function. -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found at the bottom of this file. -func TestRandom(t *testing.T) { - // Some random tests to verify Open(Seal) == Plaintext - for i := 0; i < 256; i++ { - var nonce [24]byte - var key [32]byte - - al := mr.Intn(128) - pl := mr.Intn(16384) - ad := make([]byte, al) - plaintext := make([]byte, pl) - _, err := cr.Read(key[:]) - if err != nil { - t.Errorf("error on read: %v", err) - } - _, err = cr.Read(nonce[:]) - if err != nil { - t.Errorf("error on read: %v", err) - } - _, err = cr.Read(ad) - if err != nil { - t.Errorf("error on read: %v", err) - } - _, err = cr.Read(plaintext) - if err != nil { - t.Errorf("error on read: %v", err) - } - - aead, err := New(key[:]) - if err != nil { - t.Fatal(err) - } - - ct := aead.Seal(nil, nonce[:], plaintext, ad) - - plaintext2, err := aead.Open(nil, nonce[:], ct, ad) - if err != nil { - t.Errorf("random #%d: Open failed", i) - continue - } - - if !bytes.Equal(plaintext, plaintext2) { - t.Errorf("random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) - continue - } - - if len(ad) > 0 { - alterAdIdx := mr.Intn(len(ad)) - ad[alterAdIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering additional data", i) - } - ad[alterAdIdx] ^= 0x80 - } - - alterNonceIdx := mr.Intn(aead.NonceSize()) - nonce[alterNonceIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering nonce", i) - } - nonce[alterNonceIdx] ^= 0x80 - - alterCtIdx := mr.Intn(len(ct)) - ct[alterCtIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering ciphertext", i) - } - ct[alterCtIdx] ^= 0x80 - } -} - -// AFOREMENTIONED LICENSE -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go deleted file mode 100644 index cfb0466af6e..00000000000 --- a/crypto/xsalsa20symmetric/symmetric.go +++ /dev/null @@ -1,61 +0,0 @@ -package xsalsa20symmetric - -import ( - "errors" - "fmt" - - "golang.org/x/crypto/nacl/secretbox" - - "github.com/cometbft/cometbft/crypto" -) - -// TODO, make this into a struct that implements crypto.Symmetric. - -const ( - nonceLen = 24 - secretLen = 32 -) - -var ( - ErrInvalidCiphertextLen = errors.New("xsalsa20symmetric: ciphertext is too short") - ErrCiphertextDecryption = errors.New("xsalsa20symmetric: ciphertext decryption failed") -) - -// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) -// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. -func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { - if len(secret) != secretLen { - panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) - } - nonce := crypto.CRandBytes(nonceLen) - nonceArr := [nonceLen]byte{} - copy(nonceArr[:], nonce) - secretArr := [secretLen]byte{} - copy(secretArr[:], secret) - ciphertext = make([]byte, nonceLen+secretbox.Overhead+len(plaintext)) - copy(ciphertext, nonce) - secretbox.Seal(ciphertext[nonceLen:nonceLen], plaintext, &nonceArr, &secretArr) - return ciphertext -} - -// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) -// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. 
-func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { - if len(secret) != secretLen { - panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) - } - if len(ciphertext) <= secretbox.Overhead+nonceLen { - return nil, ErrInvalidCiphertextLen - } - nonce := ciphertext[:nonceLen] - nonceArr := [nonceLen]byte{} - copy(nonceArr[:], nonce) - secretArr := [secretLen]byte{} - copy(secretArr[:], secret) - plaintext = make([]byte, len(ciphertext)-nonceLen-secretbox.Overhead) - _, ok := secretbox.Open(plaintext[:0], ciphertext[nonceLen:], &nonceArr, &secretArr) - if !ok { - return nil, ErrCiphertextDecryption - } - return plaintext, nil -} diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go deleted file mode 100644 index 7f0da23b391..00000000000 --- a/crypto/xsalsa20symmetric/symmetric_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package xsalsa20symmetric - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "golang.org/x/crypto/bcrypt" - - "github.com/cometbft/cometbft/crypto" -) - -func TestSimple(t *testing.T) { - - plaintext := []byte("sometext") - secret := []byte("somesecretoflengththirtytwo===32") - ciphertext := EncryptSymmetric(plaintext, secret) - plaintext2, err := DecryptSymmetric(ciphertext, secret) - - require.Nil(t, err, "%+v", err) - assert.Equal(t, plaintext, plaintext2) -} - -func TestSimpleWithKDF(t *testing.T) { - - plaintext := []byte("sometext") - secretPass := []byte("somesecret") - secret, err := bcrypt.GenerateFromPassword(secretPass, 12) - if err != nil { - t.Error(err) - } - secret = crypto.Sha256(secret) - - ciphertext := EncryptSymmetric(plaintext, secret) - plaintext2, err := DecryptSymmetric(ciphertext, secret) - - require.Nil(t, err, "%+v", err) - assert.Equal(t, plaintext, plaintext2) -} diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index ee582871976..00000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,66 +0,0 @@ -version: '3' - -services: - node0: - container_name: node0 - image: "cometbft/localnode" - ports: - - "26656-26657:26656-26657" - environment: - - ID=0 - - LOG=${LOG:-cometbft.log} - volumes: - - ./build:/cometbft:Z - networks: - localnet: - ipv4_address: 192.167.10.2 - - node1: - container_name: node1 - image: "cometbft/localnode" - ports: - - "26659-26660:26656-26657" - environment: - - ID=1 - - LOG=${LOG:-cometbft.log} - volumes: - - ./build:/cometbft:Z - networks: - localnet: - ipv4_address: 192.167.10.3 - - node2: - container_name: node2 - image: "cometbft/localnode" - environment: - - ID=2 - - LOG=${LOG:-cometbft.log} - ports: - - "26661-26662:26656-26657" - volumes: - - ./build:/cometbft:Z - networks: - localnet: - ipv4_address: 192.167.10.4 - - node3: - container_name: node3 - image: "cometbft/localnode" - environment: - - ID=3 - - LOG=${LOG:-cometbft.log} - ports: - - "26663-26664:26656-26657" - volumes: - - ./build:/cometbft:Z - networks: - localnet: - ipv4_address: 192.167.10.5 - -networks: - localnet: - driver: bridge - ipam: - driver: default - config: - - subnet: 192.167.10.0/16 diff --git a/docs/README.md b/docs/README.md index c13ca2f4ca7..1560a757464 100644 --- a/docs/README.md +++ b/docs/README.md @@ -16,14 +16,14 @@ CometBFT serves blockchain applications. More formally, CometBFT performs Byzantine Fault Tolerant (BFT) State Machine Replication (SMR) for arbitrary deterministic, finite state machines. 
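
To make "deterministic, finite state machine" concrete: replication works only if the same ordered transactions always produce the same state on every node. A toy sketch of such a transition function (hypothetical and illustrative only; real applications implement this behind the ABCI interface):

```go
package main

import "fmt"

// State is a toy replicated state: account balances.
type State struct {
	Balances map[string]int
}

// Apply is a deterministic transition function: no randomness, no wall-clock
// time, no dependence on map iteration order. Given the same ordered txs,
// every node ends up with the same balances.
func (s *State) Apply(tx string) {
	var from, to string
	var amount int
	if _, err := fmt.Sscanf(tx, "%s %s %d", &from, &to, &amount); err != nil {
		return // malformed txs are rejected deterministically
	}
	s.Balances[from] -= amount
	s.Balances[to] += amount
}

func main() {
	s := &State{Balances: map[string]int{"alice": 10, "bob": 0}}
	for _, tx := range []string{"alice bob 3", "alice bob 2"} {
		s.Apply(tx)
	}
	fmt.Println(s.Balances) // map[alice:5 bob:5]
}
```
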
-For more background, see [What is CometBFT?](introduction/README.md#what-is-cometbft.md). +For more background, see [What is CometBFT?](./explanation/introduction/README.md). -To get started quickly with an example application, see the [quick start guide](guides/quick-start.md). +To get started quickly with an example application, see the [quick start guide](tutorials/quick-start.md). To learn about application development on CometBFT, see the [Application Blockchain Interface](https://github.com/cometbft/cometbft/tree/main/spec/abci). For more details on using CometBFT, see the respective documentation for -[CometBFT internals](core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). +[CometBFT internals](explanation/core/), [benchmarking and monitoring](guides/tools/), and [network deployments](guides/networks/). ## Contribute diff --git a/docs/app-dev/README.md b/docs/app-dev/README.md deleted file mode 100644 index aff0a570ca2..00000000000 --- a/docs/app-dev/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -order: false -parent: - order: 3 ---- - -# Apps - -- [Using ABCI-CLI](./abci-cli.md) -- [Getting Started](./getting-started.md) -- [Indexing transactions](./indexing-transactions.md) -- [Application Architecture Guide](./app-architecture.md) diff --git a/docs/architecture/adr-108-e2e-abci++.md b/docs/architecture/adr-108-e2e-abci++.md deleted file mode 100644 index 9d18db4422d..00000000000 --- a/docs/architecture/adr-108-e2e-abci++.md +++ /dev/null @@ -1,310 +0,0 @@ -# ADR 108: E2E tests for CometBFT's behaviour in respect to ABCI 1.0. - -## Changelog -- 2023-08-08: Initial version (@nenadmilosevic95) - - -## Context - -ABCI 1.0 defines the interface between the application and CometBFT. A part of the specification is the [ABCI 1.0 grammar](../../spec/abci/abci%2B%2B_comet_expected_behavior) that describes the sequences of calls that the application can expect from CometBFT. -In order to demonstrate that CometBFT behaves as expected from the viewpoint of the application, we need to test whether CometBFT respects this ABCI 1.0 grammar. To do this, we need to enhance the e2e tests infrastructure. Specifically, we plan to do three things: -- Log every CometBFT's ABCI 1.0 request during the execution. -- Parse the logs post-mortem and extract all ABCI 1.0 requests. -- Check if the set of observed requests respects the ABCI 1.0 grammar. - - -Issue: [353](https://github.com/cometbft/cometbft/issues/353). - -Current version, ABCI 1.0, does not support vote extensions (ABCI 2.0). However, this is the next step. - - -## Decision - -### 1) ABCI 1.0 requests logging -The idea was to do this at the Application side. Every time the Application -receives a request, it logs it. - -**Implementation** - -The rationale behind this part of the implementation was to log the request concisely and use the existing structures as much as possible. - -Whenever an ABCI 1.0 request is made, the application will create `abci.Request` (`abci` stands for `"github.com/cometbft/cometbft/abci/types"`) and log it. The example is below. - -```go -func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { - - r := &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.RequestInitChain{}}} - err := app.logAbciRequest(r) - if err != nil { - return nil, err - } - - ... -} -``` -Notice here that we create an empty `abci.RequestInitChain` object while we can also use the one passed to the `InitChain` function. 
The reason behind this is that, at the moment, we do not need specific fields of the request; we just need to be able to extract the information about the request type. For this, an empty object of a particular type is enough. - -The `app.logABCIRequest(r)` function is a new function implemented in the same file (`test/e2e/app/app.go`). If the `ABCIRequestsLoggingEnabled` flag is set to `true`, set automatically when ABCI 1.0 tests are enabled, it logs received requests. The full implementation is the following: - -```go -func (app *Application) logABCIRequest(req *abci.Request) error { - if !app.cfg.ABCIRequestsLoggingEnabled { - return nil - } - s, err := GetABCIRequestString(req) - if err != nil { - return err - } - app.logger.Info(s) - return nil -} -``` - -`GetABCIRequestString(req)` is a new method that receives a request and returns its string representation. The implementation and tests for this function and the opposite function `GetABCIRequestFromString(req)` -that returns `abci.Request` from the string are provided in files `test/e2e/app/log.go` and `test/e2e/app/log_test.go`, respectively. To create a string representation of a request, we first marshal the request via `proto.Marshal()` method and then convert received bytes in the string using `base64.StdEncoding.EncodeToString()` method. In addition, we surround the new string with `abci-req` constants so that we can find lines with ABCI 1.0 request more easily. The code of the method is below: - -```go -func GetABCIRequestString(req *abci.Request) (string, error) { - b, err := proto.Marshal(req) - if err != nil { - return "", err - } - reqStr := base64.StdEncoding.EncodeToString(b) - s := ABCI_REQ + reqStr + ABCI_REQ - return s, nil -} -``` - -*Note:* At the moment, we are not compressing the marshalled request before converting it to `base64` `string` because we are logging the empty requests that take at most 24 bytes. However, if we decide to log the actual requests in the future, we might want to compress them. Based on a few tests, we observed that the size of a request can go up to 7KB. - -If in the future we want to log another ABCI 1.0 request type, we just need to do the same thing: -create a corresponding `abci.Request` and log it via -`app.logABCIRequest(r)`. - -### 2) Parsing the logs -We need a code that will take the logs from all nodes and collect the ABCI 1.0 requests that were logged by the application. - -**Implementation** - -This logic is implemented inside the `fetchABCIRequests(t *testing.T, nodeName string)` function that resides in `test/e2e/tests/e2e_test.go` file. This function does three things: -- Takes the output of a specific node in the testnet from the moment we launched the testnet until the function is called. The node name is passed as a function parameter. It uses the `docker-compose logs` and `grep nodeName` commands. -- Parses the logs line by line and extracts the `abci.Request`, if one exists. The request is received by forwarding each line to the `app.GetABCIRequestFromString(req)` method. -- Returns the array of slices where each slice contains the set of `abci.Request`s logged on that node. Every time a crash happens, a new array element (new slice `[]*abci.Request`) will be created. We know a crash has happened because we log "Application started" every time the application starts. Specifically, we added this log inside `NewApplication()` function in `test/e2e/app/app.go` file. 
In the end, `fetchABCIRequests()` will return just one slice if the node did not experience any crashes and $n+1$ slices if there were $n$ crashes. The benefit of logging the requests in the previously described way is that now we can use `[]*abci.Request` to store ABCI 1.0 requests of any type. - - - -### 3) ABCI 1.0 grammar checker -The idea here was to find a library that automatically verifies whether a specific execution respects the prescribed grammar. - -**Implementation** - -We found the following library - https://github.com/goccmack/gogll. It generates a GLL or LR(1) parser and an FSA-based lexer for any context-free grammar. What we needed to do is to rewrite [ABCI 1.0 grammar](../../spec/abci/abci%2B%2B_comet_expected_behavior.md#valid-method-call-sequences) -using the syntax that the library understands. -The new grammar is below. - -```abnf - -Start : CleanStart | Recovery ; - -CleanStart : InitChain StateSync ConsensusExec | InitChain ConsensusExec ; -StateSync : StateSyncAttempts SuccessSync | SuccessSync ; -StateSyncAttempts : StateSyncAttempt | StateSyncAttempt StateSyncAttempts ; -StateSyncAttempt : OfferSnapshot ApplyChunks | OfferSnapshot ; -SuccessSync : OfferSnapshot ApplyChunks ; -ApplyChunks : ApplyChunk | ApplyChunk ApplyChunks ; - -Recovery : ConsensusExec ; - -ConsensusExec : ConsensusHeights ; -ConsensusHeights : ConsensusHeight | ConsensusHeight ConsensusHeights ; -ConsensusHeight : ConsensusRounds FinalizeBlock Commit | FinalizeBlock Commit ; -ConsensusRounds : ConsensusRound | ConsensusRound ConsensusRounds ; -ConsensusRound : Proposer | NonProposer ; - -Proposer : PrepareProposal | PrepareProposal ProcessProposal ; -NonProposer: ProcessProposal ; - - -InitChain : "init_chain" ; -FinalizeBlock : "finalize_block" ; -Commit : "commit" ; -OfferSnapshot : "offer_snapshot" ; -ApplyChunk : "apply_snapshot_chunk" ; -PrepareProposal : "prepare_proposal" ; -ProcessProposal : "process_proposal" ; - - ``` - -If you compare this grammar with the original one, you will notice that, in addition to vote extensions, -`Info` is removed. The reason is that, as explained in the section [CometBFT's expected behaviour](../../spec/abci/abci%2B%2B_comet_expected_behavior.md#valid-method-call-sequences), one of the -purposes of the `Info` method is being part of the RPC handling from an external -client. Since this can happen at any time, it complicates the -grammar. -This is not true in other cases, but since the Application does -not know why the `Info` is called, we removed -it totally from the new grammar. The Application is still logging the `Info` -call, but a specific test would need to be written to check whether it happens -at the right moment. - -Moreover, both grammars, the original and the new, represent the node's expected behaviour from the fresh beginning (`CleanStart`) or after a crash (`Recovery`). -This is why we needed to separate the grammar into two different files (`test/e2e/pkg/grammar/clean-start/abci_grammar_clean_start.md` and `test/e2e/pkg/grammar/recovery/abci_grammar_recovery.md`) and generate two parsers: one for `CleanStart` and one for `Recovery` executions. If we didn't do this, a parser would classify a `CleanStart` execution that happens after the crash as a valid one. This is why later when we verify the execution, we first determine whether a set of requests represent a `CleanStart` or `Recovery` execution and then check its validity by calling an appropriate parser. 
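
To make this dispatch concrete, a driver over the slices returned by `fetchABCIRequests()` might look like the following sketch (`verifyAll` is a hypothetical helper; `Verify` matches the signature shown later in this ADR):

```go
// Hypothetical test helper; assumes
//
//	abci "github.com/cometbft/cometbft/abci/types"
//
// and the e2e grammar package this ADR describes.
func verifyAll(checker *grammar.GrammarChecker, executions [][]*abci.Request) error {
	for i, reqs := range executions {
		// Slice 0 is the run from genesis (CleanStart); every later slice
		// starts right after a crash (Recovery) and needs the other parser.
		isCleanStart := i == 0
		if _, err := checker.Verify(reqs, isCleanStart); err != nil {
			return fmt.Errorf("execution %d does not respect the grammar: %w", i, err)
		}
	}
	return nil
}
```

The only decision the driver makes is which grammar a slice is checked against; everything else is delegated to the generated parsers.
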
- -Lastly, it is worth noticing that the `(inf)` part of the grammar is replaced with the `*`. This results in the new grammar being finite compared to the original, which represents an infinite (omega) grammar. - -The `gogll` library receives the file with the grammar as input, and it generates the corresponding parser and lexer. The actual commands are integrated into `test/e2e/Makefile` and executed when `make grammar` is invoked. -The resulting code is inside the following directories: -- `test/e2e/pkg/grammar/clean-start/lexer`, -- `test/e2e/pkg/grammar/clean-start/parser`, -- `test/e2e/pkg/grammar/clean-start/sppf`, -- `test/e2e/pkg/grammar/clean-start/token`, -- `test/e2e/pkg/grammar/recovery/lexer`, -- `test/e2e/pkg/grammar/recovery/parser`, -- `test/e2e/pkg/grammar/recoveryt/sppf`, -- `test/e2e/pkg/grammar/recovery/token`. - -Apart from this auto-generated code, we implemented `GrammarChecker` abstraction -which knows how to use the generated parsers and lexers to verify whether a -specific execution (list of ABCI 1.0 calls logged by the Application while the -testnet was running) respects the ABCI 1.0 grammar. The implementation and tests -for it are inside `test/e2e/pkg/grammar/checker.go` and -`test/e2e/pkg/grammar/checker_test.go`, respectively. - -How the `GrammarChecker` works is demonstrated with the test `TestABCIGrammar` -implemented in `test/e2e/tests/abci_test.go` file. - -```go -func TestABCIGrammar(t *testing.T) { - checker := grammar.NewGrammarChecker(grammar.DefaultConfig()) - testNode(t, func(t *testing.T, node e2e.Node) { - if !node.Testnet.ABCITestsEnabled { - return - } - reqs, err := fetchABCIRequests(t, node.Name) - if err != nil { - t.Error(fmt.Errorf("collecting of ABCI requests failed: %w", err)) - } - for i, r := range reqs { - isCleanStart := i == 0 - _, err := checker.Verify(r, isCleanStart) - if err != nil { - t.Error(fmt.Errorf("ABCI grammar verification failed: %w", err)) - } - } - }) -} -``` - -Specifically, the test first creates a `GrammarChecker` object. Then for each node in the testnet, it collects all requests -logged by this node. Remember here that `fetchABCIRequests()` returns an array of slices(`[]*abci.Request`) where the slice -with index 0 corresponds to the node's `CleanStart` execution, and each additional slice corresponds to the `Recovery` -execution after a specific crash. Each node must have one `CleanStart` execution and the same number of `Recovery` executions -as the number of crashes that happened on this node. If collecting was successful, the test checks whether each execution -respects the ABCI 1.0 -grammar by calling `checker.Verify()` method. If `Verify` returns an error, the specific execution does not respect the -grammar, and the test will fail. - -The tests are executed only if `ABCITestsEnabled` is set to `true`. This is done through the manifest file. Namely, if we -want to test whether CometBFT respects ABCI 1.0 grammar, we would need to enable these tests by adding `abci_tests_enabled = -true` in the manifest file of a particular testnet (e.g. `networks/ci.toml`). This will automatically activate logging on the -application side. - -The `Verify()` method is shown below. -```go -func (g *GrammarChecker) Verify(reqs []*abci.Request, isCleanStart bool) (bool, error) { - if len(reqs) == 0 { - return false, fmt.Errorf("execution with no ABCI calls.") - } - r := g.filterRequests(reqs) - // Check if the execution is incomplete. 
-	if len(r) == 0 {
-		return true, nil
-	}
-	var errors []*Error
-	execution := g.getExecutionString(r)
-	if isCleanStart {
-		errors = g.verifyCleanStart(execution)
-	} else {
-		errors = g.verifyRecovery(execution)
-	}
-	if errors == nil {
-		return true, nil
-	}
-	return false, fmt.Errorf("%v\nFull execution:\n%v", g.combineErrors(errors, g.cfg.NumberOfErrorsToShow), g.addHeightNumbersToTheExecution(execution))
-}
-```
-
-It receives a set of ABCI 1.0 requests and a flag saying whether they represent a `CleanStart` execution, and does the following:
-- Checks whether the execution is empty.
-- Filters the requests by calling the method `filterRequests()`. This method removes all requests that are not supported by the current version of the grammar. In addition, it filters the last height by removing all ABCI 1.0 requests after the last `Commit`. The function `fetchABCIRequests()` can be called in the middle of a height; as a result, the last height may be incomplete and classified as invalid, even when it is not. A simple example: the last request fetched via `fetchABCIRequests()` is `FinalizeBlock`, but `Commit` happens after `fetchABCIRequests()` was invoked. The execution would then be considered faulty because `Commit` is missing, even though the `Commit` happens later. This is why, if the execution consists of only one incomplete height and `filterRequests()` returns an empty set of requests, the `Verify()` method considers the execution valid and returns `true`.
-- Generates an execution string by replacing each `abci.Request` with the corresponding terminal from the grammar. This logic is implemented in the `getExecutionString()` function, which receives a list of `abci.Request`s and generates a string where every request is replaced with its terminal. For example, a request `r` of type `abci.Request_PrepareProposal` is replaced with the string `prepare_proposal`, the first part of `r`'s string representation.
-- Checks whether the resulting string of terminals respects the grammar by calling the appropriate function (`verifyCleanStart()` or `verifyRecovery()`) depending on the execution type. The implementations of both functions are the same; they just use different parsers and lexers.
-- Returns `true` if the execution is valid, and an error otherwise. An example of an error is below.
-
-```
-FAIL: TestABCIGrammar/full02 (8.76s)
-    abci_test.go:24: ABCI grammar verification failed: The error: "Invalid clean-start execution: parser was expecting one of [init_chain], got [offer_snapshot] instead." has occurred at height 0.
-
-    Full execution:
-    0: offer_snapshot apply_snapshot_chunk finalize_block commit
-    1: finalize_block commit
-    2: finalize_block commit
-    3: finalize_block commit
-    ...
-```
-The error shown above reports an invalid `CleanStart` execution. Moreover, it says why the execution is considered invalid (`init_chain` was missing) and the height at which the error occurred. Notice that the height in a `CleanStart` execution corresponds to the actual consensus height, while for a `Recovery` execution, height 0 represents the first height after the crash. Lastly, after the error, the full execution, one height per line, is printed. This part may be optional and handled with a config flag, but we left it like this for now.
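
For illustration, the request-to-terminal mapping that `getExecutionString()` performs can be sketched as follows (a hypothetical `terminalOf` helper; the actual logic lives in `test/e2e/pkg/grammar/checker.go` and derives the terminal from the request's string representation):

```go
// Sketch of the request→terminal mapping (hypothetical implementation).
// Each supported request type corresponds to one grammar terminal.
func terminalOf(req *abci.Request) (string, bool) {
	switch req.Value.(type) {
	case *abci.Request_InitChain:
		return "init_chain", true
	case *abci.Request_PrepareProposal:
		return "prepare_proposal", true
	case *abci.Request_ProcessProposal:
		return "process_proposal", true
	case *abci.Request_FinalizeBlock:
		return "finalize_block", true
	case *abci.Request_Commit:
		return "commit", true
	case *abci.Request_OfferSnapshot:
		return "offer_snapshot", true
	case *abci.Request_ApplySnapshotChunk:
		return "apply_snapshot_chunk", true
	default:
		return "", false // not covered by the grammar; filtered out
	}
}
```

Any request type not covered by the grammar returns `false` here and is dropped by the filtering step described above.
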
-
-*Note:* The `gogll` parser can return many errors because it reports an error at every point at which it fails to parse a grammar production. Usually, the error of interest is the one that parsed the largest number of tokens. This is why, by default, we print only the last error; however, this can be configured with the `NumberOfErrorsToShow` field of `GrammarChecker`'s config.
-
-**Changing the grammar**
-
-Any modification to the grammar (`test/e2e/pkg/grammar/clean-start/abci_grammar_clean_start.md` or `test/e2e/pkg/grammar/recovery/abci_grammar_recovery.md`) requires generating a new parser and lexer. This is done by going to the `test/e2e/` directory and running:
-
-```bash
-make grammar
-```
-Notice that you need to have `gogll` installed on your machine for this to succeed. If it is not installed, you can install it with the following command:
-
-```bash
-go get github.com/goccmack/gogll/v3
-```
-Make sure you commit any changes to the auto-generated code together with the changes to the grammar.
-
-### Supporting additional ABCI requests
-
-Here we present the steps needed to support other ABCI requests in the future:
-
-- The application needs to log the new request in the same way as we do now.
-- We should include the new request in the grammar and generate a new parser and lexer.
-- We should add the new request to the list of supported requests. Namely, we should modify the function `isSupportedByGrammar()` in `test/e2e/pkg/grammar/checker.go` to return `true` for the new request type.
-
-## Status
-
-Implemented.
-
-To-do list:
-- add a CI workflow that checks whether `make grammar` has been executed.
-- extend this ADR (and implementation) to support ABCI 2.0 (i.e., ABCI calls related to vote extensions).
-- in the future, we might consider whether the logging (actually, tracing) should be done on the e2e application side or on the CometBFT side, so this infrastructure can be reused for MBT-like activities.
-
-## Consequences
-
-### Positive
-
-- We should be able to check whether CometBFT respects the ABCI 1.0 grammar.
-
-### Negative
-
-### Neutral
-
diff --git a/docs/architecture/tendermint-core/README.md b/docs/architecture/tendermint-core/README.md
deleted file mode 100644
index d1d8c276f34..00000000000
--- a/docs/architecture/tendermint-core/README.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-order: 1
-parent:
-  order: false
----
-
-# Tendermint Core Architecture Decision Records (ADR)
-
-Here we record all high-level architecture decisions in the Tendermint Core
-project. All implemented ADRs in this list naturally affect CometBFT, since
-CometBFT is a fork of Tendermint Core as of December 2022.
-
-This list is currently frozen and kept for reference purposes. To add new ADRs,
-please do so for CometBFT [here](../).
- -## Table of Contents - -### Implemented - -- [ADR-001: Logging](./adr-001-logging.md) -- [ADR-002: Event-Subscription](./adr-002-event-subscription.md) -- [ADR-003: ABCI-APP-RPC](./adr-003-abci-app-rpc.md) -- [ADR-004: Historical-Validators](./adr-004-historical-validators.md) -- [ADR-005: Consensus-Params](./adr-005-consensus-params.md) -- [ADR-008: Priv-Validator](./adr-008-priv-validator.md) -- [ADR-009: ABCI-Design](./adr-009-ABCI-design.md) -- [ADR-010: Crypto-Changes](./adr-010-crypto-changes.md) -- [ADR-011: Monitoring](./adr-011-monitoring.md) -- [ADR-014: Secp-Malleability](./adr-014-secp-malleability.md) -- [ADR-015: Crypto-Encoding](./adr-015-crypto-encoding.md) -- [ADR-016: Protocol-Versions](./adr-016-protocol-versions.md) -- [ADR-017: Chain-Versions](./adr-017-chain-versions.md) -- [ADR-018: ABCI-Validators](./adr-018-ABCI-Validators.md) -- [ADR-019: Multisigs](./adr-019-multisigs.md) -- [ADR-020: Block-Size](./adr-020-block-size.md) -- [ADR-021: ABCI-Events](./adr-021-abci-events.md) -- [ADR-025: Commit](./adr-025-commit.md) -- [ADR-026: General-Merkle-Proof](./adr-026-general-merkle-proof.md) -- [ADR-033: Pubsub](./adr-033-pubsub.md) -- [ADR-034: Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md) -- [ADR-043: Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md) -- [ADR-044: Lite-Client-With-Weak-Subjectivity](./adr-044-lite-client-with-weak-subjectivity.md) -- [ADR-046: Light-Client-Implementation](./adr-046-light-client-implementation.md) -- [ADR-047: Handling-Evidence-From-Light-Client](./adr-047-handling-evidence-from-light-client.md) -- [ADR-051: Double-Signing-Risk-Reduction](./adr-051-double-signing-risk-reduction.md) -- [ADR-052: Tendermint-Mode](./adr-052-tendermint-mode.md) -- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md) -- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md) -- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md) -- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md) -- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md) -- [ADR-065: Custom Event Indexing](./adr-065-custom-event-indexing.md) -- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md) -- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md) -- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md) -- [ADR-077: Configurable Block Retention](./adr-077-block-retention.md) -- [ADR-078: Non-zero Genesis](./adr-078-nonzero-genesis.md) - -### Accepted - -- [ADR-006: Trust-Metric](./adr-006-trust-metric.md) -- [ADR-024: Sign-Bytes](./adr-024-sign-bytes.md) -- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md) -- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md) -- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md) -- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md) -- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md) -- [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md) -- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md) - -### Deprecated - -- [ADR-035: Documentation](./adr-035-documentation.md) - -### Rejected - -- [ADR-023: ABCI-Propose-tx](./adr-023-ABCI-propose-tx.md) -- [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md) -- [ADR-058: Event-Hashing](./adr-058-event-hashing.md) - -### Proposed - -- [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md) -- [ADR-012: 
Peer-Transport](./adr-012-peer-transport.md) -- [ADR-013: Symmetric-Crypto](./adr-013-symmetric-crypto.md) -- [ADR-022: ABCI-Errors](./adr-022-abci-errors.md) -- [ADR-030: Consensus-Refactor](./adr-030-consensus-refactor.md) -- [ADR-036: Empty Blocks via ABCI](./adr-036-empty-blocks-abci.md) -- [ADR-037: Deliver-Block](./adr-037-deliver-block.md) -- [ADR-038: Non-Zero-Start-Height](./adr-038-non-zero-start-height.md) -- [ADR-040: Blockchain Reactor Refactor](./adr-040-blockchain-reactor-refactor.md) -- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) -- [ADR-042: State Sync Design](./adr-042-state-sync.md) -- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md) -- [ADR-050: Improved Trusted Peering](./adr-050-improved-trusted-peering.md) -- [ADR-057: RPC](./adr-057-RPC.md) -- [ADR-060: Go-API-Stability](./adr-060-go-api-stability.md) -- [ADR-061: P2P-Refactor-Scope](./adr-061-p2p-refactor-scope.md) -- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md) -- [ADR-064: Batch Verification](./adr-064-batch-verification.md) -- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md) -- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md) -- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md) -- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md) -- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md) diff --git a/docs/core/README.md b/docs/core/README.md deleted file mode 100644 index 26cb7a40607..00000000000 --- a/docs/core/README.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -order: 1 -parent: - title: Core - order: 4 ---- - -# Overview - -This section dives into the internals of the CometBFT's implementation. - -- [Using CometBFT](./using-cometbft.md) -- [Configuration](./configuration.md) -- [Running in Production](./running-in-production.md) -- [Metrics](./metrics.md) -- [Validators](./validators.md) -- [Subscribing to events](./subscription.md) -- [Block Structure](./block-structure.md) -- [RPC](./rpc.md) -- [Block Sync](./block-sync.md) -- [State Sync](./state-sync.md) -- [Mempool](./mempool.md) -- [Light Client](./light-client.md) diff --git a/docs/core/block-structure.md b/docs/core/block-structure.md deleted file mode 100644 index a422aa9cd9a..00000000000 --- a/docs/core/block-structure.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -order: 8 ---- - -# Block Structure - -The CometBFT consensus engine records all agreements by a -supermajority of nodes into a blockchain, which is replicated among all -nodes. This blockchain is accessible via various RPC endpoints, mainly -`/block?height=` to get the full block, as well as -`/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what -exactly is stored in these blocks? - -The [specification](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started. - -To dig deeper, check out the [types package documentation](https://godoc.org/github.com/cometbft/cometbft/types). diff --git a/docs/core/mempool.md b/docs/core/mempool.md deleted file mode 100644 index 8dd96878196..00000000000 --- a/docs/core/mempool.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -order: 12 ---- - -# Mempool - -## Transaction ordering - -Currently, there's no ordering of transactions other than the order they've -arrived (via RPC or from other nodes). - -So the only way to specify the order is to send them to a single node. 
- -valA: - -- `tx1` -- `tx2` -- `tx3` - -If the transactions are split up across different nodes, there's no way to -ensure they are processed in the expected order. - -valA: - -- `tx1` -- `tx2` - -valB: - -- `tx3` - -If valB is the proposer, the order might be: - -- `tx3` -- `tx1` -- `tx2` - -If valA is the proposer, the order might be: - -- `tx1` -- `tx2` -- `tx3` - -That said, if the transactions contain some internal value, like an -order/nonce/sequence number, the application can reject transactions that are -out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then -accept `tx1`. The sender can then retry sending `tx3`, which should probably be -rejected until the node has seen `tx2`. diff --git a/docs/core/metrics.md b/docs/core/metrics.md deleted file mode 100644 index 71cc8d2093b..00000000000 --- a/docs/core/metrics.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -order: 5 ---- - -# Metrics - -CometBFT can report and serve the Prometheus metrics, which in their turn can -be consumed by Prometheus collector(s). - -This functionality is disabled by default. - -To enable the Prometheus metrics, set `instrumentation.prometheus=true` in your -config file. Metrics will be served under `/metrics` on 26660 port by default. -Listen address can be changed in the config file (see -`instrumentation.prometheus\_listen\_addr`). - -## List of available metrics - -The following metrics are available: - -| **Name** | **Type** | **Tags** | **Description** | -|--------------------------------------------|-----------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------| -| abci\_connection\_method\_timing\_seconds | Histogram | method, type | Timings for each of the ABCI methods | -| blocksync\_syncing | Gauge | | Either 0 (not block syncing) or 1 (syncing) | -| consensus\_height | Gauge | | Height of the chain | -| consensus\_validators | Gauge | | Number of validators | -| consensus\_validators\_power | Gauge | | Total voting power of all validators | -| consensus\_validator\_power | Gauge | | Voting power of the node if in the validator set | -| consensus\_validator\_last\_signed\_height | Gauge | | Last height the node signed a block, if the node is a validator | -| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| consensus\_missing\_validators | Gauge | | Number of validators who did not sign | -| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators | -| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign | -| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators | -| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| consensus\_rounds | Gauge | | Number of rounds | -| consensus\_num\_txs | Gauge | | Number of transactions | -| consensus\_total\_txs | Gauge | | Total number of transactions committed | -| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer | -| consensus\_latest\_block\_height | Gauge | | /status sync\_info number | -| consensus\_block\_size\_bytes | Gauge | | Block size in bytes | -| consensus\_step\_duration | Histogram | step | Histogram of durations for each step in the consensus protocol | -| consensus\_round\_duration | Histogram | | Histogram of 
durations for all the rounds that have occurred since the process started | -| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node | -| consensus\_quorum\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | -| consensus\_full\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | -| consensus\_vote\_extension\_receive\_count | Counter | status | Number of vote extensions received | -| consensus\_proposal\_receive\_count | Counter | status | Total number of proposals received by the node since process start | -| consensus\_proposal\_create\_count | Counter | | Total number of proposals created by the node since process start | -| consensus\_round\_voting\_power\_percent | Gauge | vote\_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | -| consensus\_late\_votes | Counter | vote\_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. | -| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type | -| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type | -| p2p\_peers | Gauge | | Number of peers node's connected to | -| p2p\_peer\_receive\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel received from a given peer | -| p2p\_peer\_send\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel sent to a given peer | -| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer | -| p2p\_num\_txs | Gauge | peer\_id | Number of transactions submitted by each peer\_id | -| p2p\_pending\_send\_bytes | Gauge | peer\_id | Amount of data pending to be sent to peer | -| mempool\_size | Gauge | | Number of uncommitted transactions | -| mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes | -| mempool\_failed\_txs | Counter | | Number of failed transactions | -| mempool\_recheck\_times | Counter | | Number of transactions rechecked in the mempool | -| state\_block\_processing\_time | Histogram | | Time between BeginBlock and EndBlock in ms | -| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start | -| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start | -| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) | - -## Useful queries - -Percentage of missing + byzantine validators: - -```md -((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100 -``` diff --git a/docs/data-companion/README.md b/docs/data-companion/README.md deleted file mode 100644 index 5943a444067..00000000000 --- a/docs/data-companion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -order: false -parent: - title: Data Companion - order: 7 ---- - -# Guides - -- [Introduction](./intro.md) -- [gRPC services](./grpc.md) -- [Pruning service](./pruning.md) diff --git a/docs/explanation/README.md b/docs/explanation/README.md new file mode 100644 index 00000000000..78ba3141f40 --- 
/dev/null +++ b/docs/explanation/README.md @@ -0,0 +1,26 @@ +--- +order: 3 +title: CometBFT Explained +parent: + order: 3 +--- + +# CometBFT Explained + +## The comprehensive guide to understanding CometBFT + +This section is designed to provide you with a thorough understanding of the core +concepts that underpin CometBFT. You will delve into the block structure, explore the +light client, gain insights into the mempool, and learn about state sync, among other essential +concepts. + +This section also includes information about the new Data Companion gRPC endpoints +that allow external applications to access data from the node and manage data pruning. +This improves storage efficiency and node performance by keeping only the necessary data. + +By the end of this section, you will have a firm grasp of the fundamental +principles that make CometBFT a powerful technology in the realm of distributed systems. + +- [Introduction](./introduction/README.md) +- [Core Concepts](./core/README.md) +- [Data Companion](./data-companion/README.md) diff --git a/docs/explanation/core/README.md b/docs/explanation/core/README.md new file mode 100644 index 00000000000..2512adc18be --- /dev/null +++ b/docs/explanation/core/README.md @@ -0,0 +1,23 @@ +--- +order: 1 +parent: + title: Core + order: 4 +--- + +# Overview + +This section dives into the internals of CometBFT's implementation. + +- [Using CometBFT](using-cometbft.md) +- [Configuration](configuration.md) +- [Running in Production](running-in-production.md) +- [Metrics](metrics.md) +- [Validators](validators.md) +- [Subscribing to events](subscription.md) +- [Block Structure](block-structure.md) +- [RPC](rpc.md) +- [Block Sync](block-sync.md) +- [State Sync](state-sync.md) +- [Mempool](mempool.md) +- [Light Client](light-client.md) diff --git a/docs/explanation/core/block-structure.md b/docs/explanation/core/block-structure.md new file mode 100644 index 00000000000..dc6819be20a --- /dev/null +++ b/docs/explanation/core/block-structure.md @@ -0,0 +1,19 @@ +--- +order: 8 +--- + +# Block Structure + +The CometBFT consensus engine records all agreements by 2/3+ of nodes +into a blockchain, which is replicated among all nodes. This blockchain is +accessible via various RPC endpoints, mainly `/block?height=` to get the full +block, as well as `/blockchain?minHeight=_&maxHeight=_` to get a list of +headers. But what exactly is stored in these blocks? + +The [specification][data_structures] contains a detailed description of each +component - that's the best place to get started. + +To dig deeper, check out the [types package documentation][types]. + +[data_structures]: https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md +[types]: https://pkg.go.dev/github.com/cometbft/cometbft/types diff --git a/docs/core/block-sync.md b/docs/explanation/core/block-sync.md similarity index 95% rename from docs/core/block-sync.md rename to docs/explanation/core/block-sync.md index aac917180ed..2938a96bf6f 100644 --- a/docs/core/block-sync.md +++ b/docs/explanation/core/block-sync.md @@ -25,7 +25,7 @@ process. Once caught up, the daemon will switch out of Block Sync and into the normal consensus mode. After running for some time, the node is considered `caught up` if it has at least one peer and its height is at least as high as the max reported peer height. See [the IsCaughtUp -method](https://github.com/cometbft/cometbft/blob/main/blocksync/pool.go#L168). +method](https://github.com/cometbft/cometbft/blob/main/internal/blocksync/pool.go#L190).
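For illustration, the caught-up rule just described can be sketched as a small Go predicate (a simplified sketch with hypothetical names, not the actual `IsCaughtUp` implementation):

```go
// isCaughtUp sketches the rule above: a node is considered caught up once
// it has at least one peer and its own height is at least as high as the
// maximum height reported by its peers.
func isCaughtUp(ownHeight, maxPeerHeight int64, numPeers int) bool {
	return numPeers > 0 && ownHeight >= maxPeerHeight
}
```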
Note: While there have historically been multiple versions of blocksync, v0, v1, and v2, all versions other than v0 have been deprecated in favor of the simplest and most well-understood algorithm. diff --git a/docs/explanation/core/configuration.md b/docs/explanation/core/configuration.md new file mode 100644 index 00000000000..953feb74aff --- /dev/null +++ b/docs/explanation/core/configuration.md @@ -0,0 +1,12 @@ +--- +order: 3 +--- + +# Configuration + +CometBFT can be configured via a TOML file in `$CMTHOME/config/config.toml`. + +Some of the parameters can be overridden by command-line flags. + +For more information and a detailed explanation of all the parameters in the configuration file, please refer to the +[config.toml document in the CometBFT Configuration Manual](../../references/config/config.toml.md). diff --git a/docs/core/how-to-read-logs.md b/docs/explanation/core/how-to-read-logs.md similarity index 98% rename from docs/core/how-to-read-logs.md rename to docs/explanation/core/how-to-read-logs.md index de3eb7dfe99..f42fe49c9bc 100644 --- a/docs/core/how-to-read-logs.md +++ b/docs/explanation/core/how-to-read-logs.md @@ -111,7 +111,7 @@ I[10-04|13:54:30.410] Recheck txs module=mempoo Here is the list of modules you may encounter in CometBFT's log and a little overview of what they do. -- `abci-client` As mentioned in [Application Development Guide](../app-dev/abci-cli.md), CometBFT acts as an ABCI +- `abci-client` As mentioned in [Application Development Guide](../../guides/app-dev/abci-cli.md), CometBFT acts as an ABCI client with respect to the application and maintains 3 connections: mempool, consensus and query. The code used by CometBFT can be found [here](https://github.com/cometbft/cometbft/blob/main/abci/client). @@ -126,13 +126,13 @@ little overview what they do. found [here](https://github.com/cometbft/cometbft/blob/main/types/events.go). You can subscribe to them by calling the `subscribe` RPC method. Refer - to [RPC docs](./rpc.md) for additional information. + to [RPC docs](rpc.md) for additional information. - `mempool` Mempool module handles all incoming transactions, whether they come from peers or the application. - `p2p` Provides an abstraction around peer-to-peer communication. For more details, please check out the [README](https://github.com/cometbft/cometbft/blob/main/p2p/README.md). -- `rpc` [CometBFT's RPC](./rpc.md). +- `rpc` [CometBFT's RPC](rpc.md). - `rpc-server` RPC server. For implementation details, please read the [doc.go](https://github.com/cometbft/cometbft/blob/main/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which diff --git a/docs/core/light-client.md b/docs/explanation/core/light-client.md similarity index 94% rename from docs/core/light-client.md rename to docs/explanation/core/light-client.md index a698d03ddc7..4a237aab705 100644 --- a/docs/core/light-client.md +++ b/docs/explanation/core/light-client.md @@ -38,7 +38,7 @@ One way to obtain semi-trusted hash & height is to query multiple full nodes and compare their hashes: ```bash -$ curl -s https://233.123.0.140:26657:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" +$ curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" { "height": "273", "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" } diff --git a/docs/explanation/core/mempool.md b/docs/explanation/core/mempool.md new file mode 100644 index 00000000000..f38668ca18c --- /dev/null +++ b/docs/explanation/core/mempool.md @@ -0,0 +1,103 @@ +--- +order: 12 +--- + +# Mempool + +A mempool (a contraction of memory and pool) is a node’s data structure for +storing information on uncommitted transactions. It acts as a sort of waiting +room for transactions that have not yet been committed. + +CometBFT currently supports two types of mempools: `flood` and `nop`. + +## 1. Flood + +The `flood` mempool stores transactions in a concurrent linked list. When a new +transaction is received, the mempool first checks whether there's space for it (`size` and +`max_txs_bytes` config options) and that it's not too big (`max_tx_bytes` config +option). Then, it checks whether this transaction has already been seen, using +an LRU cache (`cache_size` regulates the cache's size). If all checks pass and +the transaction is not in the cache (meaning it's new), the ABCI +[`CheckTxAsync`][1] method is called. The ABCI application validates the +transaction using its own rules. + +If the transaction is deemed valid by the ABCI application, it's added to the linked list. + +The mempool's name (`flood`) comes from the dissemination mechanism. When a new +transaction is added to the linked list, the mempool sends it to all connected +peers. Peers themselves gossip this transaction to their peers and so on. One +can say that each transaction "floods" the network, hence the name `flood`. + +Note that there are experimental config options +`experimental_max_gossip_connections_to_persistent_peers` and +`experimental_max_gossip_connections_to_non_persistent_peers` to limit the +number of peers a transaction is broadcasted to. Also, you can turn off +broadcasting with the `broadcast` config option. + +After each committed block, CometBFT rechecks all uncommitted transactions (this can +be disabled with the `recheck` config option) by calling the ABCI +`CheckTxAsync` method for each of them. + +### Transaction ordering + +Currently, there's no ordering of transactions other than the order in which they +arrived (via RPC or from other nodes). + +So the only way to specify the order is to send them to a single node. + +valA: + +- `tx1` +- `tx2` +- `tx3` + +If the transactions are split up across different nodes, there's no way to +ensure they are processed in the expected order.
+ +valA: + +- `tx1` +- `tx2` + +valB: + +- `tx3` + +If valB is the proposer, the order might be: + +- `tx3` +- `tx1` +- `tx2` + +If valA is the proposer, the order might be: + +- `tx1` +- `tx2` +- `tx3` + +That said, if the transactions contain some internal value, like an +order/nonce/sequence number, the application can reject transactions that are +out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then +accept `tx1`. The sender can then retry sending `tx3`, which should probably be +rejected until the node has seen `tx2`. + +## 2. Nop + +The `nop` (short for no operation) mempool is used when the ABCI application developer wants to +build their own mempool. When `type = "nop"`, transactions are not stored anywhere +and are not gossiped to other peers using the P2P network. + +Submitting a transaction via the existing RPC methods (`BroadcastTxSync`, +`BroadcastTxAsync`, and `BroadcastTxCommit`) will always result in an error. + +Because there's no way for the consensus to know whether transactions are available +to be committed, the node will keep creating blocks, which may sometimes be +empty. Using `consensus.create_empty_blocks=false` is prohibited in such +cases. + +The ABCI application becomes responsible for storing, disseminating, and +proposing transactions using [`PrepareProposal`][2]. The concrete design is up +to the ABCI application developers. + +[1]: ../../../spec/abci/abci++_methods.md#checktx +[2]: ../../../spec/abci/abci++_methods.md#prepareproposal diff --git a/docs/explanation/core/metrics.md b/docs/explanation/core/metrics.md new file mode 100644 index 00000000000..f81102cc943 --- /dev/null +++ b/docs/explanation/core/metrics.md @@ -0,0 +1,94 @@ +--- +order: 5 +--- + +# Metrics + +CometBFT can report and serve Prometheus metrics, which can in turn be +consumed by Prometheus collector(s). + +This functionality is disabled by default. + +To enable the Prometheus metrics, set `instrumentation.prometheus=true` in your +config file. Metrics will be served under `/metrics` on port 26660 by default. +The listen address can be changed in the [config file](./configuration.md) (see +`instrumentation.prometheus_listen_addr`).
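For instance, the same settings can be expressed programmatically through CometBFT's Go config struct (a sketch; the field names are assumed from the `config` package, and `config.toml` remains the usual way to set them):

```go
package main

import (
	"fmt"

	cfg "github.com/cometbft/cometbft/config"
)

func main() {
	// Equivalent of setting instrumentation.prometheus = true and
	// instrumentation.prometheus_listen_addr in config.toml.
	c := cfg.DefaultConfig()
	c.Instrumentation.Prometheus = true
	c.Instrumentation.PrometheusListenAddr = ":26660" // default port

	fmt.Println("Prometheus metrics served on", c.Instrumentation.PrometheusListenAddr)
}
```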
+ +## List of available metrics + +The following metrics are available: + +| **Name** | **Type** | **Tags** | **Description** | +| ------------------------------------------------------- | --------- | ------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | +| abci\_connection\_method\_timing\_seconds | Histogram | method, type | Timings for each of the ABCI methods | +| blocksync\_syncing | Gauge | | Either 0 (not block syncing) or 1 (syncing) | +| consensus\_height | Gauge | | Height of the chain | +| consensus\_validators | Gauge | | Number of validators | +| consensus\_validators\_power | Gauge | validator\_address | Total voting power of all validators | +| consensus\_validator\_power | Gauge | validator\_address | Voting power of the node if in the validator set | +| consensus\_validator\_last\_signed\_height | Gauge | validator\_address | Last height the node signed a block, if the node is a validator | +| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus\_missing\_validators | Gauge | | Number of validators who did not sign | +| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators | +| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign | +| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators | +| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus\_rounds | Gauge | | Number of rounds | +| consensus\_num\_txs | Gauge | | Number of transactions | +| consensus\_total\_txs | Gauge | | Total number of transactions committed | +| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer | +| consensus\_latest\_block\_height | Gauge | | /status sync\_info number | +| consensus\_block\_size\_bytes | Gauge | | Block size in bytes | +| consensus\_step\_duration\_seconds | Histogram | step | Histogram of durations for each step in the consensus protocol | +| consensus\_round\_duration\_seconds | Histogram | | Histogram of durations for all the rounds that have occurred since the process started | +| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node | +| consensus\_quorum\_prevote\_delay | Gauge | proposer\_address | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | +| consensus\_full\_prevote\_delay | Gauge | proposer\_address | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | +| consensus\_vote\_extension\_receive\_count | Counter | status | Number of vote extensions received | +| consensus\_proposal\_receive\_count | Counter | status | Total number of proposals received by the node since process start | +| consensus\_proposal\_create\_count | Counter | | Total number of proposals created by the node since process start | +| consensus\_round\_voting\_power\_percent | Gauge | vote\_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | +| consensus\_late\_votes | Counter | vote\_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than 
this node is currently in. | +| consensus\_duplicate\_vote | Counter | | Number of times we received a duplicate vote | +| consensus\_duplicate\_block\_part | Counter | | Number of times we received a duplicate block part | +| consensus\_proposal\_timestamp\_difference | Histogram | is\_timely | Difference between the timestamp in the proposal message and the local time of the validator at the time it received the message | +| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type | +| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type | +| p2p\_peers | Gauge | | Number of peers the node is connected to | +| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer | +| p2p\_recv\_rate\_limiter\_delay | Counter | peer\_id | Time spent sleeping by the receive rate limiter, in seconds | +| p2p\_send\_rate\_limiter\_delay | Counter | peer\_id | Time spent sleeping by the send rate limiter, in seconds | +| mempool\_lane\_size | Counter | lane | Number of uncommitted transactions per lane | +| mempool\_lane\_bytes | Counter | lane | Number of used bytes per lane | +| mempool\_size | Gauge | | Number of uncommitted transactions in the mempool | +| mempool\_size\_bytes | Gauge | | Total size of the mempool in bytes | +| mempool\_tx\_size\_bytes | Histogram | | Histogram of transaction sizes in bytes | +| mempool\_evicted\_txs | Counter | | Number of transactions that made it into the mempool but were later evicted for being invalid | +| mempool\_failed\_txs | Counter | | Number of transactions that failed to make it into the mempool for being invalid | +| mempool\_rejected\_txs | Counter | | Number of transactions that failed to make it into the mempool due to resource limits | +| mempool\_recheck\_times | Counter | | Number of times transactions are rechecked in the mempool | +| mempool\_already\_received\_txs | Counter | | Number of times transactions were received more than once | +| mempool\_active\_outbound\_connections | Gauge | | Number of connections being actively used for gossiping transactions (experimental) | +| mempool\_recheck\_duration\_seconds | Gauge | | Cumulative time spent rechecking transactions | +| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start | +| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start | +| state\_pruning\_service\_block\_retain\_height | Gauge | | Accepted block retain height set by the data companion | +| state\_pruning\_service\_block\_results\_retain\_height | Gauge | | Accepted block results retain height set by the data companion | +| state\_pruning\_service\_tx\_indexer\_retain\_height | Gauge | | Accepted transactions indices retain height set by the data companion | +| state\_pruning\_service\_block\_indexer\_retain\_height | Gauge | | Accepted blocks indices retain height set by the data companion | +| state\_application\_block\_retain\_height | Gauge | | Accepted block retain height set by the application | +| state\_block\_store\_base\_height | Gauge | | First height at which a block is available | +| state\_abciresults\_base\_height | Gauge | | First height at which ABCI results are available | +| state\_tx\_indexer\_base\_height | Gauge | | First height at which tx indices are available | +| 
state\_block\_indexer\_base\_height | Gauge | | First height at which block indices are available | +| state\_store\_access\_duration\_seconds | Histogram | method | Duration of accesses to the state store labeled by which method was called on the store | +| state\_fire\_block\_events\_delay\_seconds | Gauge | | Duration of event firing related to a new block | +| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) | + +## Useful queries + +Percentage of missing + byzantine validators: + +``` +((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100 +``` diff --git a/docs/explanation/core/proposer-based-timestamps.md b/docs/explanation/core/proposer-based-timestamps.md new file mode 100644 index 00000000000..33fd0d66673 --- /dev/null +++ b/docs/explanation/core/proposer-based-timestamps.md @@ -0,0 +1,265 @@ +--- +order: 14 +--- + +# Proposer-Based Timestamp (PBTS) + +This document provides an overview of the Proposer-Based Timestamp (PBTS) +algorithm introduced in CometBFT v1.0. +It outlines the core functionality of the algorithm and details the consensus +parameters that govern its operation. + +## Overview + +The PBTS algorithm defines a way for a blockchain to create block +timestamps that are within a reasonable bound of the validators' clocks on +the network. +It replaces the BFT Time algorithm for timestamp calculation and assignment, which computes the +timestamp of a block using the timestamps aggregated from precommit messages. + +### Block Timestamps + +Each block produced by CometBFT contains a timestamp, represented by the `Time` +field of the block's `Header`. + +The timestamp of each block is expected to be a meaningful representation of time that is +useful for the protocols and applications built on top of CometBFT. +The following protocols and application features require a reliable source of time: + +* Light Clients [rely on correspondence between their known time][light-client-verification] and the block time for block verification. +* Evidence expiration is determined [either in terms of heights or in terms of time][evidence-verification]. +* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 + days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime). +* IBC packets can use either a [timestamp or a height to timeout packet + delivery](https://ibc.cosmos.network/v8/ibc/light-clients/updates-and-misbehaviour?_highlight=time#checkformisbehaviour). + +### Enabling PBTS + +The PBTS algorithm is **not enabled by default** in CometBFT v1.0. + +If a network upgrades to CometBFT v1.0, it will still use the BFT Time +algorithm until PBTS is enabled. +The same applies to new networks that do not change the default values for +consensus parameters in the genesis file. + +Enabling PBTS requires configuring the [consensus parameters](#consensus-parameters) +that govern the operation of the algorithm. +There are two `SynchronyParams`, `Precision` and `MessageDelay`, used to +validate block timestamps, as described in the following sections. +There is also a `FeatureParams.PbtsEnableHeight` that defines the height from which PBTS +is adopted. + +### Selecting a Timestamp + +When a validator creates a new block, it reads the time from its local clock +and uses this reading as the timestamp for the block. +The proposer of a block is thus free to select the block timestamp, but this +timestamp must be validated by other nodes in the network.
+ +### Validating Timestamps + +When each validator on the network receives a proposed block, it performs a +series of checks to ensure that the block can be considered valid as a +candidate to be the next block in the chain. +If the block is considered invalid, the validator issues a `nil` prevote, +signaling to the rest of the network that the proposed block is not valid. + +The PBTS algorithm performs a validity check on the timestamp of proposed +blocks. +This check only applies the first time a block is proposed. +If the same block is re-proposed in a future round because it was deemed valid +by the network, this check is not performed. +Refer to the PBTS specification for more details. + +When a validator receives a proposal for a new block, it ensures that the timestamp in +the proposal is within a bound of the validator's local clock. +For that, it uses the `Precision` and `MessageDelay` consensus parameters, +which are the same across all nodes for a given height. +Specifically, the algorithm checks that the proposed block's timestamp is +no more than `Precision` greater than the node's local clock +(i.e., not in the future) +and no less than `MessageDelay + Precision` behind the node's local clock +(i.e., not too far in the past). +If the proposed block's timestamp is within the window of acceptable +timestamps, the timestamp is considered **timely**. +If the block timestamp is **not timely**, the validator rejects the proposed block by +issuing a `nil` prevote. + +### Clock Synchronization + +The PBTS algorithm requires the clocks of the validators in the network to be +within `Precision` of each other. In practice, this means that validators +should periodically synchronize their clocks, e.g., against a reliable NTP server. +Validators whose clocks drift too far away from the rest of the network will no +longer propose blocks with valid timestamps. Additionally, they will not consider +the timestamps of blocks proposed by their peers to be valid either. + + +## Consensus Parameters + +The functionality of the PBTS algorithm is governed by two consensus +parameters: the synchrony parameters `Precision` and `MessageDelay`. +An additional consensus parameter `PbtsEnableHeight` is used to enable PBTS +when instantiating a new network or when upgrading an existing network that +uses BFT Time. + +Consensus parameters are configured through the genesis file, for new chains, or by the ABCI application, for new and existing chains, and are the same +across all nodes in the network at any given height. + +### `SynchronyParams.Precision` + +The `Precision` parameter configures the acceptable upper-bound of clock drift +among all of the validators in the network. +Any two validators are expected to have clocks that differ by at most +`Precision` at any given instant. + +The `Precision` parameter is of [`time.Duration`](https://pkg.go.dev/time#Duration) type. + +Networks should choose a `Precision` that is large enough to represent the +worst-case clock drift among all participants. +Due to the [leap second events](https://github.com/tendermint/tendermint/issues/7724), +it is recommended to set `Precision` to at least `500ms`. + +### `SynchronyParams.MessageDelay` + +The `MessageDelay` parameter configures the acceptable upper-bound for the +end-to-end delay for transmitting a `Proposal` message from the proposer to +_all_ validators in the network. + +The `MessageDelay` parameter is of [`time.Duration`](https://pkg.go.dev/time#Duration) type.
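Taken together, the two synchrony parameters define the window used by the timeliness check described in [Validating Timestamps](#validating-timestamps). A minimal sketch of that check in Go (a simplification, not CometBFT's actual implementation):

```go
package main

import (
	"fmt"
	"time"
)

// isTimely sketches the PBTS acceptance window: a proposal timestamp is
// timely if it is at most Precision ahead of the local clock (not in the
// future) and at most MessageDelay+Precision behind it (not too far in
// the past).
func isTimely(proposalTime, localTime time.Time, precision, messageDelay time.Duration) bool {
	notInFuture := !proposalTime.After(localTime.Add(precision))
	notTooOld := !proposalTime.Before(localTime.Add(-(messageDelay + precision)))
	return notInFuture && notTooOld
}

func main() {
	now := time.Now()
	fmt.Println(isTimely(now.Add(-time.Second), now, 500*time.Millisecond, 2*time.Second)) // true
}
```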
+ +Networks should choose a `MessageDelay` that is large enough to represent the +delay for a `Proposal` message to reach all participants. +As `Proposal` messages are fixed-size, this delay should not depend, a priori, +on the size of proposed blocks. +But it does depend on the number of nodes in the network, the latency of their +connections, and the level of congestion in the network. + +### `FeatureParams.PbtsEnableHeight` + +The `PbtsEnableHeight` parameter configures the first height at which the PBTS +algorithm should be adopted for generating and validating block timestamps in a network. + +The `PbtsEnableHeight` parameter is an integer. + +While `PbtsEnableHeight` is set to `0`, the network will adopt the legacy BFT +Time algorithm. + +When `PbtsEnableHeight` is set to a height `H > 0`, the network will switch to +the PBTS algorithm from height `H` on. +The network will still adopt the legacy BFT Time algorithm to produce block +timestamps until height `H - 1`, and to validate block timestamps produced in +heights up to `H - 1`. +The enable height `H` must be a future height when it is configured, that is, it +can only be set to a height that is larger than the current blockchain height. + +Once `PbtsEnableHeight` is set and the PBTS algorithm is enabled (i.e., from height +`PbtsEnableHeight`), it is not possible to return to the legacy BFT Time algorithm. +The switch to PBTS is therefore irreversible. + +Finally, if `PbtsEnableHeight` is set to `InitialHeight` in the genesis file or by the +ABCI `InitChain` method, the network will adopt PBTS from the initial +height. This is the recommended setup for new chains. + + +## Important Notes + +When configuring a network to adopt the PBTS algorithm, the following steps must be considered: + +1. Make sure that the clocks of validators are [synchronized](#clock-synchronization) **before** enabling PBTS. +1. Make sure that the configured value for [`SynchronyParams.Precision`](#synchronyparamsprecision) is + reasonable. +1. Make sure that the configured value for [`SynchronyParams.MessageDelay`](#synchronyparamsmessagedelay) is + reasonable and large enough to reflect the maximum expected delay for messages in the network. + Setting this parameter to a small value may impact the progress of the + network, namely, blocks may take very long to be committed. + - An approach to define this parameter is to observe the latency for + fixed-size messages (e.g., `Vote` and `Proposal`) over time and define an + empirical distribution of message delays. + Then pick, as the value for the `MessageDelay` parameter, a high percentile of + this distribution (e.g., the 99th or 99.9th percentile). +1. Make sure that the block times **currently** produced by the network do not + differ too much from real time. + This is especially relevant when block times produced by BFT time are in the + future, with respect to real time. + +### Adaptive MessageDelay + +Observation 3. is important because a network that sets +[`SynchronyParams.MessageDelay`](#synchronyparamsmessagedelay) +to a small value is likely to suffer from long block latencies +and even, in extreme cases, from the complete halt of the network. +By a small value here we mean a message delay that is not enough for an important +portion of the validators to receive the `Proposal` message broadcast by the +proposer of a round within the configured message delay.
+If the subset of validators that are unlikely to receive the proposal within the +configured `SynchronyParams.MessageDelay` holds more than 1/3 of the total +voting power of the network, the network could stop producing blocks +indefinitely. + +To prevent the network from halting due to the configuration of a small value +for `SynchronyParams.MessageDelay`, we have introduced the concept of +[adaptive synchrony parameters](https://github.com/cometbft/cometbft/issues/2184). +In summary, this means that the synchrony parameters adopted to verify whether +a proposal timestamp is timely are relaxed as more rounds are required to +commit a block. +The maximum message delay for round 0 is still the configured +`SynchronyParams.MessageDelay`; most blocks are committed in round 0, so there +are no changes for the regular case. +From round 1, the maximum message delay adopted by PBTS slowly increases, at a +rate of 10% per round. +As a result, the adopted maximum message delay will eventually converge to the +actual message delay observed in the network. + +While this solution prevents the network from halting, it still delays the +commit of a block by several rounds. +For example, if the configured `SynchronyParams.MessageDelay` is 0.5s but an +important portion of nodes regularly receive the `Proposal` message after 1s, +between 7 and 8 rounds will be necessary to commit a block. +This is a significant performance penalty that network operators must avoid. +Since the network will not halt in this scenario, operators who notice the +problem can agree to increase the value of `SynchronyParams.MessageDelay` +in order to fix it. + +### BFT Times in the future + +Observation 4. is important because, with the adoption of PBTS, block times are +expected to converge to values that resemble real time. +At the same time, the property of monotonicity of block times is guaranteed by both BFT +Time and PBTS. +This means that proposers using PBTS will **wait** until the time they read +from their local clocks becomes greater than the time of the last committed +block before proposing a new block. + +As a result, if the time of the last block produced using BFT Time is too far in +the future, then the first block produced using PBTS will take a very long time to be +committed: namely, the time it takes for the proposer's clock to reach the time of +the previously committed block. +To prevent this from happening, first, follow recommendation 1., i.e., synchronize +the validators' clocks. +Then wait until the block times produced by BFT Time converge to values that do +not differ too much from real time. +This may take a long time because, under BFT Time, if the value a validator reads +from its local clock is smaller than the time of the previous block, the +time it sets for a new block will be the time of the previous block plus `1ms`. +Block times will nonetheless eventually converge to real time. + +## See Also + +* [Block Time specification][block-time-spec]: overview of block timestamp properties. +* [Consensus parameters][consensus-parameters]: list of consensus parameters, their usage and validation. +* [PBTS specification][pbts-spec]: formal specification and all of the details of the PBTS algorithm. +* [BFT Time specification][bft-time-spec]: all details of the legacy BFT Time algorithm to compute block times.
+* [Proposer-Based Timestamps Runbook][pbts-runbook]: a guide for diagnosing and + fixing issues related to clock synchronization and the configuration of the + `SynchronyParams` consensus parameters adopted by PBTS. + +[pbts-spec]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/proposer-based-timestamp/README.md +[bft-time-spec]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/bft-time.md +[block-time-spec]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/time.md +[pbts-runbook]: ../../guides/tools/proposer-based-timestamps-runbook.md + +[consensus-parameters]: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_app_requirements.md#consensus-parameters + +[light-client-verification]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/README.md#failure-model +[evidence-verification]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/evidence.md#verification diff --git a/docs/core/rpc.md b/docs/explanation/core/rpc.md similarity index 100% rename from docs/core/rpc.md rename to docs/explanation/core/rpc.md diff --git a/docs/core/running-in-production.md b/docs/explanation/core/running-in-production.md similarity index 77% rename from docs/core/running-in-production.md rename to docs/explanation/core/running-in-production.md index beaee33056e..178f8e16b21 100644 --- a/docs/core/running-in-production.md +++ b/docs/explanation/core/running-in-production.md @@ -7,41 +7,66 @@ order: 4 ## Database By default, CometBFT uses the `syndtr/goleveldb` package for its in-process -key-value database. If you want maximal performance, it may be best to install -the real C-implementation of LevelDB and compile CometBFT to use that using -`make build COMETBFT_BUILD_OPTIONS=cleveldb`. See the [install -instructions](../introduction/install.md) for details. +key-value database. CometBFT keeps multiple distinct databases in the `$CMTHOME/data`: - `blockstore.db`: Keeps the entire blockchain - stores blocks, - block commits, and block meta data, each indexed by height. Used to sync new + block commits, and block metadata, each indexed by height. Used to sync new peers. - `evidence.db`: Stores all verified evidence of misbehavior. -- `state.db`: Stores the current blockchain state (ie. height, validators, +- `state.db`: Stores the current blockchain state (i.e. height, validators, consensus params). Only grows if consensus params or validators change. Also used to temporarily store intermediate results during block processing. -- `tx_index.db`: Indexes txs (and their results) by tx hash and by DeliverTx result events. +- `tx_index.db`: Indexes transactions by tx hash and height. The tx results are indexed if they are added to the `FinalizeBlock` response in the application. -By default, CometBFT will only index txs by their hash and height, not by their DeliverTx -result events. See [indexing transactions](../app-dev/indexing-transactions.md) for -details. +> By default, CometBFT will only index transactions by their hash and height. If you want the result events to be indexed, see [indexing transactions](../../guides/app-dev/indexing-transactions.md#adding-events) +for details (a minimal sketch is shown at the end of this section). Applications can expose block pruning strategies to the node operator. Please read the documentation of your application to find out more details. -Applications can use [state sync](./state-sync.md) to help nodes bootstrap quickly. +Applications can use [state sync](state-sync.md) to help nodes bootstrap quickly.
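As noted above, tx result events are only indexed when the application attaches them to its `FinalizeBlock` response. A minimal sketch of what this looks like on the application side (assuming the ABCI Go types from `github.com/cometbft/cometbft/abci/types`; the event type and attributes here are hypothetical):

```go
package main

import (
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"
)

// txResultWithEvents builds a tx result carrying a custom event so that the
// node's tx indexer can pick it up.
func txResultWithEvents() *abci.ExecTxResult {
	return &abci.ExecTxResult{
		Code: 0, // 0 means the transaction was valid
		Events: []abci.Event{{
			Type: "transfer", // hypothetical event type
			Attributes: []abci.EventAttribute{
				// Index: true marks the attribute as queryable.
				{Key: "sender", Value: "addr1", Index: true},
			},
		}},
	}
}

func main() {
	fmt.Println(txResultWithEvents().Events[0].Type)
}
```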
## Logging Default logging level (`log_level = "main:info,state:info,statesync:info,*:error"`) should suffice for -normal operation mode. Read [this -post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756) -for details on how to configure `log_level` config variable. Some of the -modules can be found [here](./how-to-read-logs.md#list-of-modules). If -you're trying to debug CometBFT or asked to provide logs with debug -logging level, you can do so by running CometBFT with -`--log_level="*:debug"`. +normal operation mode. It will log info messages from the `main`, `state` and +`statesync` modules and error messages from all other modules. + +The format of the logging level is: + +``` +<module>:<level>,<module>:<level>,...,<module>:<level> +``` + +Where `<module>` is the module that generated the log message, `<level>` is +one of the log levels: `info`, `error`, `debug` or `none`. Some of +the modules can be found [here](how-to-read-logs.md#list-of-modules). Others +can be observed by running CometBFT. The `none` log level can be used +to suppress messages from a particular module or all modules (`log_level = +"state:info,*:none"` will only log info messages from the `state` module). + +If you're trying to debug CometBFT or are asked to provide logs with debug logging +level, you can do so by running CometBFT with `--log_level="*:debug"`. + +### Stripping debug log messages at compile-time + +Logging debug messages can lead to significant memory allocations, especially when outputting variable values. In Go, +even if `log_level` is not set to `debug`, these allocations can still occur because the program evaluates the debug +statements regardless of the log level. + +To prevent unnecessary memory usage, you can strip out all debug-level code from the binary at compile time using +build flags. This approach improves the performance of CometBFT by excluding debug messages entirely, even when `log_level` +is set to `debug`. This technique is ideal for production environments that prioritize performance optimization over debug logging. + +In order to build a binary stripping all debug log messages (e.g. `log.Debug()`) from the binary, use the `nodebug` tag: +``` +COMETBFT_BUILD_OPTIONS=nodebug make install +``` + +> Note: Compiling CometBFT with this method will completely disable all debug messages. If you require debug output, +> avoid compiling the binary with the `nodebug` build tag. ## Write Ahead Logs (WAL) @@ -62,12 +87,12 @@ If your `consensus.wal` is corrupted, see [below](#wal-corruption). ### Mempool WAL -The `mempool.wal` logs all incoming txs before running CheckTx, but is +The `mempool.wal` logs all incoming transactions before running CheckTx, but is otherwise not used in any programmatic way. It's just a kind of manual safeguard. Note the mempool provides no durability guarantees - a tx sent to one or many nodes may never make it into the blockchain if those nodes crash before being able to -propose it. Clients must monitor their txs by subscribing over websockets, -polling for them, or using `/broadcast_tx_commit`. In the worst case, txs can be +propose it. Clients must monitor their transactions by subscribing over websockets, +polling for them, or using `/broadcast_tx_commit`. In the worst case, transactions can be resent from the mempool WAL manually. For the above reasons, the `mempool.wal` is disabled by default. To enable, set @@ -76,7 +101,7 @@ For the above reasons, the `mempool.wal` is disabled by default. 
To enable, set ## DoS Exposure and Mitigation -Validators are supposed to setup [Sentry Node Architecture](./validators.md) +Validators are supposed to set up [Sentry Node Architecture](validators.md) to prevent Denial-of-Service attacks. ### P2P @@ -129,7 +154,7 @@ for more information. ## Debugging CometBFT If you ever have to debug CometBFT, the first thing you should probably do is -check out the logs. See [How to read logs](./how-to-read-logs.md), where we +check out the logs. See [How to read logs](how-to-read-logs.md), where we explain what certain log statements mean. If, after skimming through the logs, things are still not clear, the next thing @@ -154,7 +179,7 @@ just the votes seen at the current height. If, after consulting with the logs and above endpoints, you still have no idea what's happening, consider using `cometbft debug kill` sub-command. This command will scrape all the available info and kill the process. See -[Debugging](../tools/debugging.md) for the exact format. +[Debugging](../../guides/tools/debugging.md) for the exact format. You can inspect the resulting archive yourself or create an issue on [GitHub](https://github.com/cometbft/cometbft). Before opening an issue @@ -171,10 +196,10 @@ Other useful endpoints include the previously mentioned `/status`, `/net_info` and `/validators`. CometBFT can also report and serve Prometheus metrics. See -[Metrics](./metrics.md). +[Metrics](metrics.md). The `cometbft debug dump` sub-command can be used to periodically dump useful -information into an archive. See [Debugging](../tools/debugging.md) for more +information into an archive. See [Debugging](../../guides/tools/debugging.md) for more information. ## What happens when my app dies @@ -290,7 +315,7 @@ Both our `ed25519` and `secp256k1` implementations require constant time private keys on both `ed25519` and `secp256k1`. This doesn't exist in hardware on 32 bit x86 platforms ([source](https://bearssl.org/ctmul.html)), and it depends on the compiler to enforce that it is constant time. It's unclear at -this point whenever the Golang compiler does this correctly for all +this point whether the Golang compiler does this correctly for all implementations. **We do not support nor recommend running a validator on 32 bit architectures OR @@ -347,30 +372,35 @@ Setting this to false will stop the mempool from relaying transactions to other peers until they are included in a block. It means only the peer you send the tx to will see it until it is included in a block. -- `consensus.skip_timeout_commit` - -We want `skip_timeout_commit=false` when there is economics on the line -because proposers should wait to hear for more votes. But if you don't -care about that and want the fastest consensus, you can skip it. It will -be kept false by default for public deployments (e.g. [Cosmos -Hub](https://hub.cosmos.network/)) while for enterprise -applications, setting it to true is not a problem. - - `consensus.peer_gossip_sleep_duration` You can try to reduce the time your node sleeps before checking if -theres something to send its peers. +there's something to send its peers. - `consensus.timeout_commit` -You can also try lowering `timeout_commit` (time we sleep before -proposing the next block). +We want `timeout_commit` to be greater than zero when there is economics on the line +because proposers should wait to hear more votes. But if you don't +care about that and want the fastest consensus, you can lower it. It will +be kept at `1s` by default for public deployments (e.g. 
[Cosmos +Hub](https://hub.cosmos.network/)) while for enterprise +applications, setting it to `0` is not a problem. + +**Notice** that the `timeout_commit` configuration flag was deprecated in v1.0. +It is now up to the application to return a `next_block_delay` value upon +[`FinalizeBlock`](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#finalizeblock) +to define how long CometBFT should wait from when it has +committed a block until it actually starts the next height. +Note that this delay includes the time it takes for CometBFT and the +application to process the committed block. - `p2p.addr_book_strict` By default, CometBFT checks whether a peer's address is routable before saving it to the address book. The address is considered routable if the IP -is [valid and within allowed ranges](https://github.com/cometbft/cometbft/blob/main/p2p/netaddress.go#L258). +is [valid and within allowed ranges](https://github.com/cometbft/cometbft/blob/main/p2p/netaddr/netaddr.go#L258). This may not be the case for private or local networks, where your IP range is usually strictly limited and private. If that is the case, you need to set `addr_book_strict` diff --git a/docs/core/state-sync.md b/docs/explanation/core/state-sync.md similarity index 56% rename from docs/core/state-sync.md rename to docs/explanation/core/state-sync.md index 0f477302e29..f1d12cb6185 100644 --- a/docs/core/state-sync.md +++ b/docs/explanation/core/state-sync.md @@ -1,36 +1,45 @@ ---- +--- order: 11 --- # State Sync -With block sync a node is downloading all of the data of an application from genesis and verifying it. -With state sync your node will download data related to the head or near the head of the chain and verify the data. -This leads to drastically shorter times for joining a network. +With block sync a node is downloading all of the data of an application from +genesis and verifying it. + +With state sync your node will download data related to the head or near the +head of the chain and verify the data. This leads to drastically shorter times +for joining a network. ## Using State Sync -State sync will continuously work in the background to supply nodes with chunked data when bootstrapping. +State sync will continuously work in the background to supply nodes with +chunked data when bootstrapping. -> NOTE: Before trying to use state sync, see if the application you are operating a node for supports it. +> NOTE: Before trying to use state sync, see if the application you are +> operating a node for supports it. -Under the state sync section in `config.toml` you will find multiple settings that need to be configured in order for your node to use state sync. +Under the state sync section in `config.toml` you will find multiple settings +that need to be configured in order for your node to use state sync. Let's break down the settings: -- `enable`: Enable is to inform the node that you will be using state sync to bootstrap your node. -- `rpc_servers`: RPC servers are needed because state sync utilizes the light client for verification. +- `enable`: Set this to `true` to inform the node that you will be using state sync to + bootstrap your node. +- `rpc_servers`: RPC servers are needed because state sync utilizes the light + client for verification. - 2 servers are required; more is always helpful. 
-- `temp_dir`: Temporary directory is store the chunks in the machines local storage, If nothing is set it will create a directory in `/tmp` - -The next information you will need to acquire it through publicly exposed RPC's or a block explorer which you trust. -- `trust_height`: Trusted height defines at which height your node should trust the chain. -- `trust_hash`: Trusted hash is the hash in the `BlockID` corresponding to the trusted height. +- `trust_height`: Trusted height defines at which height your node should trust + the chain. +- `trust_hash`: Trusted hash is the hash of the block at the trusted height. - `trust_period`: Trust period is the period in which headers can be verified. > :warning: This value should be significantly smaller than the unbonding period. -If you are relying on publicly exposed RPC's to get the need information, you can use `curl`. +For other settings, visit the [Configuration](./configuration.md) page. + +If you need to get this information from publicly exposed RPCs, you +can use `curl` and [`jq`][jq]. Example: @@ -46,3 +55,5 @@ The response will be: "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" } ``` + +[jq]: https://jqlang.github.io/jq/ diff --git a/docs/core/subscription.md b/docs/explanation/core/subscription.md similarity index 85% rename from docs/core/subscription.md rename to docs/explanation/core/subscription.md index b6be5f3b36f..33049fccbe6 100644 --- a/docs/core/subscription.md +++ b/docs/explanation/core/subscription.md @@ -36,28 +36,28 @@ method via Websocket along with a valid query. Check out [API docs](https://docs.cometbft.com/main/rpc/) for more information on query syntax and other options. -You can also use tags, given you had included them into DeliverTx +You can also use tags, given you had included them in the FinalizeBlock response, to query transaction results. See [Indexing -transactions](../app-dev/indexing-transactions.md) for details. +transactions](../../guides/app-dev/indexing-transactions.md#adding-events) for details. ## Query parameter and event type restrictions -While CometBFT imposes no restrictions on the application with regards to the type of -the event output, there are several considerations that need to be taken into account +While CometBFT imposes no restrictions on the application with regards to the type of +the event output, there are several considerations that need to be taken into account when querying events with numeric values. - Queries convert all numeric event values to `big.Float`, provided by `math/big`. Integers are converted into a float with a precision equal to the number of bits needed -to represent this integer. This is done to avoid precision loss for big integers when they -are converted with the default precision (`64`). -- When comparing two values, if either one of them is a float, the other one will be represented -as a big float. 
Integers are again parsed as big floats with a precision equal to the number +of bits required to represent them. +- As with all floating point comparisons, comparing floats with decimal values can lead to imprecise +results. +- Queries cannot include negative numbers + +Prior to version `v0.38.x`, floats were not supported as query parameters. ## ValidatorSetUpdates diff --git a/docs/core/using-cometbft.md b/docs/explanation/core/using-cometbft.md similarity index 97% rename from docs/core/using-cometbft.md rename to docs/explanation/core/using-cometbft.md index 546387c8321..ceef831b581 100644 --- a/docs/core/using-cometbft.md +++ b/docs/explanation/core/using-cometbft.md @@ -130,7 +130,7 @@ cometbft node ``` By default, CometBFT will try to connect to an ABCI application on -`127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in +`tcp://127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in another window. If you don't, kill CometBFT and run an in-process version of the `kvstore` app: @@ -139,8 +139,8 @@ cometbft node --proxy_app=kvstore ``` After a few seconds, you should see blocks start streaming in. Note that blocks -are produced regularly, even if there are no transactions. See _No Empty -Blocks_, below, to modify this setting. +are produced regularly, even if there are no transactions. See [No Empty +Blocks](#no-empty-blocks), below, to modify this setting. CometBFT supports in-process versions of the `counter`, `kvstore`, and `noop` apps that ship as examples with `abci-cli`. It's easy to compile your app @@ -223,7 +223,7 @@ which sends a 4 byte transaction: \[01 02 03 04\]. With `POST` (using `json`), the raw hex must be `base64` encoded: ```sh -curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657 +curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'Content-Type:text/plain;' http://localhost:26657 ``` which sends the same 4 byte transaction: \[01 02 03 04\]. @@ -248,7 +248,7 @@ address book files. ## Configuration CometBFT uses a `config.toml` for configuration. For details, see [the -config specification](./configuration.md). +config specification](configuration.md). Notable options include the socket address of the application (`proxy_app`), the listening address of the CometBFT peer @@ -338,7 +338,7 @@ Note the mempool does not provide strong guarantees - just because a tx passed CheckTx (ie. was accepted into the mempool), doesn't mean it will be committed, as nodes with the tx in their mempool may crash before they get to propose. For more information, see the [mempool -write-ahead-log](./running-in-production.md#mempool-wal) +write-ahead-log](running-in-production.md#mempool-wal) ## CometBFT Networks @@ -552,7 +552,7 @@ failing, you need at least four validator nodes (e.g., 2/3). Updating validators in a live network is supported but must be explicitly programmed by the application developer. See the [application -developers guide](../app-dev/abci-cli.md) for more details. +developers guide](../../guides/app-dev/abci-cli.md) for more details. 
### Local Network diff --git a/docs/core/validators.md b/docs/explanation/core/validators.md similarity index 84% rename from docs/core/validators.md rename to docs/explanation/core/validators.md index 34f99290856..631600367b5 100644 --- a/docs/core/validators.md +++ b/docs/explanation/core/validators.md @@ -10,14 +10,13 @@ _votes_ which contain cryptographic signatures signed by each validator's private key. Some Proof-of-Stake consensus algorithms aim to create a "completely" -decentralized system where all stakeholders (even those who are not -always available online) participate in the committing of blocks. -CometBFT has a different approach to block creation. Validators are -expected to be online, and the set of validators is permissioned/curated -by some external process. Proof-of-stake is not required, but can be -implemented on top of CometBFT consensus. That is, validators may be -required to post collateral on-chain, off-chain, or may not be required -to post any collateral at all. +decentralized system where all stakeholders (even those who are not always +available online) participate in the committing of blocks. CometBFT has a +different approach to block creation. Validators are expected to be online, and +the set of validators is permissioned/curated by the ABCI application. +Proof-of-stake is not required, but can be implemented on top of CometBFT +consensus. That is, validators may be required to post collateral on-chain, +off-chain, or may not be required to post any collateral at all. Validators have a cryptographic key-pair and an associated amount of "voting power". Voting power need not be the same. @@ -26,8 +25,8 @@ Validators have a cryptographic key-pair and an associated amount of There are two ways to become validator. -1. They can be pre-established in the [genesis state](./using-cometbft.md#genesis) -2. The ABCI app responds to the EndBlock message with changes to the +1. They can be pre-established in the [genesis state](using-cometbft.md#genesis) +2. The ABCI app responds to the FinalizeBlock message with changes to the existing validator set. ## Setting up a Validator @@ -36,7 +35,7 @@ When setting up a validator there are countless ways to configure your setup. Th ### Network Layout -![ALT Network Layout](../imgs/sentry_layout.png) +![ALT Network Layout](../../imgs/sentry_layout.png) The diagram is based on AWS, other cloud providers will have similar solutions to design a solution. Running nodes is not limited to cloud providers, you can run nodes on bare metal systems as well. The architecture will be the same no matter which setup you decide to go with. @@ -50,7 +49,7 @@ A more persistent solution (not detailed on the diagram) is to have multiple dir ### Local Configuration -![ALT Local Configuration](../imgs/sentry_local_config.png) +![ALT Local Configuration](../../imgs/sentry_local_config.png) The validator will only talk to the sentry that are provided, the sentry nodes will communicate to the validator via a secret connection and the rest of the network through a normal connection. The sentry nodes do have the option of communicating with each other as well. @@ -100,16 +99,3 @@ More Information can be found at these links: Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. 
It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM. Currently CometBFT uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs. - -## Committing a Block - -> **+2/3 is short for "more than 2/3"** - -A block is committed when +2/3 of the validator set sign -[precommit votes](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#vote) -for that block at the same `round`. -The +2/3 set of precommit votes is called a -[commit](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#commit). -While any +2/3 set of precommits for the same block at the same height&round can serve as -validation, the canonical commit is included in the next block (see -[LastCommit](https://github.com/cometbft/cometbft/blob/main/spec/core/data_structures.md#block)). diff --git a/docs/explanation/data-companion/README.md b/docs/explanation/data-companion/README.md new file mode 100644 index 00000000000..32f97388942 --- /dev/null +++ b/docs/explanation/data-companion/README.md @@ -0,0 +1,12 @@ +--- +order: false +parent: + title: Data Companion + order: 6 +--- + +# Guides + +- [Introduction](intro.md) +- [gRPC services](grpc.md) +- [Pruning service](pruning.md) diff --git a/docs/data-companion/grpc.md b/docs/explanation/data-companion/grpc.md similarity index 99% rename from docs/data-companion/grpc.md rename to docs/explanation/data-companion/grpc.md index 42bdca29859..b34d0d3ae4a 100644 --- a/docs/data-companion/grpc.md +++ b/docs/explanation/data-companion/grpc.md @@ -213,4 +213,4 @@ to effectively influence the pruning of blocks and state, ABCI results (if enabl indexer data on the node. For a comprehensive understanding of the pruning service, please see the document -[Pruning service](./pruning.md). +[Pruning service](pruning.md). diff --git a/docs/data-companion/intro.md b/docs/explanation/data-companion/intro.md similarity index 90% rename from docs/data-companion/intro.md rename to docs/explanation/data-companion/intro.md index c8144542630..eba44b7b303 100644 --- a/docs/data-companion/intro.md +++ b/docs/explanation/data-companion/intro.md @@ -22,5 +22,5 @@ indexer data and transaction indexer data. By also using the new gRPC services, it's possible now to retrieve data from the node, such as `block` and `block results` in a more efficient way. -The [gRPC services](./grpc.md) document provides practical information and insights that will guide you through the +The [gRPC services](grpc.md) document provides practical information and insights that will guide you through the process of using these services in order to create a Data Companion service. 
diff --git a/docs/data-companion/pruning.md b/docs/explanation/data-companion/pruning.md
similarity index 100%
rename from docs/data-companion/pruning.md
rename to docs/explanation/data-companion/pruning.md
diff --git a/docs/introduction/README.md b/docs/explanation/introduction/README.md
similarity index 94%
rename from docs/introduction/README.md
rename to docs/explanation/introduction/README.md
index 1c2b5850b38..b3179b50163 100644
--- a/docs/introduction/README.md
+++ b/docs/explanation/introduction/README.md
@@ -193,17 +193,16 @@ response messages.
 The messages are specified here:
 [ABCI Message Types](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto).
 
-The **DeliverTx** message is the work horse of the application. Each
-transaction in the blockchain is delivered with this message. The
+The **FinalizeBlock** message is the workhorse of the application. Each
+transaction in the blockchain is finalized within this message. The
 application needs to validate each transaction received with the
-**DeliverTx** message against the current state, application protocol,
-and the cryptographic credentials of the transaction. A validated
-transaction then needs to update the application state — by binding a
-value into a key values store, or by updating the UTXO database, for
-instance.
-
-The **CheckTx** message is similar to **DeliverTx**, but it's only for
-validating transactions. CometBFT's mempool first checks the
+**FinalizeBlock** message against the current state, application protocol,
+and the cryptographic credentials of the transaction. **FinalizeBlock** only
+prepares the update to be made and does not change the state of the application.
+The state change is actually committed at a later stage, i.e., in the commit phase.
+
+The **CheckTx** message is used for validating transactions.
+CometBFT's mempool first checks the
 validity of a transaction with **CheckTx**, and only relays valid
 transactions to its peers. For instance, an application may check an
 incrementing sequence number in the transaction and return an error upon
@@ -220,17 +219,17 @@ lightweight clients, as Merkle-hash proofs can be verified by checking
 against the block hash, and that the block hash is signed by a quorum.
 
 There can be multiple ABCI socket connections to an application.
-CometBFT creates three ABCI connections to the application; one
-for the validation of transactions when broadcasting in the mempool, one
-for the consensus engine to run block proposals, and one more for
-querying the application state.
+CometBFT creates four ABCI connections to the application; one
+for the validation of transactions when broadcasting in the mempool, one for
+the consensus engine to run block proposals, one for creating snapshots of the
+application state, and one more for querying the application state.
 
 It's probably evident that application designers need to very carefully
 design their message handlers to create a blockchain that does anything
 useful but this architecture provides a place to start. The diagram
 below illustrates the flow of messages via ABCI.
 
-![abci](../imgs/abci.png)
+![abci](../../imgs/abci.png)
 
 ## A Note on Determinism
 
@@ -265,7 +264,7 @@ CometBFT adopts [Tendermint consensus][tendermint-paper],
 an easy-to-understand, mostly asynchronous, BFT consensus algorithm.
The algorithm follows a simple state machine that looks like this:
 
-![consensus-logic](../imgs/consensus_logic.png)
+![consensus-logic](../../imgs/consensus_logic.png)
 
 Participants in the algorithm are called **validators**; they take turns
 proposing blocks of transactions and voting on them. Blocks are
diff --git a/docs/guides/README.md b/docs/guides/README.md
index b563396313a..adade07b8b9 100644
--- a/docs/guides/README.md
+++ b/docs/guides/README.md
@@ -1,12 +1,21 @@
 ---
-order: false
+order: 2
+title: CometBFT How-to Guides
+description: How-to Guides
 parent:
-  order: 2
+  order: 2
 ---
 
-# Guides
+# How-To Guides
 
-- [Installing CometBFT](./install.md)
-- [Quick-start using CometBFT](./quick-start.md)
-- [Creating a built-in application in Go](./go-built-in.md)
-- [Creating an external application in Go](./go.md)
+The CometBFT How-To Guides are a resource center that provides users with comprehensive guides
+on how to develop applications and use essential tools. The guides are suitable for developers, system administrators,
+and blockchain enthusiasts who want to gain practical skills and knowledge in distributed
+systems and blockchain technology using CometBFT.
+
+The CometBFT How-To Guides are designed to provide step-by-step instructions and practical
+examples to help users acquire real-world experience while learning.
+
+- [Application Development](./app-dev/README.md)
+- [Tools](./tools/README.md)
+- [Upgrading Guides](./upgrades/README.md)
diff --git a/docs/guides/app-dev/README.md b/docs/guides/app-dev/README.md
new file mode 100644
index 00000000000..d977e2fb35b
--- /dev/null
+++ b/docs/guides/app-dev/README.md
@@ -0,0 +1,13 @@
+---
+order: false
+parent:
+  order: 3
+---
+
+# Apps
+
+- [Using ABCI-CLI](abci-cli.md)
+- [Getting Started](getting-started.md)
+- [Indexing transactions](indexing-transactions.md)
+- [Application Architecture Guide](app-architecture.md)
+- [Mempool Lanes](mempool-lanes.md)
diff --git a/docs/app-dev/abci-cli.md b/docs/guides/app-dev/abci-cli.md
similarity index 79%
rename from docs/app-dev/abci-cli.md
rename to docs/guides/app-dev/abci-cli.md
index 17aa4f2bf2e..c38da3372e7 100644
--- a/docs/app-dev/abci-cli.md
+++ b/docs/guides/app-dev/abci-cli.md
@@ -1,5 +1,5 @@
 ---
-order: 2
+order: 3
 ---
 
 # Using ABCI-CLI
@@ -62,51 +62,10 @@ The most important messages are `deliver_tx`, `check_tx`, and
 `commit`, but there are others for convenience, configuration, and
 information purposes.
 
-We'll start a kvstore application, which was installed at the same time
-as `abci-cli` above. The kvstore just stores transactions in a merkle
-tree. Its code can be found
-[here](https://github.com/cometbft/cometbft/blob/main/abci/cmd/abci-cli/abci-cli.go)
-and looks like the following:
-
-```go
-func cmdKVStore(cmd *cobra.Command, args []string) error {
-	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
-
-	// Create the application - in memory or persisted to disk
-	var app types.Application
-	if flagPersist == "" {
-		var err error
-		flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
-		if err != nil {
-			return err
-		}
-	}
-	app = kvstore.NewPersistentKVStoreApplication(flagPersist)
-	app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
-
-	// Start the listener
-	srv, err := server.NewServer(flagAddress, flagAbci, app)
-	if err != nil {
-		return err
-	}
-	srv.SetLogger(logger.With("module", "abci-server"))
-	if err := srv.Start(); err != nil {
-		return err
-	}
-
-	// Stop upon receiving SIGTERM or CTRL-C.
-	tmos.TrapSignal(logger, func() {
-		// Cleanup
-		if err := srv.Stop(); err != nil {
-			logger.Error("Error while stopping server", "err", err)
-		}
-	})
-
-	// Run forever.
-	select {}
-}
-
-```
+We'll start a kvstore application, which was installed at the same time as
+`abci-cli` above. The kvstore just stores transactions in a Merkle tree. Its
+code can be found
+[here](https://github.com/cometbft/cometbft/blob/main/abci/example/kvstore/kvstore.go).
 
 Start the application by running:
 
@@ -184,11 +143,11 @@ Try running these commands:
 
 > process_proposal "abc==456"
 -> code: OK
--> status: REJECT
+-> status: PROCESS_PROPOSAL_STATUS_REJECT
 
 > process_proposal "abc=123"
 -> code: OK
--> status: ACCEPT
+-> status: PROCESS_PROPOSAL_STATUS_ACCEPT
 
 > finalize_block "abc=123"
 -> code: OK
@@ -240,11 +199,11 @@ You could put the commands in a file and run
 
 Note that the `abci-cli` is designed strictly for testing and debugging.
 In a real deployment, the role of sending messages is taken by CometBFT, which
-connects to the app using three separate connections, each with its own
+connects to the app using four separate connections, each with its own
 pattern of messages.
 
 For examples of running an ABCI app with CometBFT, see the
-[getting started guide](./getting-started.md).
+[getting started guide](getting-started.md).
 
 ## Bounties
diff --git a/docs/app-dev/app-architecture.md b/docs/guides/app-dev/app-architecture.md
similarity index 96%
rename from docs/app-dev/app-architecture.md
rename to docs/guides/app-dev/app-architecture.md
index 97ebf502cf2..a8b4fd41603 100644
--- a/docs/app-dev/app-architecture.md
+++ b/docs/guides/app-dev/app-architecture.md
@@ -1,5 +1,5 @@
 ---
-order: 3
+order: 4
 ---
 
 # Application Architecture Guide
@@ -51,5 +51,5 @@ See the following for more extensive documentation:
 - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1617) (legacy/deprecated)
 - [CometBFT RPC Docs](https://docs.cometbft.com/main/rpc/)
-- [CometBFT in Production](../core/running-in-production.md)
+- [CometBFT in Production](../../explanation/core/running-in-production.md)
 - [ABCI spec](https://github.com/cometbft/cometbft/tree/main/spec/abci)
diff --git a/docs/app-dev/getting-started.md b/docs/guides/app-dev/getting-started.md
similarity index 95%
rename from docs/app-dev/getting-started.md
rename to docs/guides/app-dev/getting-started.md
index 94076287224..d576f5b52a0 100644
--- a/docs/app-dev/getting-started.md
+++ b/docs/guides/app-dev/getting-started.md
@@ -1,5 +1,5 @@
 ---
-order: 1
+order: 2
 ---
 
 # Getting Started
@@ -11,7 +11,7 @@ application you want to run.
 So, to run a complete blockchain that does something useful, you must
 start two programs: one is CometBFT, the other is your application,
 which can be written in any programming language. Recall from [the intro to
-ABCI](../introduction/what-is-cometbft.md#abci-overview) that CometBFT
+ABCI](../../explanation/introduction/what-is-cometbft.md#abci-overview) that CometBFT
 handles all the p2p and consensus stuff, and just forwards transactions to
 the application when they need to be validated, or when they're ready
 to be executed and committed.
@@ -92,7 +92,7 @@
 abci-cli kvstore
 
 In another terminal, we can start CometBFT. You should already have the
 CometBFT binary installed. If not, follow the steps from
-[here](../introduction/install.md). If you have never run CometBFT
+[here](../../explanation/introduction/install.md).
If you have never run CometBFT before, use: ```sh @@ -103,7 +103,7 @@ cometbft node If you have used CometBFT, you may want to reset the data for a new blockchain by running `cometbft unsafe-reset-all`. Then you can run `cometbft node` to start CometBFT, and connect to the app. For more -details, see [the guide on using CometBFT](../core/using-cometbft.md). +details, see [the guide on using CometBFT](../../explanation/core/using-cometbft.md). You should see CometBFT making blocks! We can get the status of our CometBFT node as follows: diff --git a/docs/app-dev/indexing-transactions.md b/docs/guides/app-dev/indexing-transactions.md similarity index 64% rename from docs/app-dev/indexing-transactions.md rename to docs/guides/app-dev/indexing-transactions.md index a2ca5a5b18a..bc40506974e 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/guides/app-dev/indexing-transactions.md @@ -1,12 +1,12 @@ --- -order: 6 +order: 5 --- # Indexing Transactions CometBFT allows you to index transactions and blocks and later query or -subscribe to their results. Transactions are indexed by `ResponseFinalizeBlock.tx_results.events` and -blocks are indexed by `ResponseFinalizeBlock.events`. However, transactions +subscribe to their results. Transactions are indexed by `FinalizeBlockResponse.tx_results.events` and +blocks are indexed by `FinalizeBlockResponse.events`. However, transactions are also indexed by a primary key which includes the transaction hash and maps to and stores the corresponding transaction results. Blocks are indexed by a primary key which includes the block height and maps to and stores the block height, i.e. @@ -14,11 +14,7 @@ the block itself is never stored. Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more -details on `Events`, see the - -[ABCI](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_basic_concepts.md#events) - -documentation. +details on `Events`, see the [ABCI][abci-events] documentation. An `Event` has a composite key associated with it. A `compositeKey` is constructed by its type and key separated by a dot. @@ -36,7 +32,7 @@ would be equal to the composite key of `jack.account.number`. By default, CometBFT will index all transactions by their respective hashes and height and blocks by their height. -CometBFT allows for different events within the same height to have +CometBFT allows for different events within the same height to have equal attributes. ## Configuration @@ -74,7 +70,7 @@ entirely in the future. **Implementation and data layout** -The kv indexer stores each attribute of an event individually, by creating a composite key +The kv indexer stores each attribute of an event individually, by creating a composite key with - event type, - attribute key, @@ -83,7 +79,7 @@ with - the height, and - event counter. 
For example the following events:
-
+
 ```
 Type: "transfer",
 Attributes: []abci.EventAttribute{
@@ -92,9 +88,9 @@ Type: "transfer",
 	{Key: "balance", Value: "100", Index: true},
 	{Key: "note", Value: "nothing", Index: true},
 },
-
+
 ```
-
+
 ```
 Type: "transfer",
 Attributes: []abci.EventAttribute{
@@ -105,7 +101,7 @@ Type: "transfer",
 },
 ```
 
-will be represented as follows in the store, assuming these events result from the `FinalizeBlock` call for height 1:
+will be represented as follows in the store, assuming these events result from the `FinalizeBlock` call for height 1:
 
 ```
 Key value
@@ -119,12 +115,12 @@
 transferSenderTomFinalizeBlock12 1
 transferRecepientAliceFinalizeBlock12 1
 transferBalance200FinalizeBlock12 1
 transferNodeNothingFinalizeBlock12 1
-
+
 ```
 
-The event number is a local variable kept by the indexer and incremented when a new event is processed.
-It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height.
+The event number is a local variable kept by the indexer and incremented when a new event is processed.
+It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height.
 This variable is not atomically incremented as event indexing is deterministic. **Should this ever change**, the event id generation
-will be broken.
+will be broken.
 
 #### PostgreSQL
 
@@ -146,6 +142,19 @@
 Example:
 psql ... -f state/indexer/sink/psql/schema.sql
 ```
 
+The schema file uses the standard table names: `blocks`, `tx_results`, `events`, and `attributes`.
+To use custom table names, adapt the schema file **and** configure CometBFT's indexer to use the matching table names.
+
+Example:
+```toml
+[tx-index]
+psql-conn = "your connection string"
+table_blocks = "cometbft_blocks"
+table_tx_results = "cometbft_tx_results"
+table_events = "cometbft_events"
+table_attributes = "cometbft_attributes"
+```
+
 ## Default Indexes
 
 The CometBFT tx and block event indexer indexes a few select reserved events
@@ -175,51 +184,50 @@
 UTF-8 encoded strings (e.g. "transfer.sender": "Bob", "transfer.recipient":
 
 Example:
 
 ```go
-func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
-
+func (app *Application) FinalizeBlock(_ context.Context, req *types.FinalizeBlockRequest) (*types.FinalizeBlockResponse, error) {
 	//...
-	tx_results[0] := &types.ExecTxResult{
-		Code: CodeTypeOK,
-		// With every transaction we can emit a series of events. To make it simple, we just emit the same events.
-		Events: []types.Event{
-			{
-				Type: "app",
-				Attributes: []types.EventAttribute{
-					{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
-					{Key: "key", Value: key, Index: true},
-					{Key: "index_key", Value: "index is working", Index: true},
-					{Key: "noindex_key", Value: "index is working", Index: false},
-				},
-			},
-			{
-				Type: "app",
-				Attributes: []types.EventAttribute{
-					{Key: "creator", Value: "Cosmoshi", Index: true},
-					{Key: "key", Value: value, Index: true},
-					{Key: "index_key", Value: "index is working", Index: true},
-					{Key: "noindex_key", Value: "index is working", Index: false},
-				},
-			},
-		},
-	}
+	tx_results[0] = &types.ExecTxResult{
+		Code: CodeTypeOK,
+		// With every transaction we can emit a series of events. To make it simple, we just emit the same events.
+ Events: []types.Event{ + { + Type: "app", + Attributes: []types.EventAttribute{ + {Key: "creator", Value: "Cosmoshi Netowoko", Index: true}, + {Key: "key", Value: key, Index: true}, + {Key: "index_key", Value: "index is working", Index: true}, + {Key: "noindex_key", Value: "index is working", Index: false}, + }, + }, + { + Type: "app", + Attributes: []types.EventAttribute{ + {Key: "creator", Value: "Cosmoshi", Index: true}, + {Key: "key", Value: value, Index: true}, + {Key: "index_key", Value: "index is working", Index: true}, + {Key: "noindex_key", Value: "index is working", Index: false}, + }, + }, + }, + } block_events = []types.Event{ - { - Type: "loan", - Attributes: []types.EventAttribute{ - { Key: "account_no", Value: "1", Index: true}, - { Key: "amount", Value: "200", Index: true }, - }, - }, - { - Type: "loan", - Attributes: []types.EventAttribute{ - { Key: "account_no", Value: "2", Index: true }, - { Key: "amount", Value: "300", Index: true}, - }, - }, - } - return &types.ResponseFinalizeBlock{TxResults: tx_results, Events: block_events} + { + Type: "loan", + Attributes: []types.EventAttribute{ + {Key: "account_no", Value: "1", Index: true}, + {Key: "amount", Value: "200", Index: true}, + }, + }, + { + Type: "loan", + Attributes: []types.EventAttribute{ + {Key: "account_no", Value: "2", Index: true}, + {Key: "amount", Value: "300", Index: true}, + }, + }, + } + return &types.FinalizeBlockResponse{TxResults: tx_results, Events: block_events, NextBlockDelay: 1 * time.Second}, nil } ``` @@ -264,24 +272,44 @@ You can query for a paginated set of blocks by their events by calling the `/block_search` RPC endpoint: ```bash -curl "localhost:26657/block_search?query=\"block.height > 10 AND val_set.num_changed > 0\"" +curl "localhost:26657/block_search?query=\"block.height > 10\"" ``` -Storing the event sequence was introduced in CometBFT 0.34.26. Before that, up until Tendermint Core 0.34.26, -the event sequence was not stored in the kvstore and events were stored only by height. That means that queries -returned blocks and transactions whose event attributes match within the height but can match across different -events on that height. -This behavior was fixed with CometBFT 0.34.26+. However, if the data was indexed with earlier versions of -Tendermint Core and not re-indexed, that data will be queried as if all the attributes within a height -occurred within the same event. +Storing the event sequence was introduced in CometBFT 0.34.26. Before that, up +until Tendermint Core 0.34.26, the event sequence was not stored in the kvstore +and events were stored only by height. That means that queries returned blocks +and transactions whose event attributes match within the height but can match +across different events on that height. + +This behavior was fixed with CometBFT 0.34.26+. However, if the data was +indexed with earlier versions of Tendermint Core and not re-indexed, that data +will be queried as if all the attributes within a height occurred within the +same event. ## Event attribute value types -Users can use anything as an event value. However, if the event attribute value is a number, the following needs to be taken into account: +Users can use anything as an event value. However, if the event attribute value +is a number, the following needs to be taken into account: - Negative numbers will not be properly retrieved when querying the indexer. -- Event values are converted to big floats (from the `big/math` package). 
The precision of the floating point number is set to the bit length
-of the integer it is supposed to represent, so that there is no loss of information due to insufficient precision. This was not present before CometBFT v0.38.x and all float values were ignored.
+- Event values are converted to big floats (from the `math/big` package). The
+  precision of the floating point number is set to the bit length of the
+  integer it is supposed to represent, so that there is no loss of information
+  due to insufficient precision. This was not present before CometBFT v0.38.x
+  and all float values were ignored.
 - As of CometBFT v0.38.x, queries can contain floating point numbers as well.
-- Note that comparing to floats can be imprecise with a high number of decimals.
\ No newline at end of file
+- Note that comparing to floats can be imprecise with a high number of decimals.
+
+## Event type and attribute key format
+
+An event type/attribute key is a string that can contain any Unicode letter or
+digit, as well as the following characters: `.` (dot), `-` (dash), `_`
+(underscore). The event type/attribute key must not start with `-` (dash) or
+`.` (dot).
+
+```
+^[\w][\w.\-]*$
+```
+
+[abci-events]: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_basic_concepts.md#events
diff --git a/docs/guides/app-dev/mempool-lanes.md b/docs/guides/app-dev/mempool-lanes.md
new file mode 100644
index 00000000000..96838d03547
--- /dev/null
+++ b/docs/guides/app-dev/mempool-lanes.md
@@ -0,0 +1,163 @@
+# Mempool Lanes
+
+The Lanes feature allows applications to classify and prioritize transactions, providing Quality
+of Service guarantees in the mempool ([ADR-118][adr]).
+
+This guide provides instructions, along with a set of best practices and rules of thumb, to help
+you set up and use Lanes within your application.
+
+## How to set up lanes in an application
+
+Application developers that want to make use of Lanes must pre-define a list of lane names and their
+priorities and then populate specific fields in `Info` and `CheckTx` responses. If the application
+does not use Lanes, no code modifications are necessary.
+
+We will illustrate this with an example taken from our implementation of Lanes in the `kvstore` application
+(in `abci/example/kvstore/kvstore.go`).
+
+### Define lanes and their priorities
+
+First, the application should keep a list of lane IDs (of type `string`) and their priorities (of
+type `uint32`). In this example we store them as a map in the `Application` struct, and we define as a
+constant the lane ID used as the default when assigning lanes to transactions.
+
+```go
+const defaultLane = "C"
+
+type Application struct {
+	...
+	lanePriorities map[string]uint32
+}
+
+func NewApplication(...) *Application {
+	...
+	return &Application{
+		...
+		lanePriorities: map[string]uint32{
+			"A": 100,
+			"B": 50,
+			defaultLane: 10,
+			"D": 1,
+		},
+	}
+}
+```
+
+### Handling Info requests
+
+When a CometBFT node starts, it performs a handshake with the application by sending an `Info`
+request. This process allows CometBFT to retrieve essential data from the application to initialize
+and configure itself.
+
+Upon receiving an `Info` request, the application must reply with the lane IDs and
+their priorities in the `LanePriorities` field, and the default lane in the `DefaultLane` field. The
+default lane ID must be a key in the map `LanePriorities`.
+
+```go
+func (app *Application) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) {
+	...
+	return &types.InfoResponse{
+		...
+		LanePriorities: app.lanePriorities,
+		DefaultLane:    defaultLane,
+	}, nil
+}
+```
+
+If the application does not populate these fields, CometBFT will use a single, default lane.
+
+### Handling CheckTx requests
+
+Upon receiving a `CheckTx` request for validating a transaction, the application must reply with the
+lane that it assigns to the transaction in the `LaneId` field. The mempool will only use the lane if
+(1) the transaction is valid and (2) the transaction is being validated for the first time at the
+local node, that is, when `req.Type` equals `types.CHECK_TX_TYPE_CHECK` (not when rechecking).
+Otherwise, the mempool will ignore the `LaneId` field.
+
+```go
+func (app *Application) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) {
+	...
+	laneID := assignLane(req.Tx)
+	return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1, LaneId: laneID}, nil
+}
+```
+In this example, `assignLane` is a deterministic function that, given the content of a transaction,
+returns a valid lane ID. The lane ID must be one of the keys in the `app.lanePriorities` map, and it
+may be the default lane if no other lane is chosen to be assigned.
+
+## Best practices
+
+### Transaction classification and ordering
+
+- **Independent transactions**: Transactions can only be classified into different lanes if they are
+  independent of each other. If there is a relationship or dependency between transactions (e.g.,
+  transaction A must be executed before transaction B), both must be placed in the same lane.
+  Failing to do so may result in an incorrect ordering, where B could be processed and executed
+  before A.
+- **Ordering across lanes**: Transactions in separate lanes are not guaranteed to maintain the order
+  in which they are processed and disseminated to other nodes. Developers should be aware that
+  classification in lanes can result in transactions being committed to different blocks and executed
+  in a different order.
+- **Immutable lane assignment**: Once a transaction is assigned to a lane upon entering the mempool,
+  its lane cannot be changed, even during rechecking.
+- **Execution timing**: The time gap between the execution of two transactions is unpredictable,
+  especially if they are in lanes with significantly different priority levels.
+
+### Number of lanes
+
+- **One lane minimum**: Setting up one lane replicates the behavior of the mempool before lanes were
+  introduced. The same behavior is obtained when the application does not set up lanes: the mempool
+  will assign all transactions to a single, default lane. This is transparent to users.
+- **Start small**: We recommend starting with fewer than 5 or 10 lanes and testing them thoroughly on a
+  testnet. You can gradually introduce more lanes as necessary once performance and behavior are
+  validated.
+- **Constraints**: Lanes are identified by strings. In theory, there is no limit to the number of
+  lanes that can be defined. However, keep in mind that both memory and CPU usage will increase in
+  proportion to the number of lanes.
+
+### Lane priorities
+
+- **Priority values**: Lane priorities are values of type `uint32`. Valid priorities range from 1 to
+  `math.MaxUint32`. Priority 0 is reserved for cases where there are no lanes to assign, such as
+  invalid transactions or applications that do not utilize lanes.
However, if the application + returns an empty `lane_id` on `CheckTx`, the mempool will assign the default lane as specified in + `InfoResponse`. +- **Fair scheduling**: Lanes implement a scheduling algorithm for picking transactions + for dissemination to peers and for creating blocks. The algorithm is designed to be + _starvation-free_, ensuring that even transactions from lower-priority lanes will eventually be + disseminated and included in blocks. It is also _fair_, because it picks transactions across all + lanes by interleaving them when possible. +- **Equal priorities**: Multiple lanes are allowed to have the same priority. This could help + prevent one class of transaction monopolizing the entire mempool. When lanes share the same + priority, the order in which they are processed is undefined. However, transactions within the + same lane are locally treated in FIFO order as usual. + +### Lane capacity + +- **Capacity distribution**: The mempool's capacity is divided evenly among the lanes, with each + lane's capacity being constrained by both the number of transactions and the total transaction + size in bytes. Once either limit is reached, no further transactions will be accepted into that + lane. +- **Preventing spam**: Lane capacity helps mitigate the risk of large transactions flooding the + network. For optimal performance, large transactions should be assigned to lower-priority lanes + whenever possible. +- **Adjusting capacities**: If you find that the capacity of a lane is insufficient, you still have + the option of increasing the total mempool size, which will proportionally increase the capacity + of all lanes. In future releases, we may introduce more granular control over lane capacities if + needed. + +### Network setup + +- **Limited resources**: Lanes are especially useful in networks with constrained resources, such as + block size, mempool capacity, or network throughput. In such environments, lanes ensure + higher-priority transactions will be prioritized during dissemination and block inclusion. In + networks without these limitations, lanes will not significantly affect the behavior compared to + nodes that do not implement lanes. +- **Consistent setup**: To fully benefit from lanes, all nodes in the network should implement the + same lanes configuration and transaction classification logic. If some nodes do not support lanes, + the benefits of lane prioritization will not be observed, because transaction ordering during + dissemination and processing will be inconsistent across nodes. While mixing nodes with and + without lanes does not affect network correctness, consistent lane configuration is strongly + recommended for improved performance and consistent behavior. + +[adr]: ../../../docs/references/architecture/adr-118-mempool-lanes.md \ No newline at end of file diff --git a/docs/tools/README.md b/docs/guides/tools/README.md similarity index 52% rename from docs/tools/README.md rename to docs/guides/tools/README.md index de29e17f122..068b927ffa5 100644 --- a/docs/tools/README.md +++ b/docs/guides/tools/README.md @@ -2,19 +2,19 @@ order: 1 parent: title: Tools - order: 6 + order: 5 --- # Overview CometBFT has some tools that are associated with it for: -- [Debugging](./debugging.md) +- [Debugging](debugging.md) - [Benchmarking](#benchmarking) ## Benchmarking -- +- -`tm-load-test` is a distributed load testing tool (and framework) for load +`cometbft-load-test` is a distributed load testing tool (and framework) for load testing CometBFT networks. 
diff --git a/docs/tools/debugging.md b/docs/guides/tools/debugging.md
similarity index 98%
rename from docs/tools/debugging.md
rename to docs/guides/tools/debugging.md
index 69a73fa9503..719731f4abb 100644
--- a/docs/tools/debugging.md
+++ b/docs/guides/tools/debugging.md
@@ -102,4 +102,4 @@ The list of available RPC endpoints can be found by making a request to the RPC
 For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to `http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.
 
-Additional information on the CometBFT RPC endpoints can be found in the [rpc documentation](https://docs.cometbft.com/master/rpc).
+Additional information on the CometBFT RPC endpoints can be found in the [rpc documentation](https://docs.cometbft.com/v1.0/rpc/).
diff --git a/docs/guides/tools/proposer-based-timestamps-runbook.md b/docs/guides/tools/proposer-based-timestamps-runbook.md
new file mode 100644
index 00000000000..0fb499d684c
--- /dev/null
+++ b/docs/guides/tools/proposer-based-timestamps-runbook.md
@@ -0,0 +1,263 @@
+---
+order: 3
+---
+
+# Proposer-Based Timestamps Runbook
+
+From version `v1.0`, CometBFT has new constraints for the timestamps included
+in produced blocks.
+
+Depending on the configuration of a validator's local clock, these constraints
+mean that validators may fail to produce valid blocks, causing other validators
+to issue `nil` prevotes and thus reject the proposed block.
+
+## What is this document for?
+
+This document provides a set of actionable steps for application developers and
+node operators to diagnose and fix issues related to clock synchronization and
+configuration of the [`SynchronyParams`](../../explanation/core/proposer-based-timestamps.md#consensus-parameters)
+consensus parameters.
+
+Use this runbook if you observe that validators are frequently voting `nil` for a block that the rest
+of the network votes for, or if validators are frequently producing block proposals
+that are rejected by the rest of the network.
+
+## Requirements
+
+To use this runbook, you must be running a node that has the [Prometheus metrics endpoint enabled](../../explanation/core/metrics.md)
+and the [RPC endpoint](../../explanation/core/rpc.md) enabled and accessible.
+
+It is strongly recommended to also run a Prometheus metrics collector to gather and
+analyze metrics from the CometBFT node.
+
+## Debugging a Single Node
+
+If you observe that a single validator is frequently failing to produce blocks or
+voting `nil` for proposals that other validators vote for and suspect it may be
+related to clock synchronization, use the following steps to debug and correct the issue.
+
+### Check Timely Metric
+
+CometBFT exposes a histogram metric with the difference between the timestamp in the proposal
+and the time read from the node's local clock when the proposal is received.
+
+The histogram exposes multiple metrics on the Prometheus `/metrics` endpoint:
+
+* `consensus_proposal_timestamp_difference_bucket`
+* `consensus_proposal_timestamp_difference_sum`
+* `consensus_proposal_timestamp_difference_count`
+
+Each metric is also labeled with the key `is_timely`, which can have a value of
+`true` or `false`.
+When `is_timely="true"`, the timestamp in the proposal was accepted by the node.
+When `is_timely="false"`, the timestamp in the proposal was rejected by the node,
+which, as a result, prevoted `nil` for the proposal.
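+
+In addition to the ratio query shown in the next section, a quick way to watch
+for clock problems is the rate at which untimely proposals are observed. As a
+sketch (the `5m` range window is an assumption; adjust it to your scrape
+interval):
+
+```
+rate(consensus_proposal_timestamp_difference_count{is_timely="false"}[5m])
+```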
+
+#### From the Prometheus Collector UI
+
+If you are running a Prometheus collector, navigate to the query web interface and select the 'Graph' tab.
+
+Issue a query for the following:
+
+```
+consensus_proposal_timestamp_difference_count{is_timely="false"} /
+consensus_proposal_timestamp_difference_count{is_timely="true"}
+```
+
+This query will graph the ratio of proposals the node considered untimely to those it
+considered timely. If the ratio is increasing, it means that your node is consistently
+seeing more proposals that are far from its local clock. If this is the case, you should
+check to make sure your local clock is properly synchronized to NTP.
+
+#### From the `/metrics` URL
+
+If you are not running a Prometheus collector, navigate to the `/metrics` endpoint
+exposed on the Prometheus metrics port with `curl` or a browser.
+
+Search for the `consensus_proposal_timestamp_difference_count` metric.
+This metric is labeled with `is_timely`. Investigate the value of
+`consensus_proposal_timestamp_difference_count` where `is_timely="false"`
+and where `is_timely="true"`. Refresh the endpoint and observe if the value of `is_timely="false"`
+is growing.
+
+If you observe that `is_timely="false"` is growing, it means that your node is consistently
+seeing proposals that are far from its local clock. If this is the case, you should check
+to make sure your local clock is properly synchronized to NTP.
+
+## Debugging a Network
+
+If you observe that a network is frequently failing to produce blocks and suspect
+it may be related to clock synchronization, use the following steps to debug and correct the issue.
+
+### Check Prevote Message Delay
+
+CometBFT exposes metrics that help determine how synchronized the clocks on a network are.
+
+These metrics are visible on the Prometheus `/metrics` endpoint and are called:
+
+* `consensus_quorum_prevote_delay`
+* `consensus_full_prevote_delay`
+
+These metrics calculate the difference between the timestamp in the proposal message and
+the timestamp of a prevote that was issued during consensus.
+
+The `consensus_quorum_prevote_delay` metric is the interval in seconds
+between the proposal timestamp and the timestamp of the earliest prevote that
+achieved a `2/3+` quorum during the prevote step.
+
+The `consensus_full_prevote_delay` metric is the interval in seconds
+between the proposal timestamp and the timestamp of the latest prevote in a round
+where 100% of the validators voted.
+
+#### From the Prometheus Collector UI
+
+If you are running a Prometheus collector, navigate to the query web interface and select the 'Graph' tab.
+
+Issue a query for the following:
+
+```
+sum(consensus_quorum_prevote_delay) by (proposer_address)
+```
+
+This query will graph the difference in seconds for each proposer on the network.
+
+If the value is much larger for some proposers, then the issue is likely related to the clock
+synchronization of their nodes. Contact those proposers and ensure that their nodes
+are properly connected to NTP using the steps for [Debugging a Single Node](#debugging-a-single-node).
+
+If the value is relatively similar for all proposers, you should next compare this
+value to the `SynchronyParams` values for the network. Continue to the
+[Checking Synchrony](#checking-synchronyparams) steps.
+
+#### From the `/metrics` URL
+
+If you are not running a Prometheus collector, navigate to the `/metrics` endpoint
+exposed on the Prometheus metrics port.
+
+Search for the `consensus_quorum_prevote_delay` metric.
There will be one +entry of this metric for each `proposer_address`. If the value of this metric is +much larger for some proposers, then the issue is likely related to synchronization of their +nodes with NTP. Contact those proposers and ensure that their nodes are properly connected +to NTP using the steps for [Debugging a Single Node](#debugging-a-single-node). + +If the values are relatively similar for all proposers, +you'll need to compare this value to the `SynchronyParams` for the network. Continue +to the [Checking Synchrony](#checking-synchronyparams) steps. + +## Checking Clock Sync + +NTP configuration and tooling is very specific to the operating system and distribution +that your validator node is running. This guide assumes you have `timedatectl` installed with +[`systemd-timesyncd`](https://www.freedesktop.org/software/systemd/man/latest/systemd-timesyncd.service.html), +which provides a simple NTP client, or the more complete +[chrony](https://chrony.tuxfamily.org/), a popular tool for interacting with time +synchronization on Linux distributions. If you are using an operating system or +distribution with a different time synchronization mechanism, please consult the +documentation for your operating system to check the status and re-synchronize the daemon. + +### Check if NTP is Enabled + +```shell +timedatectl +``` + +From the output, ensure that `NTP service` is `active`. If `NTP service` is `inactive`, run: + +```shell +timedatectl set-ntp true +``` + +Re-run the `timedatectl` command and verify that the change has taken effect. + +### Check if Your NTP Daemon is Synchronized + +We provide two examples here, for `chrony` and `timesync`, but these steps +should be adapted if you are using a different daemon. + +If you find that the NTP is not synchronizing, remember to allow NTP traffic +(123/UDP) to your NTP servers. + +#### `chrony` + +Check the status of your `chrony` daemon by running the following command: + +```shell +chronyc tracking +``` + +If the `chrony` daemon is running, you will see output that indicates its current status. +If the `chrony` daemon is not running, restart it and re-run `chronyc tracking`. + +The `System time` field of the response should show a value that is much smaller than 100 +milliseconds. + +If the value is very large, restart the `chronyd` daemon. + +#### `timesync` + +If you are using `systemd-timesyncd`, then execute the following command: + +```shell +timedatectl timesync-status --monitor +``` + +If the output indicates an error, restart the service by running + +```shell +timedatectl set-ntp false +timedatectl set-ntp true +``` + +Once running, the output should include a `Packet count`, indicating how many times the protocol +has been executed, and a small `Precision` value. +Observe that this daemon increases the polling interval over time, up to a limit. +You may want to decrease the maximum value of the polling interval by tweaking +the `/etc/systemd/timesyncd.conf` file. + +## Checking SynchronyParams + +To determine the currently configured `SynchronyParams` for your network, issue a +request to your node's RPC endpoint. 
For a node running locally with the RPC server +exposed on port `26657`, run the following command: + +```shell +curl localhost:26657/consensus_params +``` + +The json output will contain a field named `synchrony`, with the following structure: + +```json +{ + "precision": "505000000", + "message_delay": "12000000000" +} +``` + +The `precision` and `message_delay` values returned are listed in nanoseconds: +In the examples above, the precision is 505ms and the message delay is 12s. +Remember, `consensus_quorum_prevote_delay` is listed in seconds. +If the `consensus_quorum_prevote_delay` value approaches the sum of `precision` and `message_delay`, +then the value selected for these parameters is too small. Your application will +need to be modified to update the `SynchronyParams` to have larger values. + +Note that the `message_delay` adopted by CometBFT +[increases over rounds](../../explanation/core/proposer-based-timestamps.md#adaptive-messagedelay), +so that the chain does not block forever when it is set to an improper value. +However, if the standard `message_delay`, used in round 0, is too small, there +is an important performance impact, and the value of this parameter should be +updated in order to be aligned with actual message delays in the network. + +### Updating SynchronyParams + +The `SynchronyParams` are Consensus Parameters, which means they are the same +for all nodes in the network and are set and updated +by the application running alongside CometBFT. Updates to these parameters must +be passed to the application during the `FinalizeBlock` ABCI method call. + +If the application was built using the CosmosSDK, then these parameters can be updated +programmatically using a governance proposal. For more information, see the +[CosmosSDK documentation](https://docs.cosmos.network/v0.50/build/modules/gov#proposal-submission). + +If the application does not implement a way to update the consensus parameters +programmatically, then the application itself must be updated to do so. More information on updating +the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#finalizeblock). diff --git a/docs/guides/upgrades/README.md b/docs/guides/upgrades/README.md new file mode 100644 index 00000000000..74a4b0c64e8 --- /dev/null +++ b/docs/guides/upgrades/README.md @@ -0,0 +1,14 @@ +--- +order: false +parent: + order: 4 +--- + +# Upgrading Guides + +Our upgrading guides offer precise, step-by-step instructions to facilitate the +transition to new versions or configurations of CometBFT. + +Follow these structured guidelines to ensure a successful and efficient upgrade process. + +- [Upgrading from CometBFT v0.38.x to v1.0](v0.38-to-v1.0.md) diff --git a/docs/guides/upgrades/v0.38-to-v1.0.md b/docs/guides/upgrades/v0.38-to-v1.0.md new file mode 100644 index 00000000000..d47213a0cf0 --- /dev/null +++ b/docs/guides/upgrades/v0.38-to-v1.0.md @@ -0,0 +1,605 @@ +--- +order: 4 +title: CometBFT v0.38.x to v1.0 +--- + +# Upgrading from CometBFT v0.38.x to v1.0 + +This guide outlines the key updates and adjustments needed when upgrading from CometBFT +v0.38.x to v1.0. It highlights significant changes and provides references to additional +documentation for a smoother transition. + +The introduction of CometBFT `v1.0` brings numerous new features. This guide outlines the +most significant additions and changes. 
+
+For additional information about changes, please refer to the [CHANGELOG.md](https://github.com/cometbft/cometbft/blob/main/CHANGELOG.md)
+and [UPGRADING.md](https://github.com/cometbft/cometbft/blob/main/UPGRADING.md) documents.
+
+> **NOTE**: It is essential to emphasize that this is a [major version bump](https://github.com/cometbft/cometbft/blob/main/README.md#versioning)
+> upgrade (`v0.38.x` -> `v1.0`). Therefore, a **coordinated upgrade** is necessary. A mixed network with `v0.38.x` and `v1.0` nodes is **not supported**.
+
+## Proposer-Based Timestamps (PBTS)
+
+CometBFT `v1.0` contains a new algorithm for generating and verifying block timestamps
+called Proposer-Based Timestamps (`PBTS`).
+The existing algorithm used in CometBFT releases prior to `v1.0`, called [BFT Time][bft-time], is kept for backwards compatibility.
+
+Upgrading to `v1.0` **does not automatically switch the chain from BFT Time
+to PBTS**; rather, a `ConsensusParam` called `PbtsEnableHeight` was introduced and can be set to a future
+height to transition from BFT Time to PBTS.
+
+This flexible mechanism allows chains to disentangle the upgrade to `v1.0` from the transition
+in the algorithm used for block times.
+
+For further information, please check the [PBTS documentation][pbts-docs].
+
+## ABCI Local Client Mutex
+
+CometBFT's existing `ABCI` local client (used when CometBFT and the application run in the same process) is prevented
+from making concurrent calls to ABCI implementations by a mutex taken by the client's implementation.
+
+In the `v1.0` release, two additional [local ABCI clients have been added](https://github.com/cometbft/cometbft/pull/1141).
+
+- The first adopts one mutex per ABCI connection (consensus connection, mempool connection, etc.), allowing concurrency
+when CometBFT calls methods from different ABCI connections, but still serializing ABCI calls within the same connection.
+
+- The second totally removes mutexes from the ABCI client.
+
+When using either of the new ABCI local clients, the application is now responsible for coordinating concurrent ABCI
+calls in order to prevent race conditions or non-deterministic behavior.
+
+If you are uncertain about how to ensure these guarantees in your application, it is strongly recommended
+to continue using the existing ABCI local client, which relies on a single global mutex.
+
+## Protobuf Changes
+
+Several major changes have been implemented relating to the Protobuf
+definitions:
+
+CometBFT now makes use of the `cometbft.*` Protobuf definitions under
+ [`proto/cometbft`](https://github.com/cometbft/cometbft/tree/main/proto/cometbft).
+This is a breaking change for all users who rely on serialization of the Protobuf
+type paths, such as integrators who serialize CometBFT's Protobuf data types into
+`Any` typed fields. For example, the `tendermint.types.Block` type in CometBFT `v0.38.x` is
+now accessible as `cometbft.types.v1.Block` (see below for details on versioning).
+
+All CometBFT Protobuf packages include a version whose number will be independent of
+the CometBFT version. As mentioned above, the `tendermint.types.Block` type is now available under
+`cometbft.types.v1.Block` - the `v1` in the type path indicates the version of the `types` package
+used by this version of CometBFT.
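+
+For integrators who serialize these types into `Any` fields, the practical
+effect is a change of type URL. A minimal illustration (the constants below
+simply restate the type paths mentioned above; `Any` type URLs in the Cosmos
+ecosystem are typically "/" followed by the full message name):
+
+```go
+// Fully-qualified Protobuf message names as they appear in Any type URLs.
+const (
+	blockTypeURLv038 = "/tendermint.types.Block"  // CometBFT v0.38.x
+	blockTypeURLv1   = "/cometbft.types.v1.Block" // CometBFT v1.0
+)
+```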
The Protobuf definitions that are wire-level
+compatible (but not type
+path-compatible) with CometBFT `v0.34.x`, `v0.37.x`, and `v0.38.x`, where breaking changes were introduced, are available under `v1beta*`-versioned types.
+
+For example:
+
+- The `tendermint.abci.Request` type from CometBFT `v0.34.x` is now available as `cometbft.abci.v1beta1.Request`.
+- The `tendermint.abci.Request` type from CometBFT `v0.37.x` is now available as `cometbft.abci.v1beta2.Request`.
+- The `tendermint.abci.Request` type from CometBFT `v0.38.x` is now available as `cometbft.abci.v1beta3.Request`.
+
+All Go code generated from the `cometbft.*` types is now available under the
+ [`api`](https://github.com/cometbft/cometbft/tree/main/api) directory. This directory is also an independently versioned
+ Go module. This code is still generated using the Cosmos SDK's [gogoproto
+ fork](https://github.com/cosmos/gogoproto).
+
+Several [ABCI-related types were renamed](https://github.com/cometbft/cometbft/pull/1533) in order to align with [Buf
+ guidelines](https://buf.build/docs/best-practices/style-guide/). `Request*`
+ and `Response*` were renamed to `*Request` and `*Response` (e.g.
+ `RequestQuery` was renamed to `QueryRequest`).
+
+See the CometBFT Protobufs [README](https://github.com/cometbft/cometbft/tree/main/proto#readme) section for more details.
+
+## Crypto
+
+### BLS12-381 curve
+
+CometBFT `v1.0` adds [support for BLS12-381](https://github.com/cometbft/cometbft/pull/2765) keys. Since the implementation needs
+`cgo` and brings in new dependencies, a build flag `bls12381` needs to be used if you want to enable it.
+
+## Mempool
+
+### `nop` mempool
+
+CometBFT `v1.0` provides the option of using a `nop` (no-op) mempool which,
+if selected via configuration, turns off all mempool-related functionality in
+CometBFT (e.g. ability to receive transactions, transaction gossip). CometBFT then
+expects the application to manage transactions and provide the transactions to include in a new block when it calls
+the `PrepareProposal` ABCI method; application developers are expected to use some external means
+for disseminating their transactions.
+
+If you want to use the `nop` mempool, change mempool's `type` to `nop` in your `config.toml`
+file:
+
+```toml
+[mempool]
+# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible
+# for storing, disseminating and proposing txs). "create_empty_blocks=false"
+# is not supported.
+type = "nop"
+```
+
+### Internal `CheckTx` Go API changes
+
+The `CheckTx` method of the `Mempool` interface was modified. Note that this interface is
+meant for internal use only, so you should be aware of these changes only if you
+happen to call these methods directly.
+
+`CheckTx`'s signature changed from
+`CheckTx(tx types.Tx, cb func(*abci.ResponseCheckTx), txInfo TxInfo) error` to
+`CheckTx(tx types.Tx, sender p2p.ID) (*abcicli.ReqRes, error)`.
+The method used to take a callback function `cb` to be applied to the
+ABCI `CheckTx` response and a `TxInfo` structure containing a sender.
+Now the sender ID is passed directly and `CheckTx` returns the ABCI response
+of type `*abcicli.ReqRes`, on which one can apply any callback manually.
+
+For example:
+```golang
+reqRes, err := CheckTx(tx, sender)
+// check `err` here
+cb(reqRes.Response.GetCheckTx())
+```
+
+The `*abcicli.ReqRes` structure that `CheckTx` returns has a callback to
+process the response already set (namely, the function `handleCheckTxResponse`).
+The callback will be invoked internally when the response is ready.
We need only
+to wait for it; for example:
+
+```golang
+reqRes, err := CheckTx(tx, sender)
+// check `err` here
+reqRes.Wait()
+```
+
+## RPC
+
+### Versioned API
+
+The RPC API is now versioned, with the existing RPC being available under both the `/`
+path (as in CometBFT `v0.38.x`) and a `/v1` path.
+
+Although invoking methods without specifying the version is still supported for now,
+support will be dropped in future releases and users are encouraged to use the versioned
+approach. For example, instead of `curl localhost:26657/block?height=5`,
+use `curl localhost:26657/v1/block?height=5`.
+
+### Websocket endpoint
+
+The `/websocket` endpoint path is no longer configurable in the client or server.
+Creating an RPC client now takes the form:
+
+```golang
+// The WebSocket endpoint in the following example is assumed to be available
+// at http://localhost:26657/v1/websocket
+rpcClient, err := client.New("http://localhost:26657/v1")
+```
+
+For more information, please see this [PR](https://github.com/cometbft/cometbft/pull/1412).
+
+## Go API Surface Area
+
+### Package Internalization
+
+As per [ADR 109](https://staging-cometbft-docs.netlify.app/staging/references/architecture/adr-109-reduce-go-api-surface.md), the
+following packages that were publicly accessible in CometBFT `v0.38.x` were moved
+into the [internal](https://github.com/cometbft/cometbft/tree/main/internal) directory:
+
+- `blocksync`
+- `consensus`
+- `evidence`
+- `inspect`
+- `libs/async`
+- `libs/autofile`
+- `libs/bits`
+- `libs/clist`
+- `libs/cmap`
+- `libs/events`
+- `libs/fail`
+- `libs/flowrate`
+- `libs/net`
+- `libs/os`
+- `libs/progressbar`
+- `libs/rand`
+- `libs/strings`
+- `libs/tempfile`
+- `libs/timer`
+
+If you depend on any of these packages and wish for them to be made public again, please [submit an issue](https://github.com/cometbft/cometbft/issues/new/choose)
+outlining your use case. We will then assess the most effective way to assist you.
+
+## Legacy gRPC
+
+### Broadcast API removed
+
+CometBFT `v0.38.x` offered simplistic gRPC support with only one method, equivalent to the `/broadcast_tx_commit` endpoint.
+This has been [removed](https://github.com/cometbft/cometbft/pull/659) from CometBFT `v1.0` in favor of
+the new [Data Companion gRPC services](https://docs.cometbft.com/v1.0/explanation/data-companion/grpc).
+
+The following configuration parameters in `config.toml` are no longer valid in `v1.0`:
+
+```toml
+[rpc]
+
+grpc_laddr = ""
+grpc_max_open_connections = 900
+```
+
+## Data Companion API
+
+CometBFT `v1.0` introduces support for a new Data Companion Pull API as specified in
+[ADR-101](https://github.com/cometbft/cometbft/blob/main/docs/references/architecture/adr-101-data-companion-pull-api.md)
+
+### gRPC services
+
+There is a whole new [section](#grpc-section) in the `config.toml` to support the new Data Companion gRPC services.
+
+In the CometBFT `v1.0` configuration file, if the `laddr` parameter in the `[grpc]` section is not specified,
+all the services will be **disabled**. If an address is specified, the node operator can selectively choose
+which individual services should be enabled or disabled.
+
+### Pruning service
+
+CometBFT offers a privileged gRPC endpoint for the pruning service, which is separate from the regular gRPC
+endpoint and requires its own configuration and activation. These "privileged" services have the ability to
+manage the storage on the node.
The new pruning service allows for the removal of older blocks, block results, and block
+and transaction indexed data.
+
+A crucial concept that can impact node pruning is the "retain height." The retain height specifies the height
+from which data can be safely deleted from the node's storage. By taking the retain height into account, nodes can
+efficiently manage their storage usage and ensure that they only retain the necessary data for their operations.
+This is crucial as storage space is a limited resource, and nodes with limited storage may struggle to keep up with
+the blockchain's growth.
+
+To enable (or disable) and control the Pruning Service, please refer to the [gRPC section](#grpc-section) in the `config.toml` file.
+
+Please refer to the [Data Companion API documentation](https://docs.cometbft.com/v1.0/explanation/data-companion/intro) for additional information.
+
+## CLI Subcommands
+
+### Replay and Replay-console removed
+
+The `replay` and `replay-console` subcommands were removed ([\#1170](https://github.com/cometbft/cometbft/pull/1170)).
+
+### New 'key-type' flag
+
+Added [support for all key types](https://github.com/cometbft/cometbft/pull/3258) in the `gen-validator` subcommand.
+
+The `key-type` flag has [also been added](https://github.com/cometbft/cometbft/pull/3517) to other subcommands,
+such as `init`, `unsafe-reset-priv-validator`, `unsafe-reset-all`, and `start`.
+
+## Database
+
+### Pebble Database Support
+
+CometBFT `v1.0` [adds support](https://github.com/cometbft/cometbft/pull/2132) for [Pebble](https://github.com/cockroachdb/pebble) as a database backend.
+
+### Default Database Change
+
+The default database has been changed from `goleveldb` to [`pebbledb`](https://github.com/cockroachdb/pebble).
+
+A default `config.toml` file will have `pebbledb` as the default `db_backend` value.
+
+```toml
+db_backend = "pebbledb"
+```
+
+### Deprecated Databases
+
+CometBFT `v1.0` is upgrading to [cometbft-db v1.0.1](https://github.com/cometbft/cometbft-db/blob/main/CHANGELOG.md#v101),
+which [deprecates](https://github.com/cometbft/cometbft-db/pull/153) `cleveldb` and `boltdb`.
+
+If you are currently using any of these databases, please note that we
+discourage their use, as we plan to discontinue support in future releases.
+
+## Configuration File
+
+Review the `config.toml` official [documentation](https://docs.cometbft.com/v1.0/references/config/config.toml), as several parameters have been
+added, modified, or deprecated.
+
+You may need to add, remove or modify parameters to optimize your node's performance or properly run a node.
+
+### New Parameters
+
+Check for newly introduced parameters in the CometBFT `v1.0` configuration file.
+
+`[consensus]` section
+
+The `peer_gossip_intraloop_sleep_duration` parameter [has been added](https://github.com/cometbft/cometbft/pull/904).
+
+The default value in `v1.0` is `"0s"`:
+
+```toml
+peer_gossip_intraloop_sleep_duration = "0s"
+```
+
+`[grpc]` section
+
+A new `[grpc]` section has been introduced to support all the parameters for the new [Data Companion API services](#data-companion-api):
+
+```toml
+#######################################################
+###       gRPC Server Configuration Options         ###
+#######################################################
+
+#
+# Note that the gRPC server is exposed unauthenticated. It is critical that
+# this server not be exposed directly to the public internet. If this service
+# must be accessed via the public internet, please ensure that appropriate
+# precautions are taken (e.g. fronting with a reverse proxy like nginx with TLS
+# termination and authentication, using DDoS protection services like
+# CloudFlare, etc.).
+#
+
+[grpc]
+
+# TCP or UNIX socket address for the RPC server to listen on. If not specified,
+# the gRPC server will be disabled.
+laddr = ""
+
+#
+# Each gRPC service can be turned on/off, and in some cases configured,
+# individually. If the gRPC server is not enabled, all individual services'
+# configurations are ignored.
+#
+
+# The gRPC version service provides version information about the node and the
+# protocols it uses.
+[grpc.version_service]
+enabled = true
+
+# The gRPC block service returns block information
+[grpc.block_service]
+enabled = true
+
+# The gRPC block results service returns block results for a given height. If no height
+# is given, it will return the block results from the latest height.
+[grpc.block_results_service]
+enabled = true
+
+#
+# Configuration for privileged gRPC endpoints, which should **never** be exposed
+# to the public internet.
+#
+[grpc.privileged]
+# The host/port on which to expose privileged gRPC endpoints.
+laddr = ""
+
+#
+# Configuration specifically for the gRPC pruning service, which is considered a
+# privileged service.
+#
+[grpc.privileged.pruning_service]
+
+# Only controls whether the pruning service is accessible via the gRPC API - not
+# whether a previously set pruning service retain height is honored by the
+# node. See the [storage.pruning] section for control over pruning.
+#
+# Disabled by default.
+enabled = false
+```
+
+`[storage]` section
+
+**Experimental Key Layouts**
+
+The `experimental_db_key_layout` parameter [has been added](https://github.com/cometbft/cometbft/pull/2327/) to the
+configuration file.
+
+The default value is `v1`:
+
+```toml
+# The representation of keys in the database.
+# The current representation of keys in Comet's stores is considered to be v1
+# Users can experiment with a different layout by setting this field to v2.
+# Note that this is an experimental feature and switching back from v2 to v1
+# is not supported by CometBFT.
+# If the database was initially created with v1, it is necessary to migrate the DB
+# before switching to v2. The migration is not done automatically.
+# v1 - the legacy layout existing in CometBFT prior to v1.
+# v2 - Order preserving representation ordering entries by height.
+experimental_db_key_layout = "v1"
+```
+
+**Database Compaction**
+
+Two new parameters [were added](https://github.com/cometbft/cometbft/issues/49) to support database compaction: `compact` controls
+whether forced compaction is enabled, and `compaction_interval` controls how frequently it is triggered (as an interval in
+number of blocks).
+
+```toml
+# If set to true, CometBFT will force compaction to happen for databases that support this feature,
+# saving on storage space. Setting this to true is most beneficial when used in combination
+# with pruning, as it will physically delete the entries marked for deletion.
+# false by default (forcing compaction is disabled).
+compact = false
+```
+```toml
+# To avoid forcing compaction every time, this parameter instructs CometBFT to wait
+# the given number of blocks to be pruned before triggering compaction.
+# It should be tuned depending on the number of items. If your retain height is 1 block,
+# it is too much of an overhead to try compaction every block. But it should also not be a very
+# large multiple of your retain height, as it might incur bigger overheads.
+compaction_interval = "1000"
+```
+
+**Pruning**
+
+CometBFT `v1.0` adds support for a [pruning mechanism](https://github.com/cometbft/cometbft/pull/1150).
+
+A new parameter `interval` was added to control the time period between automated background pruning operations.
+
+```toml
+# The time period between automated background pruning operations.
+interval = "10s"
+```
+
+Other parameters were also introduced to configure the [Data Companion pruning service](https://docs.cometbft.com/v1.0/explanation/data-companion/pruning).
+
+```toml
+#
+# Storage pruning configuration relating only to the data companion.
+#
+[storage.pruning.data_companion]
+
+# Whether automatic pruning respects values set by the data companion. Disabled
+# by default. All other parameters in this section are ignored when this is
+# disabled.
+#
+# If disabled, only the application retain height will influence block pruning
+# (but not block results pruning). Only enabling this at a later stage will
+# potentially mean that blocks below the application-set retain height at the
+# time will not be available to the data companion.
+enabled = false
+
+# The initial value for the data companion block retain height if the data
+# companion has not yet explicitly set one. If the data companion has already
+# set a block retain height, this is ignored.
+initial_block_retain_height = 0
+
+# The initial value for the data companion block results retain height if the
+# data companion has not yet explicitly set one. If the data companion has
+# already set a block results retain height, this is ignored.
+initial_block_results_retain_height = 0
+```
+
+### Removed Parameters
+
+Some parameters have been removed in `v1.0` and no longer apply.
+
+`[mempool]` section
+
+The `max_batch_bytes` parameter [has been removed](https://github.com/cometbft/cometbft/pull/2050).
+
+```toml
+max_batch_bytes = 0
+```
+
+### Changed Parameters
+
+Some sections changed their structure or naming conventions. Ensure you adapt your configuration accordingly.
+
+`[p2p]` section
+
+The `flush_throttle_timeout` [default value has been lowered](https://github.com/cometbft/cometbft/issues/2988).
+
+Default configuration in `v0.38.x`:
+```toml
+# Time to wait before flushing messages out on the connection
+flush_throttle_timeout = "100ms"
+```
+Default configuration in `v1.0`:
+```toml
+# Time to wait before flushing messages out on the connection
+flush_throttle_timeout = "10ms"
+```
+
+`[mempool]` section
+
+The `max_txs_bytes` [default value has been lowered](https://github.com/cometbft/cometbft/issues/2756).
+
+Default configuration in `v0.38.x`:
+```toml
+max_txs_bytes = 1073741824
+```
+Default configuration in `v1.0`:
+```toml
+max_txs_bytes = 67108864
+```
+
+### Deprecated Parameters
+
+Some parameters have been deprecated in `v1.0` and will be removed in future releases.
+
+`[consensus]` section
+
+The `skip_timeout_commit` parameter [has been deprecated](https://github.com/cometbft/cometbft/pull/3093) in CometBFT `v1.0`. In a future release it [will be removed](https://github.com/cometbft/cometbft/pull/2892) in favor of `timeout_commit=0`.
+
+```toml
+# Deprecated: set `timeout_commit` to 0 instead.
+skip_timeout_commit = false
+```
+
+## Genesis File
+
+The format and structure of the genesis file changed in `v1.0`. Review the official [documentation](https://docs.cometbft.com/v1.0/references/config/genesis.json) for `v1.0` to identify any new fields or
+modified data types that need to be included.
+
+After updating your genesis file, validate the file's structure and ensure compatibility with `v1.0`. This step is
+crucial to avoid issues during node initialization.
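+
+As a quick sanity check, you can parse and validate the updated file with the Go API; a minimal
+sketch, assuming `GenesisDocFromFile` and `ValidateAndComplete` in the `types` package are carried
+over from earlier releases:
+
+```golang
+package main
+
+import (
+	"log"
+
+	"github.com/cometbft/cometbft/types"
+)
+
+func main() {
+	// Parsing fails if the JSON does not match the expected genesis structure.
+	genDoc, err := types.GenesisDocFromFile("config/genesis.json")
+	if err != nil {
+		log.Fatalf("cannot parse genesis: %v", err)
+	}
+
+	// ValidateAndComplete checks field validity (including consensus
+	// parameters) and fills in defaults where allowed.
+	if err := genDoc.ValidateAndComplete(); err != nil {
+		log.Fatalf("invalid genesis: %v", err)
+	}
+
+	log.Printf("genesis for chain %q is valid", genDoc.ChainID)
+}
+```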
+
+### PBTS Synchrony Parameters
+
+There are two new parameters related to PBTS: the precision and the message delay. For more information about these parameters,
+please see the [Proposer-Based Timestamps](#proposer-based-timestamps-pbts) section in this document.
+
+```
+    "synchrony": {
+      "precision": "505000000",
+      "message_delay": "15000000000"
+    },
+```
+
+### ABCI Consensus Parameters Deprecated
+
+The `ABCI` consensus parameters have been deprecated.
+
+```
+    "abci": {
+      "vote_extensions_enable_height": "0"
+    }
+```
+
+### Feature Consensus Parameters Added
+
+Please use the new `Feature` parameters. You can specify two parameters, one related to vote extensions,
+and the other for PBTS (check the documentation [here](../../explanation/core/proposer-based-timestamps.md#featureparamspbtsenableheight) for more information).
+
+```
+    "feature": {
+      "vote_extensions_enable_height": "0",
+      "pbts_enable_height": "0"
+    }
+```
+
+### Block Consensus Parameters Changes
+
+The default values of the `Block` consensus parameters have been updated.
+
+In `v0.38.x`, the default values were:
+
+```
+    "block": {
+      "max_bytes": "22020096",
+      "max_gas": "-1"
+    },
+```
+
+And in CometBFT `v1.0` the default parameters are:
+
+```
+    "block": {
+      "max_bytes": "4194304",
+      "max_gas": "10000000"
+    },
+```
+
+For additional context on the `block` value updates, please refer to this [PR](https://github.com/cometbft/cometbft/pull/1518).
+
+## Additional Resources
+
+By following this guide, you should be well-prepared to successfully upgrade from CometBFT `v0.38.x` to `v1.0`. Ensure you stay
+updated with community feedback and best practices post-upgrade.
+
+### Release Notes
+
+Review the official [CHANGELOG](https://github.com/cometbft/cometbft/blob/main/CHANGELOG.md) for detailed information on changes and improvements in `v1.0`.
+
+### Documentation
+
+Consult the [CometBFT v1 documentation](https://docs.cometbft.com/v1.0/) for specifics on configuration
+parameters and API usage.
+
+### Community Support
+
+Engage with the [CometBFT community](https://linktr.ee/cometbft) for assistance or to share
+experiences regarding the upgrade process.
+ +[pbts-spec]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/proposer-based-timestamp/README.md +[pbts-docs]: ../../explanation/core/proposer-based-timestamps.md +[bft-time]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/bft-time.md diff --git a/docs/imgs/light_client_bisection_alg.png b/docs/imgs/light_client_bisection_alg.png index 2a12c7542e5..a960ee69f88 100644 Binary files a/docs/imgs/light_client_bisection_alg.png and b/docs/imgs/light_client_bisection_alg.png differ diff --git a/docs/imgs/sentry_layout.png b/docs/imgs/sentry_layout.png index 240abde18fa..7d7dff44d6d 100644 Binary files a/docs/imgs/sentry_layout.png and b/docs/imgs/sentry_layout.png differ diff --git a/docs/imgs/sentry_local_config.png b/docs/imgs/sentry_local_config.png index 050a6df2fac..4fdb2fe580a 100644 Binary files a/docs/imgs/sentry_local_config.png and b/docs/imgs/sentry_local_config.png differ diff --git a/docs/networks/README.md b/docs/networks/README.md deleted file mode 100644 index ceea235985d..00000000000 --- a/docs/networks/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -order: 1 -parent: - title: Networks - order: 5 ---- - -# Overview - -Use [Docker Compose](./docker-compose.md) to spin up CometBFT testnets on your -local machine. - -See the `cometbft testnet --help` command for more help initializing testnets. diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md deleted file mode 100644 index 9d99ff65f35..00000000000 --- a/docs/networks/docker-compose.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -order: 2 ---- - -# Docker Compose - -With Docker Compose, you can spin up local testnets with a single command. - -## Requirements - -1. [Install CometBFT](../introduction/install.md) -2. [Install docker](https://docs.docker.com/engine/installation/) -3. [Install docker-compose](https://docs.docker.com/compose/install/) - -## Build - -Build the `cometbft` binary and, optionally, the `cometbft/localnode` -docker image. - -Note the binary will be mounted into the container so it can be updated without -rebuilding the image. - -```sh -# Build the linux binary in ./build -make build-linux - -# (optionally) Build cometbft/localnode image -make build-docker-localnode -``` - -## Run a testnet - -To start a 4 node testnet run: - -```sh -make localnet-start -``` - -The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the -host. - -This file creates a 4-node network using the localnode image. - -The nodes of the network expose their P2P and RPC endpoints to the host machine -on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively. - -To update the binary, just rebuild it and restart the nodes: - -```sh -make build-linux -make localnet-start -``` - -## Configuration - -The `make localnet-start` creates files for a 4-node testnet in `./build` by -calling the `cometbft testnet` command. - -The `./build` directory is mounted to the `/cometbft` mount point to attach -the binary and config files to the container. - -To change the number of validators / non-validators change the `localnet-start` Makefile target [here](../../Makefile): - -```makefile -localnet-start: localnet-stop - @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/cometbft:Z cometbft/localnode testnet --v 5 --n 3 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi - docker-compose up -``` - -The command now will generate config files for 5 validators and 3 -non-validators. 
Along with generating new config files the docker-compose file needs to be edited. -Adding 4 more nodes is required in order to fully utilize the config files that were generated. - -```yml - node3: # bump by 1 for every node - container_name: node3 # bump by 1 for every node - image: "cometbft/localnode" - environment: - - ID=3 - - LOG=${LOG:-cometbft.log} - ports: - - "26663-26664:26656-26657" # Bump 26663-26664 by one for every node - volumes: - - ./build:/cometbft:Z - networks: - localnet: - ipv4_address: 192.167.10.5 # bump the final digit by 1 for every node -``` - -Before running it, don't forget to cleanup the old files: - -```sh -# Clear the build folder -rm -rf ./build/node* -``` - -## Configuring ABCI containers - -To use your own ABCI applications with 4-node setup edit the [docker-compose.yaml](https://github.com/cometbft/cometbft/blob/main/docker-compose.yml) file and add images to your ABCI application. - -```yml - abci0: - container_name: abci0 - image: "abci-image" - build: - context: . - dockerfile: abci.Dockerfile - command: - networks: - localnet: - ipv4_address: 192.167.10.6 - - abci1: - container_name: abci1 - image: "abci-image" - build: - context: . - dockerfile: abci.Dockerfile - command: - networks: - localnet: - ipv4_address: 192.167.10.7 - - abci2: - container_name: abci2 - image: "abci-image" - build: - context: . - dockerfile: abci.Dockerfile - command: - networks: - localnet: - ipv4_address: 192.167.10.8 - - abci3: - container_name: abci3 - image: "abci-image" - build: - context: . - dockerfile: abci.Dockerfile - command: - networks: - localnet: - ipv4_address: 192.167.10.9 - -``` - -Override the [command](https://github.com/cometbft/cometbft/blob/main/networks/local/localnode/Dockerfile#L11) in each node to connect to it's ABCI. - -```yml - node0: - container_name: node0 - image: "cometbft/localnode" - ports: - - "26656-26657:26656-26657" - environment: - - ID=0 - - LOG=$${LOG:-cometbft.log} - volumes: - - ./build:/cometbft:Z - command: node --proxy_app=tcp://abci0:26658 - networks: - localnet: - ipv4_address: 192.167.10.2 -``` - -Similarly do for node1, node2 and node3 then [run testnet](#run-a-testnet). - -## Logging - -Log is saved under the attached volume, in the `cometbft.log` file. If the -`LOG` environment variable is set to `stdout` at start, the log is not saved, -but printed on the screen. - -## Special binaries - -If you have multiple binaries with different names, you can specify which one -to run with the `BINARY` environment variable. The path of the binary is relative -to the attached volume. diff --git a/docs/qa/method.md b/docs/qa/method.md deleted file mode 100644 index 464356f50ca..00000000000 --- a/docs/qa/method.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -order: 1 -parent: - title: Method - order: 1 ---- - -# Method - -This document provides a detailed description of the QA process. -It is intended to be used by engineers reproducing the experimental setup for future tests of CometBFT. - -The (first iteration of the) QA process as described [in the RELEASES.md document][releases] -was applied to version v0.34.x in order to have a set of results acting as benchmarking baseline. -This baseline is then compared with results obtained in later versions. - -Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them: -_200 Node Test_, and _Rotating Nodes Test_. 
- -[releases]: https://github.com/cometbft/cometbft/blob/main/RELEASES.md#large-scale-testnets - -## Software Dependencies - -### Infrastructure Requirements to Run the Tests - -* An account at Digital Ocean (DO), with a high droplet limit (>202) -* The machine to orchestrate the tests should have the following installed: - * A clone of the [testnet repository][testnet-repo] - * This repository contains all the scripts mentioned in the remainder of this section - * [Digital Ocean CLI][doctl] - * [Terraform CLI][Terraform] - * [Ansible CLI][Ansible] - -[testnet-repo]: https://github.com/cometbft/qa-infra -[Ansible]: https://docs.ansible.com/ansible/latest/index.html -[Terraform]: https://www.terraform.io/docs -[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/ - -### Requirements for Result Extraction - -* [Prometheus DB][prometheus] to collect metrics from nodes -* Prometheus DB to process queries (may be different node from the previous) -* blockstore DB of one of the full nodes in the testnet - - -[prometheus]: https://prometheus.io/ - -## 200 Node Testnet - -### Running the test - -This section explains how the tests were carried out for reproducibility purposes. - -1. [If you haven't done it before] - Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. -2. Copy file `testnets/testnet200.toml` onto `testnet.toml` (do NOT commit this change) -3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested. - * If you are running the base test, which implies an homogeneous network (all nodes are running the same version), - then make sure makefile variable `VERSION2_WEIGHT` is set to 0 - * If you are running a mixed network, set the variable `VERSION2_TAG` to the other version you want deployed - in the network. - Then adjust the weight variables `VERSION_WEIGHT` and `VERSION2_WEIGHT` to configure the - desired proportion of nodes running each of the two configured versions. -4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet - * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9) -5. As a sanity check, connect to the Prometheus node's web interface (port 9090) - and check the graph for the `cometbft_consensus_height` metric. All nodes - should be increasing their heights. - - * You can find the Prometheus node's IP address in `ansible/hosts` under section `[prometheus]`. - * The following URL will display the metrics `cometbft_consensus_height` and `cometbft_mempool_size`: - - ``` - http://:9090/classic/graph?g0.range_input=1h&g0.expr=cometbft_consensus_height&g0.tab=0&g1.range_input=1h&g1.expr=cometbft_mempool_size&g1.tab=0 - ``` - -6. You now need to start the load runner that will produce transaction load. - * If you don't know the saturation load of the version you are testing, you need to discover it. - * Run `make loadrunners-init`. This will copy the loader scripts to the - `testnet-load-runner` node and install the load tool. - * Find the IP address of the `testnet-load-runner` node in - `ansible/hosts` under section `[loadrunners]`. - * `ssh` into `testnet-load-runner`. - * Edit the script `/root/200-node-loadscript.sh` in the load runner - node to provide the IP address of a full node (for example, - `validator000`). This node will receive all transactions from the - load runner node. - * Run `/root/200-node-loadscript.sh` from the load runner node. 
- * This script will take about 40 mins to run, so it is suggested to - first run `tmux` in case the ssh session breaks. - * It is running 90-seconds-long experiments in a loop with different - loads. - * If you already know the saturation load, you can simply run the test (several times) for 90 seconds with a load somewhat - below saturation: - * set makefile variables `LOAD_CONNECTIONS`, `LOAD_TX_RATE`, to values that will produce the desired transaction load. - * set `LOAD_TOTAL_TIME` to 90 (seconds). - * run "make runload" and wait for it to complete. You may want to run this several times so the data from different runs can be compared. -7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine - * Alternatively, you may want to run `make retrieve-prometheus-data` and `make retrieve-blockstore` separately. - The end result will be the same. - * `make retrieve-blockstore` accepts the following values in makefile variable `RETRIEVE_TARGET_HOST` - * `any`: (which is the default) picks up a full node and retrieves the blockstore from that node only. - * `all`: retrieves the blockstore from all full nodes; this is extremely slow, and consumes plenty of bandwidth, - so use it with care. - * the name of a particular full node (e.g., `validator01`): retrieves the blockstore from that node only. -8. Verify that the data was collected without errors - * at least one blockstore DB for a CometBFT validator - * the Prometheus database from the Prometheus node - * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) -9. **Run `make terraform-destroy`** - * Don't forget to type `yes`! Otherwise you're in trouble. - -### Result Extraction - -The method for extracting the results described here is highly manual (and exploratory) at this stage. -The CometBFT team should improve it at every iteration to increase the amount of automation. - -#### Steps - -1. Unzip the blockstore into a directory -2. To identify saturation points - 1. Extract the latency report for all the experiments. - * Run these commands from the directory containing the `blockstore.db` folder. - * It is advisable to adjust the hash in the `go run` command to the latest possible. - * ```bash - mkdir results - go run github.com/cometbft/cometbft/test/loadtime/cmd/report@3003ef7 --database-type goleveldb --data-dir ./ > results/report.txt - ``` - 2. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate. - You will need to separate data per experiment. - - * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`, - copy its related lines to the filename that matches the number of connections, for example - - ```bash - for cnum in 1 2 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done - ``` - - * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`. - * Otherwise just keep `report.txt`, and skip to the next step. - 4. Generate file `report_tabbed.txt` by showing the contents `report01.txt`, `report02.txt`, `report04.txt` side by side - * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections. - * Combine the column files into a single table file: - * Replace tabs by spaces in all column files. 
For example, - `sed -i.bak 's/\t/ /g' results/report1.txt`. - * Merge the new column files into one: - `paste results/report1.txt results/report2.txt results/report4.txt | column -s $'\t' -t > report_tabbed.txt` - -3. To generate a latency vs throughput plot, extract the data as a CSV - * ```bash - go run github.com/cometbft/cometbft/test/loadtime/cmd/report@3003ef7 --database-type goleveldb --data-dir ./ --csv results/raw.csv - ``` - * Follow the instructions for the [`latency_throughput.py`] script. - This plot is useful to visualize the saturation point. - * Alternatively, follow the instructions for the [`latency_plotter.py`] script. - This script generates a series of plots per experiment and configuration that may - help with visualizing Latency vs Throughput variation. - -[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md#Latency-vs-Throughput-Plotting -[`latency_plotter.py`]: ../../scripts/qa/reporting/README.md#Latency-vs-Throughput-Plotting-version-2 - -#### Extracting Prometheus Metrics - -1. Stop the prometheus server if it is running as a service (e.g. a `systemd` unit). -2. Unzip the prometheus database retrieved from the testnet, and move it to replace the - local prometheus database. -3. Start the prometheus server and make sure no error logs appear at start up. -4. Identify the time window you want to plot in your graphs. -5. Execute the [`prometheus_plotter.py`] script for the time window. - -[`prometheus_plotter.py`]: ../../scripts/qa/reporting/README.md#prometheus-metrics - -## Rotating Node Testnet - -### Running the test - -This section explains how the tests were carried out for reproducibility purposes. - -1. [If you haven't done it before] - Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. -2. Copy file `testnet_rotating.toml` onto `testnet.toml` (do NOT commit this change) -3. Set variable `VERSION_TAG` to the git hash that is to be tested. -4. Run `make terraform-apply EPHEMERAL_SIZE=25` - * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests -5. Follow steps 6-10 of the `README.md` to configure and start the "stable" part of the rotating node testnet -6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric. - All nodes should be increasing their heights. -7. On a different shell, - * run `make runload LOAD_CONNECTIONS=X LOAD_TX_RATE=Y LOAD_TOTAL_TIME=Z` - * `X` and `Y` should reflect a load below the saturation point (see, e.g., - [this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) for further info) - * `Z` (in seconds) should be big enough to keep running throughout the test, until we manually stop it in step 9. - In principle, a good value for `Z` is `7200` (2 hours) -8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up. - * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for the full length - of the experiment. 
- * [This](http://:9090/classic/graph?g0.range_input=100m&g0.expr=cometbft_consensus_height%7Bjob%3D~%22ephemeral.*%22%7D%20or%20cometbft_blocksync_latest_block_height%7Bjob%3D~%22ephemeral.*%22%7D&g0.tab=0&g1.range_input=100m&g1.expr=cometbft_mempool_size%7Bjob!~%22ephemeral.*%22%7D&g1.tab=0&g2.range_input=100m&g2.expr=cometbft_consensus_num_txs%7Bjob!~%22ephemeral.*%22%7D&g2.tab=0) - is an example Prometheus URL you can use to monitor the test case's progress -9. When the height of the chain reaches 3000, stop the `make runload` script. -10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice) - after height 3000 was reached, stop `make rotate` -11. Run `make stop-network` -12. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine -13. Verify that the data was collected without errors - * at least one blockstore DB for a CometBFT validator - * the Prometheus database from the Prometheus node - * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) -14. **Run `make terraform-destroy`** - -Steps 8 to 10 are highly manual at the moment and will be improved in next iterations. - -### Result Extraction - -In order to obtain a latency plot, follow the instructions above for the 200 node experiment, -but the `results.txt` file contains only one experiment. - -As for prometheus, the same method as for the 200 node experiment can be applied. - -## Vote Extensions Testnet - -### Running the test - -This section explains how the tests were carried out for reproducibility purposes. - -1. [If you haven't done it before] - Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. -2. Copy file `varyVESize.toml` onto `testnet.toml` (do NOT commit this change). -3. Set variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested. -4. Follow steps 5-10 of the `README.md` to configure and start the testnet - * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests -5. Configure the load runner to produce the desired transaction load. - * set makefile variables `ROTATE_CONNECTIONS`, `ROTATE_TX_RATE`, to values that will produce the desired transaction load. - * set `ROTATE_TOTAL_TIME` to 150 (seconds). - * set `ITERATIONS` to the number of iterations that each configuration should run for. -6. Execute steps 5-10 of the `README.md` file at the testnet repository. - -7. Repeat the following steps for each desired `vote_extension_size` - 1. Update the configuration (you can skip this step if you didn't change the `vote_extension_size`) - * Update the `vote_extensions_size` in the `testnet.toml` to the desired value. - * `make configgen` - * `ANSIBLE_SSH_RETRIES=10 ansible-playbook ./ansible/re-init-testapp.yaml -u root -i ./ansible/hosts --limit=validators -e "testnet_dir=testnet" -f 20` - * `make restart` - 2. Run the test - * `make runload` - This will repeat the tests `ITERATIONS` times every time it is invoked. - 3. Collect your data - * `make retrieve-data` - Gathers all relevant data from the testnet into the orchestrating machine, inside folder `experiments`. - Two subfolders are created, one blockstore DB for a CometBFT validator and one for the Prometheus DB data. - * Verify that the data was collected without errors with `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s). -8. Clean up your setup. 
- * `make terraform-destroy`; don't forget that you need to type **yes** for it to complete. - - -### Result Extraction - -In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but: - -* The `results.txt` file contains only one experiment -* Therefore, no need for any `for` loops - -As for Prometheus, the same method as for the 200 node experiment can be applied. diff --git a/docs/references/README.md b/docs/references/README.md new file mode 100644 index 00000000000..8fc36459af4 --- /dev/null +++ b/docs/references/README.md @@ -0,0 +1,97 @@ +--- +order: 4 +title: CometBFT Reference Documentation +description: References +parent: + order: 4 +--- + +# Reference Documentation + +Welcome to the CometBFT Reference Documentation, a comprehensive resource for essential information and +standards related to CometBFT. + +In this documentation, you'll find valuable references in three key areas: + +## Architecture Decision Records (ADRs) + +[Architecture Decision Records (ADRs)](./architecture/README.md) provide a structured approach to documenting key architectural +decisions made during the development of CometBFT. These records help us maintain transparency, +share knowledge, and ensure that architectural choices align with CometBFT's goals and constraints. + +### What You'll Find in ADRs: + +- Clear explanations of architectural decisions. +- The context and background that led to each decision. +- Rationale for choosing a particular solution. +- Any associated risks and trade-offs. +- Decision status and tracking for ongoing changes. + +Browse the ADRs to gain insights into the architectural decisions that shape CometBFT. + +## Quality Assurance (QA) Documents + +[Quality Assurance (QA)](./qa/README.md) Documents are the foundation of the commitment to delivering a high-quality CometBFT +implementation. These standards guide the testing processes, quality control measures, and best practices +to ensure that CometBFT meets and exceeds industry benchmarks. + +### What You'll Find in QA: + +- Testing methodologies and strategies. +- Documentation on test plans and test cases. + +Explore the QA documents to understand the testing methods to ensure the quality and performance of CometBFT. + +## Configuration Manual + +The [Configuration Manual](./config/README.md) is a reference manual describing CometBFT configuration options. +It details all configuration files deployed and includes examples and best practices to ensure that +CometBFT is deployed with a proper configuration. + +### What You'll Find in the Configuration Manual: + +- Documentation of each configuration option. +- Examples for real-world use-cases. + +Explore the Configuration Manual to understand the different options of CometBFT to ensure a high-performing deployment. + +## Request for Comments (RFCs) + +[Request for Comments (RFCs)](./rfc/README.md) documents serve as a platform for open and collaborative discussions on proposed +changes, new features, and improvements within CometBFT. RFCs encourage cross-team communication +and feedback, ensuring that decisions are well-informed and aligned with CometBFT's goals. + +### What You'll Find in RFCs: + +- Proposals for changes or enhancements. +- Technical details and specifications. +- Discussions and feedback from team members. +- Status updates and implementation progress. + +Engage with RFCs to participate in shaping the direction of CometBFT, share your insights, +and contribute to its continuous evolution. 
+
+## Storage
+
+[Storage](./storage/README.md) references comprise findings from improvements to CometBFT's storage sub-systems
+(e.g., block store, state store).
+
+### What You'll Find in Storage:
+
+- Prior improvement efforts and the reports documenting those efforts
+- The testing methodology used to improve storage footprint and performance in CometBFT
+- Conclusions and potential future work
+
+Explore the Storage references to get a deeper understanding of prior work and future plans in these
+sub-systems of CometBFT.
+
+## Explore CometBFT References
+
+The CometBFT Reference Documentation empowers you with knowledge and information that enhances
+your understanding of CometBFT's architecture, quality assurance, and ongoing improvements.
+Whether you're a chain developer, application developer, or simply interested in CometBFT,
+you'll find valuable insights and documentation here.
+
+Feel free to explore the sections on ADRs, QA Standards, and RFCs to access the resources you need.
+We are committed to transparency, collaboration, and excellence, and this documentation reflects
+our dedication to those principles.
diff --git a/docs/architecture/README.md b/docs/references/architecture/README.md
similarity index 51%
rename from docs/architecture/README.md
rename to docs/references/architecture/README.md
index 18822235c1c..2cb3b6227a6 100644
--- a/docs/architecture/README.md
+++ b/docs/references/architecture/README.md
@@ -21,7 +21,7 @@ provide:
 - References
 - Changelog
 
-To create a new ADR, please use the [ADR template](./adr-template.md).
+To create a new ADR, please use the [ADR template](adr-template.md).
 
 Note the distinction between an ADR and a spec. An ADR provides the context,
 intuition, reasoning, and justification for a change in architecture, or for the
@@ -42,21 +42,33 @@ numbering our ADRs from 100 onwards.
### Proposed -- [ADR-103: Protobuf definition versioning](./adr-103-proto-versioning.md) -- [ADR-105: Refactor list of senders in mempool](./adr-105-refactor-mempool-senders.md) +- [ADR-113: Modular transaction hashing](adr-113-modular-transaction-hashing.md) +- [ADR-118: Mempool Lanes](adr-118-mempool-lanes.md) ### Accepted -- [ADR-101: Data companion pull API](./adr-101-data-companion-pull-api.md) -- [ADR-104: State sync from local snapshot](./adr-104-out-of-band-state-sync.md) -- [ADR-107: Rename protobuf versions of 0.x releases to pre-v1 betas](./adr-107-betaize-proto-versions.md) - -### Implemented - -- [ADR-108: E2E tests for CometBFT's behaviour in respect to ABCI 1.0.](./adr-108-e2e-abci++.md) +- [ADR-101: Data companion pull API](adr-101-data-companion-pull-api.md) +- [ADR-102: RPC Companion](adr-102-rpc-companion.md) +- [ADR-103: Protobuf definition versioning](adr-103-proto-versioning.md) +- [ADR-104: State sync from local snapshot](adr-104-out-of-band-state-sync.md) +- [ADR-105: Refactor list of senders in mempool](adr-105-refactor-mempool-senders.md) +- [ADR-106: gRPC API](adr-106-grpc-api) +- [ADR-107: Rename protobuf versions of 0.x releases to pre-v1 betas](adr-107-betaize-proto-versions.md) +- [ADR-109: Reduce CometBFT Go API Surface Area](adr-109-reduce-go-api-surface.md) +- [ADR-111: `nop` Mempool](adr-111-nop-mempool.md) +- [ADR-112: Proposer-Based Timestamps](adr-112-proposer-based-timestamps.md) +- [ADR-114: Partly Undo ADR 109](adr-114-undo-109.md) +- [ADR-115: Predictable Block Times](adr-115-predictable-block-times.md) + +### Accepted but Not (Yet) Implemented + +- [ADR-102: RPC Companion](adr-102-rpc-companion.md) +- [ADR-104: State sync from local snapshot](adr-104-out-of-band-state-sync.md) +- [ADR-105: Refactor list of senders in mempool](adr-105-refactor-mempool-senders.md) ### Deprecated ### Rejected -- [ADR-100: Data companion push API](./adr-100-data-companion-push-api.md) +- [ADR-100: Data companion push API](adr-100-data-companion-push-api.md) +- [ADR-110: Remote mempool](adr-110-remote-mempool.md) diff --git a/docs/architecture/adr-100-data-companion-push-api.md b/docs/references/architecture/adr-100-data-companion-push-api.md similarity index 99% rename from docs/architecture/adr-100-data-companion-push-api.md rename to docs/references/architecture/adr-100-data-companion-push-api.md index c41bffbed37..5e04b3595b3 100644 --- a/docs/architecture/adr-100-data-companion-push-api.md +++ b/docs/references/architecture/adr-100-data-companion-push-api.md @@ -273,7 +273,7 @@ message BlockCommittedRequest { optional tendermint.abci.FinalizeBlockResponse finalize_block_response = 2; } -// BlockCommittedResponse is either empty upon succes, or returns one or more +// BlockCommittedResponse is either empty upon success, or returns one or more // errors. Note that returning any errors here will cause CometBFT to crash. message BlockCommittedResponse { // If an error occurred during the companion's processing of the request. @@ -505,7 +505,7 @@ PostgreSQL). 
[\#7471]: https://github.com/tendermint/tendermint/issues/7471 [rfc-003]: ../rfc/tendermint-core/rfc-003-performance-questions.md [rfc-006]: ../rfc/tendermint-core/rfc-006-event-subscription.md -[adr-075]: ./tendermint-core/adr-075-rpc-subscription.md +[adr-075]: tendermint-core/adr-075-rpc-subscription.md [websocket-api]: https://docs.cometbft.com/v0.34/rpc/#/Websocket [`/tx_search`]: https://docs.cometbft.com/v0.34/rpc/#/Info/tx_search [`/block_search`]: https://docs.cometbft.com/v0.34/rpc/#/Info/block_search diff --git a/docs/architecture/adr-101-data-companion-pull-api.md b/docs/references/architecture/adr-101-data-companion-pull-api.md similarity index 97% rename from docs/architecture/adr-101-data-companion-pull-api.md rename to docs/references/architecture/adr-101-data-companion-pull-api.md index 963dd578beb..b941ae5b54f 100644 --- a/docs/architecture/adr-101-data-companion-pull-api.md +++ b/docs/references/architecture/adr-101-data-companion-pull-api.md @@ -278,7 +278,7 @@ message GetBlockResultsRetainHeightResponse { ##### Indexer pruning service This gRPC service can be used to instruct CometBFT to prune the transaction and -block events indexed by CometBFT. +block events indexed by CometBFT. To support this, the above described pruning service is extended as follows: @@ -418,13 +418,13 @@ interaction between a node and its data companion: is influenced by the application and pruning service block retain heights. - `abci_results_base_height` - The actual base height of stored block results, which is influenced by the pruning service block results retain height. -- `block_indexer_retain_height` - The current block indexer retain height +- `block_indexer_retain_height` - The current block indexer retain height requested by the pruning service. -- `tx_indexer_retain_height` - The current tx indexer retain height +- `tx_indexer_retain_height` - The current tx indexer retain height requested by the pruning service. -- `block_indexer_base_height` - The minimum height at which we have block events +- `block_indexer_base_height` - The minimum height at which we have block events (should demonstrate the effects of pruning the block indexer) -- `tx_indexer_base_height` - The minimum height at which we have transaction events +- `tx_indexer_base_height` - The minimum height at which we have transaction events (should demonstrate the effects of pruning the tx indexer) Other metrics may be proposed as part of the non-privileged gRPC API that could @@ -463,10 +463,10 @@ companion, but only if the data companion is the exclusive user of those APIs. 
 - [\#81 - rpc: Add gRPC support][\#81]
 - [Documentation on current implementation of ADR-101][dc-docs]
 
-[adr-100-context]: ./adr-100-data-companion-push-api.md#context
-[adr-100]: ./adr-100-data-companion-push-api.md
-[adr-100-req]: ./adr-100-data-companion-push-api.md#requirements
-[adr-100-alt]: ./adr-100-data-companion-push-api.md#alternative-approaches
+[adr-100-context]: adr-100-data-companion-push-api.md#context
+[adr-100]: adr-100-data-companion-push-api.md
+[adr-100-req]: adr-100-data-companion-push-api.md#requirements
+[adr-100-alt]: adr-100-data-companion-push-api.md#alternative-approaches
 [\#81]: https://github.com/cometbft/cometbft/issues/81
-[abci-commit]: ../../spec/abci/abci++_methods.md#commit
-[dc-docs]: https://github.com/cometbft/cometbft/tree/main/docs/data-companion
\ No newline at end of file
+[abci-commit]: ../../../spec/abci/abci++_methods.md#commit
+[dc-docs]: https://github.com/cometbft/cometbft/tree/main/docs/data-companion
diff --git a/docs/references/architecture/adr-102-rpc-companion.md b/docs/references/architecture/adr-102-rpc-companion.md
new file mode 100644
index 00000000000..334c11ab6c7
--- /dev/null
+++ b/docs/references/architecture/adr-102-rpc-companion.md
@@ -0,0 +1,352 @@
+# ADR-102: RPC Companion
+
+## Changelog
+
+- 2022-03-27: First draft (@andynog)
+- 2024-03-19: Update ADR information (@andynog)
+- 2024-03-25: Final updates before publishing (@andynog)
+
+## Status
+
+Accepted
+
+[Tracking issue](https://github.com/cometbft/cometbft/issues/707)
+
+## Context
+
+This ADR provides a reference implementation of a system that can be used to offload queryable data from a CometBFT
+full node to a database and offer a service exposing the same JSON-RPC methods on an endpoint as the regular JSON-RPC
+methods of a CometBFT node endpoint. This makes it easier for integrators of RPC clients, such as client libraries and
+applications, to switch to this RPC Companion with as little effort as possible.
+
+This solution can run as a sidecar, a separate process that runs concurrently with the full node. However, the RPC
+Companion is optional, meaning that the full node will still provide RPC services that can be queried if operators
+don't want to run an RPC Companion service.
+
+This architecture also makes it possible to horizontally scale a full node's querying capacity by running multiple
+copies of the RPC Companion server instances behind a load balancer (e.g., Cloudflare), which makes it possible to
+serve the data in a more scalable way.
+
+One of the benefits of utilizing an RPC Companion is that it enables data indexing on external storage, leading to
+improved performance compared to the internal indexer of CometBFT. The internal indexer of CometBFT has certain
+limitations and might not be suitable for specific application use cases.
+
+## Alternative Approaches
+
+The Data Companion Pull API concept, identified as [[ADR-101]](adr-101-data-companion-pull-api.md), is a novel idea. As it gains popularity and acceptance,
+users are expected to develop their own versions of it to meet their specific requirements. The RPC Companion is the
+initial implementation of a Data Companion that can serve as a model for others to follow.
+
+## Decision
+
+TBD
+
+## Detailed Design
+
+### Requirements
+
+The target audience for this solution is operators and integrators who want to alleviate the load on their nodes by offloading
+the queryable data requests to the **RPC Companion**.
+
+This solution shall meet the following requirements in order to provide real benefits to these users.
+
+The **RPC Companion** solution shall:
+
+1. Provide an **[Ingest Service](#ingest-service)** implemented as a data companion that can pull data from a CometBFT node and store it on
+its own storage (database).
+2. Provide a storage ([Database](#database)) that can persist the data using a [database schema](#database-schema) that
+can store information that was fetched from the full node.
+3. Not force breaking changes to the existing RPC.
+4. Ensure the responses returned by the RPC Companion v1 endpoint are wire compatible with the existing CometBFT
+JSON-RPC endpoint.
+5. Implement tests to verify backwards compatibility.
+
+### [RPC Endpoint](#rpc-endpoint)
+
+The RPC Companion endpoint will be the same as the CometBFT JSON-RPC endpoint but with a `/v1` appended to it. The RPC
+Companion endpoint can be hosted on a different URL and might also use a different port than the default CometBFT RPC
+port (e.g. `26657`) as shown below.
+
+For example, suppose these are the URLs for each RPC endpoint:
+
+CometBFT RPC -> `http://cosmos.host:26657`
+
+RPC Companion -> `http://rpc-companion.host:8080/v1`
+
+To make a request for a `block` at height `5` using the CometBFT JSON-RPC endpoint:
+
+    curl --header "Content-Type: application/json" --request POST --data '{"method": "block", "params": ["5"], "id": 1}' http://cosmos.host:26657
+
+To make the same request to the RPC Companion endpoint:
+
+    curl --header "Content-Type: application/json" --request POST --data '{"method": "block", "params": ["5"], "id": 1}' http://rpc-companion.host:8080/v1
+
+> Note that only the URL and port change between these two `curl` commands.
+
+The RPC Companion will accept JSON-RPC requests, the same way as the CometBFT JSON-RPC endpoint does.
+
+The RPC Companion endpoint methods listed in the following table should be implemented first, as they are straightforward
+and less complex.
+
| **JSON-RPC method** | **JSON-RPC Parameters** | **Description** | **Notes** |
|---------------------|-------------------------|-----------------|-----------|
| `abci_info` | | Get information about the application | This method will return the same response structure as the equivalent CometBFT method. It will return the latest information stored in its database that was retrieved from the full node. |
| `block` | * height | Get block at a specified height | This method will return the same response structure as the equivalent CometBFT method. The data retrieved from the companion database for a particular block will have to be properly serialized into the `block` struct in order to be returned as a response. |
| `block_by_hash` | * hash | Get block by its hash | This method will return the same response structure as the equivalent CometBFT method. |
| `block_results` | * height | Get block results at a specified height | This method will return the same response structure as the equivalent CometBFT method. The data retrieved from the companion database for a particular block result will have to be properly serialized into the `ResultBlockResults` struct in order to be returned as a response.
| +| `blockchain` | * minHeight
* maxHeight | Get blocks in a specified height range | This method will return the same response structure as the equivalent CometBFT method. The data retrieved from the companion database will include one or more blocks. |
| `commit` | * height | Get commit results at a specified height | This method will return the same response structure as the equivalent CometBFT method. |
| `consensus_params` | * height | Get consensus parameters at a specified height | This method will return the same response structure as the equivalent CometBFT method. |
| `header` | * height | Get header at a specified height | This method will return the same response structure as the equivalent CometBFT method. |
| `header_by_hash` | * hash | Get header by its hash | This method will return the same response structure as the equivalent CometBFT method. |
| `health` | | Get node health | This method returns an empty response and can be used to test whether the server RPC is up. On CometBFT it returns a response if the full node is up; when using the companion service, it will return an `OK` status if the companion service is up. |
| `tx` | * hash

* prove | Get a transaction by its hash | This method will return the same response structure as the equivalent CometBFT method. | +| `validators` | * height
* page
* per_page | Get validator set at a specified height | This method will return the same response structure as the equivalent CometBFT method. |
+
+The following methods can also be implemented, but require additional effort and complexity. These are mostly the ones
+that provide `search` and `query` functionalities. Since they do not depend on data retrieval from the RPC Companion
+database, these methods simply proxy the requests to the full node. In the future, it might be possible to implement
+these methods in the RPC Companion if the database stores all the information required to be indexed and the queries
+specified in the JSON-RPC methods can be translated into SQL statements to return the queried data from the database.
+
| **JSON-RPC method** | **JSON-RPC Parameters** | **Description** | **Notes** |
|---------------------|-------------------------|-----------------|-----------|
| `abci_query` | * path

* data
* height
* prove | Query information from the application | This method will return the same response structure as the equivalent CometBFT method. The RPC companion service will have to implement a proper abci parameter to sql query translation. | +| `block_search` | * query
* page
* per_page
* order_by | Query information about a block | This method will return the same response structure as the equivalent CometBFT method. The RPC companion service will have to implement a proper query parameter to sql query translation. | +| `tx_search` | * query
* page
* per_page
* prove
* order_by | Query information about transactions | This method will return the same response structure as the equivalent CometBFT method. The RPC companion service will have to implement a proper query parameter to sql query translation. |
+
+The following methods will proxy the requests through the RPC Companion endpoint to the full node, to ensure that clients don't need to implement routing logic for methods that would not be available in the RPC Companion endpoint.
+
+> The `/broadcast_tx_*` methods might need some additional logic for proxying since some of them have different asynchronicity patterns.
+
| **JSON-RPC method** | **JSON-RPC Parameters** | **Description** | **Notes** | Proxy |
|---------------------|-------------------------|-----------------|-----------|-------|
| `broadcast_evidence` | * evidence | Broadcast evidence of misbehavior | The evidence parameter is in JSON format | yes |
| `broadcast_tx_async` | * tx | Broadcast a transaction | Returns right away with no response | yes |
| `broadcast_tx_sync` | * tx | Broadcast a transaction | Returns with the response from CheckTx | yes |
| `broadcast_tx_commit` | * tx | Broadcast a transaction | Returns with the responses from CheckTx and DeliverTx | yes |
| `check_tx` | * tx | Check a transaction | Checks a transaction without executing it | yes |
| `consensus_state` | | Gets consensus state | The consensus state will not be stored in the RPC companion database, so it should proxy the request to the full node | yes |
| `dump_consensus_state` | | Gets the full consensus state | The consensus state will not be stored in the RPC companion database, so it should proxy the request to the full node | yes |
| `genesis` | | Gets the genesis information | The RPC companion service can proxy the genesis request to the full node. If there are use cases where serving the genesis from the RPC companion service (no proxy) is desirable, it can be implemented as a method | yes |
| `net_info` | | Gets network information | The request should proxy to the full node since the RPC companion database will not store network information | yes |
| `unconfirmed_txs` | * limit | Gets the list of unconfirmed transactions | The request should proxy to the full node since the RPC companion database will not store unconfirmed transactions information | yes |
| `num_unconfirmed_txs` | | Gets data about unconfirmed transactions | The request should proxy to the full node since the RPC companion database will not store unconfirmed transactions information | yes |
| `status` | | Gets node status | The request should proxy to the full node since the RPC companion database will not store node status information | yes |
+
+> NOTE: The RPC Companion should not implement logic to store data in its database that can modify state in the blockchain,
+such as the `broadcast_tx_*` methods. These requests will proxy to the full node as outlined above.
+
+### High-level architecture
+
+![High-level architecture](images/adr-102-architecture.jpeg)
+
+This diagram shows all the required components for a full RPC Companion solution. The solution implementation contains
+many parts and each one is described below:
+
+### [Ingest Service](#ingest-service)
+
+The **Ingest Service** pulls the data from the full node JSON-RPC endpoint and stores the information retrieved in
+the RPC Companion database.
+
+The **Ingest Service** should run as a "singleton", meaning that only one instance of this service
+should be fetching the information from the CometBFT full node.
+
+Currently, the Data Companion Pull APIs offer gRPC services to retrieve `Block` and `BlockResults`. These can be used
+to pull the data from the server.
+
+The **Ingest Service** can influence the pruning of `Blocks` and `BlockResults` on the full node via a [pruning service](https://docs.cometbft.com/v1.0/explanation/data-companion/pruning).
+Once the Ingest Service has pulled the data from the full node, processed it, and received an acknowledgement from
+the database that the data was inserted, it can notify the full node that a specific height has been processed and
+set the processed height as the `retain height` on the full node, signaling to the node that this height can be pruned.
+
+If the **Ingest Service** becomes unavailable (e.g. stops), it should resume synchronization with the full node when
+it is back online: it should query the full node for the last `retain height`, then request and process all the heights
+missing from the database until it catches up with the full node's latest height.
+
+In case the **Ingest Service** becomes unavailable for a long time and there are several heights to be synchronized, it is
+important for the **Ingest Service** to do so in a throttled way (in short intervals) to prevent the server from becoming overloaded.
+
+### [Database](#database)
+
+The database stores the data retrieved from the full node and provides this data for the RPC server instance.
+
+It is proposed that the relational database [PostgreSQL](https://www.postgresql.org/) be used in order to support
+the [RPC server instance](#rpc-instance) scalability.
+
+Using a relational database will also provide more flexibility when implementing a future RPC Companion `/v2`
+endpoint that can return data in different forms, and database indexes might also be leveraged in order to boost
+query response performance.
+
+The data needs to be available both to the Ingest Service (database writes) and the RPC server instance (database reads),
+and these services might be running on different machines, so an embedded database is not recommended in this case,
+since accessing the data remotely might not be optimal for an embedded key-value database. Also, since the RPC might have
+many server instances (or processes) running that will need to retrieve data concurrently, it is recommended to use
+a well-known, robust database engine that can support such a load.
+
+Also, PostgreSQL supports ACID transactions, which is important to provide stronger guarantees that the data was successfully
+inserted in the database and that an acknowledgement can be sent back to the Ingest Service to notify the
+full node to prune the inserted data. Supporting ACID transactions also ensures that there are no partial reads
+(returning data that was only partially written to the database), preventing readers from accessing incomplete or inconsistent data.
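+
+To make the write path concrete, here is a minimal, hedged sketch of how the Ingest Service might persist a block
+inside a single transaction before acknowledging the height to the full node. The helper name is illustrative (not
+part of this ADR), and the `comet.v1.block` table is the one introduced in the next section:
+
+```golang
+package ingest
+
+import (
+	"context"
+	"database/sql"
+)
+
+// storeBlock inserts one block inside an ACID transaction. Only after Commit
+// succeeds is it safe to acknowledge the height to the full node (e.g., by
+// advancing the retain height), so readers never observe partial writes.
+func storeBlock(ctx context.Context, db *sql.DB, height uint64, data []byte) error {
+	tx, err := db.BeginTx(ctx, nil)
+	if err != nil {
+		return err
+	}
+	// Rollback is a no-op if Commit has already succeeded.
+	defer tx.Rollback()
+
+	if _, err := tx.ExecContext(ctx,
+		`INSERT INTO comet.v1.block (height, data) VALUES ($1, $2)`,
+		height, data,
+	); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+```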
+
+#### [Database Schema](#database-schema)
+
+Implementing this solution comes with a challenge: designing a database schema that can facilitate returning responses
+equivalent to those of the existing CometBFT JSON-RPC endpoint. However, the schema should also be flexible enough to return
+customized responses in the future.
+
+Since the RPC Companion stores the information in a relational database, there are opportunities to better structure and
+normalize the data in the future. For example, here is the schema definition for a table to persist a `Block` data
+structure in the PostgreSQL database:
+
+```sql
+-- TABLE: comet.v1.block
+
+DROP TABLE IF EXISTS comet.v1.block CASCADE;
+
+CREATE TABLE comet.v1.block
+(
+    height comet.uint64 NOT NULL,
+    data bytea NOT NULL,
+    CONSTRAINT block_pkey PRIMARY KEY (height)
+);
+```
+
+It's important to add a version to the table (e.g. `v1`) to ensure schema changes can be supported.
+
+> Note that only the height is stored as structured data. More data fields can be normalized and stored in individual fields,
+but normalizing the CometBFT data structures into a database schema should only be performed if there's a need to
+do so (e.g. searching for information using the field as a query parameter).
+
+##### Data types
+
+This solution utilizes PostgreSQL's built-in data types to implement a data schema in the database. By using a relational
+database, it is possible to normalize the data structures, which can lead to significant storage savings. However,
+it's important to note that normalizing the schema too much can make it complex to retrieve a specific dataset due to
+the need for data joins. As a result, it's crucial to avoid over-normalizing the schema.
+
+Also, when normalizing, it is important to ensure that referential integrity is not violated, since violations can cause issues
+for the clients consuming the data.
+
+To ensure the database stores the full node's data types properly, the database implements
+custom data types (`domains` in PostgreSQL).
+
+For example, PostgreSQL doesn't have unsigned `uint8`, `uint32` or `uint64` data types, therefore in order to support
+these in the database, you can use a `domain`, which is a base type with additional constraints:
+
+```sql
+-- DOMAIN: comet.uint8
+
+DO $$ BEGIN
+CREATE DOMAIN comet.uint8 AS numeric;
+
+ALTER DOMAIN comet.uint8
+    ADD CONSTRAINT value_max CHECK (VALUE <= '255'::numeric);
+
+ALTER DOMAIN comet.uint8
+    ADD CONSTRAINT value_positive CHECK (VALUE >= 0::numeric);
+EXCEPTION
+    WHEN duplicate_object THEN null;
+END $$;
+
+-- DOMAIN: comet.uint32
+
+DO $$ BEGIN
+CREATE DOMAIN comet.uint32 AS numeric;
+
+ALTER DOMAIN comet.uint32
+    ADD CONSTRAINT value_max CHECK (VALUE <= '4294967295'::numeric);
+
+ALTER DOMAIN comet.uint32
+    ADD CONSTRAINT value_positive CHECK (VALUE >= 0::numeric);
+EXCEPTION
+    WHEN duplicate_object THEN null;
+END $$;
+
+-- DOMAIN: comet.uint64
+
+DO $$ BEGIN
+CREATE DOMAIN comet.uint64
+    AS numeric;
+
+ALTER DOMAIN comet.uint64 OWNER TO postgres;
+
+ALTER DOMAIN comet.uint64
+    ADD CONSTRAINT value_max CHECK (VALUE <= '18446744073709551615'::numeric);
+
+ALTER DOMAIN comet.uint64
+    ADD CONSTRAINT value_positive CHECK (VALUE >= 0::numeric);
+EXCEPTION
+    WHEN duplicate_object THEN null;
+END $$;
+```
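+
+As a quick illustration (these statements are not part of the schema), the domain constraints reject out-of-range values:
+
+```sql
+SELECT 255::comet.uint8;                      -- accepted
+SELECT 256::comet.uint8;                      -- fails the value_max constraint
+SELECT (-1)::comet.uint32;                    -- fails the value_positive constraint
+SELECT '18446744073709551615'::comet.uint64;  -- accepted (maximum uint64 value)
+```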
+
+##### Schema migration
+
+Another point to consider is when the data structures change across CometBFT releases.
+
+A potential solution for this scenario is to find a way in the database that can support "versioning" of data structures.
+For example, let's assume there's a `Block` structure, let's call it `v1`. If in the future there's a need to modify
+this structure in a way that is not compatible with the previous data structure, then the database would support a `v2` schema
+for `ResultBlock`, and an `index` table could determine the criteria for which data structure should be used when inserting
+or querying data.
+
+### [RPC server instance](#rpc-instance)
+
+The **RPC server instance** is a node that runs the RPC API process for the data companion. This server instance provides
+an RPC API (`/v1`) with the same JSON-RPC methods as the full node JSON-RPC endpoint. The RPC Companion service will expose
+the same JSON-RPC methods, accept the same request types, and return wire-compatible responses (matching the
+responses of the equivalent full node JSON-RPC endpoint).
+
+The **RPC server instance**, when serving a particular request, retrieves the required data from the database in order to
+fulfill the request. The data should be serialized in a way that makes it wire-compatible with the CometBFT JSON-RPC endpoint.
+
+It is possible to integrate a caching layer as part of the RPC server instance solution. Caching can be useful for queries
+that are idempotent, which means they produce the same result regardless of how many times they are executed. There are
+various caching solutions available, either off-the-shelf or custom-built, that can be added to the RPC server instance
+logic. However, implementing a caching solution is not within the scope of this ADR.
+
+The RPC service endpoints from the server instances should be exposed through an external load-balancer service such
+as Cloudflare or AWS ELB, or a server running its own load-balancing mechanism (e.g. nginx).
+
+The RPC clients should make requests to the **RPC Companion** server instances through this load balancer for scalability
+reasons.
+
+The **RPC server instance** will also implement logic to proxy requests to the full node. It should properly handle
+the proxied requests and responses from the full node.
+
+The **RPC Companion** endpoint should support the `https` protocol in order to provide secure endpoint access. It's recommended that
+`https` support be provided by the load balancer.
+
+## Consequences
+
+### Positive
+
+- The **RPC Companion** can be more scalable and consequently provide a higher query throughput.
+- Less backpressure on the full node that is running consensus.
+- Possibility for future additional custom endpoints (e.g. a `/v2`) with additional methods not available in the `/v1` endpoint.
+- Can act as a basis for users to create better and faster indexer solutions.
+- Possibility to turn off indexing on the full node if the data can be offloaded to an external data storage that supports
+indexing.
+
+### Negative
+
+- Additional infrastructure complexity to set up and maintain.
+- Additional infrastructure costs if using a load-balanced setup for the RPC service endpoint (multiple nodes) and a fail-over
+database setup (master/replica).
+
+### Neutral
+
+- Optional feature; users will only use it if their use case requires such a solution.
+- No privacy / security issues should arise since the data returned by the **RPC Companion** will be the same
+as the current RPC.
+
+## References
+
+- [ADR-101: Data Companions Pull API](adr-101-data-companion-pull-api.md)
+- [Data Companion Guide - CometBFT documentation](https://docs.cometbft.com/v1.0/explanation/data-companion/)
+- [RPC Companion - Reference Implementation](https://github.com/cometbft/rpc-companion)
+
+
diff --git a/docs/architecture/adr-103-proto-versioning.md b/docs/references/architecture/adr-103-proto-versioning.md
similarity index 100%
rename from docs/architecture/adr-103-proto-versioning.md
rename to docs/references/architecture/adr-103-proto-versioning.md
diff --git a/docs/architecture/adr-104-out-of-band-state-sync.md b/docs/references/architecture/adr-104-out-of-band-state-sync.md
similarity index 90%
rename from docs/architecture/adr-104-out-of-band-state-sync.md
rename to docs/references/architecture/adr-104-out-of-band-state-sync.md
index 88481e93a38..d8ef8c9502b 100644
--- a/docs/architecture/adr-104-out-of-band-state-sync.md
+++ b/docs/references/architecture/adr-104-out-of-band-state-sync.md
@@ -28,7 +28,7 @@ of its weaknesses.
 In fact, while downloading recent snapshots is very convenient for new nodes
 (clients of the protocol), providing snapshots to multiple peers (as servers
 of the protocol) is _bandwidth-consuming_, especially without a clear incentive for
-node operators to provide this service.
+node operators to provide this service.
 As a result, the number of nodes in production CometBFT networks offering the state
 sync service (i.e., servers offering snapshots) has been limited, which has
 rendered the service _fragile_ (from the client's point of view). In other words, it is very
@@ -48,22 +48,22 @@ sync and/or Consensus to catch up with the latest state of the network.

 The purpose of this ADR is to provide node operators with more flexibility in
 defining how or where state sync should look for application snapshots. More
 precisely, it enables state sync to support the bootstrap of nodes from
-application snapshots obtained _out-of-band_ by operators, available to the
+application snapshots obtained _out-of-band_ by operators, available to the
 node _locally_.

 Applications dump snapshots into an exportable format, which can be then obtained
-by the operators and placed on the syncing node. The node can then sync locally
+by the operators and placed on the syncing node. The node can then sync locally
 without transferring snapshots via the network.

 The goal is to provide an alternative to the mechanism currently adopted by
 state sync, discovering and fetching application snapshots from peers in the
 network, in order to address the above mentioned limitations, while preserving
-most of state sync's operation.
+most of state sync's operation.

 The ADR presents two solutions:

-1. The first solution is implemented by the application and was proposed and
-implemented by the Cosmos SDK (PR [#16067][sdk-pr2] and [#16061][sdk-pr1] ). This ADR describes the solution
-in a general way, proividing guidlines to non-SDK based applications if they wish
-to implement their own local state sync.
+1. The first solution is implemented by the application and was proposed and
+implemented by the Cosmos SDK (PR [#16067][sdk-pr2] and [#16061][sdk-pr1] ). This ADR describes the solution
+in a general way, providing guidelines to non-SDK based applications if they wish
+to implement their own local state sync.
 2. The second part of the ADR proposes a more general solution that uses ABCI to
 achieve the same behavior provided by the SDK's solution.
@@ -124,14 +124,14 @@ handled by state sync, [ADR 083][adr083]/[ADR 103][adr103] assumes that node
 operators are able to manually synchronize the application state from a
 running node (it might be necessary to stop it) to a not-yet-started fresh
 node.
-The main limitation of the approach in [ADR 083][adr083]/[ADR 103][adr103]
+The main limitation of the approach in [ADR 083][adr083]/[ADR 103][adr103]
 is that it relies on the ability of node operators to properly synchronize the
 application state between two nodes.
 While experienced node operators are likely able to perform this operation in
 a proper way, we have to consider a broader set of users and emphasize that it is
-an operation susceptible to errors. Operators need to know the format of the
+an operation susceptible to errors. Operators need to know the format of the
 application's stored state, export the state properly and guarantee consistency. That is,
-they have to make sure that nobody is trying to read the exported state while they are
+they have to make sure that nobody is trying to read the exported state while they are
 exporting it.
 Furthermore, it is an operation that is, by definition, application-specific:
 applications are free to manage their state as they see fit, and this includes
@@ -144,7 +144,7 @@ in order to adopt this solution.

 State sync should support the bootstrap of new nodes from application
 snapshots available locally. Implementing this option does not mean that networked State sync
-should be removed, but not both should be enabled at the same time.
+should be removed, but the two should not be enabled at the same time.

 In other words, the following operation should be, in general terms, possible:

@@ -162,22 +162,22 @@ In other words, the following operation should be, in general terms, possible:

 As Cosmos SDK implemented a solution for all applications using it,
 and we do not have non-SDK users in production requesting this feature, at the moment we will not implement the generally applicable
-solution.
+solution.

-This ADR will outline both the solution implemented by the SDK and a design proposal of a
+This ADR will outline both the solution implemented by the SDK and a design proposal of a
 generally applicable solution, which can later be picked up and implemented in CometBFT.

 ## Detailed Design

 ### Application-implemented local state sync

-This section describes the solution to local state sync implemented by the SDK. An application can
+This section describes the solution to local state sync implemented by the SDK. An application can
 choose to implement local state sync differently, or implement only a subset of the functionalities implemented by the SDK.

 This solution exposes a command line interface enabling a node to manipulate the snapshots, including
 dumping existing snapshots to an exportable format, loading, restoring and deleting exported snapshots,
-as well as a command to bootstrap the node by resetting CometBFT's state and block store.
+as well as a command to bootstrap the node by resetting CometBFT's state and block store.

 The SDK exposes the following commands for snapshot manipulation:

@@ -201,32 +201,32 @@ comet bootstrap-state Bootstrap the state of CometBFT's state and block
 ```

-These commands enable the implementation of both the client and server side of statesync.
+These commands enable the implementation of both the client and server side of statesync.
 Namely, a statesync server can use `dump` to create a portable archive format out of existing snapshots,
-or trigger snapshot creation using `export`.
+or trigger snapshot creation using `export`.

 The client side restores the application state from a local snapshot that was previously exported, using `restore`.
 Before `restore` can be called, the client has to `load` an archived snapshot into its local snapshot store.

 Upon successful completion of the previous sequence of commands, the state of CometBFT is bootstrapped
-using `bootstrap-state` and CometBFT can be launched.
+using `bootstrap-state` and CometBFT can be launched.

 There are three prerequisites for this solution to work when a node is syncing:

 1. The application has access to the snapshot store (usually as a separate database used by applications)
 2. CometBFT's state and block stores are empty or reset
-3. CometBFT is not running while the node is state syncing
+3. CometBFT is not running while the node is state syncing

 The server side of state sync (snapshot generation and dumping), can be performed while the node is running.
 The application has to be careful not to interfere with normal node operations, and to use a snapshot store
-and dumping mechanism that will mitigate the risk of requesting snapshots while they are being dumped to an archive format.
+and dumping mechanism that will mitigate the risk of requesting snapshots while they are being dumped to an archive format.

-In order to be able to dump or export the snapshots, the application must have access to the snapshot store.
+In order to be able to dump or export the snapshots, the application must have access to the snapshot store.
 We describe the main interface expected from the snapshot database and used by the above mentioned CLI commands
-for snapshot manipulation.
-The interface was derived from the SDK's implementation of local State sync.
+for snapshot manipulation.
+The interface was derived from the SDK's implementation of local State sync.

 ```golang

@@ -234,7 +234,7 @@ The interface was derived from the SDK's implementation of local State sync.
 func (s *Store) Delete(height uint64, format uint32) error

 // Retrieves a snapshot for a certain height
-func (s *Store) Get(height uint64, format uint32) (*Snapshot, error)
+func (s *Store) Get(height uint64, format uint32) (*Snapshot, error)

 // List recent snapshots, in reverse order (newest first)
 func (s *Store) List() ([]*Snapshot, error)

@@ -248,30 +248,30 @@ func (s *Store) Load(height uint64, format uint32) (*Snapshot, <-chan io.ReadClo
 func (s *Store) LoadChunk(height uint64, format, chunk uint32) (io.ReadCloser, error)

 // Save saves a snapshot to disk, returning it.
-func (s *Store) Save(height uint64, format uint32, chunks <-chan io.ReadCloser) (*Snapshot, error)
+func (s *Store) Save(height uint64, format uint32, chunks <-chan io.ReadCloser) (*Snapshot, error)

 // PathChunk generates a snapshot chunk path.
 func (s *Store) PathChunk(height uint64, format, chunk uint32) string
 ```

-In order to dump a snapshot, an application needs to retrieve all the chunks stored at a certain path.
+In order to dump a snapshot, an application needs to retrieve all the chunks stored at a certain path.
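+
+As an illustration, a dump along these lines could be sketched as follows. This is a hypothetical sketch built only on the `LoadChunk` signature above; the `Snapshot` fields used here (`Height`, `Format`, and a `Chunks` chunk count) and the archive layout are assumptions, not the SDK's actual implementation.
+
+```golang
+// Hypothetical sketch: dump a stored snapshot into a gzipped tar archive.
+func dumpSnapshot(s *Store, snapshot *Snapshot, out io.Writer) error {
+	gz := gzip.NewWriter(out)
+	defer gz.Close()
+	tw := tar.NewWriter(gz)
+	defer tw.Close()
+	// snapshot.Chunks (the total chunk count) is an assumed field.
+	for chunk := uint32(0); chunk < snapshot.Chunks; chunk++ {
+		rc, err := s.LoadChunk(snapshot.Height, snapshot.Format, chunk)
+		if err != nil {
+			return err
+		}
+		data, err := io.ReadAll(rc)
+		rc.Close()
+		if err != nil {
+			return err
+		}
+		// One archive entry per chunk, to be restored in order later.
+		hdr := &tar.Header{
+			Name: fmt.Sprintf("chunk-%d", chunk),
+			Mode: 0o644,
+			Size: int64(len(data)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := tw.Write(data); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```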

 #### CometBFT state bootstrap

 In addition to managing snapshots, it is necessary to bootstrap (set up) the state and block store of CometBFT before starting up the node.
-Upon a successful start, CometBFT performs block sync and consensus.
+Upon a successful start, CometBFT performs block sync and consensus.
 At the moment of writing this ADR, there is no command line in CometBFT that supports this, but an [issue][state-bootstrap] has been opened to address this.
-Until it has been resolved, the application developers have to, within their bootstraping command:
+Until it has been resolved, the application developers have to, within their bootstrapping command:

 - Create a state and block store
 - Launch a light client to obtain and verify the block header for the snapshot's height.
-- Use the light client's `State` to
+- Use the light client's `State` to
 verify that the `AppHash` on the retrieved block header matches the `AppHash` obtained via
-the ABCI `Info` call to the application (the same procedure is performed by the node on startup).
-- Use `Commit` to retrieve the last commit for the snapshot's height.
-- Save the retrieved values into the state and block stores.
+the ABCI `Info` call to the application (the same procedure is performed by the node on startup).
+- Use `Commit` to retrieve the last commit for the snapshot's height.
+- Save the retrieved values into the state and block stores.

 This code essentially mimics what CometBFT does as part of node setup, once state sync is complete.

@@ -281,44 +281,44 @@ This code essentially mimics what CometBFT does as part of node setup, once stat

 Given that snapshot manipulation is entirely application defined, and to avoid pulling this functionality
 into CometBFT, we propose a solution using ABCI that mimics the behaviour described in the previous section.

-On the client side, the main difference between local State sync done by the application and CometBFT is that the
+On the client side, the main difference between local State sync done by the application and CometBFT is that the
 application has to perform the sync offline, in order to properly set up CometBFT's initial state. Furthermore, the
-application developer has to manually bootstrap CometBFTs state and block stores.
+application developer has to manually bootstrap CometBFT's state and block stores.
 With support for local State sync, a node can simply load a snapshot from a predefined location and offer it to the application
-as is currently done via networked State sync.
+as is currently done via networked State sync.

 On the server side, without any support for local State sync, an operator has to manually instruct the application
-to export the snapshots into a portable format (via `dump`).
+to export the snapshots into a portable format (via `dump`).

-Having support for this within CometBFT, the app can automatically perform this export when taking snapshots.
+Having support for this within CometBFT, the app can automatically perform this export when taking snapshots.

 In order to support local State sync, the following changes to CometBFT are necessary:

 1. Adding new configuration options to the config file.
-2. Introduce a CLI command that can explicitly tell the application to create a snapshot export, in case
+operators decide not to generate periodical exports.
 3. Extract a snapshot from the exported format.
-4. Potentially alter existing ABCI calls to signal to the application that we want to create a snapshot export periodically.
-5. Allow reading a snaphsot from a compressed format into CometBFT and offer it to the application via
-the existing `OfferSnapshot` ABCI call.
+4. Potentially alter existing ABCI calls to signal to the application that we want to create a snapshot export periodically.
+5. Allow reading a snapshot from a compressed format into CometBFT and offer it to the application via
+the existing `OfferSnapshot` ABCI call.

 At a very high level, there are two possible solutions and we will present both:

 1. The application will export snapshots into an exportable format on the server side. When a node syncs up, CometBFT will
-send this as a blob of bytes to the application to uncompress and apply the snapshot.
+send this as a blob of bytes to the application to uncompress and apply the snapshot.
 2. CometBFT creates a snapshot using existing ABCI calls and exports it into a format of our choosing. CometBFT is then in charge of
-reading in and parsing the exported snapshot into a snapshot that can be offered to the application via the existing `OfferSnapshot`
+reading in and parsing the exported snapshot into a snapshot that can be offered to the application via the existing `OfferSnapshot`
 and `ApplyChunk` methods. This option does not alter any current APIs and is a good candidate for an initial implementation.
-
+

 #### Config file additions

 ```bash
 [statesync]
 # State syncing from a local snapshot
-local_sync=false
+local_sync=false
 # Path to snapshot, will be ignored if local_sync=false
 snapshot_load_path=""
 # Periodically dump snapshots into archive format (optional)
@@ -344,10 +344,10 @@ as snapshot generation, dumping and loading a local snapshot would be built into

 The `dump` command can be implemented in two ways:

-1. Rely on the existing ABCI functions `ListSnapshots` and `LoadChunks` to retrieve the snapshots and chunks from a peer.
+1. Rely on the existing ABCI functions `ListSnapshots` and `LoadChunks` to retrieve the snapshots and chunks from a peer.
 This approach requires no change to the current API and is easy to implement. Furthermore, CometBFT has complete control
-over the format of the exported snapshot. It does however involve more than one ABCI call and network data transfer.
-2. Extend `RequestListSnapshots` with a flag to indicate that we want an exportable snapshot format and extend `ResponseListSnapshot` to return a
+over the format of the exported snapshot. It does however involve more than one ABCI call and network data transfer.
+2. Extend `RequestListSnapshots` with a flag to indicate that we want an exportable snapshot format and extend `ResponseListSnapshot` to return a
 path and format of the exported snapshots.

 An improvement to the second option mentioned above would be to add path parameters to the command,
@@ -360,18 +360,18 @@ A third option is the introduction of a new ABCI call: `ExportSnapshots`, which

 #### *Automatic snapshot exporting*

 Applications generate snapshots with a regular cadence (fixed time intervals or heights).
 The application itself measures the time or number of heights passed since the last snapshot,
-and CometBFT has no role in instructing the application when to take snapshots.
+and CometBFT has no role in instructing the application when to take snapshots.

-The State sync reactor currently retrieves a list of snapshots from a peer, who obtains these snapshots from the local instance of the application using the ABCI `RequestListSnapshots` call.
+The State sync reactor currently retrieves a list of snapshots from a peer, who obtains these snapshots from the local instance of the application using the ABCI `RequestListSnapshots` call.
 Applications can thus themselves be in charge of dumping the snapshots into a given file format, the same way they generate snapshots.
- If `auto_snapshot_dump` is true,
+ If `auto_snapshot_dump` is true,
 CometBFT instructs the application to export the snapshots periodically.

-An alternative solution is that CometBFT, itself, using the implementation of the `dump` command, whichever
-is chosen at the time, creates or asks the application to create snapshot exports. This is not forcing the
+An alternative solution is that CometBFT, itself, using the implementation of the `dump` command, whichever
+is chosen at the time, creates or asks the application to create snapshot exports. This is not forcing the
 application to create a snapshot at the time of the request, but rather *dumps* existing snapshots into
-an exportable file format.
+an exportable file format.
+
+**Export file consistency**
+
@@ -385,17 +385,17 @@ If it is the application that exports the snapshots, it is something the applica

 #### Syncing a node using local snapshots

 On startup, if `local_sync` is set to `true`, CometBFT will look for an existing snapshot at the path
-given by the operator. If a snapshot is available, it will be loaded and state restored as if it came from a peer in the current implementation.
+given by the operator. If a snapshot is available, it will be loaded and state restored as if it came from a peer in the current implementation.

 Note that, if it is not CometBFT that created the snapshot export from the data retrieved via ABCI (a combination of `ListSnapshots` and
 `LoadChunks`), CometBFT might not be aware of how the snapshot was exported, and needs to ask the application to restore the snapshot.

-If a snapshot was created using option 1 (by CometBFT) from the previous section, or the export format is known to CometBFT (like `tar, gzip` etc.),
-CometBFT can extract the snapshot itself, and offer it to the application via `RequestOfferSnapshot` without any API changes.
+If a snapshot was created using option 1 (by CometBFT) from the previous section, or the export format is known to CometBFT (like `tar, gzip` etc.),
+CometBFT can extract the snapshot itself, and offer it to the application via `RequestOfferSnapshot` without any API changes.

-If CometBFT is **not** the one who created the exported file, we introduce a new ABCI call `UnpackSnapshot`
-to send the exported snapshot as a blob of bytes to the application, which uncompresses it, installs it
-and responds whether the snapshot is accepted (as in `OfferSnapshot`) and the chunks application has passed (as in `LoadChunk`).
+If CometBFT is **not** the one that created the exported file, we introduce a new ABCI call `UnpackSnapshot`
+to send the exported snapshot as a blob of bytes to the application, which uncompresses it, installs it,
+and responds whether the snapshot is accepted (as in `OfferSnapshot`) and whether the chunk application has passed (as in `LoadChunk`).

+* **Request**:
+
@@ -410,7 +410,7 @@ and responds whether the snapshot is accepted (as in `OfferSnapshot`) and the ch

   | Name | Type | Description | Field Number |
   |--------------------|-------------------------------------------------------- |----------------------------------------|--------------|
-  | result | [Result](../../spec/abci/abci%2B%2B_methods.md#result) | The result of the snapshot offer. | 1 |
+  | result | [Result](../../../spec/abci/abci++_methods.md#result) | The result of the snapshot offer. | 1 |
   | resultChunk | ResultC | The result of applying the chunks. | 2 |

 ```proto
@@ -423,12 +423,12 @@ and responds whether the snapshot is accepted (as in `OfferSnapshot`) and the ch
 }
 ```

-Unlike networked state sync, we do not re-fetch individual chunks, thus if the application of a chunk fails, then the application of the whole snapshot fails.
+Unlike networked state sync, we do not re-fetch individual chunks, thus if the application of a chunk fails, then the application of the whole snapshot fails.

 ## Consequences

 Adding the support for a node to sync up using a local snapshot can speed up the syncing process, especially as
-network based State sync has proven to be fragile.
+network based State sync has proven to be fragile.

 ### Positive

@@ -440,7 +440,7 @@ network based State sync has proven to be fragile.

 ### Negative

-- Implementing additional ABCI functions is API breaking and might not be backwards compatible.
+- Implementing additional ABCI functions is API breaking and might not be backwards compatible.

 ### Neutral

diff --git a/docs/architecture/adr-105-refactor-mempool-senders.md b/docs/references/architecture/adr-105-refactor-mempool-senders.md
similarity index 94%
rename from docs/architecture/adr-105-refactor-mempool-senders.md
rename to docs/references/architecture/adr-105-refactor-mempool-senders.md
index cbafa971cbc..2a9ad4a630b 100644
--- a/docs/architecture/adr-105-refactor-mempool-senders.md
+++ b/docs/references/architecture/adr-105-refactor-mempool-senders.md
@@ -5,10 +5,11 @@
 - 2023-07-19: Choose callbacks option and mark as accepted (@hvanz)
 - 2023-07-10: Add callback alternative (@hvanz)
 - 2023-06-26: Initial draft (@hvanz)
+- 2024-05-22: Reverted (@hvanz)

 ## Status

-Accepted
+Reverted

 ## Context

@@ -147,6 +148,11 @@ chances of concurrent accesses to the list of senders and it removes the
 transaction immediately, keeping the mempool and the list of senders better
 synchronized.

+Update: We have decided to revert this change as in some cases it may cause the transaction to be
+sent back to the sender. Storing the sender in the reactor after storing the transaction in `CList`
+introduces a race condition resulting in the transaction having no sender during a very small window of
+time.
+
 ## Consequences

 The refactoring proposed here does not affect how users and other peers
@@ -166,6 +172,8 @@ stored internally.

 - If chosen, adding a channel and a goroutine for communicating that a
   transaction was removed may increase the concurrency complexity.
+- Storing the sender separately from the transaction results in the transaction having no sender
+  during a brief period of time, allowing the transaction to be sent back to the sender.

 ### Neutral

diff --git a/docs/references/architecture/adr-106-grpc-api.md b/docs/references/architecture/adr-106-grpc-api.md
new file mode 100644
index 00000000000..68438f8bd92
--- /dev/null
+++ b/docs/references/architecture/adr-106-grpc-api.md
@@ -0,0 +1,240 @@
+# ADR 106: gRPC API
+
+## Changelog
+
+- 2024-03-27: Minor updates based on user feedback and ADR 101 implementation (@andynog)
+- 2023-07-04: Expand available endpoints based on user feedback (@thanethomson)
+- 2023-05-16: First draft (@thanethomson)
+
+## Status
+
+Accepted
+
+Tracking issue: [\#81]
+
+## Context
+
+There has been discussion over the years as to which type of RPC interface would
+be preferable for Tendermint Core, and now CometBFT, to offer to integrators.
+[ADR 057][adr-057] captures some pros and cons of continuing to support the
+JSON-RPC API versus implementing a gRPC API. Previously it was decided to remove
Previously it was decided to remove +the gRPC API from Tendermint Core (see [tendermint/tendermint\#7121] and +[tendermint/tendermint\#9683]). + +After discussion with users, and in considering the implementation of [ADR +101][adr-101] (the data companion pull API), a decision has been taken to +implement a gRPC API _in addition to_ the JSON-RPC API. + +Some services for this gRPC API have already been implemented as part of the [Data Companion Pull API implementation][adr-101-poc], +such as `Block`, `BlockResults` and `Version` services. Also the existing gRPC API (which only provides a +`BroadcastService` with a single method) was removed. These services will be available starting with the CometBFT`v1` release +(there was also a backport to an experimental `v0.38` release) + +It is also envisaged that once it is +feasible to provide the RPC service independently of the node itself (see [ADR +102][adr-102]), the JSON-RPC API on the node itself could eventually be +deprecated and removed. + +## Alternative Approaches + +The primary alternative approach involves continuing to only provide support +for, and potentially evolve, the JSON-RPC API. This API currently exposes many +data types in non-standard and rather complex ways, making it difficult to +implement clients. As per [ADR 075][adr-075], it also does not conform fully to +the JSON-RPC 2.0 specification, further increasing client implementation +complexity. + +## Decision + +Implement gRPC services corresponding to a minimal subset of the currently +exposed [JSON-RPC endpoints][rpc-docs]. This set of services can always be +expanded over time according to user needs, but once released it is hard to +deprecate and remove such APIs. + +## Detailed Design + +### Services + +The initial services to be exposed via gRPC are informed by [Penumbra's +`TendermintProxyService`][penumbra-proxy-svc], as well as the needs of the data +companion API proposed in [ADR 101][adr-101]. Additional services can be rolled +out additively in subsequent releases of CometBFT. + +Services are roughly organized by way of their respective domain. The details of +each service, e.g. request/response types and precise Protobuf definitions, will +be worked out in the implementation. + +- `VersionService` - A simple service that aims to be quite stable over time in + order to be utilized by clients to establish the version of the software with + which they are interacting (e.g. to pre-emptively determine compatibility). + This could technically be part of the `NodeService`, but if the `NodeService` + interface were to be modified, a new version of the service would need to be + created, and all old versions would need to be maintained, since the + `GetVersion` method needs to be quite stable. + - `GetVersion` - Query the version of the software and protocols employed by + the node (e.g. CometBFT, ABCI, block, P2P and application version). +- `NodeService` - Provides information about the node providing the gRPC + interface. + - `GetStatus` - Query the current node status, including node info, public + key, latest block hash, app hash, block height and time. + - `GetHealth` - Lightweight mechanism to query the health of the node. +- `TransactionService` - Facilitates broadcasting and querying of transactions. + - `BroadcastAsync` - Broadcast a transaction asynchronously. Does not wait for + the transaction to be validated via `CheckTx`, nor does it wait for the + transaction to be committed. 
+ - `BroadcastSync` - Broadcast a transaction, but only return once `CheckTx` + has been called on the transaction. Does not wait for the transaction to be + committed. + - `GetByHash` - Fetch a transaction by way of its hash. + - `Search` - Search for transactions with their results. +- `ApplicationService` - Provides a proxy interface through which to access the + application being run by the node (via ABCI). + - `Query` - Submit a query directly to the application via ABCI. +- `BlockService` - Provides information about blocks. + - `GetLatestHeight` - Return a stream of latest block heights as new blocks + are committed to the blockchain. + - `GetByHeight` - Fetch the block associated with a particular height. + - `GetHeaderByHeight` - Fetch the header associated with the block at a + particular height. + - `Search` - Search for blocks by way of block events. +- `BlockResultsService` - Provides information about block execution results. + - `GetBlockResults` - Fetch the block results associated with a particular height. +- `ConsensusService` - Provides information about consensus. + - `GetParams` - Fetch the consensus parameters for a particular height. +- `NetworkService` - Provides information about the blockchain network. + - `GetGenesis` - Fetch paginated genesis data. + - `GetPeers` - Fetch information about the peers to which the node is + connected. + +### Service Versioning + +Every service will be versioned, for example: + +- `VersionService` will have its Protobuf definition under + `cometbft.services.version.v1` +- `NodeService` will have its Protobuf definition under `cometbft.services.node.v1` +- `TransactionService` will have its Protobuf definition under + `cometbft.services.transaction.v1` +- etc. + +The general approach to versioning our Protobuf definitions is captured in [ADR +103][adr-103]. + +### Go API + +#### Server + +The following Go API is proposed for constructing the gRPC server to allow for +ease of construction within the node, and configurability for users who have +forked CometBFT. + +```go +package server + +// Option is any function that allows for configuration of the gRPC server +// during its creation. +type Option func(*serverBuilder) + +// WithVersionService enables the version service on the CometBFT gRPC server. +// +// (Similar methods should be provided for every other service that can be +// exposed via the gRPC interface) +func WithVersionService() Option { + // ... +} + +// WithGRPCOption allows one to specify Google gRPC server options during the +// construction of the CometBFT gRPC server. +func WithGRPCOption(opt grpc.ServerOption) Option { + // ... +} + +// Serve constructs and runs a CometBFT gRPC server using the given listener and +// options. +func Serve(listener net.Listener, opts ...Option) error { + // ... +} +``` + +#### Client + +For convenience, a Go client API should be provided for use within the E2E +testing framework. + +```go +package client + +type Option func(*clientBuilder) + +// Client defines the full client interface for interacting with a CometBFT node +// via its gRPC. +type Client interface { + ApplicationServiceClient + BlockResultsServiceClient + BlockServiceClient + NodeServiceClient + TransactionServiceClient + VersionServiceClient + + // Close the connection to the server. Any subsequent requests will fail. + Close() error +} + +// WithInsecure disables transport security for the underlying client +// connection. 
+//
+// A shortcut for using grpc.WithTransportCredentials and
+// insecure.NewCredentials from google.golang.org/grpc.
+func WithInsecure() Option {
+	// ...
+}
+
+// WithGRPCDialOption allows passing lower-level gRPC dial options through to
+// the gRPC dialer when creating the client.
+func WithGRPCDialOption(opt ggrpc.DialOption) Option {
+	// ...
+}
+
+// New constructs a client for interacting with a CometBFT node via gRPC.
+//
+// Makes no assumptions about whether or not to use TLS to connect to the given
+// address. To connect to a gRPC server without using TLS, use the WithInsecure
+// option.
+//
+// To connect to a gRPC server with TLS, use the WithGRPCDialOption option with
+// the appropriate gRPC credentials configuration. See
+// https://pkg.go.dev/google.golang.org/grpc#WithTransportCredentials
+func New(ctx context.Context, addr string, opts ...Option) (Client, error) {
+	// ...
+}
+```
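+
+A hypothetical usage sketch of this client API (the address is illustrative; only `New`, `WithInsecure`, and `Close` from the interface above are assumed):
+
+```go
+ctx := context.Background()
+// Connect without TLS; for TLS, pass credentials via WithGRPCDialOption.
+c, err := client.New(ctx, "127.0.0.1:26670", client.WithInsecure())
+if err != nil {
+	log.Fatal(err)
+}
+defer c.Close()
+```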
+
+## Consequences
+
+### Positive
+
+- Protocol buffers provide a relatively simple, standard way of defining RPC
+  interfaces across languages.
+- gRPC service definitions can be versioned and published via the [Buf Schema
+  Registry][bsr] (BSR) for easy consumption by integrators.
+
+### Negative
+
+- Only programming languages with reasonable gRPC support will be able to
+  integrate with the gRPC API (although most major languages do have such
+  support).
+- Increases the complexity of maintaining multiple APIs (gRPC and JSON-RPC) in the short term (until the JSON-RPC API is definitively extracted and moved outside the node).
+
+[\#81]: https://github.com/cometbft/cometbft/issues/81
+[\#94]: https://github.com/cometbft/cometbft/issues/94
+[adr-057]: ./tendermint-core/adr-057-RPC.md
+[tendermint/tendermint\#7121]: https://github.com/tendermint/tendermint/pull/7121
+[tendermint/tendermint\#9683]: https://github.com/tendermint/tendermint/pull/9683
+[adr-101]: https://github.com/cometbft/cometbft/pull/82
+[adr-101-poc]: https://github.com/cometbft/cometbft/issues/816
+[adr-102]: adr-102-rpc-companion.md
+[adr-103]: ./adr-103-proto-versioning.md
+[adr-075]: ./tendermint-core/adr-075-rpc-subscription.md
+[rpc-docs]: https://docs.cometbft.com/v0.37/rpc/
+[penumbra-proxy-svc]: https://buf.build/penumbra-zone/penumbra/docs/main:penumbra.util.tendermint_proxy.v1
+[bsr]: https://buf.build/explore
diff --git a/docs/architecture/adr-107-betaize-proto-versions.md b/docs/references/architecture/adr-107-betaize-proto-versions.md
similarity index 100%
rename from docs/architecture/adr-107-betaize-proto-versions.md
rename to docs/references/architecture/adr-107-betaize-proto-versions.md
diff --git a/docs/references/architecture/adr-108-e2e-abci++.md b/docs/references/architecture/adr-108-e2e-abci++.md
new file mode 100644
index 00000000000..c72b5aff0c9
--- /dev/null
+++ b/docs/references/architecture/adr-108-e2e-abci++.md
@@ -0,0 +1,337 @@
+# ADR 108: E2E tests for CometBFT's behaviour with respect to ABCI 2.0.
+
+## Changelog
+- 2023-08-08: Initial version (@nenadmilosevic95)
+- 2023-12-15: Updated to account for grammar changes (@nenadmilosevic95)
+- 2024-07-03: Updated to support vote extensions (@nenadmilosevic95)
+
+
+## Context
+
+ABCI 2.0 defines the interface between the application and CometBFT. A part of the specification is the [ABCI 2.0 grammar](../../../spec/abci/abci%2B%2B_comet_expected_behavior) that describes the sequences of calls that the application can expect from CometBFT.
+In order to demonstrate that CometBFT behaves as expected from the viewpoint of the application, we need to test whether CometBFT respects this ABCI 2.0 grammar. To do this, we need to enhance the e2e test infrastructure. Specifically, we plan to do three things:
+- Log every ABCI 2.0 request made by CometBFT during the execution.
+- Parse the logs post-mortem and extract all ABCI 2.0 requests.
+- Check if the set of observed requests respects the ABCI 2.0 grammar.
+
+
+Issue: [353](https://github.com/cometbft/cometbft/issues/353).
+
+
+
+## Decision
+
+### 1) ABCI 2.0 requests logging
+The idea was to do this on the Application side. Every time the Application
+receives a request, it logs it.
+
+**Implementation**
+
+The rationale behind this part of the implementation was to log the request concisely and use the existing structures as much as possible.
+
+Whenever an ABCI 2.0 request is made, the application will create an `abci.Request` (`abci` stands for `"github.com/cometbft/cometbft/abci/types"`) and log it. An example is below.
+
+```go
+func (app *Application) InitChain(_ context.Context, req *abci.InitChainRequest) (*abci.InitChainResponse, error) {
+	r := &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.InitChainRequest{}}}
+	err := app.logABCIRequest(r)
+	if err != nil {
+		return nil, err
+	}
+
+	...
+}
+```
+Notice here that we create an empty `abci.InitChainRequest` object even though we could also use the one passed to the `InitChain` function. The reason behind this is that, at the moment, we do not need specific fields of the request; we just need to be able to extract the information about the request type. For this, an empty object of a particular type is enough.
+
+The `app.logABCIRequest(r)` function is a new function implemented in the same file (`test/e2e/app/app.go`). If the `ABCIRequestsLoggingEnabled` flag is set to `true` (which happens automatically when ABCI 2.0 tests are enabled), it logs received requests. The full implementation is the following:
+
+```go
+func (app *Application) logABCIRequest(req *abci.Request) error {
+	if !app.cfg.ABCIRequestsLoggingEnabled {
+		return nil
+	}
+	s, err := GetABCIRequestString(req)
+	if err != nil {
+		return err
+	}
+	app.logger.Info(s)
+	return nil
+}
+```
+
+`GetABCIRequestString(req)` is a new method that receives a request and returns its string representation. The implementation and tests for this function and the inverse function `GetABCIRequestFromString(req)`,
+which returns an `abci.Request` from its string representation, are provided in the files `test/e2e/app/log_abci.go` and `test/e2e/app/log_abci_test.go`, respectively. To create a string representation of a request, we first marshal the request via the `proto.Marshal()` method and then convert the resulting bytes to a string using the `base64.StdEncoding.EncodeToString()` method. In addition, we surround the new string with `abci-req` constants so that we can find lines with ABCI 2.0 requests more easily. The code of the method is below:
+
+```go
+func GetABCIRequestString(req *abci.Request) (string, error) {
+	b, err := proto.Marshal(req)
+	if err != nil {
+		return "", err
+	}
+	reqStr := base64.StdEncoding.EncodeToString(b)
+	return AbciReq + reqStr + AbciReq, nil
+}
+```
+
+*Note:* At the moment, we are not compressing the marshalled request before converting it to a `base64` string because we are logging empty requests, which take at most 24 bytes. However, if we decide to log the actual requests in the future, we might want to compress them. Based on a few tests, we observed that the size of a request can go up to 7KB.
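+
+For reference, here is a minimal sketch of what the inverse function could look like; the actual implementation lives in `test/e2e/app/log_abci.go`, so treat this as an illustration of the decoding steps described above (trim the `abci-req` markers, `base64`-decode, `proto.Unmarshal`) rather than the exact code:
+
+```go
+// Sketch of the inverse of GetABCIRequestString (illustrative only).
+func GetABCIRequestFromString(s string) (*abci.Request, error) {
+	// Strip the surrounding abci-req markers added during logging.
+	s = strings.TrimPrefix(s, AbciReq)
+	s = strings.TrimSuffix(s, AbciReq)
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return nil, err
+	}
+	req := &abci.Request{}
+	if err := proto.Unmarshal(b, req); err != nil {
+		return nil, err
+	}
+	return req, nil
+}
+```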
+
+If in the future we want to log another ABCI 2.0 request type, we just need to do the same thing:
+create a corresponding `abci.Request` and log it via
+`app.logABCIRequest(r)`.
+
+### 2) Parsing the logs
+We need code that takes the logs from all nodes and collects the ABCI 2.0 requests that were logged by the application.
+
+**Implementation**
+
+This logic is implemented inside the `fetchABCIRequests(t *testing.T, nodeName string)` function that resides in the `test/e2e/tests/e2e_test.go` file. This function does three things:
+- Takes the output of a specific node in the testnet from the moment we launched the testnet until the function is called. The node name is passed as a function parameter. It uses the `docker-compose logs` command.
+- Parses the logs line by line and extracts the `abci.Request`, if one exists. The request is extracted by forwarding each line to the `app.GetABCIRequestFromString(req)` method.
+- Returns an array of slices where each slice contains the set of `abci.Request`s logged on that node. Every time a crash happens, a new array element (a new slice `[]*abci.Request`) will be created. We know a crash has happened because we log "Application started" every time the application starts. Specifically, we added this log inside the `NewApplication()` function in the `test/e2e/app/app.go` file. In the end, `fetchABCIRequests()` will return just one slice if the node did not experience any crashes and $n+1$ slices if there were $n$ crashes. The benefit of logging the requests in the previously described way is that we can now use `[]*abci.Request` to store ABCI 2.0 requests of any type.
+
+
+
+### 3) ABCI 2.0 grammar checker
+The idea here was to find a library that automatically verifies whether a specific execution respects the prescribed grammar.
+
+**Implementation**
+
+We found the following library: https://github.com/goccmack/gogll. It generates a GLL or LR(1) parser and an FSA-based lexer for any context-free grammar. What we needed to do was to rewrite the [ABCI 2.0 grammar](../../../spec/abci/abci++_comet_expected_behavior.md#valid-method-call-sequences)
+using the syntax that the library understands.
+The new grammar is below and can be found in the `test/e2e/pkg/grammar/abci_grammar.md` file.
+
+```abnf
+
+Start : CleanStart | Recovery;
+
+CleanStart : InitChain ConsensusExec | StateSync ConsensusExec ;
+StateSync : StateSyncAttempts SuccessSync | SuccessSync ;
+StateSyncAttempts : StateSyncAttempt | StateSyncAttempt StateSyncAttempts ;
+StateSyncAttempt : OfferSnapshot ApplyChunks | OfferSnapshot ;
+SuccessSync : OfferSnapshot ApplyChunks ;
+ApplyChunks : ApplyChunk | ApplyChunk ApplyChunks ;
+
+Recovery : InitChain ConsensusExec | ConsensusExec ;
+
+ConsensusExec : ConsensusHeights ;
+ConsensusHeights : ConsensusHeight | ConsensusHeight ConsensusHeights ;
+ConsensusHeight : ConsensusRounds FinalizeBlock Commit | FinalizeBlock Commit ;
+ConsensusRounds : ConsensusRound | ConsensusRound ConsensusRounds ;
+ConsensusRound : Proposer | NonProposer ;
+
+Proposer : GotVotes | ProposerSimple | Extend | GotVotes ProposerSimple | GotVotes Extend | ProposerSimple Extend | GotVotes ProposerSimple Extend ;
+ProposerSimple : PrepareProposal | PrepareProposal ProcessProposal ;
+NonProposer: GotVotes | ProcessProposal | Extend | GotVotes ProcessProposal | GotVotes Extend | ProcessProposal Extend | GotVotes ProcessProposal Extend ;
+Extend : ExtendVote | GotVotes ExtendVote | ExtendVote GotVotes | GotVotes ExtendVote GotVotes ;
+GotVotes : GotVote | GotVote GotVotes ;
+
+InitChain : "init_chain" ;
+FinalizeBlock : "finalize_block" ;
+Commit : "commit" ;
+OfferSnapshot : "offer_snapshot" ;
+ApplyChunk : "apply_snapshot_chunk" ;
+PrepareProposal : "prepare_proposal" ;
+ProcessProposal : "process_proposal" ;
+ExtendVote : "extend_vote" ;
+GotVote : "verify_vote_extension" ;
+
+```
+
+If you compare this grammar with the original one, you will notice that
+`Info` is removed. The reason is that, as explained in the section [CometBFT's expected behaviour](../../../spec/abci/abci++_comet_expected_behavior.md#valid-method-call-sequences), one of the
+purposes of the `Info` method is to be part of the RPC handling from an external
+client. Since this can happen at any time, it complicates the
+grammar.
+This is not true in other cases, but since the Application does
+not know why `Info` is called, we removed
+it entirely from the new grammar. The Application still logs the `Info`
+call, but a specific test would need to be written to check whether it happens
+at the right moment.
+
+Moreover, it is worth noticing that the `(inf)` part of the grammar is replaced with `*`. This results in the new grammar being finite, compared to the original, which represents an infinite (omega) grammar.
+
+The `gogll` library receives the file with the grammar as input, and it generates the corresponding parser and lexer. The actual commands are integrated into the `test/e2e/Makefile` and executed when `make grammar-gen` is invoked.
+The resulting code is stored inside the `test/e2e/pkg/grammar/grammar-auto` directory.
+
+Apart from this auto-generated code, we implemented a `Checker` abstraction
+that knows how to use the generated parser and lexer to verify whether a
+specific execution (the list of ABCI 2.0 calls logged by the Application while the
+testnet was running) respects the ABCI 2.0 grammar. The implementation and tests
+for it are inside `test/e2e/pkg/grammar/checker.go` and
+`test/e2e/pkg/grammar/checker_test.go`, respectively.
+
+How the `Checker` works is demonstrated by the test `TestCheckABCIGrammar`
+implemented in the `test/e2e/tests/abci_test.go` file.
+
+```go
+func TestCheckABCIGrammar(t *testing.T) {
+	checker := grammar.NewGrammarChecker(grammar.DefaultConfig())
+	testNode(t, func(t *testing.T, node e2e.Node) {
+		t.Helper()
+		if !node.Testnet.ABCITestsEnabled {
+			return
+		}
+		executions, err := fetchABCIRequests(t, node.Name)
+		require.NoError(t, err)
+		for i, e := range executions {
+			isCleanStart := i == 0
+			_, err := checker.Verify(e, isCleanStart)
+			require.NoError(t, err)
+		}
+	})
+}
+
+```
+
+Specifically, the test first creates a `Checker` object. Then, for each node in the testnet, it collects all requests
+logged by this node. Remember here that `fetchABCIRequests()` returns an array of slices (`[]*abci.Request`) where the slice
+with index 0 corresponds to the node's `CleanStart` execution, and each additional slice corresponds to the `Recovery`
+execution after a specific crash. Each node must have one `CleanStart` execution and the same number of `Recovery` executions
+as the number of crashes that happened on this node. If collecting was successful, the test checks whether each execution
+respects the ABCI 2.0
+grammar by calling the `checker.Verify()` method. If `Verify` returns an error, the specific execution does not respect the
+grammar, and the test will fail.
+
+The tests are executed only if `ABCITestsEnabled` is set to `true`. This is done through the manifest file. Namely, if we
+want to test whether CometBFT respects the ABCI 2.0 grammar, we need to enable these tests by adding `abci_tests_enabled = true` in the manifest file of a particular testnet (e.g. `networks/ci.toml`). This will automatically activate logging on the
+application side.
+
+The `Verify()` method is shown below.
+```go
+func (g *Checker) Verify(reqs []*abci.Request, isCleanStart bool) (bool, error) {
+	if len(reqs) == 0 {
+		return false, errors.New("execution with no ABCI calls")
+	}
+	fullExecution := g.getExecutionString(reqs)
+	r := g.filterRequests(reqs)
+	// Check if the execution is incomplete.
+	if len(r) == 0 {
+		return true, nil
+	}
+	execution := g.getExecutionString(r)
+	errors := g.verify(execution, isCleanStart)
+	if errors == nil {
+		return true, nil
+	}
+	return false, fmt.Errorf("%v\nFull execution:\n%v", g.combineErrors(errors, g.cfg.NumberOfErrorsToShow), g.addHeightNumbersToTheExecution(fullExecution))
+}
+```
+
+It receives a set of ABCI 2.0 requests and a flag saying whether they represent a `CleanStart` execution or not, and does the following things:
+- Checks if the execution is an empty execution.
+- Filters the requests by calling the method `filterRequests()`. This method will remove all the requests from the set that are not supported by the current version of the grammar. In addition, it will filter the last height by removing all ABCI 2.0 requests after the
+last `Commit`. The function `fetchABCIRequests()` can be called in the middle of a height. As a result, the last height may be incomplete and
+classified as invalid, even if that is not actually the case. A simple example: the last
+request fetched via `fetchABCIRequests()` is `FinalizeBlock`; however, `Commit` happens after
+`fetchABCIRequests()` was invoked. Consequently, the execution
+would be considered faulty because `Commit` is missing, even though it
+will happen later. This is why, if the execution consists of only one incomplete height and the function `filterRequests()` returns an empty set of requests, the `Verify()` method considers this execution valid and returns `true`.
+- Generates an execution string by replacing each `abci.Request` with the
+corresponding terminal from the grammar. This logic is implemented in
+the `getExecutionString()` function. This function receives a list of `abci.Request` and generates a string where every request
+is replaced with a corresponding terminal. For example, a request `r` of type `abci.Request_PrepareProposal` is replaced with the string `prepare_proposal`, the first part of `r`'s string representation.
+- Checks if the resulting string of terminals respects the grammar by calling the
+`verify()` function.
+- Returns true if the execution is valid, and an error if that's not the case. An example of an error is below.
+
+```
+FAIL: TestCheckABCIGrammar/full02 (8.76s)
+    abci_test.go:24: ABCI grammar verification failed: The error: "Invalid execution: parser was expecting one of [init_chain], got [offer_snapshot] instead." has occurred at height 0.
+
+    Full execution:
+        0: offer_snapshot apply_snapshot_chunk finalize_block commit
+        1: finalize_block commit
+        2: finalize_block commit
+        3: finalize_block commit
+        ...
+```
+The error shown above reports an invalid execution. Moreover, it says why it is considered invalid (`init_chain` was missing) and the height of the error. Notice here that the height in the case of a `CleanStart` execution corresponds to the actual consensus height, while for a `Recovery` execution, height 0 represents the first height after the crash. Lastly, after the error, the full execution, one height per line, is printed. This part could be made optional and handled with a config flag, but we left it like this for now.
+
+*Note:* The `gogll` parser can return many errors because it returns an error at every point at which the parser fails to parse
+a grammar production. Usually, the error of interest is the one that has
+parsed the largest number of tokens. This is why, by default, we print only the last error; however, this can be configured with the `NumberOfErrorsToShow` field of the `Checker`'s config.
+
+Lastly, we present the `verify()` function, since this function is the heart of this code.
+
+```go
+func (g *Checker) verify(execution string, isCleanStart bool) []*Error {
+	errors := make([]*Error, 0)
+	lexer := lexer.New([]rune(execution))
+	bsrForest, errs := parser.Parse(lexer)
+	for _, err := range errs {
+		exp := []string{}
+		for _, ex := range err.Expected {
+			exp = append(exp, ex)
+		}
+		expectedTokens := strings.Join(exp, ",")
+		unexpectedToken := err.Token.TypeID()
+		e := &Error{
+			description: fmt.Sprintf("Invalid execution: parser was expecting one of [%v], got [%v] instead.", expectedTokens, unexpectedToken),
+			height:      err.Line - 1,
+		}
+		errors = append(errors, e)
+	}
+	if len(errors) != 0 {
+		return errors
+	}
+	eType := symbols.NT_Recovery
+	if isCleanStart {
+		eType = symbols.NT_CleanStart
+	}
+	roots := bsrForest.GetRoots()
+	for _, r := range roots {
+		for _, s := range r.Label.Slot().Symbols {
+			if s == eType {
+				return nil
+			}
+		}
+	}
+	e := &Error{
+		description: "The execution is not of valid type.",
+		height:      0,
+	}
+	errors = append(errors, e)
+	return errors
+}
+
+```
+
+This function first checks whether the specific execution is valid with respect to the ABCI grammar. For this, it uses
+the auto-generated parser and lexer. If the execution passes this initial test, it checks whether the execution is of a valid type (`CleanStart` or `Recovery`). Namely, it checks whether the execution is of the type specified with the function's second parameter (`isCleanStart`).
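+
+As a hypothetical illustration of what the parser consumes: a crash-free execution of two heights, with no additional consensus rounds, vote extensions, or state sync, would be translated into the terminal string below (one height per line, in the same format as the error output above), which the parser accepts via the `CleanStart` production:
+
+```
+init_chain finalize_block commit
+finalize_block commit
+```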
+
+**Changing the grammar**
+
+Any modification to the grammar (`test/e2e/pkg/grammar/abci_grammar.md`) requires generating a new parser and lexer. This is done by
+going to the `test/e2e/` directory and running:
+
+```bash
+make grammar-gen
+```
+
+Make sure you commit any changes to the auto-generated code together with the changes to the grammar.
+
+### Supporting additional ABCI requests
+
+Supporting additional ABCI requests in the future requires the following steps:
+
+- The application needs to log the new request in the same way existing requests are logged.
+- We need to add the new request to the grammar and generate a new parser and lexer.
+- We need to add the new request to the list of supported requests by modifying the function `isSupportedByGrammar()` in `test/e2e/pkg/grammar/checker.go` to return `true` for the new request type.
+
+## Status
+
+Implemented.
+
+To-do list:
+- adding a CI workflow that checks whether `make grammar-gen` has been executed;
+- in the future, we might consider whether the logging (actually, tracing) should be done on the e2e application side or on the CometBFT side, so this infrastructure can be reused for MBT-like activities.
+
+## Consequences
+
+### Positive
+
+- We should be able to check whether CometBFT respects the ABCI 2.0 grammar.
+
+### Negative
+
+### Neutral
+
diff --git a/docs/references/architecture/adr-109-reduce-go-api-surface.md b/docs/references/architecture/adr-109-reduce-go-api-surface.md
new file mode 100644
index 00000000000..52930c5e7f2
--- /dev/null
+++ b/docs/references/architecture/adr-109-reduce-go-api-surface.md
@@ -0,0 +1,211 @@
+# ADR 109: Reduce CometBFT Go API Surface Area
+
+## Changelog
+
+- 2023-10-09: First draft (@thanethomson)
+
+## Status
+
+Accepted ([\#1484])
+
+## Context
+
+At present, the CometBFT codebase is somewhat monolithic, resulting in a very
+large Go API surface area. This makes changing the Go APIs much more difficult,
+since even trivial breaking changes in non-critical packages require a major
+version bump. This ultimately results in much slower uptake of CometBFT
+releases and has produced substantial stagnation in the codebase.
+
+In order to mitigate this, several changes are proposed:
+
+1. From when CometBFT v1.0 is released, major version bumps are only made when
+   state-breaking changes are released. Minor version bumps can result in Go
+   API-breaking changes (after a deprecation warning for a reasonable period of
+   time, as is customary for the Go standard library). Patch version bumps would
+   guarantee no breaking changes.
+2. Internalize a number of packages that do not need to be externally accessible,
+   along lines similar to those proposed in [ADR 060]. This involves moving these
+   packages under the `/internal/` path in the repository, making those packages
+   only accessible to the CometBFT codebase.
+
+## Alternative Approaches
+
+The following alternative approaches were considered.
+
+1. Do nothing. This approach will keep the status quo along with its related
+   problems.
+2. Implement only some of the proposed changes. This will result in less
+   flexibility than implementing all of them.
+3. Implement [ADR 060] as-is. The context in which ADR 060 was written, however,
+   has changed, so certain changes need to be made to accommodate the new
+   context.
+
+## Decision
+
+Implement all of the proposed changes, using [ADR 060] as input, but updating
+its recommendations based on the current context. 
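+
+For background, this hiding is enforced by the Go toolchain itself: a package under an
+`internal/` path can only be imported by code rooted at the parent of the `internal/`
+directory. A brief illustration, using `blocksync` as a hypothetical example of an
+internalized package:
+
+```go
+// Hypothetical file inside the CometBFT module: the internal import compiles.
+package node
+
+import (
+	// Importable here because this file lives inside github.com/cometbft/cometbft.
+	_ "github.com/cometbft/cometbft/internal/blocksync"
+)
+
+// Any *external* module attempting the same import fails at build time with:
+//
+//	use of internal package github.com/cometbft/cometbft/internal/blocksync not allowed
+```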
+ +## Detailed Design + +### Versioning + +The Go API stability guarantees provided by the new versioning policy must be +explicitly added to the documentation. + +### Package Internalization + +In order to move certain packages into the `internal` folder, effectively hiding +them from public use, the current package usage by some of the primary CometBFT +users should be considered. This ADR considers the [Cosmos SDK], [IBC Go] and +the [Cosmos Hub]. + +#### Cosmos SDK Imports + +Since the [Cosmos SDK] is one of the primary users of CometBFT, it would make +sense to expose the minimal surface area needed by the Cosmos SDK in CometBFT +v1. Exposing internalized packages at a later stage constitutes a non-breaking +change, whereas internalizing packages later is breaking. + +At the time of this writing, on the `main` branch, the Cosmos SDK imports the +following packages from the CometBFT repository for use at compile/run time and +during testing: + +```bash +> go list -json ./... | jq '.Imports, .TestImports, .XTestImports' | grep cometbft | sort | uniq | tr -d '", ' +github.com/cometbft/cometbft/abci/server +github.com/cometbft/cometbft/abci/types +github.com/cometbft/cometbft/abci/types +github.com/cometbft/cometbft/cmd/cometbft/commands +github.com/cometbft/cometbft/config +github.com/cometbft/cometbft/crypto +github.com/cometbft/cometbft/crypto/ed25519 +github.com/cometbft/cometbft/crypto/encoding +github.com/cometbft/cometbft/crypto/secp256k1 +github.com/cometbft/cometbft/crypto/sr25519 +github.com/cometbft/cometbft/crypto/tmhash +github.com/cometbft/cometbft/libs/bytes +github.com/cometbft/cometbft/libs/cli +github.com/cometbft/cometbft/libs/json +github.com/cometbft/cometbft/libs/log +github.com/cometbft/cometbft/mempool +github.com/cometbft/cometbft/node +github.com/cometbft/cometbft/p2p +github.com/cometbft/cometbft/privval +github.com/cometbft/cometbft/proto/tendermint/crypto +github.com/cometbft/cometbft/proto/tendermint/p2p +github.com/cometbft/cometbft/proto/tendermint/types +github.com/cometbft/cometbft/proto/tendermint/types +github.com/cometbft/cometbft/proto/tendermint/version +github.com/cometbft/cometbft/proxy +github.com/cometbft/cometbft/rpc/client +github.com/cometbft/cometbft/rpc/client/http +github.com/cometbft/cometbft/rpc/client/local +github.com/cometbft/cometbft/rpc/client/mock +github.com/cometbft/cometbft/rpc/core/types +github.com/cometbft/cometbft/rpc/jsonrpc/server +github.com/cometbft/cometbft/types +github.com/cometbft/cometbft/types/time +github.com/cometbft/cometbft/version +``` + +#### Packages used by IBC Go + +[IBC Go] on its `main` branch imports the following packages from CometBFT, +while using CometBFT v0.38.x: + +```bash +> go list -json ./... 
| jq '.Imports, .TestImports, .XTestImports' | grep cometbft | sort | uniq | tr -d '", '
+github.com/cometbft/cometbft/abci/types
+github.com/cometbft/cometbft/config
+github.com/cometbft/cometbft/crypto
+github.com/cometbft/cometbft/crypto/secp256k1
+github.com/cometbft/cometbft/crypto/tmhash
+github.com/cometbft/cometbft/libs/bytes
+github.com/cometbft/cometbft/libs/math
+github.com/cometbft/cometbft/light
+github.com/cometbft/cometbft/proto/tendermint/crypto
+github.com/cometbft/cometbft/proto/tendermint/types
+github.com/cometbft/cometbft/proto/tendermint/version
+github.com/cometbft/cometbft/state
+github.com/cometbft/cometbft/types
+github.com/cometbft/cometbft/version
+```
+
+#### Packages used by the Cosmos Hub
+
+The [Cosmos Hub], at the time of this writing, still uses the CometBFT v0.34.x
+series (effectively still using Tendermint Core with the CometBFT alias):
+
+```bash
+> go list -json ./... | jq '.Imports, .TestImports, .XTestImports' | grep 'tendermint/tendermint' | sort | uniq | tr -d '", '
+github.com/tendermint/tendermint/abci/types
+github.com/tendermint/tendermint/abci/types
+github.com/tendermint/tendermint/config
+github.com/tendermint/tendermint/crypto
+github.com/tendermint/tendermint/libs/cli
+github.com/tendermint/tendermint/libs/json
+github.com/tendermint/tendermint/libs/log
+github.com/tendermint/tendermint/libs/os
+github.com/tendermint/tendermint/libs/rand
+github.com/tendermint/tendermint/libs/strings
+github.com/tendermint/tendermint/p2p
+github.com/tendermint/tendermint/privval
+github.com/tendermint/tendermint/proto/tendermint/types
+github.com/tendermint/tendermint/proto/tendermint/types
+github.com/tendermint/tendermint/rpc/client/http
+github.com/tendermint/tendermint/types
+github.com/tendermint/tendermint/types/time
+```
+
+#### Public Package Inventory
+
+Only the packages marked as necessary in the following table should still
+remain publicly exported. All other packages in CometBFT should be moved under
+`internal`.
+
+| Package        | Used By                  | Necessary | Explanation |
+|----------------|--------------------------|-----------|-------------|
+| `abci`         | Cosmos SDK, IBC Go, Gaia | ✅ | |
+| `cmd`          | Cosmos SDK               | ✅ | |
+| `config`       | Cosmos SDK, IBC Go, Gaia | ✅ | |
+| `crypto`       | Cosmos SDK, IBC Go, Gaia | ✅ | |
+| `libs/bytes`   | Cosmos SDK, IBC Go       | ✅ | |
+| `libs/cli`     | Cosmos SDK, Gaia         | ✅ | |
+| `libs/json`    | Cosmos SDK, Gaia         | ✅ | |
+| `libs/log`     | Cosmos SDK, Gaia         | ✅ | |
+| `libs/math`    | IBC Go                   | ❓ | Necessary for `Fraction` type used by light client, which could be moved into `light` package instead |
+| `libs/os`      | Gaia                     | ❌ | Uses `Exit` and `EnsureDir` functions |
+| `libs/rand`    | Gaia                     | ❌ | |
+| `libs/strings` | Gaia                     | ❌ | Uses `StringInSlice` function |
+| `light`        | IBC Go                   | ✅ | |
+| `mempool`      | Cosmos SDK               | ✅ | |
+| `node`         | Cosmos SDK               | ✅ | |
+| `p2p`          | Cosmos SDK, Gaia         | ✅ | |
+| `privval`      | Cosmos SDK, Gaia         | ✅ | |
+| `proto`        | Cosmos SDK, IBC Go, Gaia | ✅ | |
+| `proxy`        | Cosmos SDK               | ✅ | |
+| `rpc`          | Cosmos SDK, Gaia         | ✅ | |
+| `state`        | IBC Go                   | ❌ | Only uses `TxResultsHash` type to check hash equivalence in test |
+| `types`        | Cosmos SDK, IBC Go, Gaia | ✅ | |
+| `version`      | Cosmos SDK, IBC Go       | ✅ | |
+
+## Consequences
+
+### Positive
+
+- A smaller, more manageable Go API surface area.
+- The team will be able to make internal Go API-breaking changes much more quickly.
+
+### Negative
+
+- Some users (especially "power users" that make more extensive use of CometBFT
+  internals) may experience breakages.
 If absolutely necessary, certain packages
+  can be moved back out of the `internal` directory in subsequent minor
+  releases.
+
+[\#1484]: https://github.com/cometbft/cometbft/issues/1484
+[ADR 060]: tendermint-core/adr-060-go-api-stability.md
+[Cosmos SDK]: https://github.com/cosmos/cosmos-sdk/
+[Cosmos Hub]: https://github.com/cosmos/gaia
+[IBC Go]: https://github.com/cosmos/ibc-go
diff --git a/docs/references/architecture/adr-110-remote-mempool.md b/docs/references/architecture/adr-110-remote-mempool.md
new file mode 100644
index 00000000000..93b84480951
--- /dev/null
+++ b/docs/references/architecture/adr-110-remote-mempool.md
@@ -0,0 +1,362 @@
+# ADR 110: Remote mempool
+
+## Changelog
+
+- 2023-11-13: Marked as rejected in favour of ADR 111 (@thanethomson)
+- 2023-11-13: Updated with feedback (@thanethomson)
+- 2023-11-04: Renamed ADR to "Remote mempool" instead of "External mempool" to
+  align with gRPC service definition (@thanethomson)
+- 2023-11-03: First draft (@thanethomson)
+
+## Status
+
+Rejected in favour of ADR 111
+
+## Context
+
+Over time it has become apparent that the generic mempool provided by Tendermint
+Core and Comet is not sufficient to meet evolving application needs. In fact, it
+appears as though the choice of what kind of mempool to use is inherently
+application-specific. The mempool within Comet also cannot be scaled
+independently of Comet, and can become a DoS vector for certain types of
+networks (especially those that allow low- or zero-fee transactions).
+
+As such, and as part of a broader effort to modularize Comet and separate
+consensus- and application-related concerns, this ADR proposes a mechanism
+whereby Comet can interact with an **external mempool**. This involves
+implementing a mempool variant in Comet which is effectively just a client for
+an external mempool process or set of processes.
+
+This approach changes the way that validators obtain transactions, and implies
+that full nodes are not necessary as sentries for receiving transactions (in
+fact, in this model, full nodes are not intended to interact with the mempool at
+all). DDoS mitigation mechanisms, however, are left to those who implement
+remote mempools.
+
+## Alternative Approaches
+
+The current alternatives considered are:
+
+1. Do nothing, which does not seem sustainable.
+2. Extend ABCI to allow external processes direct access to the P2P layer of the
+   consensus engine, as per [\#1112]. This, however, is a much more complex
+   solution that will require more in-depth discussion and design prior to
+   implementation, as it requires ABCI to allow for _bidirectional_ initiation
+   of requests. This will also require substantial breaking changes to ABCI
+   (primarily for remote applications, since a new transport layer will be
+   required that allows bidirectional initiation of requests, as per [\#1117]).
+3. Provide multiple mempool implementations. This does not seem feasible due to
+   the varying requirements of different applications, including the possibility
+   that different applications need different types of gossip protocols. It is
+   infeasible, capacity-wise, for the current CometBFT team to maintain different
+   mempool implementations for specific applications, especially since this
+   falls outside of the purview of maintaining public goods for the _entire_
+   ecosystem as opposed to specific networks. 
+
+## Decision
+
+N/A
+
+## Detailed Design
+
+### High-level architecture
+
+#### Single-process mempool
+
+The simplest possible architecture for a Comet validator with an external
+mempool is as follows.
+
+```mermaid
+flowchart LR
+    comet[Comet Validator]
+    app[Application]
+    mempool[Mempool]
+    user[User]
+
+    comet --> app
+
+    comet -- 4. Reap transactions --> mempool
+    user -- 1. Submit transactions --> app
+    app -- 2. CheckTx --> app
+    app -- 3. Publish transactions --> mempool
+    mempool -- CheckTx --> app
+```
+
+In this flow:
+
+1. Users submit transactions directly to some form of RPC in the application. It
+   is up to operators to secure this RPC endpoint and implement measures to
+   prevent DDoS attacks.
+2. The application validates incoming transactions internally in a similar way
+   to how `CheckTx` currently works.
+3. Transactions that the application deems valid are then sent to the mempool by
+   the application, e.g. through some form of RPC mechanism. The mempool is then
+   expected to propagate these transactions to the rest of the network using its
+   own communication layer, independent of the CometBFT P2P layer.
+4. During `PrepareProposal`, a Comet validator will reap transactions from the
+   mempool by way of an RPC call.
+
+Additionally, when transactions are received by a remote mempool instance from
+foreign remote mempool instances, it is expected that the mempool instance will
+send those transactions to the application for validation (similar to how `CheckTx`
+currently functions) prior to inclusion.
+
+#### Scalable mempool
+
+The recommended high-level architecture for a modular Comet validator with an
+externalized, scalable mempool is as follows.
+
+```mermaid
+flowchart LR
+    comet[Comet Validator]
+    app[Application]
+    mempoolrouter[Mempool Router]
+    mempool1[Mempool Instance 1]
+    mempool2[Mempool Instance 2]
+    mempoolN[Mempool Instance N]
+    user[User]
+
+    comet --> app
+
+    comet -- 4. Reap transactions --> mempoolrouter
+    user -- 1. Submit transactions --> app
+    app -- 2. CheckTx --> app
+    app -- 3. Publish transactions --> mempoolrouter
+
+    mempoolrouter --> mempool1
+    mempoolrouter --> mempool2
+    mempoolrouter --> mempoolN
+
+    mempool1 -- CheckTx --> app
+    mempool2 -- CheckTx --> app
+    mempoolN -- CheckTx --> app
+```
+
+Here the interaction is the same as in the simple variant discussed earlier, but
+instead of interacting directly with the mempool, all interactions are with some
+form of routing mechanism. For example, this could be a reverse proxy like nginx
+configured to deliver requests to mempool instances in a round-robin fashion.
+
+This configuration would be application-specific, and would need to be set up
+correctly by the operator for the specific application.
+
+### Configuration
+
+The following change to the `config.toml` file is envisaged:
+
+```toml
+[mempool]
+# The type of mempool for this CometBFT node to use.
+#
+# Valid types of mempools supported by CometBFT:
+# - "local"  : Default clist mempool with flooding gossip protocol
+# - "remote" : Remote mempool in a separate process
+type = "remote"
+
+#
+# Configuration specific to the remote mempool. If mempool.type is not "remote",
+# this section will be ignored.
+#
+# A remote mempool is only usable by a validator node. Turning on the remote
+# mempool for a full node will simply disable any mempool-related functionality
+# on that full node, and the full node will not interact with any mempool at
+# all.
+#
+[mempool.remote]
+# The base URL for the gRPC interface to the remote mempool. 
+url = "http://localhost:28880" + +# The timeout for reaping (removing) transaction data after a block has been +# committed. +timeout_reap = "1s" + +# The timeout for initiating the TxsAvailable call. +timeout_txs_available = "1s" +``` + +### RPC API + +At the time of this writing, it is recommended to implement a gRPC-based RPC +standard for interacting with the remote mempool (which uses Protobuf encoding +over HTTP/2) because: + +- gRPC, as a standard, is well-established and code generation technologies are + widely available for many different programming languages, allowing mempool + developers more flexibility in how they build those mempools. +- Comet is moving toward making more extensive use of gRPC in future. +- Load balancing technologies for HTTP/2 using reverse proxies such as nginx are + relatively well-established, making routing of requests to scaled-out mempool + clusters relatively straightforward. + +An alternative here would be to implement a RESTful HTTP/3 API (using QUIC), but +HTTP/3 support in reverse proxies is still in its early stages. Such a protocol +can be considered in a future iteration of the interface if it provides +substantial performance/latency benefits. + +The following gRPC API is proposed. + +```protobuf +syntax = "proto3"; +package tendermint.services.remote_mempool.v1; + +// RemoteMempoolService is implemented by a remote mempool instance. A CometBFT +// validator makes use of a client generated from this interface definition to +// interact with the remote mempool. +// +// It is up to the implementer of the remote mempool to define transaction +// submission mechanisms/interfaces. +service RemoteMempoolService { + // Fetch allows a CometBFT validator to obtain the next batch of + // transactions to be included during PrepareProposal when the current node + // is a proposer. + rpc Fetch(FetchRequest) returns (FetchResponse); + + // Remove takes a set of transaction keys and removes the corresponding + // transactions from the mempool. + // + // This will only be called after the block is committed by consensus. + rpc Remove(RemoveRequest) returns (RemoveResponse); + + // TxsAvailable streams notifications back to the client that new + // transactions are available in the mempool. + // + // The mempool is expected to stream a TxsAvailableResponse as it has + // transactions available. A response, however, is only expected in two + // cases: + // + // 1. Once for each newly encountered height (as supplied to the mempool via + // the Remove request). + // + // 2. Upon initiation of the TxsAvailable call. This caters for instances + // where the consensus engine may have failed and restarted. + // + // The consensus engine will only ever make one call to TxsAvailable and + // will attempt to keep the connection alive. + rpc TxsAvailable(TxsAvailableRequest) returns (stream TxsAvailableResponse); +} + +message FetchRequest { + // Fetch as many transactions as possible that cumulatively take up at most + // this number of bytes. Setting this value to -1 implies no limit (this + // implies that the remote mempool is fully in control of this value). + int64 max_bytes = 1; +} + +message FetchResponse { + // Transactions to be included in the proposal. + repeated bytes txs = 1; +} + +message RemoveRequest { + // The current height of the chain. + uint64 height = 1; + + // A list of IDs of transactions to be removed from the mempool. At present + // this is a list of SHA256 hashes of the transactions. 
+ repeated bytes tx_ids = 2; +} + +message RemoveResponse {} + +message TxsAvailableRequest {} + +// TxsAvailableResponse is to be sent once upon initiation of the TxsAvailable +// request, as well as once for each new height, iff new transactions are +// available to be fetched by the consensus engine. +// +// If a TxsAvailableResponse is sent more than once per height, the consensus +// engine is expected to ignore the extra messages. +message TxsAvailableResponse { + // Supplied for informational purposes to the consensus engine to indicate + // the height seen by the mempool during the last call to Remove. + uint64 last_reap_height = 1; +} +``` + +**Notes**: + +- The terminology used in the gRPC interface is different to that used in the + [`Mempool`] interface. The term "reap" technically implies removal from the + mempool, but the [`Mempool`] interface incorrectly uses this term to imply + fetching a batch of transactions. The combination of `Fetch` and `Remove` can + be thought of as a "reap" operation. + +- The gRPC interface purposefully does not facilitate limiting fetched + transactions by gas in an effort to separate consensus- and + application-related concerns (as per [RFC 011]). Should remote mempool + developers want to limit returned transactions by gas, this should be + implemented as part of the configuration of the remote mempool. + +### Impact and properties + +A new mempool that implements the [`Mempool`] interface will be created, and +when enabled it will have the following impacts/properties: + +1. The mempool-related ABCI methods will not be called. +2. The following methods will do nothing: + - `CheckTx`, since transaction validation is assumed to happen prior to + transaction insertion in the mempool. + - `FlushAppConn`, which is only relevant for use via ABCI. + - `RemoveTxByKey`, which is only used in the callback in the context of ABCI. + - `ReapMaxTxs`, which is only used internally and by the `unconfirmed_txs` + RPC endpoint. + - `Flush`, which is only ever used via the `unsafe_flush_mempool` RPC + endpoint. + - `Size` and `SizeBytes`, since these are only used for informational + purposes in the RPC. +3. Reaping of transactions will result in network calls to the remote mempool + instance. +4. The following RPC endpoints will be disabled, returning relevant errors when + users attempt to call them: + - `broadcast_tx_*` + - `unconfirmed_txs` + - `num_unconfirmed_txs` + - `unsafe_flush_mempool` +5. All P2P-based transaction broadcast functionality will be disabled. It will + be up to the mempool developer to implement connectivity to other mempool + instances and the relevant broadcast/gossip mechanisms to disseminate + incoming transactions. + +### Startup and failure modes + +- When a CometBFT-based validator starts up with `mempool.type` set to `remote`, + it will expect to be able to connect to the remote mempool. If it cannot, it + will log an error and continue retrying to connect. + +- When a validator attempts to call any of the `Fetch` or `Remove` methods and + the call fails for whatever reason, it will log an error. + +- It must be kept in mind that transactions will persist in the mempool between + CometBFT node restarts. This implies a different set of assumptions as + compared to when using the default mempool, as when a node fails with the + default mempool, the contents of the mempool are automatically flushed. 
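+
+To make the intended validator-side interaction concrete, the following is a minimal,
+hypothetical sketch of reaping against the proposed service. It assumes the Protobuf
+definition above has been compiled with standard gRPC code generation, so the generated
+names used here (`NewRemoteMempoolServiceClient`, `FetchRequest`, and so on) are
+assumptions rather than existing CometBFT code:
+
+```go
+package remotemempool
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc"
+)
+
+// reapForProposal sketches the Fetch half of the "reap" operation described
+// above: fetch up to maxBytes of transactions to hand to PrepareProposal.
+// After the block is committed, consensus would follow up with a Remove call
+// carrying the committed transaction IDs.
+func reapForProposal(ctx context.Context, conn *grpc.ClientConn, maxBytes int64) ([][]byte, error) {
+	client := NewRemoteMempoolServiceClient(conn)
+	// Bound the RPC call so that a slow or unreachable mempool cannot stall
+	// block production indefinitely.
+	ctx, cancel := context.WithTimeout(ctx, time.Second)
+	defer cancel()
+	resp, err := client.Fetch(ctx, &FetchRequest{MaxBytes: maxBytes})
+	if err != nil {
+		// Per the failure modes above, the validator only logs the error.
+		return nil, err
+	}
+	return resp.Txs, nil
+}
+```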
+
+### Rollout plan
+
+The remote mempool will initially land in CometBFT v1.0, and will potentially be
+considered for backporting to the `v0.38.x-experimental` branch.
+
+## Consequences
+
+### Positive
+
+- Application developers can provide their own mempools that live in a separate
+  process to the validator, in which they can implement properties that benefit
+  their application specifically.
+- Transactions can be submitted to processes outside of a validator without
+  needing to run a full node or sentry, potentially reducing operational costs.
+- Separating the mempool out into its own process, if the mempool is built
+  correctly, can allow it to scale independently of the consensus engine.
+
+### Negative
+
+- Application developers would potentially need to build their own mempools,
+  which involves a substantial amount of effort.
+- In some cases, application-specific remote mempools would currently need to
+  implement their own gossip mechanism (until an interface such as [\#1112] is
+  available).
+
+[\#1112]: https://github.com/cometbft/cometbft/discussions/1112
+[\#1117]: https://github.com/cometbft/cometbft/issues/1117
+[`Mempool`]: ../../../mempool/mempool.go
+[RFC 011]: ../rfc/tendermint-core/rfc-011-delete-gas.md
diff --git a/docs/references/architecture/adr-111-nop-mempool.md b/docs/references/architecture/adr-111-nop-mempool.md
new file mode 100644
index 00000000000..cd4a8dce6e9
--- /dev/null
+++ b/docs/references/architecture/adr-111-nop-mempool.md
@@ -0,0 +1,324 @@
+# ADR 111: `nop` Mempool
+
+## Changelog
+
+- 2023-11-07: First version (@sergio-mena)
+- 2023-11-15: Addressed PR comments (@sergio-mena)
+- 2023-11-17: Renamed `nil` to `nop` (@melekes)
+- 2023-11-20: Mentioned that the app could reuse p2p network in the future (@melekes)
+- 2023-11-22: Adapt ADR to implementation (@melekes)
+
+## Status
+
+Accepted
+
+[Tracking issue](https://github.com/cometbft/cometbft/issues/1666)
+
+## Context
+
+### Summary
+
+The current mempool built into CometBFT implements a robust yet somewhat inefficient transaction gossip mechanism.
+While the CometBFT team is currently working on more efficient general-purpose transaction gossiping mechanisms,
+some users have expressed their desire to manage both the mempool and the transaction dissemination mechanism
+outside CometBFT (typically at the application level).
+
+This ADR proposes a fairly simple way for CometBFT to fulfill this use case without moving away from our current architecture.
+
+### In the Beginning...
+
+It is well understood that a dissemination mechanism
+(sometimes, but not necessarily, using _Reliable Broadcast_ [\[HT94\]][HT94])
+is needed in a distributed system implementing State-Machine Replication (SMR).
+This is also the case in blockchains.
+Early designs such as Bitcoin or Ethereum include an _internal_ component,
+responsible for dissemination, called the mempool.
+Tendermint Core chose to follow the same design given the success
+of those early blockchains and, since inception, Tendermint Core and later CometBFT have featured a mempool as an internal piece of their architecture.
+
+However, the design of ABCI, which clearly divides the application logic (i.e., the appchain)
+from the consensus logic that provides SMR semantics to the app, is a unique innovation in Cosmos
+that sets it apart from Bitcoin, Ethereum, and many others. 
+This clear separation of concerns entailed many consequences, mostly positive:
+it allows CometBFT to be used underneath (currently) tens of different appchains in production
+in the Cosmos ecosystem and elsewhere.
+But there are other implications of having an internal mempool
+in CometBFT: the interaction between the mempool, the application, and the network
+becomes more indirect, and thus more complex and hard to understand and operate.
+
+### ABCI++ Improvements and Remaining Shortcomings
+
+Before the release of ABCI++, `CheckTx` was the main mechanism the app had at its disposal to influence
+what transactions made it to the mempool, and very indirectly what transactions got ultimately proposed in a block.
+Since ABCI 1.0 (the first part of ABCI++, shipped in `v0.37.x`), the application has
+a more direct say in what is proposed through `PrepareProposal` and `ProcessProposal`.
+
+This has greatly improved the ability of appchains to influence the contents of the proposed block.
+Further, ABCI++ has enabled many new use cases for appchains. However, some issues remain with
+the current model:
+
+* We are using the same P2P network for disseminating transactions and consensus-related messages.
+* Many mempool parameters are configured on a per-node basis by node operators,
+  allowing the possibility of inconsistent mempool configuration across the network
+  with potentially serious scalability effects
+  (even causing unacceptable performance degradation in some extreme cases).
+* The current mempool implementation uses a basic (robust but sub-optimal) flood algorithm:
+  * the CometBFT team is working on improving it as one of our current priorities,
+    but any improvement we come up with must address the needs of a vast spectrum of applications,
+    as well as be heavily scale-tested in various scenarios
+    (in an attempt to cover the applications' wide spectrum);
+  * a mempool designed specifically for one particular application
+    would reduce the search space, as its designers can devise it with just their application's
+    needs in mind.
+* The interaction with the application is still somewhat convoluted:
+  * the application has to decide what logic to implement in `CheckTx`,
+    what to do with the transaction list coming in `RequestPrepareProposal`,
+    whether it wants to maintain an app-side mempool (more on this below), and whether or not
+    to combine the transactions in the app-side mempool with those coming in `RequestPrepareProposal`;
+  * all those combinations are hard to fully understand, as the semantics and guarantees are
+    often not clear;
+  * when using exclusively an app-mempool (the approach taken in the Cosmos SDK `v0.47.x`)
+    for populating proposed blocks, with the aim of simplifying the app developers' life,
+    we still have a suboptimal model where we need to continue using CometBFT's mempool
+    in order to disseminate the transactions. So, we end up using twice as much memory,
+    as in-transit transactions need to be kept in both mempools.
+
+The approach presented in this ADR builds on the app-mempool design released in `v0.47.x`
+of the Cosmos SDK,
+and briefly discussed in the last bullet point above (see [SDK app-mempool][sdk-app-mempool] for further details of this model).
+
+In the app-mempool design in Cosmos SDK `v0.47.x`,
+an unconfirmed transaction must be both in CometBFT's mempool for dissemination and
+in the app's mempool so the application can decide how to manage the mempool.
+There is no doubt that this approach has numerous advantages.
 However, it also has some implications that need to be considered:
+
+* Having every transaction both in CometBFT and in the application is suboptimal in terms of memory.
+  Additionally, the app developer has to be careful
+  that the contents of both mempools do not diverge over time
+  (hence the crucial role `re-CheckTx` plays post-ABCI++).
+* The main reason for a transaction needing to be in CometBFT's mempool is
+  that the design in Cosmos SDK `v0.47.x` does not consider an application
+  that has its own means of disseminating transactions.
+  It reuses the peer-to-peer network underneath the CometBFT reactors.
+* There is no point in having transactions in CometBFT's mempool if an application implements an ad-hoc design for disseminating transactions.
+
+This proposal targets this kind of application:
+one that has an ad-hoc mechanism for transaction dissemination that better meets the application's requirements.
+
+The ABCI application could reuse the P2P network once this is exposed via ABCI.
+But this will take some time, as it needs to be implemented and has a dependency
+on bi-directional ABCI, which is also quite substantial. See the
+[1](https://github.com/cometbft/cometbft/discussions/1112) and
+[2](https://github.com/cometbft/cometbft/discussions/494) discussions.
+
+We propose to introduce a `nop` (short for no operation) mempool which will effectively act as a stubbed object
+internally:
+
+* it will reject any transaction being locally submitted or gossiped by a peer;
+* when a _reap_ (as it is currently called) is executed in the mempool, an empty answer will always be returned;
+* the application running on the proposer validator will add the transactions it received
+  via the appchain's own mechanism in `PrepareProposal`.
+
+## Alternative Approaches
+
+These are the alternatives known to date:
+
+1. Keep the current model. Useful for basic apps, but clearly suboptimal for applications
+   with their own mechanism to disseminate transactions and particular performance requirements.
+2. Provide more efficient general-purpose mempool implementations.
+   This is an ongoing effort (e.g., [CAT mempool][cat-mempool]), but it will take some time, and R&D effort, to come up with
+   advanced mechanisms -- likely highly configurable and thus complex -- which then will have to be thoroughly tested.
+3. A similar approach to this one ([ADR110][adr-110]) whereby the application-specific
+   mechanism directly interacts with CometBFT via a newly defined gRPC interface.
+4. Partially adopting this ADR. There are several possibilities:
+   * Use the current mempool, disable transaction broadcast in `config.toml`, and accept transactions from users via `BroadcastTX*` RPC methods.
+     Positive: avoids transaction gossiping; the app can reuse the existing mempool in CometBFT.
+     Negative: requires clients to know the validators' RPC endpoints (potential security issues).
+   * Disable transaction broadcast in `config.toml`, and have the application always reject transactions in `CheckTx`.
+     Positive: effectively disables the mempool; does not require modifications to Comet (may be used in `v0.37.x` and `v0.38.x`).
+     Negative: requires apps to disseminate txs themselves; the setup for this is less straightforward than this ADR's proposal.
+
+## Decision
+
+Implement as described in the [detailed design](#detailed-design) section. 
+
+## Detailed Design
+
+What this ADR proposes can already be achieved with an unmodified CometBFT since
+`v0.37.x`, albeit with a complex, poor UX (see the last alternative in section
+[Alternative Approaches](#alternative-approaches)). The core of this proposal
+is to make some internal changes so it is clear and simple for app developers,
+thus improving the UX.
+
+### `nop` Mempool
+
+We propose a new mempool implementation, called the `nop` Mempool, that effectively disables all mempool functionality
+within CometBFT.
+The `nop` Mempool implements the `Mempool` interface in a very simple manner:
+
+* `CheckTx(tx types.Tx) (*abcicli.ReqRes, error)`: returns `nil, ErrNotAllowed`
+* `RemoveTxByKey(txKey types.TxKey) error`: returns `ErrNotAllowed`
+* `ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs`: returns `nil`
+* `ReapMaxTxs(max int) types.Txs`: returns `nil`
+* `Lock()`: does nothing
+* `Unlock()`: does nothing
+* `Update(...) error`: returns `nil`
+* `FlushAppConn() error`: returns `nil`
+* `Flush()`: does nothing
+* `TxsAvailable() <-chan struct{}`: returns `nil`
+* `EnableTxsAvailable()`: does nothing
+* `SetTxRemovedCallback(cb func(types.TxKey))`: does nothing
+* `Size() int`: returns 0
+* `SizeBytes() int64`: returns 0
+
+Upon startup, the `nop` mempool reactor will advertise no channels to the peer-to-peer layer.
+A minimal sketch of this stub is shown at the end of this design section.
+
+### Configuration
+
+We propose the following changes to the `config.toml` file:
+
+```toml
+[mempool]
+# The type of mempool for this CometBFT node to use.
+#
+# Valid types of mempools supported by CometBFT:
+# - "flood" : clist mempool with flooding gossip protocol (default)
+# - "nop"   : nop-mempool (app has implemented an alternative tx dissemination mechanism)
+type = "nop"
+```
+
+The config validation logic will be modified to add a new rule that rejects a configuration file
+if all of these conditions are met:
+
+* the mempool is set to `nop`;
+* `create_empty_blocks`, in the `consensus` section, is set to `false`.
+
+The reason for this extra validity rule is that the `nop`-mempool, as proposed here,
+does not support the "do not create empty blocks" functionality.
+Here are some considerations on this:
+
+* The "do not create empty blocks" functionality
+  * entangles the consensus and mempool reactors
+  * is hardly used in production by real appchains (to the best of the CometBFT team's knowledge)
+  * its current implementation for the built-in mempool has undesired side-effects
+    * app hashes currently refer to the previous block,
+    * and thus it interferes with query provability.
+* If needed in the future, this can be supported by extending ABCI,
+  but we will first need to see a real need for this before committing to changing ABCI
+  (which has other, higher-impact changes waiting to be prioritized).
+
+### RPC Calls
+
+There are no changes needed in the code dealing with RPC. Those RPC paths that call methods of the `Mempool` interface
+will simply be calling the new implementation.
+
+### Impacted Workflows
+
+* *Submitting a transaction*. Users are not to submit transactions via CometBFT's RPC.
+  `BroadcastTx*` RPC methods will fail with a reasonable error and the 501 status code.
+  The application running on a full node must offer an interface for users to submit new transactions.
+  It could also be a distinct node (or set of nodes) in the network.
+  These considerations are exclusively the application's concern in this approach.
+* *Time to propose a block*. The consensus reactor will call `ReapMaxBytesMaxGas`, which will return a `nil` slice. 
+  `RequestPrepareProposal` will thus contain no transactions.
+* *Consensus waiting for transactions to become available*. `TxsAvailable()` returns `nil`.
+  `cs.handleTxsAvailable()` won't ever be executed.
+  At any rate, a configuration with the `nop` mempool and `create_empty_blocks` set to `false`
+  will be rejected in the first place.
+* *A new block is decided*.
+  * When `Update` is called, nothing is done (no decided transaction is removed).
+  * Locking and unlocking the mempool has no effect.
+* *ABCI mempool's connection*.
+  CometBFT will still open a "mempool" connection, even though it won't be used.
+  This avoids making many breaking changes.
+
+### Impact on Current Release Plans
+
+The changes needed for this approach are fairly simple, and the logic is clear.
+This might allow us to deliver it as part of CometBFT `v1` (our next release)
+without a noticeable impact on `v1`'s delivery schedule.
+
+The CometBFT team (learning from past dramatic events) usually takes a conservative approach
+to backporting changes to release branches that have already undergone a full QA cycle
+(and thus are in code-freeze mode).
+For this reason, although the limited impact of these changes would limit the risks
+of backporting to `v0.38.x` and `v0.37.x`, a careful risk/benefit evaluation will
+have to be carried out.
+
+Backporting to `v0.34.x` does not make sense, as this version predates the release of `ABCI 1.0`,
+so using the `nop` mempool would render CometBFT's operation useless.
+
+### Config parameter _vs._ application-enforced parameter
+
+In the current proposal, the parameter selecting the mempool is in `config.toml`.
+However, this is not a clear-cut decision. These are the alternatives we see:
+
+* *Mempool selected in `config.toml` (our current design)*.
+  This is the way the mempool has always been selected in Tendermint Core and CometBFT,
+  in those versions where there was more than one mempool to choose from.
+  As the configuration is in `config.toml`, it is up to the node operators to configure their
+  nodes consistently, via social consensus. However, this cannot be guaranteed.
+  A network with an inconsistent choice of mempool at different nodes might
+  result in undesirable side effects, such as peers disconnecting from nodes
+  that sent them messages via the mempool channel.
+* *Mempool selected as a network-wide parameter*.
+  A way to prevent any inconsistency when selecting the mempool is to move the configuration out of `config.toml`
+  and have it as a network-wide application-enforced parameter, implemented in the same way as Consensus Params.
+  The Cosmos community may not be ready for such a rigid, radical change,
+  even if it eliminates the risk of operators shooting themselves in the foot.
+  Hence we currently favor the previous alternative.
+* *Mempool selected as a network-wide parameter, but allowing override*.
+  A third option, halfway between the previous two, is to have the mempool selection
+  as a network-wide parameter, but with a special value called _local-config_ that still
+  allows an appchain to decide to leave it up to operators to configure it in `config.toml`.
+
+Ultimately, the "config parameter _vs._ application-enforced parameter" discussion
+is a more general one that is applicable to other parameters not related to mempool selection.
+In that sense, it is out of the scope of this ADR. 
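+
+To close this section, here is the minimal sketch of the `nop` stub referenced above. It is
+simplified and illustrative only -- the real type implements the full `Mempool` interface --
+but it shows how little behaviour the `nop` mempool carries:
+
+```go
+package mempool
+
+import (
+	"errors"
+
+	abcicli "github.com/cometbft/cometbft/abci/client"
+	"github.com/cometbft/cometbft/types"
+)
+
+// ErrNotAllowed is returned by operations that are meaningless without a mempool.
+var ErrNotAllowed = errors.New("not allowed with `nop` mempool")
+
+// nopMempool rejects all submissions and always reports an empty mempool.
+type nopMempool struct{}
+
+func (*nopMempool) CheckTx(types.Tx) (*abcicli.ReqRes, error) { return nil, ErrNotAllowed }
+func (*nopMempool) RemoveTxByKey(types.TxKey) error           { return ErrNotAllowed }
+func (*nopMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs   { return nil }
+func (*nopMempool) ReapMaxTxs(int) types.Txs                  { return nil }
+func (*nopMempool) Size() int                                 { return 0 }
+func (*nopMempool) SizeBytes() int64                          { return 0 }
+func (*nopMempool) TxsAvailable() <-chan struct{}             { return nil }
+```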
+
+## Consequences
+
+### Positive
+
+- Applications can now find mempool mechanisms that better fit their particular needs:
+  - Ad-hoc ways to add, remove, merge, reorder, modify, and prioritize transactions according
+    to application needs.
+  - A way to disseminate transactions (gossip-based or other) to get the submitted transactions
+    to proposers. The application developers can devise simpler, efficient mechanisms tailored
+    to their application.
+  - Back-pressure mechanisms to prevent malicious users from abusing the transaction
+    dissemination mechanism.
+- In this approach, CometBFT's peer-to-peer layer is relieved from managing transaction gossip, freeing up its resources for other reactors such as consensus, evidence, block-sync, or state-sync.
+- There is no risk of the operators of a network providing inconsistent configurations
+  for some mempool-related parameters. Some of those misconfigurations are known to have caused
+  serious performance issues in CometBFT's peer-to-peer network.
+  Unless, of course, the application-defined transaction dissemination mechanism ends up
+  allowing similar configuration inconsistencies.
+- The interaction between the application and CometBFT at `PrepareProposal` time
+  is simplified. No transactions are ever provided by CometBFT,
+  and no transactions can ever be left in the mempool when CometBFT calls `PrepareProposal`:
+  the application trivially has all the information.
+- UX is improved compared to how this could be done prior to this ADR.
+
+### Negative
+
+- With the `nop` mempool, it is up to the application to provide users with a way
+  to submit transactions and deliver those transactions to validators.
+  This is a considerable endeavor, and more basic appchains may consider it not worth the hassle.
+- There is a risk of nodes with a misconfigured mempool wasting resources (bandwidth,
+  CPU, memory, etc.): if transactions are (incorrectly) submitted via CometBFT's RPC
+  but never (correctly) submitted to the app via an app-specific interface, those
+  transactions risk staying in the node's mempool until the node is stopped, and will be
+  reaped and proposed in every single block.
+  App developers will need to keep this in mind and panic in `CheckTx`, or in
+  `PrepareProposal` when it receives a non-empty list of transactions.
+- Optimizing block proposals by only including transaction IDs (e.g. TX hashes) is more difficult.
+  The ABCI app could do it by submitting TX hashes (rather than TXs themselves)
+  in `PrepareProposal`, and then having a mechanism for pulling TXs from the
+  network upon `FinalizeBlock`. 
+ +[sdk-app-mempool]: https://docs.cosmos.network/v0.47/build/building-apps/app-mempool +[adr-110]: https://github.com/cometbft/cometbft/pull/1565 +[HT94]: https://dl.acm.org/doi/book/10.5555/866693 +[cat-mempool]: https://github.com/cometbft/cometbft/pull/1472 \ No newline at end of file diff --git a/docs/references/architecture/adr-112-proposer-based-timestamps.md b/docs/references/architecture/adr-112-proposer-based-timestamps.md new file mode 100644 index 00000000000..93fa5e582f3 --- /dev/null +++ b/docs/references/architecture/adr-112-proposer-based-timestamps.md @@ -0,0 +1,435 @@ +# ADR 112: Proposer-Based Timestamps + +## Changelog + + - July 15 2021: Created by @williambanfield + - Aug 4 2021: Draft completed by @williambanfield + - Aug 5 2021: Draft updated to include data structure changes by @williambanfield + - Aug 20 2021: Language edits completed by @williambanfield + - Oct 25 2021: Update the ADR to match updated spec from @cason by @williambanfield + - Nov 10 2021: Additional language updates by @williambanfield per feedback from @cason + - Feb 2 2022: Synchronize logic for timely with latest version of the spec by @williambanfield + - Feb 1 2024: Renamed to ADR 112 as basis for its adoption ([#1731](https://github.com/cometbft/cometbft/issues/1731)) in CometBFT v1.0 by @cason + - Feb 7 2024: Multiple revisions, fixes, and backwards compatibility discussion by @cason + - Feb 12 2024: More detailed backwards compatibility discussion by @cason + - Feb 22 2024: Consensus parameters for backwards compatibility by @cason + +## Status + +**Accepted** + +## Context + +CometBFT currently provides a monotonically increasing source of time known as [BFT Time][bfttime]. +This mechanism for producing a source of time is reasonably simple. +Each validator adds a timestamp to each `Precommit` message it sends. +The timestamp a correct validator sends is either the validator's current known Unix time or one millisecond greater than the previous block time, depending on which value is greater. +When a block is produced, the proposer chooses the block timestamp as the weighted median of the times in all of the `Precommit` messages the proposer received. +The weighting is defined by the amount of voting power, or stake, each validator has on the network. +This mechanism for producing timestamps is both deterministic and Byzantine fault tolerant. + +This current mechanism for producing timestamps has a few drawbacks. +Validators do not have to agree at all on how close the selected block timestamp is to their own currently known Unix time. +Additionally, any amount of voting power `>1/3` may control the block timestamp. +As a result, it is quite possible that the timestamp is not particularly meaningful. + +These drawbacks present issues in CometBFT. +Timestamps are used by light clients to verify blocks. +Light clients rely on correspondence between their own currently known Unix time and the block timestamp to verify blocks they see. +However, their currently known Unix time may be greatly divergent from the block timestamp as a result of the limitations of `BFT Time`. + +The [Proposer-Based Timestamps specification (PBTS)][pbts-spec] suggests an alternative approach for producing block timestamps that remedies these issues. +Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways: + +1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block instead of the `BFT Time`. +1. 
Correct validators are assumed to be equipped with synchronized clocks, and only approve the proposed block timestamp if it is close enough to their own currently known Unix time.
+
+The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power.
+This document outlines the necessary code changes in CometBFT to implement the corresponding [specification][pbts-spec].
+
+## Alternative Approaches
+
+### Remove timestamps altogether
+
+Computer clocks are bound to skew for a variety of reasons.
+Using timestamps in our protocol means either accepting the timestamps as not reliable or impacting the protocol’s liveness guarantees.
+This design requires impacting the protocol’s liveness in order to make the timestamps more reliable.
+An alternate approach is to remove timestamps altogether from the block protocol.
+`BFT Time` is deterministic but may be arbitrarily inaccurate.
+However, having a reliable source of time is quite useful for applications and protocols built on top of a blockchain.
+
+We therefore decided not to remove the timestamp.
+Applications often wish for some transactions to occur on a certain day, at regular intervals, or after some time has elapsed following a different event.
+All of these require some meaningful representation of agreed-upon time.
+The following protocols and application features require a reliable source of time:
+
+* Light Clients [rely on correspondence between their known time](https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/README.md#failure-model) and the block time for block verification.
+* Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/cometbft/cometbft/blob/main/spec/consensus/evidence.md#verification).
+* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime).
+* IBC packets can use either a [timestamp or a height to time out packet delivery](https://docs.cosmos.network/v0.45/ibc/overview.html#acknowledgements).
+
+Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate.
+This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear).
+Proposer-based timestamps will allow this inflation calculation to use a more meaningful and accurate source of time.
+
+## Decision
+
+Implement Proposer-Based Timestamps while maintaining backwards compatibility with `BFT Time`.
+
+## Detailed Design
+
+### Overview
+
+Implementing Proposer-Based Timestamps (PBTS) will require a few changes to CometBFT’s code.
+These changes will be to the following components:
+
+* The consensus parameters.
+* The `internal/consensus/` package.
+* The `internal/state/` package.
+
+The original version of this document ([ADR 071][original-adr]) did not
+consider that the newly introduced `PBTS` and the previous method, `BFT Time`, could
+be adopted in the same chain/network.
+The [backwards compatibility](#backwards-compatibility) section below was thus
+added to address this topic.
+
+### Backwards compatibility
+
+In order to ensure backwards compatibility, PBTS should be enabled using a [consensus parameter](#compatibility-parameters). 
+The proposed approach is similar to the one adopted to enable vote extensions via
+[`VoteExtensionsEnableHeight`](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_app_requirements.md#featureparamsvoteextensionsenableheight).
+
+In summary, the network will migrate from the `BFT Time` method for assigning
+and validating timestamps to the new method for assigning and validating
+timestamps adopted by `PBTS` from a given, configurable height.
+Once `PBTS` is activated, there are no provisions for the network to revert
+back to `BFT Time` (see [issue 2063][issue2063]).
+
+Moreover, when compared to the original ([ADR 071][original-adr]), we will **NOT**:
+
+- Update `CommitSigs` and `Vote` types, removing the `Timestamp` field
+- Remove the `MedianTime` method used by `BFT Time` to produce and validate the block's time
+- Remove the `voteTime` method used by `BFT Time` to set timestamps to precommits
+- Remove the [validation logic](#current-block-time-validation-logic) used by `BFT Time`
+
+### New consensus parameters
+
+The PBTS specification includes some new parameters that must be the same across all validators.
+The set of [consensus parameters](https://github.com/cometbft/cometbft/blob/main/proto/cometbft/types/v1/params.proto#L13)
+will be updated to include new fields as follows:
+
+```diff
+type ConsensusParams struct {
+  Block     BlockParams     `json:"block"`
+  Evidence  EvidenceParams  `json:"evidence"`
+  Validator ValidatorParams `json:"validator"`
+  Version   VersionParams   `json:"version"`
+  ABCI      ABCIParams      `json:"abci"`
+++ Synchrony SynchronyParams `json:"synchrony"`
+++ Feature   FeatureParams   `json:"feature"`
+}
+```
+
+#### Synchrony parameters
+
+The `PRECISION` and `MSGDELAY` parameters are used to determine if the proposed timestamp is acceptable.
+A validator will only prevote a proposal if the proposal timestamp is considered `timely`.
+A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator.
+More specifically, the timestamp of a proposal received at `proposalReceiveTime` is `timely` if
+
+    proposalTimestamp - PRECISION ≤ proposalReceiveTime ≤ proposalTimestamp + PRECISION + MSGDELAY
+
+`PRECISION` and `MSGDELAY` will be added to the consensus synchrony parameters as [durations](https://protobuf.dev/reference/protobuf/google.protobuf/#duration):
+
+```go
+type SynchronyParams struct {
+    Precision    time.Duration `json:"precision,string"`
+    MessageDelay time.Duration `json:"message_delay,string"`
+}
+```
+
+#### Compatibility parameters
+
+In order to ensure backwards compatibility, PBTS should be enabled using a consensus parameter:
+
+```go
+type FeatureParams struct {
+    PbtsEnableHeight int64 `json:"pbts_enable_height"`
+    ...
+}
+```
+
+The semantics are similar to the ones adopted to enable vote extensions via
+[`VoteExtensionsEnableHeight`](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_app_requirements.md#abciparamsvoteextensionsenableheight).
+The PBTS algorithm is enabled from `FeatureParams.PbtsEnableHeight`, when this
+parameter is set to a value greater than zero and greater than the height at
+which it was set.
+Until that height, the BFT Time algorithm is used.
+
+For more discussion of this, see [issue 2197][issue2197].
+
+### Changes to the block proposal step
+
+#### Proposer selects block timestamp
+
+CometBFT currently uses the `BFT Time` algorithm to produce the block's `Header.Timestamp`. 
+The [block production logic](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/internal/state/state.go#L248)
+sets the weighted median of the times in the `LastCommit.CommitSigs` as the proposed block's `Header.Timestamp`.
+This method will be preserved, but it is only used while operating in `BFT Time` mode.
+
+In PBTS, the proposer will still set a timestamp into the `Header.Timestamp`.
+The timestamp the proposer sets into the `Header` will change depending on whether the block has previously received `+2/3` prevotes in a previous round.
+Receiving `+2/3` prevotes in a round is frequently referred to as a 'Polka', and we will use this term for simplicity.
+
+#### Proposal of a block that has not previously received a Polka
+
+If a proposer is proposing a new block, then it will set the Unix time currently known to the proposer into the `Header.Timestamp` field.
+The proposer will also set this same timestamp into the `Timestamp` field of the `Proposal` message that it issues.
+
+#### Re-proposal of a block that has previously received a Polka
+
+If a proposer is re-proposing a block that has previously received a Polka on the network, then the proposer does not update the `Header.Timestamp` of that block.
+Instead, the proposer simply re-proposes the exact same block.
+This way, the re-proposed block has the exact same block ID as the previously proposed block, and nodes that have already received that block do not need to attempt to receive it again.
+
+The proposer will set the re-proposed block's `Header.Timestamp` as the `Proposal` message's `Timestamp`.
+
+#### Proposer waits
+
+Block timestamps must be monotonically increasing.
+In `BFT Time`, if a validator’s clock was behind, the [validator added 1 millisecond to the previous block’s time and used that in its vote messages](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/internal/consensus/state.go#L2460).
+A goal of adding PBTS is to enforce some degree of clock synchronization, so a mechanism that completely ignores the validator's Unix time no longer works.
+Validator clocks will not be perfectly in sync.
+Therefore, the proposer’s current known Unix time may be less than the previous block's `Header.Time`.
+If the proposer’s current known Unix time is less than the previous block's `Header.Time`, the proposer will sleep until its known Unix time exceeds it.
+
+This change will require amending the [`defaultDecideProposal`](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/internal/consensus/state.go#L1195) method.
+This method should now schedule a timeout that fires when the proposer’s time is greater than the previous block's `Header.Time`.
+When the timeout fires, the proposer will finally issue the `Proposal` message.
+
+### Changes to proposal validation rules
+
+The rules for validating a proposed block will be modified to implement PBTS.
+We will change the validation logic to ensure that a proposal is `timely`.
+The `timely` verification is performed only once the node has enabled PBTS.
+
+Per the PBTS spec, `timely` only needs to be checked if a block has not received a Polka in a previous round.
+If a block previously received a `+2/3` majority of prevotes in a round, then `+2/3` of the voting power considered the block's timestamp near enough to their own currently known Unix time in that round.
+
+The validation logic will be updated to check `timely` for blocks that did not previously receive a Polka in a round. 
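+
+For concreteness, the `timely` predicate defined in the synchrony parameters section above
+can be expressed as the following self-contained sketch (illustrative only, not the
+CometBFT implementation):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// isTimely implements the PBTS rule given earlier:
+//
+//	proposalTimestamp - PRECISION <= proposalReceiveTime <= proposalTimestamp + PRECISION + MSGDELAY
+func isTimely(proposalTimestamp, proposalReceiveTime time.Time, precision, msgDelay time.Duration) bool {
+	lower := proposalTimestamp.Add(-precision)
+	upper := proposalTimestamp.Add(precision + msgDelay)
+	return !proposalReceiveTime.Before(lower) && !proposalReceiveTime.After(upper)
+}
+
+func main() {
+	ts := time.Now()
+	precision := 500 * time.Millisecond
+	msgDelay := 2 * time.Second
+	// Received 300ms after the proposal timestamp: timely.
+	fmt.Println(isTimely(ts, ts.Add(300*time.Millisecond), precision, msgDelay)) // true
+	// Received 3s after the proposal timestamp: not timely.
+	fmt.Println(isTimely(ts, ts.Add(3*time.Second), precision, msgDelay)) // false
+}
+```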
+
+#### Timestamp validation when a block has not received a Polka
+
+The [`POLRound`](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/types/proposal.go#L29) in the `Proposal` message indicates which round the block received a Polka.
+A negative value in the `POLRound` field indicates that the block did not receive a Polka in a previous round.
+Therefore, the validation logic will check for `timely` when `POLRound == -1`.
+
+When a node receives a `Proposal` message, it records its `proposalReceiveTime` as the current Unix time known to the node.
+The node will check that the `Proposal.Timestamp` is at most `PRECISION` greater than `proposalReceiveTime`, and at most `PRECISION + MSGDELAY` less than `proposalReceiveTime`.
+If the timestamp is not within these bounds, the proposed block will not be considered `timely`.
+A validator prevotes nil when the proposed block is not considered `timely`.
+
+Once a full block matching the `Proposal` message is received, the node will also check that the timestamp in the `Header.Timestamp` of the block matches this `Proposal.Timestamp`.
+Using the `Proposal.Timestamp` to check `timely` allows the `MSGDELAY` parameter to be more finely tuned, since `Proposal` messages have a fixed size and are therefore faster to gossip across the network than full blocks.
+
+A node will also check that the proposed timestamp is greater than the timestamp of the block for the previous height.
+If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic.
+
+#### Timestamp validation when a block has received a Polka
+
+When a block is re-proposed that has already received a +2/3 majority of `Prevote`s (i.e., a Polka) on the network, the `Proposal` message for the re-proposed block is created with a `POLRound` that is `>= 0`.
+A node will not check that the `Proposal` is `timely` if the proposal message has a non-negative `POLRound`.
+If the `POLRound` is non-negative, each node (although this is only relevant for validators) will simply ensure that it received the `Prevote` messages for the proposed block in the round indicated by `POLRound`.
+
+If the node is a validator and it does not receive `Prevote` messages for the proposed block before the proposal timeout, then it will prevote nil.
+Validators already check that +2/3 prevotes were seen in `POLRound`, so this does not represent a change to the prevote logic.
+
+A node will also check that the proposed timestamp is greater than the timestamp of the block for the previous height.
+If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic.
+
+Additionally, this validation logic can be updated to check that the `Proposal.Timestamp` matches the `Header.Timestamp` of the proposed block, but it is less relevant since checking that votes were received is sufficient to ensure the block timestamp is correct.
+
+#### Relaxation of the 'Timely' check
+
+The `Synchrony` parameters, `MessageDelay` and `Precision`, provide a means to bound the timestamp of a proposed block.
+Selecting values that are too small presents a possible liveness issue for the network.
+If a CometBFT network selects a `MessageDelay` parameter that does not accurately reflect the time to broadcast a proposal message to all of the validators on the network, validators will begin rejecting proposals from otherwise correct proposers because these proposals will appear to be too far in the past.
+
+`MessageDelay` and `Precision` are planned to be configured as `ConsensusParams`.
+A very common way to update `ConsensusParams` is by executing a transaction included in a block that specifies new values for them.
+However, if the network is unable to produce blocks because of this liveness issue, no such transaction may be executed.
+To prevent this dangerous condition, we will add a relaxation mechanism to the `Timely` predicate.
+
+The chosen solution for this issue is to adopt the configured `MessageDelay`
+for the first round (0) of consensus.
+Then, as more rounds are needed to commit a value, we increase the
+adopted value for `MessageDelay`, at a rate of 10% per additional round.
+More precisely, the `MessageDelay(r)` adopted for round `r` of consensus is
+given by `MessageDelay(r) = MessageDelay * (1.1)^r`.
+Of course, `MessageDelay(0) = MessageDelay`; by round 10, for example, the
+adopted value has grown to roughly `2.6 * MessageDelay`.
+
+This liveness issue is not as problematic for chains with very small `Precision` values.
+Operators can more easily readjust local validator clocks to be more aligned.
+Additionally, chains that wish to increase a small `Precision` value can still take advantage of the `MessageDelay` relaxation, waiting for the `MessageDelay` value to grow significantly and issuing proposals with timestamps that appear far in the past to their peers.
+
+For more discussion of this, see [issue 2184][issue2184].
+
+### Changes to the prevote step
+
+Currently, a validator will prevote a proposal in one of three cases:
+
+* Case 1: Validator has no locked block and receives a valid proposal.
+* Case 2: Validator has a locked block and receives a valid proposal matching its locked block.
+* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +2/3 prevotes for the proposal’s block, either in the current round or in a round greater than or equal to the round in which it locked its locked block.
+
+The only change we will make to the prevote step is to what a validator considers a valid proposal, as detailed above.
+
+### Changes to the precommit step
+
+The precommit step will not require much modification.
+Its proposal validation rules will change in the same ways that validation will change in the prevote step, with the exception of the `timely` check: precommit validation will never check that the timestamp is `timely`.
+
+
+
+### Changes to the block validation
+
+To provide a better understanding of the changes needed for timestamp validation, we first detail how timestamp validation currently works with BFT Time,
+and then present how it will work with PBTS.
+
+#### Current block time validation logic
+
+The [`validateBlock` function](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/internal/state/validation.go#L15) currently [validates the proposed block timestamp in three ways](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/internal/state/validation.go#L116).
+First, the validation logic checks that this timestamp is greater than the previous block’s timestamp.
+
+Second, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s `LastCommit`](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/types/block.go#L49).
+
+Finally, the validation logic authenticates the timestamps in the `LastCommit.CommitSigs`.
+The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the voting validator’s private key.
+One of the items in this `signedBytes` hash is the timestamp in the `CommitSig`.
+To authenticate the `CommitSig` timestamp, the node authenticating votes builds a hash of fields that includes the `CommitSig` timestamp and checks this hash against the signature.
+This takes place in the [`VerifyCommit` function](https://github.com/cometbft/cometbft/blob/1f430f51f0e390cd7c789ba9b1e9b35846e34642/types/validation.go#L26).
+
+
+
+#### PBTS block time validation
+
+PBTS does not validate the timestamp of a block as part of the `validateBlock` method.
+This means that nodes will no longer check that the block time is a weighted median of `LastCommit` timestamps.
+
+Instead of validating the timestamp of proposed blocks,
+PBTS validates the timestamp of the `Proposal` message for a block, as detailed [here](#changes-to-proposal-validation-rules).
+Notice that the `Proposal` timestamp must match the proposed block's `Time` field.
+
+This also means that committed blocks, retrieved from peers via consensus catch-up mechanisms or via block sync,
+will not have their timestamps validated, since the timestamp validation is now part of the consensus logic.
+
+
+## Future Improvements
+
+* Implement BLS signature aggregation.
+If we remove the `Timestamp` field from the `Precommit` messages, we are able to aggregate signatures,
+as votes for the same block, height, and round become identical.
+
+We have left the removal of the `Timestamp` field of vote messages out for the time being, as it would break the block format and validation
+rules (signature verification) and thus may force a hard-fork on chains upgrading to the latest version of CometBFT.
+We will remove the timestamps in votes when changing the block format is supported in CometBFT without
+requiring a hard-fork (this feature is called [Soft Upgrades](https://github.com/cometbft/cometbft/issues/122)).
+
+## Consequences
+
+### Positive
+
+* `<2/3` of validators can no longer arbitrarily influence block timestamps.
+* Block timestamps will have a stronger correspondence to real time.
+* Improves the reliability of [components](#remove-timestamps-altogether) that rely on block timestamps:
+  Light Client verification, Evidence validity, Unbonding of staked assets, IBC packet timeouts, inflation distribution, etc.
+* It is a step towards enabling BLS signature aggregation.
+
+### Neutral
+
+* Alters the liveness requirements for the consensus algorithm.
+Liveness now requires that all correct validators have synchronized clocks, with inaccuracy bound by `PRECISION`,
+and that end-to-end delays of `PROPOSAL` messages are bound by `MSGDELAY`.
+
+### Negative
+
+* May increase the duration of the propose step if there is a large skew between the clocks of the previous proposer and the current proposer.
+The clock skew between correct validators is supposed to be bound by `PRECISION`, so this impact is relevant when block times are shorter than `PRECISION`.
+* Existing chains that adopt PBTS may have block times far in the future, which may cause the transition height to have a very long duration (to preserve time monotonicity).
+The workaround in this case is, first, to synchronize the validators' clocks, then to maintain the legacy operation (using BFT Time) until block times align with real time.
+At this point, the transition from BFT Time to PBTS should be smooth.
+
+## References
+
+* [PBTS Spec][pbts-spec]
+* [BFT Time spec][bfttime]
+* [PBTS: support both PBTS and legacy BFT Time #2063][issue2063]
+* [PBTS: should synchrony parameters be adaptive? #2184][issue2184]
+
+[issue2184]: https://github.com/cometbft/cometbft/issues/2184
+[issue2197]: https://github.com/cometbft/cometbft/issues/2197
+[issue2063]: https://github.com/cometbft/cometbft/issues/2063
+[bfttime]: https://github.com/cometbft/cometbft/blob/main/spec/consensus/bft-time.md
+[pbts-spec]: https://github.com/cometbft/cometbft/tree/main/spec/consensus/proposer-based-timestamp/README.md
+[original-adr]: https://github.com/cometbft/cometbft/blob/main/docs/references/architecture/tendermint-core/adr-071-proposer-based-timestamps.md
diff --git a/docs/references/architecture/adr-113-modular-transaction-hashing.md b/docs/references/architecture/adr-113-modular-transaction-hashing.md
new file mode 100644
index 00000000000..c9ab52e34b2
--- /dev/null
+++ b/docs/references/architecture/adr-113-modular-transaction-hashing.md
@@ -0,0 +1,162 @@
+# ADR 113: Modular transaction hashing
+
+## Changelog
+
+- 2024-02-05: First version (@melekes)
+- 2024-05-28: Complete refactor (@melekes)
+- 2024-06-07: Limit the scope to transaction hashing (@melekes)
+- 2024-06-19: Explain why we don't expose this functionality in the CLI (@melekes)
+
+## Status
+
+Proposed
+
+## Context
+
+Hashing in CometBFT is currently implemented using the `crypto/tmhash`
+package, which itself relies on [`sha256`](https://pkg.go.dev/crypto/sha256).
+
+Among the things which are hashed are the block's header, evidence, consensus
+params, commit, partset header, and transactions.
+
+### Transaction hashing
+
+The transaction hash is used by:
+
+- the built-in transaction indexer;
+- the `/tx` and `/tx_search` RPC endpoints, which allow users
+to search for a transaction using its hash;
+- the mempool, to identify transactions.
+
+The problem some application developers are facing is a mismatch between the
+internal/app representation of transactions and the one employed by CometBFT. For
+example, [Evmos](https://evmos.org/) wants transactions to be hashed using
+[RLP][rlp].
+
+In order to be flexible, CometBFT needs to allow changing the transaction
+hashing algorithm if desired by the app developers.
+
+### General hashing
+
+The suggested solution could be used to change the hashing function for all
+structs, not just transactions. But the consequences of such a change are quite
+significant. If the chain is using a different hashing scheme, then it loses
+IBC compatibility. The IBC modules assume a fixed hashing scheme. The
+destination chain needs to know the hashing function of the source chain in
+order to verify the validators hash. So, this remains future work for now.
+
+## Alternative Approaches
+
+1. Add `TxHashFunc` (transaction hashing function) to `NewNode` as an option
+   and pass this function down the stack => avoids global variables, but leads
+   to a massive API breakage. The problem is we're not 100% sure this will be a
+   final solution. So every time we decide to change it, we will be breaking
+   a lot of APIs.
The suggested solution allows us to be more flexible.
+2. Allow changing the hashing function for all structs => breaks IBC
+   compatibility (see 'General hashing' above).
+
+## Decision
+
+Give app developers a way to provide their own hash function.
+
+## Detailed Design
+
+Use `sha256` by default, but give developers a way to change the hashing function:
+
+```go
+import (
+	"crypto"
+	_ "crypto/sha256" // registers crypto.SHA256
+	"fmt"
+)
+
+var (
+	// Hash function used for transaction hashing.
+	txHash = crypto.SHA256
+
+	// fmtHash is a function that converts a byte slice to a string.
+	fmtHash = func(bz []byte) string {
+		return fmt.Sprintf("%X", bz)
+	}
+)
+
+// SetTxHash sets the hash function used for transaction hashing.
+//
+// Call this function before starting the node. Changing the hashing function
+// after the chain has started can ONLY be done with a hard fork.
+func SetTxHash(h crypto.Hash) {
+	txHash = h
+}
+
+// SetFmtHash sets the function used to convert a checksum to a string.
+func SetFmtHash(f func([]byte) string) {
+	fmtHash = f
+}
+
+// Bytes is a wrapper around a byte slice that implements the fmt.Stringer.
+type Bytes []byte
+
+func (bz Bytes) String() string {
+	return fmtHash(bz)
+}
+
+func (bz Bytes) Bytes() []byte {
+	return bz
+}
+
+// Sum returns the checksum of the data as Bytes, computed with the
+// configured txHash function.
+func Sum(bz []byte) Bytes {
+	h := txHash.New()
+	h.Write(bz)
+	return Bytes(h.Sum(nil))
+}
+```
+
+Let's break this down. By default, we use the standard library's `sha256`.
+`SetTxHash` allows developers to swap the default hashing function
+with the hashing function of their choice. It will be used in:
+
+- the mempool;
+- the transaction indexer;
+- the `/tx` and `/tx_search` RPC endpoints.
+
+Note the Header's `data_hash` will be different if the default hashing function
+is changed.
+
+`SetFmtHash` allows developers to swap the default string function
+(`fmt.Sprintf("%X", bz)`) with their own implementation.
+
+The design in the current ADR only aims to support custom hash functions;
+it does not support _changing_ the hash function for an existing chain.
+If the application developer decides to change the default hashing scheme, they
+can only do so once, before launching their app. If they attempt to change it
+later, the resulting hashes won't match; only a hard fork would make such a
+change possible.
+
+The majority of chains should still use the default hashing function. That's
+why we don't expose this functionality in the CLI or similar places
+(`TxHashFunc` in `NewNode`). Even though the number of chains using a different
+hashing function can be significant, it's not the use case we're optimizing
+for. It's good to support it, but it's not the primary goal. Similarly, it's
+good to support different p2p protocols, but we're optimizing for the default
+one.
+
+## Consequences
+
+### Positive
+
+- Modular transaction hashing
+
+### Neutral
+
+- App developers need to take performance into account when choosing a custom
+  hash function.
+
+### Negative
+
+- Global variables.
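+
+To make the Detailed Design above concrete, here is a small, self-contained toy
+(ours, not part of the ADR) that mirrors the `txHash` global and shows the effect
+of swapping the hash function; it assumes `golang.org/x/crypto/sha3`, which
+registers `crypto.SHA3_256`:
+
+```go
+package main
+
+import (
+	"crypto"
+	_ "crypto/sha256" // registers crypto.SHA256
+	"fmt"
+
+	_ "golang.org/x/crypto/sha3" // registers crypto.SHA3_256
+)
+
+var txHash = crypto.SHA256
+
+// sum mirrors the Sum function from the design above.
+func sum(bz []byte) []byte {
+	h := txHash.New()
+	h.Write(bz)
+	return h.Sum(nil)
+}
+
+func main() {
+	tx := []byte("demo-tx")
+	fmt.Printf("sha256:   %X\n", sum(tx))
+
+	// What SetTxHash would do, before the node starts.
+	txHash = crypto.SHA3_256
+	fmt.Printf("sha3-256: %X\n", sum(tx))
+}
+```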
+
+## References
+
+- [tendermint#6539](https://github.com/tendermint/tendermint/issues/6539)
+- [tendermint#6773](https://github.com/tendermint/tendermint/pull/6773)
+
+[rlp]: https://ethereum.org/developers/docs/data-structures-and-encoding/rlp
diff --git a/docs/references/architecture/adr-114-undo-109.md b/docs/references/architecture/adr-114-undo-109.md
new file mode 100644
index 00000000000..e5cdbde72e8
--- /dev/null
+++ b/docs/references/architecture/adr-114-undo-109.md
@@ -0,0 +1,172 @@
+# ADR 114: Partly Undo ADR 109 (Reduce Go API Surface)
+
+## Changelog
+
+- 2024-04-25: First draft (@adizere)
+
+## Status
+
+Accepted (PR [\#2897])
+
+## Context
+
+In [ADR 109] we decided to internalize numerous Go APIs following the research and due diligence
+in that ADR. This will take effect with the CometBFT v1 release.
+
+Prior to releasing the v1 RC, we found that our diligence was insufficient and that several Go modules
+internalized with ADR 109 would either (i) force a difficult upgrade on users or, worse,
+(ii) provoke some users to fork CometBFT.
+
+The question in the present ADR is how to deal with the potential damage our internalized APIs would cause
+to users' codebases.
+
+## Alternative Approaches
+
+The following alternative approaches were considered.
+
+1. Do nothing. This approach will make the upgrade to v1 very difficult and potentially lead to new forks of Comet.
+2. Fully undo ADR 109. This approach will minimize disruption with the v1 release, but will bring the CometBFT codebase
+   back to its state prior to the implementation of that ADR, i.e., breaking changes in non-critical
+   modules will require major version bumps, which will encourage stagnation and slow uptake of new releases.
+
+## Decision
+
+We will partly undo ADR 109, by selectively re-exporting (i.e., making public) certain modules. Modules `state`
+and `store` have already been made public ([\#2610]) because internalizing them blocked the SDK upgrade to CometBFT v1.
+
+To select additional modules that we will make public (again), we will follow this high-level strategy:
+
+1. Identify all `/internal` modules that are being imported by open-source projects, using a tool
+   such as https://www.sourcegraph.com/search.
+2. For each of these modules, categorize them by importance. There are 3 levels: _high, medium, low_.
+   By 'importance' we mean "important for current or later modularization work in CometBFT."
+3. For modules that have _high_ importance, we will:
+   1. make them public,
+   2. mark the module as deprecated,
+   3. establish communication with the team(s) using that module to find a way in `v2` to make the package
+      internal again with minimal user disruption.
+4. For modules with _medium_ importance, we will:
+   1. make them public,
+   2. mark them as deprecated; the rationale is that these modules being public is unlikely to block us in
+      the future, and if we find they do block us, we will internalize them in `v2` and follow the same
+      approach as for _high_ importance.
+5. For modules with _low_ importance, we will:
+   1. If there is _no_ project using that module, then we keep it private, as decided in [ADR 109].
+   2. If there are projects using the module, then there are two sub-cases to consider:
+      - i) If the APIs in that module contain Comet-specific features, then we'll make the module
+        public; the rationale is that otherwise we would encourage users to fork Comet.
+      - ii) If the APIs in that module comprise general-purpose features, then we keep the module private;
+        the rationale is that such modules have replacements and users will find it easy to replace
+        them (e.g., random number generation, file manipulation, synchronization primitives).
+
+We will present these decisions at the community call, and we will err on the side of
+_exposing more_ (i.e., making public) rather than retaining modules as private when there is
+ambiguity around the decision for a certain module.
+
+## Detailed Design
+
+### Module Inventory
+
+The following table contains our research, categorization by importance, and decision for each module
+in the `internal` directory of CometBFT, as of [v1.0.0-alpha.2].
+
+Column legend:
+* Comet internal module name: The name of the module
+* Decision: The decision we are taking (either make public, or keep private) for this module
+* \# Repositories affected \(non-forks\): Count of how many public, open-source projects we have identified that are using APIs from this module
+* Affected files: How many files (among the affected repositories, both forks and non-forks) would be affected if we make this module private; this is a rough measure of the impact -- or "damage" -- of making the module private
+* Importance: Our assessment of how important it is that we (eventually) make this module private
+* URL: The public source of data we have used to research the data in this table
+
+| Comet internal module name | Decision           | \# Repositories affected \(non-forks\) | Affected files | Importance | URL               |
+|:---------------------------|:-------------------|:---------------------------------------|:---------------|:-----------|:------------------|
+| timer                      | keep private       | 0                                       | 7              | low        | [timer-url]       |
+| progressbar                | keep private       | 0                                       | 4              | low        | [progressbar-url] |
+| inspect                    | keep private       | 0                                       | 0              | low        | [inspect-url]     |
+| fail                       | keep private       | 0                                       | 10             | low        | [fail-url]        |
+| events                     | keep private       | 0                                       | 10             | low        | [events-url]      |
+| cmap                       | keep private       | 0                                       | 10             | low        | [cmap-url]        |
+| autofile                   | keep private       | 0                                       | 10             | low        | [autofile-url]    |
+| async                      | keep private       | 0                                       | 8              | low        | [async-url]       |
+| flowrate                   | keep private       | 0                                       | 10             | low        | [flowrate-url]    |
+| bits                       | keep private       | 0                                       | 26             | low        | [bits-url]        |
+| blocksync                  | keep private       | 0                                       | 6              | low        | [blocksync-url]   |
+| clist                      | keep private       | 0                                       | 24             | low        | [clist-url]       |
+| indexer                    | keep private       | 0                                       | 0              | low        | [indexer-url]     |
+| net                        | keep private       | 1                                       | 40             | low        | [net-url]         |
+| statesync                  | **🧹 make public** | 1                                       | 16             | medium     | [statesync-url]   |
+| evidence                   | keep private       | 1                                       | 26             | high       | [evidence-url]    |
+| consensus                  | keep private       | 1                                       | 64             | high       | [consensus-url]   |
+| protoio                    | **🧹 make public** | 3                                       | 44             | low        | [protoio-url]     |
+| sync                       | **🧹 make public** | 3                                       | 172            | low        | [sync-url]        |
+| tempfile                   | keep private       | 4                                       | 16             | low        | [tempfile-url]    |
+| strings                    | keep private       | 4                                       | 14             | low        | [strings-url]     |
+| service                    | **🧹 make public** | 6                                       | 156            | low        | [service-url]     |
+| os                         | keep private       | 7                                       | 262            | low        | [os-url]          |
+| rand                       | keep private       | 7                                       | 317            | low        | [rand-url]        |
+| pubsub                     | **🧹 make public** | 7                                       | 169            | medium     | [pubsub-url]      |
+
+#### Remarks on the table
+
+For `evidence` and `consensus`: There is a single project we have identified using APIs from these modules,
+specifically . The maintainers of this project have agreed it is not a problem
+for them if we keep the two modules private.
+
+### Summary
+
+To summarize, these modules will remain public in v1 and will be marked as deprecated:
+- `statesync`
+- `protoio`
+- `sync`
+- `service`
+- `pubsub`
+
+For the following four modules, which are becoming private in v1, we need to be extra careful to help
+users transition to other general-purpose libraries:
+- `tempfile`
+- `strings`
+- `os`
+- `rand`
+
+## Consequences
+
+### Positive
+
+- A smaller, more manageable Go API surface area.
+- Less aggressive progression towards the goals set out in ADR 109.
+
+### Negative
+
+- Some power users may experience breakages. If absolutely necessary, certain packages
+  can be moved back out of the `internal` directory in subsequent minor
+  releases.
+
+[\#2897]: https://github.com/cometbft/cometbft/pull/2897
+[\#2610]: https://github.com/cometbft/cometbft/issues/2610
+[ADR 109]: adr-109-reduce-go-api-surface.md
+[v1.0.0-alpha.2]: https://github.com/cometbft/cometbft/releases/tag/v1.0.0-alpha.2
+[timer-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/timer"&patternType=keyword&sm=0
+[progressbar-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/progressbar"&patternType=keyword&sm=0
+[inspect-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/inspect"&patternType=keyword&sm=0
+[fail-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/fail"&patternType=keyword&sm=0
+[events-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/events"&patternType=keyword&sm=0
+[cmap-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/cmap"&patternType=keyword&sm=0
+[autofile-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/autofile"&patternType=keyword&sm=0
+[async-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/async"&patternType=keyword&sm=0
+[flowrate-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/flowrate"&patternType=keyword&sm=0
+[bits-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/bits%22&patternType=keyword&sm=0
+[blocksync-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/blocksync%22&patternType=keyword&sm=0
+[clist-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/clist%22&patternType=keyword&sm=0
+[net-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/net%22&patternType=keyword&sm=0
+[statesync-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/statesync%22&patternType=keyword&sm=0
+[evidence-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/evidence%22&patternType=keyword&sm=0
+[consensus-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/consensus%22&patternType=keyword&sm=0
+[indexer-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/indexer%22&patternType=keyword&sm=0
+[protoio-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/protoio"&patternType=keyword&sm=0
+[sync-url]:
https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/sync%22&patternType=keyword&sm=0
+[tempfile-url]: https://sourcegraph.com/search?q=context:global+lang:Go+"github.com/cometbft/cometbft/libs/tempfile"&patternType=keyword&sm=0
+[strings-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/strings%22&patternType=keyword&sm=0
+[service-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/service%22&patternType=keyword&sm=0
+[os-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/os%22&patternType=keyword&sm=0
+[rand-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/rand%22&patternType=keyword&sm=0
+[pubsub-url]: https://sourcegraph.com/search?q=context:global+lang:Go+%22github.com/cometbft/cometbft/libs/pubsub%22&patternType=keyword&sm=0
diff --git a/docs/references/architecture/adr-115-predictable-block-times.md b/docs/references/architecture/adr-115-predictable-block-times.md
new file mode 100644
index 00000000000..61cd24b94af
--- /dev/null
+++ b/docs/references/architecture/adr-115-predictable-block-times.md
@@ -0,0 +1,157 @@
+# ADR 115: Predictable Block Times
+
+## Changelog
+
+ - April 30, 2024: Created by @melekes
+ - May 13, 2024: Updated by @melekes
+ - June 11, 2024: Marked as accepted (@melekes)
+
+## Status
+
+**Accepted**
+
+## Context
+
+Let's say you're an ABCI application developer and you want to have
+constant block times: one block every 6s. How would you do that?
+
+You found out that proposing a block and voting on it takes roughly 2s in your
+network. You instruct validators in your network to change `timeout_commit`
+(_how long a node waits for additional precommits after committing a block,
+before starting on the next height_) from 1s to 4s in the node's config file.
+
+Do you have predictable block times now?
+
+No - you don't. The expected block time will be around 6s, but even in
+favorable runs it will drift far from 6s due to:
+
+1. CometBFT going into multiple rounds of consensus (it happens rarely, but
+   still).
+2. Network latency.
+3. Clock drifts.
+4. The delay for processing a block.
+
+A validator could also change `timeout_commit` to 0s in its node's config file.
+That means whenever THIS validator proposes a block, the block time will be 2s
+(not 6s!).
+
+Note that the value of `timeout_commit` is static and can't be changed
+without restarting the node.
+
+Because 1-3 are out of your (and our) control, **we can't have constant block
+times**. But we can design a mechanism so that the medium-to-long term average
+block time converges to a desired value.
+
+To achieve that, we need to define a form of variable block time. Namely, if a
+block takes longer than expected, we should be able to produce the next block(s)
+faster in order to converge to the desired (average) block time.
+
+### Use Cases
+
+In the case of Osmosis, committing a big block is expected to take longer than
+usual. If we know the approximate size of the next block, we could increase
+`timeout_commit` dynamically (if such a feature existed) to give the state
+machine some extra time to finish execution.
+
+In the case of Berachain, they want a constant block time that matches
+Ethereum's slots, which are equal to 12s.
+
+Celestia's use case here, for the medium term, is to be able to have a longer
+proposal timeout so that they can spend a larger percentage of the block time
+gossiping while having consistent block times.
+
+## Proposal
+
+Move `timeout_commit` into `FinalizeBlockResponse` as `next_block_delay`. This
+field's semantics stay essentially the same: the delay between the time when the
+current block is committed and the next height is started. The idea is
+to have the same behavior as `timeout_commit`, while allowing the
+application to pick a different value for each height.
+
+If Comet goes into multiple rounds of consensus, the ABCI application can react
+by lowering `next_block_delay`. Of course, nobody can guarantee that there
+won't be 100000 rounds of consensus, so **it's still best effort** when it
+comes to individual block times.
+
+## Alternative Approaches
+
+1. Do nothing. The block time will stay largely unpredictable.
+2. Make `timeout_commit` global (= consensus parameter). It doesn't solve the
+   problem of multiple rounds of consensus + the execution is delayed by one
+   block (as with other consensus parameters).
+3. Add `next_block_delay`, but keep `timeout_commit`. Individual validators can
+   still change `timeout_commit` in their node's config file and mess up the
+   block times.
+4. Add `next_block_delay`, but keep `timeout_commit` and make it global. It's
+   confusing to have two parameters controlling the same behavior.
+
+## Detailed Design
+
+Move `timeout_commit` from the config into `FinalizeBlockResponse` as `next_block_delay`.
+
+```protobuf
+message FinalizeBlockResponse {
+  // ...
+  // The delay between this block and the time when the next block is proposed.
+  google.protobuf.Duration next_block_delay = 6
+      [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+}
+```
+
+A correct proposer MUST wait until the last block is committed + `next_block_delay` to propose a block.
+A correct validator MUST wait until the last block is committed + `next_block_delay` to start the next height.
+
+`next_block_delay` is a non-deterministic field (unlike most fields in
+`FinalizeBlockResponse`), that is: it is not part of the replicated data. This
+means that each node may provide a different value, which is supposed to depend
+on how long things are taking at the local node. Or it can replicate the
+existing behavior (fixed `timeout_commit`) by always returning a constant value
+(e.g., "3s").
+
+### ABCI application
+
+In order to leverage this feature, most applications:
+
+* need to use real (wall-clock) time;
+* need to mandate that their nodes have synchronized clocks (NTP, or other). This is
+  not a big deal since PBTS also requires this;
+* can trust the `time` field in `PrepareProposalRequest`, `ProcessProposalRequest`, and
+  `FinalizeBlockRequest` when using PBTS.
+
+### Specification
+
+The timeout estimate in the spec should be updated to reflect `next_block_delay`:
+
+```
+block(i+1).ProposalTime > block(i).CommitTime + NEXTBLOCKDELAY
+```
+
+See [this comment][spec-comment] for more details.
+
+### Upgrade path
+
+* keep `timeout_commit`, marking it as deprecated;
+* if a value for `timeout_commit` is detected at process start-up, warn the
+  user that they are using a deprecated field;
+* if the app provides a value for `next_block_delay`, then `timeout_commit` is
+  ignored;
+* if the app does not provide a value for `next_block_delay`, then CometBFT falls
+  back to `timeout_commit`.
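+
+As a rough, self-contained sketch (ours, not part of the proposal; the function
+name is illustrative), an application targeting a 6s average block time could
+compute the value it returns in `next_block_delay` like this:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// nextBlockDelay returns the delay to report in FinalizeBlockResponse so that
+// the average block time converges to target. elapsed is how long the current
+// height took, as measured locally by the application.
+func nextBlockDelay(target, elapsed time.Duration) time.Duration {
+	if elapsed >= target {
+		// The height already took longer than the target:
+		// start the next one immediately.
+		return 0
+	}
+	return target - elapsed
+}
+
+func main() {
+	fmt.Println(nextBlockDelay(6*time.Second, 2*time.Second)) // 4s
+	fmt.Println(nextBlockDelay(6*time.Second, 7*time.Second)) // 0s
+}
+```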
+
+## Consequences
+
+### Positive
+
+- ABCI application developers will have more control over block times.
+
+### Neutral
+
+### Negative
+
+## References
+
+* [cometbft/cometbft#2655](https://github.com/cometbft/cometbft/issues/2655)
+* [tendermint/tendermint#5911](https://github.com/tendermint/tendermint/issues/5911)
+* [discussion about timeout params](https://github.com/cometbft/cometbft/discussions/2266)
+
+[spec-comment]: https://github.com/tendermint/tendermint/issues/5911#issuecomment-804889910
diff --git a/docs/references/architecture/adr-117-cryptoprovider-implementation.md b/docs/references/architecture/adr-117-cryptoprovider-implementation.md
new file mode 100644
index 00000000000..66961fbc124
--- /dev/null
+++ b/docs/references/architecture/adr-117-cryptoprovider-implementation.md
@@ -0,0 +1,269 @@
+# ADR 117: Implementing CryptoProvider for PrivValidator
+
+## Changelog
+
+* 2024-07-01: Initial proposal (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
+* 2024-08-08: ADR title changed. Update proposal (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
+
+## Status
+
+Proposed
+
+## Abstract
+
+This ADR proposes the implementation of [ADR-001-crypto-provider](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md) within CometBFT's existing PrivValidator framework. The primary goal is to refactor the current `PrivValidator` implementations by replacing them with `CryptoProvider`-based implementations. This change will maintain the existing PrivValidator logic while allowing for multiple implementations through the CryptoProvider interface.
+
+This approach enables "pluggable cryptography" that offers a more modular design and future-proofs the crypto tools. The implementation will preserve the existing multi-curve support while providing a more flexible and extensible architecture for cryptographic operations.
+
+This ADR extensively references concepts like `CryptoProvider`.
+Please check the full ADR describing them [here](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#crypto-provider).
+
+## Context
+
+The introduction of the `CryptoProvider` interface into the CometBFT cryptographic package offers several benefits. The existing `PrivValidator` logic can be kept while allowing for multiple implementations of `PrivValidator` through the `CryptoProvider` interface. This means that different cryptographic methods can be plugged in without changing the core logic of `PrivValidator`.
+
+This flexibility makes it easier to adapt to new security standards and improves performance for different use cases. For example, if a new, more secure signing method is developed, it can be implemented as a new `CryptoProvider` and used by `PrivValidator` without needing to rewrite existing code. This helps to keep the cryptographic code up-to-date and more secure.
+
+For readability purposes, here's an extract of **adr-001-crypto-provider** that defines the `CryptoProvider` interface:
+
+```go
+// CryptoProvider aggregates the functionalities of signing, verifying, and hashing, and provides metadata.
+type CryptoProvider interface {
+	// GetSigner returns an instance of Signer.
+	GetSigner() Signer
+
+	// GetVerifier returns an instance of Verifier.
+	GetVerifier() Verifier
+
+	// GetHasher returns an instance of Hasher.
+	GetHasher() Hasher
+
+	// Metadata returns metadata for the crypto provider.
+	Metadata() ProviderMetadata
+}
+```
+
+## Decision
+
+We will:
+
+* Define types and interfaces as in the attached code
+* Refactor existing code into the new structure and interfaces (more details in the next section)
+* Implement unit tests to ensure no backward compatibility issues
+* Establish a migration path where the previous implementation will coexist with the new one, ensuring no sudden breaking changes occur
+
+
+## Detailed Design
+
+The implementation of ADR-001 in CometBFT will require a medium-level refactor of the codebase. Below we describe the proposed strategy for this implementation, which is:
+
+* Add a new `PrivValidator` implementation that uses the `CryptoProvider` interface underneath.
+
+* New directories reorganization:
+Implementations of crypto providers (previously `Privval` implementations) should be in their own directory. See the [Directories reorganization](#directories-reorganization) section for more details.
+
+* **[Optional]** Adding [Keyring](https://github.com/cosmos/cosmos-sdk/blob/main/crypto/keyring/keyring.go) and [Record](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) for storing and loading providers.
+
+* **[Optional]** Use `Keyring` to load and instantiate validators when booting up a node.
+
+
+#### Create a single implementation for `PrivValidator`
+
+The current CometBFT codebase includes the following implementations of `PrivValidator`:
+
+* `FilePV`: Handles file-based private validators.
+* `SignerClient`: Manages remote signing.
+* `RetrySignerClient`: Provides retry mechanisms for remote signing.
+* `MockPV`: Used exclusively for testing purposes.
+
+We propose introducing a new implementation, `CryptoProviderPV`, which will unify and replace all the above implementations. This single implementation will act as an abstraction layer for the `PrivValidator` implementations mentioned above.
+
+**Current design:**
+
+```mermaid
+classDiagram
+    class PrivValidator {
+        <<interface>>
+    }
+
+    class FilePV {
+    }
+
+    class SignerClientPV {
+    }
+
+    class RetrySignerClientPV {
+    }
+
+    class MockPV {
+    }
+
+    PrivValidator <|.. FilePV
+    PrivValidator <|.. SignerClientPV
+    PrivValidator <|.. RetrySignerClientPV
+    PrivValidator <|.. MockPV
+```
+
+
+**Proposed design:**
+
+```mermaid
+classDiagram
+    class PrivValidator {
+        <<interface>>
+    }
+
+    class CryptoProvider {
+        <<interface>>
+    }
+
+    class CryptoProviderPV {
+    }
+
+    class FileCP {
+    }
+
+    class SocketSignerCP {
+    }
+
+    class RetrySocketSignerCP {
+    }
+
+    class MockCP {
+    }
+
+    PrivValidator <|.. CryptoProviderPV
+    CryptoProviderPV --> CryptoProvider
+    CryptoProvider <|.. FileCP
+    CryptoProvider <|.. SocketSignerCP
+    CryptoProvider <|.. RetrySocketSignerCP
+    CryptoProvider <|.. MockCP
+```
+
+For these new implementations, the current code for `File`, `SocketClient`, and `RetrySocketClient` will have to implement the `CryptoProvider` interface instead of the `PrivValidator` one.
+
+##### Code snippets for `CryptoProviderPV`
+
+As mentioned above, instead of having several implementations of `PrivValidator`, the proposal is to have only one that, by dependency injection, loads the corresponding `CryptoProvider` that offers the same functionality as the previous implementations of `PrivValidator`.
+
+Below is an example of what `CryptoProviderPV` would look like. Note that in this particular case, since the PrivateKey is managed inside the corresponding implementation, we're not passing that value to the signer. This is to avoid having to significantly change the code for `FilePV`.
This is also valid for all implementations that manage their private keys in their own logic.
+
+```go
+// CryptoProviderPV is the implementation of PrivValidator using CryptoProvider's methods
+type CryptoProviderPV struct {
+	provider CryptoProvider
+}
+
+// NewCryptoProviderPV creates a new instance of CryptoProviderPV
+func NewCryptoProviderPV(provider CryptoProvider) (*CryptoProviderPV, error) {
+	return &CryptoProviderPV{provider: provider}, nil
+}
+
+// SignVote signs a canonical representation of the vote. If signExtension is true, it also signs the vote extension.
+func (pv *CryptoProviderPV) SignVote(chainID string, vote *Vote, signExtension bool) error {
+	signer := pv.provider.GetSigner()
+
+	// code for getting voteBytes goes here
+	// voteBytes := ...
+
+	// The underlying signer needs these parameters so we pass them through SignerOpts
+	options := SignerOpts{
+		"chainID": chainID,
+		"vote":    vote,
+	}
+
+	sig, err := signer.Sign(voteBytes, options)
+	if err != nil {
+		return err
+	}
+	vote.Signature = sig
+	return nil
+}
+
+// SignProposal signs a canonical representation of the proposal
+func (pv *CryptoProviderPV) SignProposal(chainID string, proposal *Proposal) error {
+	signer := pv.provider.GetSigner()
+
+	// code for getting proposalBytes goes here
+	// proposalBytes := ...
+
+	// The underlying signer needs these parameters so we pass them through SignerOpts
+	options := SignerOpts{
+		"chainID":  chainID,
+		"proposal": proposal,
+	}
+
+	sig, err := signer.Sign(proposalBytes, options)
+	if err != nil {
+		return err
+	}
+	proposal.Signature = sig
+	return nil
+}
+
+// SignBytes signs an arbitrary array of bytes
+func (pv *CryptoProviderPV) SignBytes(bytes []byte) ([]byte, error) {
+	signer := pv.provider.GetSigner()
+	return signer.Sign(bytes, SignerOpts{})
+}
+
+```
+
+*Note:* Each provider (File, Socket, SignerClient, RetrySignerClient) will need to be modified to satisfy the `CryptoProvider` interface.
+
+
+#### Loading and storing
+
+When we refer to storing a `CryptoProvider`, we're essentially preserving two key elements: the type of crypto provider (e.g., file-based, hardware security module, remote signer, etc.) and its associated configuration parameters.
+
+These configuration parameters can vary widely depending on the provider type. For instance, a file-based provider might need to store file paths, while a hardware security module provider could require device identifiers or network addresses. Remote signers might need endpoint URLs and authentication details. By storing both the type and these specific configurations, we ensure that the `CryptoProvider` can be correctly instantiated.
+
+Alternatives:
+
+* *Low impact / minimal dependencies*: The corresponding `CryptoProvider` can be directly stored on disk in a dedicated directory in an encoding format of choice (text, JSON)
+
+* *Greater impact / better security*: Use cosmos-sdk's [Keyring](https://github.com/cosmos/cosmos-sdk/blob/8bfcf554275c1efbb42666cc8510d2da139b67fa/client/v2/autocli/keyring/interface.go#L11-L23) to manage `CryptoProvider` along with its private keys. This specifically applies to the `FilePV` implementation, which could store its private keys through `Keyring` instead of a file in the filesystem. This approach will require decoupling the `Keyring` package from the cosmos-sdk, which could be cumbersome.
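+
+As an illustration of the low-impact alternative, a hypothetical on-disk
+descriptor (the names here are ours, not part of the proposal) could pair the
+provider type with its provider-specific configuration:
+
+```go
+import "encoding/json"
+
+// ProviderDescriptor is a hypothetical serialized form of a CryptoProvider:
+// the provider type plus whatever configuration that type needs.
+type ProviderDescriptor struct {
+	Type   string          `json:"type"`   // e.g., "file", "socket-signer"
+	Config json.RawMessage `json:"config"` // provider-specific parameters
+}
+
+// Decoding Config is deferred to the concrete provider implementation; for
+// example, a file-based provider would unmarshal it into a struct holding
+// key and state file paths.
+```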
+
+
+#### Directories reorganization
+
+Implementations of crypto providers (previously `Privval` implementations) should be in their own directory:
+
+```plaintext
+cometbft/
+├── privval/
+│   ├── provider/
+│   │   ├── file.go
+│   │   ├── signer.go
+│   │   ├── retry_signer.go
+│   │   ├── mock.go
+├── types/
+│   ├── priv_validator.go
+```
+
+#### Other considerations
+
+##### Node startup
+
+The node's configuration file should include the name of the `CryptoProvider` to be loaded at startup. Also, the startup logic will need to be changed from creating a validator to loading a `CryptoProvider`.
+
+*Note:* It's important to note that during the migration path, both configurations (priv validators and crypto providers) will coexist. This means that if a `CryptoProvider` ID is passed in the configuration, the loader will give priority to that config and load the corresponding provider. However, if the provider ID is nil, it will load the priv validators as it always has. This approach ensures a smooth transition and maintains backward compatibility while allowing for the gradual adoption of the new `CryptoProvider` system.
+
+##### `PrivValidator` users
+
+No code that makes use of the `PrivValidator` interface should require changes or any adaptation. This is because the new `CryptoProviderPV` will implement the same `PrivValidator` interface, ensuring backward compatibility. The internal refactoring to use `CryptoProvider` will be encapsulated within the new implementation, allowing for a seamless transition without affecting the wider codebase that relies on `PrivValidator`.
+
+
+## Consequences
+
+### Positive
+
+* Single source of truth
+* Easier to extend
+* Unit tests for each crypto package
+* Greater maintainability
+* Incentivizes the addition of implementations instead of forks
+* Decouples behavior from implementation
+
+### Negative
+
+* It will involve an effort to adapt existing code.
+* It will require attention to detail and auditing.
+
+### Neutral
+
+* It will involve extensive testing.
\ No newline at end of file
diff --git a/docs/references/architecture/adr-118-mempool-lanes.md b/docs/references/architecture/adr-118-mempool-lanes.md
new file mode 100644
index 00000000000..3bc20237f6e
--- /dev/null
+++ b/docs/references/architecture/adr-118-mempool-lanes.md
@@ -0,0 +1,728 @@
+# ADR 118: Mempool QoS
+
+## Changelog
+
+- 2024-04-12: Initial notes (@hvanz)
+- 2024-04-12: Comments on the notes (@sergio-mena)
+- 2024-04-17: Discussions (@sergio-mena @hvanz)
+- 2024-04-18: Preliminary structure (@hvanz)
+- 2024-05-01: Add Context and Properties (@hvanz)
+- 2024-05-21: Add more Properties + priority mempool (@sergio-mena)
+- 2024-06-13: Technical design (@hvanz)
+- 2024-07-02: Updates based on reviewer's comments (@hvanz, @sergio-mena)
+- 2024-07-09: Updates based on reviewer's comments (@hvanz)
+- 2024-09-13: Added pre-confirmations section (@sergio-mena)
+- 2024-09-27: Allow lanes to have same priority + lane capacities (@hvanz)
+
+## Status
+
+Accepted. Tracking issue: [#2803][tracking-issue].
+
+## Context
+
+In the current implementation, the only property that the mempool tries to enforce when processing
+and disseminating transactions is maintaining the order in which transactions arrive at the nodes,
+that is, a FIFO ordering. However, ensuring a strict transmission order over the network proves
+challenging due to inherent characteristics of the underlying communication protocols that cause
+message delays and potential reordering.
Consequently, while many Tendermint Core and CometBFT
+applications have always assumed this ordering holds, the FIFO-ness of transactions is not
+guaranteed and is offered only as a best effort.
+
+Beyond the apparent FIFO sequencing, transactions in the mempool are treated equally, meaning that
+no distinction is made as to which transactions are disseminated first or which transactions the mempool
+offers to the proposer when creating the next block. In practice, however, not all transactions have
+the same importance for the application logic, especially when it comes to latency requirements.
+Depending on the application, we may think of countless categories of transactions based on their
+importance and requirements, spanning from IBC messages to transactions for exchanges, for smart
+contract execution, for smart contract deployment, grouped by SDK modules, and so on. Even
+transactions prioritized by economic incentives could be given preferential treatment. Or big
+transactions, regardless of their nature, could be categorized as low priority, to mitigate
+potential attacks on the mempool.
+
+The goal of this document is thus to propose a mechanism enabling the mempool to prioritize
+transactions by *classes*, for processing and dissemination, directly impacting block creation and
+transaction latency. In IP networking terminology, this is known as Quality of Service (QoS). By
+providing certain QoS guarantees, developers will be able to more easily estimate when transactions
+will be disseminated and reaped from the mempool to be included in a block.
+
+In practical terms, we envision an implementation of the transaction class abstraction as *mempool
+lanes*. The application will be allowed to split the mempool transaction space into a hierarchy of
+lanes, with each lane operating as an independent mempool. At the same time, all of them need to be
+coordinated to ensure the delivery of the desired levels of QoS.
+
+Note that improving the dissemination protocol to reduce bandwidth and/or latency is a separate
+concern and falls outside the scope of this proposal. Likewise, graceful degradation under high load
+is an orthogonal problem to transaction classification, although the latter may help improve the former.
+
+## Properties
+
+Before jumping into the design of the proposal, we define more formally the properties supported by
+the current implementation of the mempool. Then we state what properties the new mempool should
+offer to guarantee the desired QoS. The following definitions are common to all properties.
+
+When attempting to add an incoming transaction to the mempool, the node first checks that it is not
+already in the cache before checking its validity with the application.
+
+:memo: _Definition_: We say that a node receives a transaction `tx` _for the first time_ when the
+node receives `tx` and `tx` is not in the cache.
+
+By this definition, it is possible that a node receives a transaction "for the first time", then
+gets the transaction evicted from the cache, and at a later time receives it "for the first time"
+again. The cache implements a Least-Recently Used (LRU) policy for removing entries when the
+cache is full.
+
+:memo: _Definition_: Given any two different transactions `tx1` and `tx2`, in a given node, we say that:
+1. `tx1` is *validated before* `tx2`, when `tx1` and `tx2` are received for the first time, and `tx1`
+is validated against the application (via `CheckTx`) before `tx2`,
+1.
`tx1` is *rechecked before* `tx2`, when `tx1` and `tx2` are in the mempool and `tx1` is
+re-validated (rechecked via `CheckTx`) before `tx2`,
+1. `tx1` is *reaped before* `tx2`, when `tx1` is reaped from the mempool to be included in a block
+   proposal before `tx2`,
+1. `tx1` is *disseminated before* `tx2`, when `tx1` is sent to a given peer before `tx2`.
+
+In 2, both transactions are rechecked at the same height, because both are in the mempool.
+
+In 4, note that in the current implementation there is one dissemination routine per peer, so it
+could happen that `tx2` is sent to a peer before `tx1` is sent to a different peer.
+Hence the importance of the expression "to a given peer" in that definition.
+
+### Current mempool
+
+As stated above, the current mempool offers a best-effort FIFO ordering of transactions. We state
+this property as follows.
+
+:parking: _Property_ **FIFO ordering of transactions**: We say that the mempool makes a best effort
+in maintaining the FIFO ordering of transactions when transactions are validated, rechecked, reaped,
+and disseminated in the same order in which the mempool has received them.
+
+More formally, given any two different transactions `tx1` and `tx2`, if a node's mempool receives
+`tx1` before receiving `tx2`, then `tx1` will be validated, rechecked, reaped, and disseminated
+before `tx2` (as defined above).
+
+Note that a node's mempool can receive a transaction either from a `broadcast_tx_*` RPC endpoint or
+from a peer.
+
+This property guarantees the FIFO ordering at any given node, but it cannot be generalized to all
+the nodes in the network because the property does not hold at the network level. Hence, FIFO
+ordering on the whole system is best effort.
+
+### Mempool with QoS
+
+The main goal of QoS is to guarantee that certain transactions have lower latency than others.
+Before stating this property, we need to make some definitions.
+
+:memo: _Definition_: A *transaction class* is a disjoint set of transactions having some common
+characteristics as defined by the application.
+
+A transaction may only have one class. If it is not assigned any specific class, it will be assigned
+a *default class*, which is a special class always present in any set of classes. This is analogous
+to the _native VLAN_ for untagged traffic in an 802.1Q network. Because no transaction can belong to
+two or more classes, transaction classes form disjoint sets, that is, the intersection between any two
+classes is empty. Also, the set of all transactions in the mempool is the union of the transactions in all
+classes.
+
+:memo: _Definition_: Each class has a *priority* and two classes cannot have the same priority.
+Therefore all classes can be ordered by priority.
+
+When a transaction is received for the first time and validated via `CheckTx`, the application MAY
+return the class that it assigns to the transaction. If it actually returns a class, the mempool
+MUST use it to prioritize the transaction. When transactions are rechecked, applications MAY return
+a class, but the mempool will discard it.
+
+Given these definitions, we want the proposed QoS mechanism to offer the following property:
+
+#### Basic properties
+
+:parking: _Property_ **Priorities between classes**: Transactions belonging to a certain class will
+be reaped and disseminated before transactions belonging to another class with lower priority.
+
+Formally, given two transaction classes `c1` and `c2`, with `c1` having higher priority than `c2`, if
+the application assigns the classes `c1` and `c2` respectively to transactions `tx1` and `tx2`, then
+`tx1` will be reaped and disseminated before `tx2`.
+
+More importantly, as a direct consequence of this property, `tx1` will be disseminated faster and it
+will be included in a block before `tx2`. Thus, `tx1` will have a lower latency than `tx2`.
+Currently, it is not possible to guarantee this kind of property.
+
+:memo: _Definition_: The *latency of a transaction* is the difference between the time at which a
+user or client submits the transaction for the first time to any node in the network, and the
+timestamp of the block in which the transaction was finally included.
+
+We also want to keep the FIFO ordering within each class (for the time being):
+
+:parking: _Property_ **FIFO ordering per class**: For transactions within the same class, the
+mempool will maintain a FIFO order within the class when transactions are validated, rechecked,
+reaped, and disseminated.
+
+Given any two different transactions `tx1` and `tx2` belonging to the same class, if the mempool
+receives `tx1` before receiving `tx2`, then:
+- `tx1` will be validated and rechecked against the application (via `CheckTx`) before `tx2`, and
+- `tx1` will be reaped and disseminated before `tx2`.
+
+As a consequence, given that classes of transactions have a sequential ordering, and that classes do
+not have elements in common, we can state the following property:
+
+:parking: _Property_ **Partial ordering of all transactions**: The set of all the transactions in
+the mempool, regardless of their classes, will have a *partial order*.
+
+This means that some pairs of transactions are comparable and, thus, have an order, while others
+are not.
+
+#### Network-wide consistency
+
+The properties presented so far may be interpreted as per-node properties.
+However, we need to define some network-wide properties in order for a mempool QoS implementation
+to be useful and predictable for the whole appchain network.
+These properties are expressed in terms of consistency of the information, configuration, and behavior
+across nodes in the network.
+
+:parking: _Property_ **Consistent transaction classes**: For any transaction `tx`,
+and any two correct nodes $p$ and $q$ that receive `tx` *for the first time*,
+$p$ and $q$ MUST have the same set of transaction classes and their relative priority and configuration.
+
+The property is only required to hold for on-the-fly transactions:
+if a node receives a (late) transaction that has already been decided, this property does not enforce anything.
+The same goes for duplicate transactions.
+Notice that, if this property does not hold, it is not possible to guarantee any property across the network,
+such as transaction latency as defined above.
+
+:parking: _Property_ **Consistent transaction classification**: For any transaction `tx`
+and any two correct nodes $p$ and $q$ that receive `tx` *for the first time*,
+$p$'s application MUST classify `tx` into the same transaction class as $q$'s application.
+
+This property only makes sense when the previous property ("Consistent transaction classes") defined above holds.
+Even if we ensure consistent transaction classes, if this property does not hold, a given transaction
+may not receive the same classification across the network, and it will thus be impossible to reason
+about any network-wide guarantees we want to provide for that transaction.
+
+Additionally, it is important to note that these two properties also constrain the way transaction
+classes and transaction classification logic can evolve in an existing implementation.
+If either the transaction classes or the classification logic is not modified in a coordinated manner in a working system,
+there will be at least a period during which these two properties may not hold for all transactions.
+
+## Alternative Approaches
+
+### CometBFT Priority Mempool
+
+CometBFT used to have a `v1` mempool, specified in Tendermint Core [ADR067][adr067] and deprecated as of `v0.37.x`,
+which supported per-transaction priority assignment.
+The key point of the priority mempool's design was that `CheckTxResponse` was extended with a few fields,
+one of which was an `int64` that the application could use to provide a priority to the transaction being checked.
+
+This design can be seen as partially addressing the specification of a Mempool with QoS
+presented in the previous section. Every possible value of the `int64` priority field returned by the application
+can be understood as a _different_ traffic class.
+Let us examine whether the properties specified above are fulfilled by the priority mempool design
+as described in [ADR067][adr067]:
+
+1. Partial ordering of all transactions is maintained because the design still keeps a FIFO queue for gossiping transactions.
+ Also, transactions are reaped in non-increasing priority order first, and then in FIFO order
+ for transactions with equal priority (see `ReapMaxBytesMaxGas`'s [docstring][reapmaxbytesmaxgas]).
+1. Since the priority mempool uses FIFO for transactions of equal priority, it also fulfills the "FIFO ordering per class" property.
+ The problem here is that, since every value of the priority `int64` field is considered a different transaction class,
+ there are virtually unlimited traffic classes.
+ So it is too easy for an application to end up using hundreds, if not thousands, of transaction classes at a given time.
+ In this situation, "FIFO ordering per class", while fulfilled, becomes a corner case and thus does not add much value.
+1. The consistent transaction classes property is trivially fulfilled, as the set of transaction classes never changes:
+ it is the set of all possible values of an `int64`.
+1. Finally, the priority mempool design does not make any provisions on how the application is to evolve its prioritization
+ (i.e., transaction classification) logic.
+ Therefore, the design does not guarantee the fulfillment of the consistent transaction classification property.
+
+The main hindrance to the wide adoption of the priority mempool was
+the dramatic reduction of the _observable_ FIFO guarantees for transactions (as explained in point 2 above)
+with respect to the `v0` mempool.
+
+Besides, the lack of provisions for evolving the prioritization logic (point 4 above) may also have gotten
+in the way of adoption.
+
+
+### Solana
+
+#### Introduction to Gulf Stream and Comparison with CometBFT's Mempool
+
+A core part of Solana's design is [Gulf Stream][gulf-stream],
+which is marketed as a "mempool-less" way of processing in-flight transactions.
+Similarly to a CometBFT-based chain, the sequence of leaders (nodes that produce blocks) is known in advance.
+However, unlike CometBFT, Solana fixes the sequence of leaders for a whole epoch, whose typical length is approximately 2 days
+(what happens if a leader fails in the middle of an epoch?).
+According to the Gulf Stream design, rather than maintaining a mempool at all nodes to ensure transactions
+will reach _any_ leader/validator, transactions are directly sent to the current leader and the next,
+according to the sequence of leaders calculated locally (known as the _leader schedule_).
+As a result, Gulf Stream does not use gossip-based primitives to disseminate transactions,
+but UDP packets sent directly to the current (and next) leader's IP address.
+One of the main reasons for the adoption of gossip protocols by Tendermint Core and CometBFT (coming from Bitcoin and Ethereum)
+is censorship resistance. It is not clear how Gulf Stream deals with an adversary controlling a part of the network
+that stands in the way of those UDP packets containing submitted transactions.
+
+#### Transaction Priority Design
+
+In Solana, transaction priority is controlled by fees: they introduce the concept of [_priority fees_][solana-prio-fees].
+The priority fee is an optional configuration parameter when submitting a transaction,
+which allows the submitter to increase the likelihood of their transaction making it into a block.
+The priority fee is provided in terms of _price per Compute Unit_ (CU), priced in [micro-lamports per CU][prio-fee-price].
+A CU is the equivalent of Cosmos's _gas_, and so, the priority fee is analogous (in concept)
+to the Cosmos SDK's `--gas-prices` [flag][sdk-gas-prices].
+The main difference is that the SDK (currently) uses `--gas-prices`
+to set up a per-node acceptance threshold on gas prices,
+whereas Solana uses the (default or user-configured) priority fee as the transaction's _actual_ priority.
+
+This is very similar to the way CometBFT's priority mempool in `v0.37.x` was supposed to be used by applications,
+but in a monolithic manner: there is no "priority" abstraction in Solana as there is nothing similar to ABCI.
+In short, the fees _are_ the priority.
+Thus, if we were to check the properties specified [above](#mempool-with-qos),
+with the caveat that Solana does not have a built-in mempool,
+we would reach the same conclusions as with CometBFT's `v0.37.x` [priority mempool](#cometbft-priority-mempool).
+Namely, a _degradation_ in observable FIFO guarantees (affecting applications that depend on them for performance),
+and a lack of provisions for evolving priority classification in a consistent manner.
+The latter may appear less important as transactions are directly sent to the current leader,
+but it is not clear how retried transactions in periods of high load can receive consistent priority treatment.
+
+### Ethereum Pre-confirmations
+
+#### Brief Explanation
+
+Ethereum pre-confirmations are a mechanism designed to reduce transaction latency. Justin Drake's [proposal][based-preconfs]
+for _based pre-confirmations_ has gained attention in recent months in the Ethereum research community,
+though similar ideas date back to Bitcoin's [0-confs][Oconfs].
+
+Pre-confirmations occur in the context of _fast games_, techniques applied between consecutive Layer-1 blocks
+to improve certain performance guarantees and help manage _MEV_ (Maximal Extractable Value).
+
+The process is straightforward.
A user submits a transaction and requests a _preconfer_ (a validator) to guarantee specific handling
+of that transaction, typically for a fee called a _tip_.
+In exchange, the preconfer signs a _promise_, most often guaranteeing transaction inclusion in the next block.
+The preconfer can only claim the tip if the promise is fulfilled, and validators opting in to become preconfers
+accept new slashing conditions related to _liveness_ (failure to propose a block) and _safety_ (failure to meet the promise).
+
+This design enables various implementations of pre-confirmations, and it's still early to determine which form will dominate in Ethereum.
+
+#### Comparison to Mempool QoS
+
+Unlike Mempool QoS (the design described [below](#detailed-design)), which prioritizes transactions based
+on network resource availability,
+pre-confirmations focus on guarantees to individual users about transaction treatment and certainty of inclusion.
+While the connection to MEV is not fully understood yet, pre-confirmations may provide some mitigation against MEV-related risks.
+
+Pre-confirmations can also coexist with Mempool QoS in CometBFT-based blockchains.
+For instance, particular Mempool QoS configurations, such as a FIFO high-priority lane that is allowed to
+starve lower-priority lanes, could be part of an implementation of pre-confirmations in a CometBFT-based chain.
+
+### Skip's Block-SDK Lanes
+
+As of version `v0.47.x`, the Cosmos SDK offers application developers the possibility of using an [Application-Side Mempool][sdk-app-mempool].
+It is a mempool structure maintained by the SDK application and populated with valid transactions received via `CheckTx`.
+An application maintaining such a mempool is free to define the way transactions are ordered, reaped for a block, aggregated, removed, etc.
+Typically, upon `PrepareProposal`, the SDK application disregards the transactions proposed by CometBFT,
+and rather proposes transactions reaped from its own mempool, according to its mempool's rules.
+
+The Skip team have released an extension of the Application-Side mempool, called [Block-SDK][skip-block-sdk],
+that introduces the concept of _lanes_, turning the mempool "into a *highway* consisting of individual *lanes*".
+The concept of lanes, introduced in Skip's Block-SDK, is closely aligned with Mempool QoS as specified above.
+Indeed, we use the same term, _lanes_, in the [Detailed Design](#detailed-design) section below,
+which describes a minimum viable product (MVP) implementation of the concept of transaction classes.
+
+The main difference between Skip's Block-SDK's lanes and the design we present below is that
+the Block-SDK implements mempool lanes at the application level, whereas this ADR proposes a specification and a design at the CometBFT level,
+thus including provisions for **transaction gossiping** as an integral part of it.
+As a result, the Block-SDK's lanes can be used to implement the Mempool QoS specification in everything that relates to block production,
+but not at the network gossip level.
+
+Importantly, both designs, Skip's Block SDK and the one described [below](#detailed-design), are complementary.
+An application using Skip's Block-SDK lanes already contains transaction classification logic, and so
+it can easily be extended to provide `CheckTx` with the information needed by an implementation of CometBFT mempool QoS
+(such as the design we propose below) to also achieve more predictable transaction latency,
+depending on the lane/class a transaction belongs to.
+
+## Decision
+
+Implement an MVP following the design in the next section.
+
+## Detailed Design
+
+This section describes the architectural changes needed to implement an MVP of lanes in the
+mempool. The following is a summary of the key design decisions:
+- [[Lanes definition](#lanes-definition)] The list of lanes and their corresponding priorities
+ will be hardcoded in the application logic.
+- [[Initialization](#initialization)] How the application configures lanes on CometBFT.
+- [[Internal data structures](#internal-data-structures)] There will be one concurrent list (CList) data structure per
+ lane.
+- [[Configuration](#configuration)] All lanes will share the same mempool configuration.
+- [[Adding transactions to the mempool](#adding-transactions-to-the-mempool)] When validating a transaction via CheckTx, the
+ application will optionally return a lane for the transaction.
+- [[Transaction dissemination](#transaction-dissemination)] We will continue to use the current P2P
+ channel for disseminating transactions, and we will implement in the mempool the logic for
+ selecting the order in which to send transactions.
+- [[Reaping transactions for block creation](#reaping-transactions-for-block-creation)]
+ Transactions will be reaped from higher-priority lanes first, preserving intra-lane FIFO ordering.
+- [[Prioritization logic](#prioritization-logic)] For disseminating and reaping transactions, the
+ scheduling algorithm should prevent starvation of low-priority lanes.
+
+### Lanes definition
+
+The list of lanes and their associated priorities will be hardcoded in the application logic. A lane
+is identified by a **name** of type `string` and assigned a **priority** of type `uint32`. The
+application also needs to define which of the lanes is the **default lane**, which is not
+necessarily the lane with the lowest priority.
+
+To obtain the lane information from the application, we need to extend the ABCI `Info` response to
+include the following fields. These fields need to be filled in by the application only if it
+wants to implement lanes.
+```protobuf
+message InfoResponse {
+ ...
+ map<string, uint32> lane_priorities = 6;
+ string default_lane = 7;
+}
+```
+The field `lane_priorities` is a map from lane identifiers to priorities. Different lanes may have
+the same priority. On the mempool side, lane identifiers will mainly be used for user interfacing
+(logging, metric labels).
+
+The lowest priority a lane may have is 1. Higher values correspond to higher priorities. The value 0
+is reserved for when the application does not have a lane to assign, in which case it leaves the `lane_id`
+field empty in the `CheckTx` response (see [below](#adding-transactions-to-the-mempool)). This
+happens either when the application does not classify transactions, or when the transaction is
+invalid.
+
+On receiving the information from the app, CometBFT will validate that:
+- `default_lane` is a key in `lane_priorities`, and
+- `lane_priorities` is empty if and only if `default_lane` is empty.
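+
+To make these rules concrete, here is a minimal, hypothetical sketch of this validation. The
+function name, the error messages, and the extra check that the reserved priority value 0 is not
+used are illustrative assumptions, not the actual CometBFT code.
+```golang
+package mempool
+
+import (
+    "errors"
+    "fmt"
+)
+
+// validateLaneInfo checks the lane information returned by the application
+// in InfoResponse, following the rules stated above.
+func validateLaneInfo(lanePriorities map[string]uint32, defaultLane string) error {
+    // The app does not implement lanes: both fields must be empty.
+    if len(lanePriorities) == 0 && defaultLane == "" {
+        return nil
+    }
+    // Otherwise both fields must be set, ...
+    if len(lanePriorities) == 0 || defaultLane == "" {
+        return errors.New("lane_priorities and default_lane must be both empty or both set")
+    }
+    // ... the default lane must be one of the defined lanes, ...
+    if _, ok := lanePriorities[defaultLane]; !ok {
+        return fmt.Errorf("default lane %q is not a key in lane_priorities", defaultLane)
+    }
+    // ... and priority 0 is reserved, so no lane may use it.
+    for id, prio := range lanePriorities {
+        if prio == 0 {
+            return fmt.Errorf("lane %q uses reserved priority 0", id)
+        }
+    }
+    return nil
+}
+```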
+
+### Initialization
+
+Currently, querying the app for `Info` happens during the handshake between CometBFT and the app,
+during the node initialization, and only when state sync is not enabled. The `Handshaker` first
+sends an `Info` request to fetch the app information, and then replays any stored block needed to
+sync CometBFT with the app. The lane information is needed regardless of whether state sync is
+enabled, so one option is to query the app information outside of the `Handshaker`.
+
+In this proposed approach, updating the lane definitions will require a single governance proposal
+for updating the software. CometBFT will not need to deal with dynamic lane changes: it will just
+need to set up the lanes when starting up (whether afresh or in recovery mode).
+
+Different nodes also need to agree on the lanes they use. When a node connects to a peer, they both
+perform a handshake to agree on some basic information (see `DefaultNodeInfo`). Since the
+application includes the lane definitions, it suffices to ensure that both nodes agree on the
+version of the application. Although the application version is included in `DefaultNodeInfo`, there
+is currently no check for compatibility between the versions. To address this, we would need to
+modify the P2P handshake process to validate that the application versions are compatible.
+
+Finally, this approach is compatible with applications that need to swap binaries when
+catching up or upgrading, such as SDK applications using [Cosmovisor][cosmovisor].
+When a node is catching up (i.e., state or block syncing), its peers will detect
+that the node is late and will not send it any transactions until it is caught up.
+So, the particular lane configuration of the node is irrelevant while catching up.
+When going through a Cosmovisor-driven upgrade, all nodes will swap binaries at the same
+height (which is specified by the corresponding Software Upgrade gov proposal).
+If the new version of the software contains a modified lane configuration
+(and therefore new transaction classification logic), those changes will take effect
+in a coordinated manner thanks to the regular Cosmovisor workflow.
+
+### Internal data structures
+
+In the mempool, a lane is defined by its priority:
+```golang
+type Lane uint32
+```
+
+Currently, the `CListMempool` data structure has two fields to store and access transactions:
+```golang
+txs *clist.CList // Concurrent list of mempool entries.
+txsMap sync.Map // Map of type TxKey -> *clist.CElement, for quick access to elements in txs.
+```
+
+With the introduction of lanes, the main change will be to divide the `CList` data structure into
+$N$ `CList`s, one per lane. `CListMempool` will have the following fields:
+```golang
+lanes map[Lane]*clist.CList
+txsMap sync.Map // Map of type TxKey -> *clist.CElement, for quick access to elements in lanes.
+txLanes sync.Map // Map of type TxKey -> Lane, for quick access to the lane corresponding to a tx.
+
+// Fixed variables set during initialization.
+defaultLane Lane
+sortedLanes []Lane // Lanes sorted by priority
+```
+The auxiliary fields `txsMap` and `txLanes` provide direct access to the mempool
+entries and to the lane of a given transaction, respectively.
+
+If the application does not implement lanes (that is, it responds with empty values in
+`InfoResponse`), then `defaultLane` will be set to 1, and `lanes` will have only one entry for the
+default lane. In this case, the new mempool's behaviour will be equivalent to that of the current mempool.
+
+`CListMempool` also contains the cache, which is only needed before transactions have a lane
+assigned. Since the cache is independent of the lanes, we do not need to modify it.
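+
+As an illustration of how these structures could be initialized from the lane information in
+`InfoResponse`, here is a hedged sketch. It assumes, as the fields above do, that each lane maps to
+a distinct priority value; the function name and import path are assumptions, not the final
+implementation.
+```golang
+package mempool
+
+import (
+    "sort"
+
+    "github.com/cometbft/cometbft/internal/clist" // assumed import path
+)
+
+// setupLanes creates one CList per lane and sorts the lanes by decreasing
+// priority, as needed for reaping and dissemination.
+func setupLanes(lanePriorities map[string]uint32) (map[Lane]*clist.CList, []Lane) {
+    lanes := make(map[Lane]*clist.CList, len(lanePriorities))
+    sortedLanes := make([]Lane, 0, len(lanePriorities))
+    for _, prio := range lanePriorities {
+        lane := Lane(prio)
+        lanes[lane] = clist.New()
+        sortedLanes = append(sortedLanes, lane)
+    }
+    // Highest-priority lanes first.
+    sort.Slice(sortedLanes, func(i, j int) bool { return sortedLanes[i] > sortedLanes[j] })
+    return lanes, sortedLanes
+}
+```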
+
+### Configuration
+
+For an MVP, we do not need to have a customized configuration for each lane. The current mempool
+configuration will continue to apply to the mempool, which is now a union of lanes. The total size
+of the mempool will be the sum of the sizes of all lanes. Therefore, the mempool capacities as
+currently defined in the configuration will put an upper limit on the union of all lanes. These
+configurations are:
+- `Size`, the total number of transactions allowed in the mempool,
+- `MaxTxsBytes`, the maximum total size, in bytes, of all transactions in the mempool, and
+- `MaxTxBytes`, the maximum size in bytes of a single transaction accepted into the mempool.
+
+However, we still need to enforce limits on each lane's capacity. Without such limits, a
+low-priority lane could end up occupying all the mempool space. Since we want to avoid introducing
+new configuration options unless absolutely necessary, we propose two simple approaches for
+partitioning the mempool space:
+
+1. Proportionally to lane priorities: this approach could lead to under-utilization of the mempool if
+ there are significant discrepancies between priority values, as it would allocate space unevenly.
+2. Evenly across all lanes: assuming high-priority transactions are smaller in size than
+ low-priority transactions, this approach would still allow for more high-priority transactions to
+ fit in the mempool compared to lower-priority ones.
+
+Note that each lane's capacity will be limited both by the number of transactions and by their total
+size in bytes.
+
+For the MVP, we've chosen the second approach. If users find that the lane capacity is insufficient,
+they still have the option of increasing the total mempool size, which will proportionally increase
+the capacity of all lanes. In future iterations, we may introduce more granular control over lane
+capacities if needed.
+
+Additionally, the `Recheck` and `Broadcast` flags will apply to all lanes or to none. Remember that,
+if the application's `PrepareProposal` logic can ever add a new transaction, rechecking the remaining
+transactions in the mempool is _always_ mandatory, so there is no point in disabling `Recheck` per lane.
+
+### Adding transactions to the mempool
+
+When validating a transaction received for the first time with `CheckTx`, the application will
+optionally return its lane identifier in the response.
+```protobuf
+message CheckTxResponse {
+ ...
+ string lane_id = 12;
+}
+```
+The callback that handles the first-time CheckTx response will append the new mempool entry to the
+`CList` of the lane that `lane_id` maps to, and update the other auxiliary variables accordingly.
+If `lane_id` is an empty string, it means that the application did not set any lane in the response
+message, so the transaction will be assigned to the default lane.
+
+### Removing transactions from the mempool
+
+A transaction may be removed in two scenarios: when updating the mempool with a list of committed
+transactions, or during rechecking if the transaction is reassessed as invalid. In either case, the
+first step is to identify the lane the transaction belongs to by accessing the `txLanes` map. Then,
+we remove the entry from the CList corresponding to its lane and update the auxiliary variables
+accordingly.
+
+As an optimization, we could prioritize the removal of transactions from high-priority lanes first.
+The broadcast goroutines are constantly reading the list of transactions to disseminate them, and
+there is no guarantee that they will not send transactions that are about to be removed.
+
+When updating the mempool, there is potential for a slight optimization by removing transactions
+from different lanes in parallel. To achieve this, we would first need to preprocess the list of
+transactions to determine the lane of each transaction. However, this optimization has minimal
+impact if the committed block contains few transactions. Therefore, we decided to exclude it from
+the MVP.
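+
+Putting the addition and removal paths together, the following hedged sketch shows the bookkeeping
+involved, building on the `CListMempool` fields shown earlier. The method and entry names
+(`addTx`, `removeTxByKey`) are illustrative assumptions, not the actual implementation.
+```golang
+// addTx appends a validated transaction to the given lane (already resolved
+// to the default lane if CheckTx returned an empty lane_id) and updates the
+// auxiliary maps.
+func (mem *CListMempool) addTx(tx types.Tx, lane Lane) {
+    elem := mem.lanes[lane].PushBack(tx)
+    mem.txsMap.Store(tx.Key(), elem)
+    mem.txLanes.Store(tx.Key(), lane)
+}
+
+// removeTxByKey looks up the transaction's lane, unlinks the entry from that
+// lane's CList, and cleans up the auxiliary maps.
+func (mem *CListMempool) removeTxByKey(key types.TxKey) error {
+    laneVal, ok := mem.txLanes.Load(key)
+    if !ok {
+        return errors.New("transaction not found in any lane")
+    }
+    elemVal, ok := mem.txsMap.Load(key)
+    if !ok {
+        return errors.New("transaction entry not found")
+    }
+    elem := elemVal.(*clist.CElement)
+    mem.lanes[laneVal.(Lane)].Remove(elem)
+    elem.DetachPrev() // let broadcast goroutines holding the element move on
+    mem.txsMap.Delete(key)
+    mem.txLanes.Delete(key)
+    return nil
+}
+```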
+
+### Transaction dissemination
+
+For broadcasting transactions from multiple lanes, we have considered two possible approaches:
+1. Reserve $N$ p2p channels for use by the mempool. P2P channels have priorities that we can reuse
+ as lane priorities. There is a maximum of 256 P2P channels, which limits the number of lanes.
+2. Continue using the current P2P channel for disseminating transactions and implement logic within
+ the mempool to select the order of transactions to put in the channel. This option theoretically
+ allows for an unlimited number of lanes, constrained only by the nodes’ capacity to store the
+ lane data structures.
+
+We choose the second approach for its flexibility, allowing us to start with a simple scheduling
+algorithm that can be refined over time (see below). Another reason is that, with the first option, we
+would need to initialize channels dynamically (currently a fixed list of channels is passed as
+node info) and to assign lanes to channels.
+
+Before modifying the dissemination logic, we need to refactor the current implementation and the
+`Mempool` interface to clearly separate the broadcast goroutine in the mempool reactor from
+`CListMempool`, which contains the mempool data structures. `CListMempool` provides two methods used
+by the broadcast code, `TxsWaitChan() <-chan struct{}` and `TxsFront() *clist.CElement`, which are
+just wrappers around the methods `WaitChan` and `Front` of the `CList` implementation. In
+particular, `TxsFront` leaks implementation details outside the `Mempool` interface.
+
+### Reaping transactions for block creation
+
+In the current single-lane mempool, the function `ReapMaxBytesMaxGas(maxBytes, maxGas)` collects
+transactions in FIFO order from the CList until reaching either `maxBytes` or `maxGas` (both of
+these values are consensus parameters).
+
+With multiple CLists, we need to collect transactions from higher-priority lanes first, also in FIFO
+order, continuing with successive lanes in the `sortedLanes` array, that is, in decreasing priority
+order, and breaking the iteration when reaching `maxBytes` or `maxGas`, as sketched below. Note that the mempool is
+locked during `ReapMaxBytesMaxGas`, so no transaction will be added to or removed from the mempool
+during reaping.
+
+This simple algorithm, though good enough for an MVP, does not guarantee that low-priority lanes
+will not starve. That is why we would prefer to implement one that is starvation-free, as explained in the
+next section. It could be the same algorithm as, or similar to, the one used for transaction
+dissemination.
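+
+For reference, a hedged sketch of this simple, priority-first reaping loop; the `mempoolTx` entry
+type (with `tx` and `gasWanted` fields) is hypothetical, and the size accounting is simplified with
+respect to a real implementation.
+```golang
+// ReapMaxBytesMaxGas visits lanes in decreasing priority order and, within
+// each lane, takes transactions in FIFO order until a limit is reached.
+// The mempool is assumed to be locked by the caller.
+func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
+    var txs types.Txs
+    var totalBytes, totalGas int64
+    for _, lane := range mem.sortedLanes {
+        for e := mem.lanes[lane].Front(); e != nil; e = e.Next() {
+            memTx := e.Value.(*mempoolTx) // hypothetical entry type
+            // A value of -1 means the corresponding limit is disabled.
+            if maxBytes > -1 && totalBytes+int64(len(memTx.tx)) > maxBytes {
+                return txs
+            }
+            if maxGas > -1 && totalGas+memTx.gasWanted > maxGas {
+                return txs
+            }
+            totalBytes += int64(len(memTx.tx))
+            totalGas += memTx.gasWanted
+            txs = append(txs, memTx.tx)
+        }
+    }
+    return txs
+}
+```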
+
+### Prioritization logic
+
+For transaction dissemination and for reaping transactions for creating blocks, we want a scheduling
+algorithm that satisfies the properties "Priorities between classes" and "FIFO ordering per class".
+This means that it must support selection by _weight_, ensuring each lane gets a fraction of the P2P
+channel capacity proportional to its priority. Additionally, we want the algorithm to be _fair_ to
+prevent starvation of low-priority lanes.
+
+A first option that meets these criteria is the current prioritization algorithm on the P2P reactor,
+which we could easily reimplement in the mempool. It works as follows:
+- On each P2P channel, the variable `recentlySent` keeps track of how many bytes were recently sent
+ over the channel. Every time data is sent, increase `recentlySent` by the number of bytes
+ written to the channel. Every 2 seconds, decrease `recentlySent` by 20% on all channels (these
+ values are fixed).
+- When sending the next message, [pick the channel][selectChannelToGossipOn] whose ratio
+ `recentlySent/Priority` is the lowest.
+
+Drawing on the extensive research in operating systems and networking, we can pick for the MVP an existing
+scheduling algorithm that meets these requirements and is straightforward to implement, such as a
+variant of the [Weighted Round Robin][wrr] (WRR) algorithm. We choose this option as it gives us
+more flexibility for improving the logic in the future, for example, by adding a mechanism for
+congestion control or by allowing some lanes to have customized, non-FIFO scheduling algorithms.
+
+### Validating lanes of received transactions
+
+Transactions are transmitted without lane information because peers cannot be trusted to send the
+correct data. A node may take advantage of the network by sending lower-priority transactions before
+higher-priority ones. Although the receiving node could easily verify the priority of a transaction
+when it calls `CheckTx`, it cannot detect if a peer is sending transactions out of order over a
+single P2P channel. For the moment, we leave out of the MVP any mechanism for detecting and possibly
+penalizing nodes for this kind of behaviour.
+
+## Alternative designs
+
+### Identify lanes by their priorities
+
+In the initial prototype we identified lanes by their priorities, meaning each priority could only
+be assigned to a single lane. This simplified approach proved too restrictive for applications. To
+address this, we now identify lanes by `string` names, decoupling lane identifiers from their
+priorities.
+
+### One CList for all lanes
+
+We briefly considered sharing one CList for all lanes, changing the internal logic of CList to
+accommodate lane requirements. However, this design significantly increases code complexity,
+particularly in the transaction dissemination logic.
+
+### One P2P channel per lane
+
+Since P2P channels already have a built-in priority mechanism, they present a reasonable option for
+implementing transaction dissemination from lanes. By assigning a P2P channel to each lane, we could
+simply append new transactions to their respective channels and allow the P2P layer to manage the
+order of transmission. We decided against this option mainly because the prioritization logic cannot
+be easily modified without altering the P2P code, potentially affecting other non-mempool channels.
+
+Another drawback is that this option imposes a limit on the number of lanes. Channels use a
+byte as an ID, and the current distribution among all reactors goes up to channel `0x61`. For
+example, the current mempool’s P2P channel ID is `0x30`, which would serve as the default lane. We
+could reserve a range of channels for the mempool, such as channel IDs `0x80` and above
+(all channels with the most significant bit set to 1). This would provide a maximum of 128 lanes,
+which should suffice for most users.
+
+Nodes would also need to agree on the channel assignments during the P2P handshake. Currently, one
+of the conditions for the handshake to succeed is that there is a non-empty intersection of the
+nodes' P2P channels. Since lanes are defined in the application logic, the nodes only need to agree
+on the application version, as already happens in the current implementation.
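+
+For illustration, here is a simplified sketch of the P2P layer's built-in selection rule (the
+`recentlySent/Priority` ratio described in the prioritization logic section above); the type and
+function names are illustrative, not the actual P2P code.
+```golang
+package p2p
+
+import "math"
+
+type channelState struct {
+    priority     uint32 // at least 1 in practice
+    recentlySent int64  // decayed by 20% every 2 seconds elsewhere
+}
+
+// selectChannel picks the channel with the lowest recentlySent/priority
+// ratio, so higher-priority channels get a proportionally larger share of
+// the connection's capacity.
+func selectChannel(channels []*channelState) *channelState {
+    var best *channelState
+    leastRatio := math.MaxFloat64
+    for _, ch := range channels {
+        ratio := float64(ch.recentlySent) / float64(ch.priority)
+        if ratio < leastRatio {
+            leastRatio = ratio
+            best = ch
+        }
+    }
+    return best
+}
+```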
+
+### Duality lane/priority
+
+The lane/priority duality could introduce a powerful indirection: the app would just define the lane
+of a transaction in `CheckTx`, while the priority of the lane itself could be configured (and
+fine-tuned) elsewhere, for example, by the app itself or by node operators. The proposed design for
+the MVP does not support this pattern.
+
+### Custom configuration per lane
+
+A straightforward future improvement that we leave for after the MVP is to allow customized
+configuration of the lanes instead of sharing the current mempool configuration among lanes. The
+application would need to define new configuration values per lane and pass them to CometBFT during
+the handshake.
+
+### Where to define lanes and priorities
+
+We have considered two alternative approaches for _where_ to configure lanes and priorities:
+1. In `config.toml` or `app.toml`. We have discarded this option as it does not make sense for
+ different nodes to have different lane configurations. The properties defined in the
+ specification above are end-to-end, and so, the lane configuration has to be consistent across
+ the network.
+1. In `ConsensusParams`. There are several disadvantages to this approach. If we allowed changing
+ lane information via `ConsensusParams`, the mempool would need to update lanes dynamically. The
+ updating process would be very complex and cumbersome, and not really appealing for an MVP. Two
+ governance proposals would need to pass in order to update the lane definitions: a first proposal
+ for upgrading the application, because the lane classification logic (and thus the
+ application's code) needs to know the lane configuration beforehand, and a second proposal
+ for upgrading the lanes via `ConsensusParams`. While it is true that SDK applications
+ could pass a governance proposal with both elements together, it would be something to _always_
+ do, and it is not clear what the situation would be for non-SDK applications.
+
+ Also, it is not clear in which order the proposals should apply. The community should be careful
+ not to break performance between the passing of the two proposals. The `gov` module could be
+ modified to allow the two changes to be shipped in the same gov proposal, but this does not seem
+ to be a feasible solution.
+
+Moreover, these two alternatives share a common problem: how to deal with nodes that are late,
+possibly having lane definitions that do not match those of nodes at the latest heights.
+
+## Consequences
+
+### Positive
+
+- Application developers will be able to better predict when transactions will be disseminated and
+ reaped from the mempool to be included in a block. This has a direct impact on block creation and
+ transaction latency.
+- The mempool will be able to offer Quality of Service (QoS) guarantees, which do not exist in the
+ current implementation. This MVP will serve as a base to further extend QoS in future iterations
+ of lanes.
+- Applications that are unaware of this feature, and therefore not classifying transactions in
+ `CheckTx`, will observe the same behavior from the mempool as with the current implementation.
+
+### Negative
+
+- The best-effort FIFO ordering that currently applies to all transactions may be broken when using
+ multiple lanes, which will apply FIFO ordering per lane.
Since FIFO ordering is important within
+ the same class of transactions, we expect this will not be a real problem.
+- Increased complexity in the logic of `CheckTx` (ante handlers) in order to classify transactions,
+ with the possibility of introducing bugs in the classification logic.
+
+### Neutral
+
+- Lanes are optional. Current applications do not need to make any change to their code. Future
+ applications will not be forced to use the lanes feature.
+- Lanes will preserve the "FIFO ordering of transactions" property within the same class (with a
+ best-effort approach, as in the current implementation).
+- The proposed prioritization algorithm (WRR) for transaction dissemination and block creation is
+ fair, so low-priority transactions will not get stuck in the mempool for long periods of time, and
+ will get included in blocks proportionally to their priorities.
+
+## References
+
+- [ADR067][adr067], Priority mempool
+- [Docstring][reapmaxbytesmaxgas] of `ReapMaxBytesMaxGas`
+- Solana's [Gulf Stream][gulf-stream]
+- Solana's [Priority Fees][solana-prio-fees]
+- Solana's [priority fee pricing][prio-fee-price]
+- Cosmos SDK's [gas prices][sdk-gas-prices]
+- Cosmos SDK's [application-side mempool][sdk-app-mempool]
+- Skip's [Block SDK][skip-block-sdk]
+- P2P's [selectChannelToGossipOn][selectChannelToGossipOn] function
+- [Weighted Round Robin][wrr]
+- [Cosmovisor][cosmovisor]
+- [Mempool's cache][cache]
+
+[cache]: https://github.com/cometbft/cometbft/blob/main/spec/mempool/cache.md
+[adr067]: ./tendermint-core/adr-067-mempool-refactor.md
+[reapmaxbytesmaxgas]: https://github.com/cometbft/cometbft/blob/v0.37.6/mempool/v1/mempool.go#L315-L324
+[gulf-stream]: https://medium.com/solana-labs/gulf-stream-solanas-mempool-less-transaction-forwarding-protocol-d342e72186ad
+[solana-prio-fees]: https://solana.com/developers/guides/advanced/how-to-use-priority-fees
+[prio-fee-price]: https://solana.com/developers/guides/advanced/how-to-use-priority-fees
+[sdk-gas-prices]: https://docs.cosmos.network/v0.50/learn/beginner/tx-lifecycle#gas-and-fees
+[sdk-app-mempool]: https://docs.cosmos.network/v0.47/build/building-apps/app-mempool
+[skip-block-sdk]: https://github.com/skip-mev/block-sdk/blob/v2.1.3/README.md
+[cosmovisor]: https://docs.cosmos.network/v0.50/build/tooling/cosmovisor
+[selectChannelToGossipOn]: https://github.com/cometbft/cometbft/blob/6d3ff343c2d5a06e7522344d1a4e17d24ce982ad/p2p/conn/connection.go#L542-L563
+[wrr]: https://en.wikipedia.org/wiki/Weighted_round_robin
+[based-preconfs]: https://ethresear.ch/t/based-preconfirmations/17353
+[Oconfs]: https://www.reddit.com/r/btc/comments/vxr3qf/explaining_0_conf_transactions/
+[tracking-issue]: https://github.com/cometbft/cometbft/issues/2803 diff --git a/docs/architecture/adr-template.md b/docs/references/architecture/adr-template.md similarity index 100% rename from docs/architecture/adr-template.md rename to docs/references/architecture/adr-template.md diff --git a/docs/references/architecture/images/adr-102-architecture.jpeg b/docs/references/architecture/images/adr-102-architecture.jpeg new file mode 100644 index 00000000000..dd664f45c3c Binary files /dev/null and b/docs/references/architecture/images/adr-102-architecture.jpeg differ diff --git a/docs/references/architecture/tendermint-core/README.md b/docs/references/architecture/tendermint-core/README.md new file mode 100644 index 00000000000..89de0c19440 --- /dev/null +++ b/docs/references/architecture/tendermint-core/README.md @@ -0,0 +1,105 @@ +--- +order: 1 +parent: + order:
false +--- + +# Tendermint Core Architecture Decision Records (ADR) + +Here we record all high-level architecture decisions in the Tendermint Core +project. All implemented ADRs in this list naturally affect CometBFT, since +CometBFT is a fork of Tendermint Core as of December 2022. + +This list is currently frozen and kept for reference purposes. To add new ADRs, +please do so for CometBFT [here](../). + +## Table of Contents + +### Implemented + +- [ADR-001: Logging](adr-001-logging.md) +- [ADR-002: Event-Subscription](adr-002-event-subscription.md) +- [ADR-003: ABCI-APP-RPC](adr-003-abci-app-rpc.md) +- [ADR-004: Historical-Validators](adr-004-historical-validators.md) +- [ADR-005: Consensus-Params](adr-005-consensus-params.md) +- [ADR-008: Priv-Validator](adr-008-priv-validator.md) +- [ADR-009: ABCI-Design](adr-009-ABCI-design.md) +- [ADR-010: Crypto-Changes](adr-010-crypto-changes.md) +- [ADR-011: Monitoring](adr-011-monitoring.md) +- [ADR-014: Secp-Malleability](adr-014-secp-malleability.md) +- [ADR-015: Crypto-Encoding](adr-015-crypto-encoding.md) +- [ADR-016: Protocol-Versions](adr-016-protocol-versions.md) +- [ADR-017: Chain-Versions](adr-017-chain-versions.md) +- [ADR-018: ABCI-Validators](adr-018-ABCI-Validators.md) +- [ADR-019: Multisigs](adr-019-multisigs.md) +- [ADR-020: Block-Size](adr-020-block-size.md) +- [ADR-021: ABCI-Events](adr-021-abci-events.md) +- [ADR-025: Commit](adr-025-commit.md) +- [ADR-026: General-Merkle-Proof](adr-026-general-merkle-proof.md) +- [ADR-033: Pubsub](adr-033-pubsub.md) +- [ADR-034: Priv-Validator-File-Structure](adr-034-priv-validator-file-structure.md) +- [ADR-043: Blockchain-RiRi-Org](adr-043-blockchain-riri-org.md) +- [ADR-044: Lite-Client-With-Weak-Subjectivity](adr-044-lite-client-with-weak-subjectivity.md) +- [ADR-046: Light-Client-Implementation](adr-046-light-client-implementation.md) +- [ADR-047: Handling-Evidence-From-Light-Client](adr-047-handling-evidence-from-light-client.md) +- [ADR-051: Double-Signing-Risk-Reduction](adr-051-double-signing-risk-reduction.md) +- [ADR-052: Tendermint-Mode](adr-052-tendermint-mode.md) +- [ADR-053: State-Sync-Prototype](adr-053-state-sync-prototype.md) +- [ADR-054: Crypto-Encoding-2](adr-054-crypto-encoding-2.md) +- [ADR-055: Protobuf-Design](adr-055-protobuf-design.md) +- [ADR-056: Light-Client-Amnesia-Attacks](adr-056-light-client-amnesia-attacks.md) +- [ADR-059: Evidence-Composition-and-Lifecycle](adr-059-evidence-composition-and-lifecycle.md) +- [ADR-065: Custom Event Indexing](adr-065-custom-event-indexing.md) +- [ADR-066: E2E-Testing](adr-066-e2e-testing.md) +- [ADR-072: Restore Requests for Comments](adr-072-request-for-comments.md) +- [ADR-076: Combine Spec and Tendermint Repositories](adr-076-combine-spec-repo.md) +- [ADR-077: Configurable Block Retention](adr-077-block-retention.md) +- [ADR-078: Non-zero Genesis](adr-078-nonzero-genesis.md) + +### Accepted + +- [ADR-006: Trust-Metric](adr-006-trust-metric.md) +- [ADR-024: Sign-Bytes](adr-024-sign-bytes.md) +- [ADR-039: Peer-Behaviour](adr-039-peer-behaviour.md) +- [ADR-063: Privval-gRPC](adr-063-privval-grpc.md) +- [ADR-067: Mempool Refactor](adr-067-mempool-refactor.md) +- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md) +- [ADR-075: RPC Event Subscription Interface](adr-075-rpc-subscription.md) +- [ADR-079: Ed25519 Verification](adr-079-ed25519-verification.md) +- [ADR-081: Protocol Buffers Management](adr-081-protobuf-mgmt.md) + +### Deprecated + +- [ADR-035: Documentation](adr-035-documentation.md) + +### 
Rejected + +- [ADR-023: ABCI-Propose-tx](adr-023-ABCI-propose-tx.md) +- [ADR-029: Check-Tx-Consensus](adr-029-check-tx-consensus.md) +- [ADR-058: Event-Hashing](adr-058-event-hashing.md) + +### Proposed + +- [ADR-007: Trust-Metric-Usage](adr-007-trust-metric-usage.md) +- [ADR-012: Peer-Transport](adr-012-peer-transport.md) +- [ADR-013: Symmetric-Crypto](adr-013-symmetric-crypto.md) +- [ADR-022: ABCI-Errors](adr-022-abci-errors.md) +- [ADR-030: Consensus-Refactor](adr-030-consensus-refactor.md) +- [ADR-036: Empty Blocks via ABCI](adr-036-empty-blocks-abci.md) +- [ADR-037: Deliver-Block](adr-037-deliver-block.md) +- [ADR-038: Non-Zero-Start-Height](adr-038-non-zero-start-height.md) +- [ADR-040: Blockchain Reactor Refactor](adr-040-blockchain-reactor-refactor.md) +- [ADR-041: Proposer-Selection-via-ABCI](adr-041-proposer-selection-via-abci.md) +- [ADR-042: State Sync Design](adr-042-state-sync.md) +- [ADR-045: ABCI-Evidence](adr-045-abci-evidence.md) +- [ADR-050: Improved Trusted Peering](adr-050-improved-trusted-peering.md) +- [ADR-057: RPC](adr-057-RPC.md) +- [ADR-060: Go-API-Stability](adr-060-go-api-stability.md) +- [ADR-061: P2P-Refactor-Scope](adr-061-p2p-refactor-scope.md) +- [ADR-062: P2P-Architecture](adr-062-p2p-architecture.md) +- [ADR-064: Batch Verification](adr-064-batch-verification.md) +- [ADR-068: Reverse-Sync](adr-068-reverse-sync.md) +- [ADR-069: Node Initialization](adr-069-flexible-node-initialization.md) +- [ADR-073: Adopt LibP2P](adr-073-libp2p.md) +- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](adr-074-timeout-params.md) +- [ADR-080: Reverse Sync](adr-080-reverse-sync.md) diff --git a/docs/architecture/tendermint-core/adr-001-logging.md b/docs/references/architecture/tendermint-core/adr-001-logging.md similarity index 100% rename from docs/architecture/tendermint-core/adr-001-logging.md rename to docs/references/architecture/tendermint-core/adr-001-logging.md diff --git a/docs/architecture/tendermint-core/adr-002-event-subscription.md b/docs/references/architecture/tendermint-core/adr-002-event-subscription.md similarity index 100% rename from docs/architecture/tendermint-core/adr-002-event-subscription.md rename to docs/references/architecture/tendermint-core/adr-002-event-subscription.md diff --git a/docs/architecture/tendermint-core/adr-003-abci-app-rpc.md b/docs/references/architecture/tendermint-core/adr-003-abci-app-rpc.md similarity index 100% rename from docs/architecture/tendermint-core/adr-003-abci-app-rpc.md rename to docs/references/architecture/tendermint-core/adr-003-abci-app-rpc.md diff --git a/docs/architecture/tendermint-core/adr-004-historical-validators.md b/docs/references/architecture/tendermint-core/adr-004-historical-validators.md similarity index 100% rename from docs/architecture/tendermint-core/adr-004-historical-validators.md rename to docs/references/architecture/tendermint-core/adr-004-historical-validators.md diff --git a/docs/architecture/tendermint-core/adr-005-consensus-params.md b/docs/references/architecture/tendermint-core/adr-005-consensus-params.md similarity index 100% rename from docs/architecture/tendermint-core/adr-005-consensus-params.md rename to docs/references/architecture/tendermint-core/adr-005-consensus-params.md diff --git a/docs/architecture/tendermint-core/adr-006-trust-metric.md b/docs/references/architecture/tendermint-core/adr-006-trust-metric.md similarity index 95% rename from docs/architecture/tendermint-core/adr-006-trust-metric.md rename to 
docs/references/architecture/tendermint-core/adr-006-trust-metric.md index 608978207be..04d4a208dcc 100644 --- a/docs/architecture/tendermint-core/adr-006-trust-metric.md +++ b/docs/references/architecture/tendermint-core/adr-006-trust-metric.md @@ -36,7 +36,7 @@ where _R_[*i*] denotes the raw trust value at time interval _i_ (where _i_ == 0 `H[i] =` ![formula1](img/formula1.png "Weighted Sum Formula") -The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as _Wk_ = 0.8^_k_, for time interval _k_. With the history value available, we can now finish calculating the integral value: +The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as _Wk_ = 0.8^_k_, for time interval _k_. With the history value available, we can now finish calculating the integral value: ```math (2) Integral Value = b * H[i] @@ -110,7 +110,7 @@ func (tm *TrustMetric) TrustScore() int {} // NewMetric returns a trust metric with the default configuration func NewMetric() *TrustMetric {} -//------------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------------ // For example tm := NewMetric() @@ -148,7 +148,7 @@ func DefaultConfig() TrustMetricConfig {} // NewMetricWithConfig returns a trust metric with a custom configuration func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {} -//------------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------------ // For example config := TrustMetricConfig{ @@ -196,7 +196,7 @@ func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {} // PeerDisconnected pauses the trust metric associated with the peer identified by the key func (tms *TrustMetricStore) PeerDisconnected(key string) {} -//------------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------------ // For example db := dbm.NewDB("trusthistory", "goleveldb", dirPathStr) diff --git a/docs/architecture/tendermint-core/adr-007-trust-metric-usage.md b/docs/references/architecture/tendermint-core/adr-007-trust-metric-usage.md similarity index 100% rename from docs/architecture/tendermint-core/adr-007-trust-metric-usage.md rename to docs/references/architecture/tendermint-core/adr-007-trust-metric-usage.md diff --git a/docs/architecture/tendermint-core/adr-008-priv-validator.md b/docs/references/architecture/tendermint-core/adr-008-priv-validator.md similarity index 100% rename from docs/architecture/tendermint-core/adr-008-priv-validator.md rename to docs/references/architecture/tendermint-core/adr-008-priv-validator.md diff --git a/docs/architecture/tendermint-core/adr-009-ABCI-design.md b/docs/references/architecture/tendermint-core/adr-009-ABCI-design.md similarity index 99% rename 
from docs/architecture/tendermint-core/adr-009-ABCI-design.md rename to docs/references/architecture/tendermint-core/adr-009-ABCI-design.md index 3876ffb3de6..fc4592a896e 100644 --- a/docs/architecture/tendermint-core/adr-009-ABCI-design.md +++ b/docs/references/architecture/tendermint-core/adr-009-ABCI-design.md @@ -118,7 +118,7 @@ v1 will: style union types. That said, an Amino v2 will be worked on to improve the performance of the -format and its useability in cryptographic applications. +format and its usability in cryptographic applications. ### PubKey diff --git a/docs/architecture/tendermint-core/adr-010-crypto-changes.md b/docs/references/architecture/tendermint-core/adr-010-crypto-changes.md similarity index 97% rename from docs/architecture/tendermint-core/adr-010-crypto-changes.md rename to docs/references/architecture/tendermint-core/adr-010-crypto-changes.md index 41d15da354f..b4f7055e8ba 100644 --- a/docs/architecture/tendermint-core/adr-010-crypto-changes.md +++ b/docs/references/architecture/tendermint-core/adr-010-crypto-changes.md @@ -4,7 +4,7 @@ Tendermint is a cryptographic protocol that uses and composes a variety of cryptographic primitives. -After nearly 4 years of development, Tendermint has recently undergone multiple security reviews to search for vulnerabilities and to assess the the use and composition of cryptographic primitives. +After nearly 4 years of development, Tendermint has recently undergone multiple security reviews to search for vulnerabilities and to assess the use and composition of cryptographic primitives. ### Hash Functions diff --git a/docs/architecture/tendermint-core/adr-011-monitoring.md b/docs/references/architecture/tendermint-core/adr-011-monitoring.md similarity index 100% rename from docs/architecture/tendermint-core/adr-011-monitoring.md rename to docs/references/architecture/tendermint-core/adr-011-monitoring.md diff --git a/docs/architecture/tendermint-core/adr-012-peer-transport.md b/docs/references/architecture/tendermint-core/adr-012-peer-transport.md similarity index 97% rename from docs/architecture/tendermint-core/adr-012-peer-transport.md rename to docs/references/architecture/tendermint-core/adr-012-peer-transport.md index 1cf4fb80b81..3351a9ff466 100644 --- a/docs/architecture/tendermint-core/adr-012-peer-transport.md +++ b/docs/references/architecture/tendermint-core/adr-012-peer-transport.md @@ -13,7 +13,7 @@ Addresses: - [#2046](https://github.com/tendermint/tendermint/issues/2046) - [#2047](https://github.com/tendermint/tendermint/issues/2047) -First iteraton in [#2067](https://github.com/tendermint/tendermint/issues/2067) +First iteration in [#2067](https://github.com/tendermint/tendermint/issues/2067) ## Decision diff --git a/docs/architecture/tendermint-core/adr-013-symmetric-crypto.md b/docs/references/architecture/tendermint-core/adr-013-symmetric-crypto.md similarity index 97% rename from docs/architecture/tendermint-core/adr-013-symmetric-crypto.md rename to docs/references/architecture/tendermint-core/adr-013-symmetric-crypto.md index d82e16ae9ee..76ee0387e7d 100644 --- a/docs/architecture/tendermint-core/adr-013-symmetric-crypto.md +++ b/docs/references/architecture/tendermint-core/adr-013-symmetric-crypto.md @@ -46,7 +46,7 @@ but I don't really see this as an issue. If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`. `algo_name` should be length prefixed, using standard varuint encoding. 
-This will be binary data, but thats not a problem considering the nonce and ciphertext are also binary. +This will be binary data, but that's not a problem considering the nonce and ciphertext are also binary. This solution requires a mapping from aead type to name. We can achieve this via reflection. diff --git a/docs/architecture/tendermint-core/adr-014-secp-malleability.md b/docs/references/architecture/tendermint-core/adr-014-secp-malleability.md similarity index 100% rename from docs/architecture/tendermint-core/adr-014-secp-malleability.md rename to docs/references/architecture/tendermint-core/adr-014-secp-malleability.md diff --git a/docs/architecture/tendermint-core/adr-015-crypto-encoding.md b/docs/references/architecture/tendermint-core/adr-015-crypto-encoding.md similarity index 100% rename from docs/architecture/tendermint-core/adr-015-crypto-encoding.md rename to docs/references/architecture/tendermint-core/adr-015-crypto-encoding.md diff --git a/docs/architecture/tendermint-core/adr-016-protocol-versions.md b/docs/references/architecture/tendermint-core/adr-016-protocol-versions.md similarity index 99% rename from docs/architecture/tendermint-core/adr-016-protocol-versions.md rename to docs/references/architecture/tendermint-core/adr-016-protocol-versions.md index 1960abe17f5..e547053cbaa 100644 --- a/docs/architecture/tendermint-core/adr-016-protocol-versions.md +++ b/docs/references/architecture/tendermint-core/adr-016-protocol-versions.md @@ -53,7 +53,7 @@ as described below. The BlockVersion defines the core of the blockchain data structures and should change infrequently. -The P2PVersion defines how peers connect and communicate with eachother - it's +The P2PVersion defines how peers connect and communicate with each other - it's not part of the blockchain data structures, but defines the protocols used to build the blockchain. It may change gradually. @@ -227,7 +227,7 @@ BlockVersion is included in both the Header and the NodeInfo. Changing BlockVersion should happen quite infrequently and ideally only for critical upgrades. For now, it is not encoded in ABCI, though it's always -possible to use tags to signal an external process to co-ordinate an upgrade. +possible to use tags to signal an external process to coordinate an upgrade. Note Ethereum has not had to make an upgrade like this (everything has been at state machine level, AFAIK). diff --git a/docs/architecture/tendermint-core/adr-017-chain-versions.md b/docs/references/architecture/tendermint-core/adr-017-chain-versions.md similarity index 99% rename from docs/architecture/tendermint-core/adr-017-chain-versions.md rename to docs/references/architecture/tendermint-core/adr-017-chain-versions.md index bc071ef863e..99360a45e42 100644 --- a/docs/architecture/tendermint-core/adr-017-chain-versions.md +++ b/docs/references/architecture/tendermint-core/adr-017-chain-versions.md @@ -82,7 +82,7 @@ Define `ChainID = TMHASH(ChainDescriptor)`. It's the unique ID of a blockchain. It should be Bech32 encoded when handled by users, eg. with `cosmoschain` prefix. 
-#### Forks and Uprades +#### Forks and Upgrades When a chain forks or upgrades but continues the same history, it takes a new ChainDescription as follows: diff --git a/docs/architecture/tendermint-core/adr-018-ABCI-Validators.md b/docs/references/architecture/tendermint-core/adr-018-ABCI-Validators.md similarity index 100% rename from docs/architecture/tendermint-core/adr-018-ABCI-Validators.md rename to docs/references/architecture/tendermint-core/adr-018-ABCI-Validators.md diff --git a/docs/architecture/tendermint-core/adr-019-multisigs.md b/docs/references/architecture/tendermint-core/adr-019-multisigs.md similarity index 98% rename from docs/architecture/tendermint-core/adr-019-multisigs.md rename to docs/references/architecture/tendermint-core/adr-019-multisigs.md index 7fd3aab0acf..31541c328b3 100644 --- a/docs/architecture/tendermint-core/adr-019-multisigs.md +++ b/docs/references/architecture/tendermint-core/adr-019-multisigs.md @@ -19,7 +19,7 @@ This allows for complex conditionals of when to validate a signature. Suppose the set of signers is of size _n_. If we validate a signature if any subgroup of size _k_ signs a message, -this becomes what is commonly reffered to as a _k of n multisig_ in Bitcoin. +this becomes what is commonly referred to as a _k of n multisig_ in Bitcoin. This ADR specifies the encoding standard for general accountable subgroup multisignatures, k of n accountable subgroup multisignatures, and its weighted variant. diff --git a/docs/architecture/tendermint-core/adr-020-block-size.md b/docs/references/architecture/tendermint-core/adr-020-block-size.md similarity index 97% rename from docs/architecture/tendermint-core/adr-020-block-size.md rename to docs/references/architecture/tendermint-core/adr-020-block-size.md index f32ed7ab5c3..764e0085b1b 100644 --- a/docs/architecture/tendermint-core/adr-020-block-size.md +++ b/docs/references/architecture/tendermint-core/adr-020-block-size.md @@ -13,7 +13,7 @@ We currently use MaxTxs to reap txs from the mempool when proposing a block, but enforce MaxBytes when unmarshaling a block, so we could easily propose a -block thats too large to be valid. +block that's too large to be valid. We should just remove MaxTxs all together and stick with MaxBytes, and have a `mempool.ReapMaxBytes`. @@ -26,7 +26,7 @@ We could also consider using a MaxDataBytes instead of or in addition to MaxByte ## MaxBytes vs MaxDataBytes The [PR #3045](https://github.com/tendermint/tendermint/pull/3045) suggested -additional clarity/justification was necessary here, wither respect to the use +additional clarity/justification was necessary here, with respect to the use of MaxDataBytes in addition to, or instead of, MaxBytes. 
MaxBytes provides a clear limit on the total size of a block that requires no diff --git a/docs/architecture/tendermint-core/adr-021-abci-events.md b/docs/references/architecture/tendermint-core/adr-021-abci-events.md similarity index 100% rename from docs/architecture/tendermint-core/adr-021-abci-events.md rename to docs/references/architecture/tendermint-core/adr-021-abci-events.md diff --git a/docs/architecture/tendermint-core/adr-022-abci-errors.md b/docs/references/architecture/tendermint-core/adr-022-abci-errors.md similarity index 100% rename from docs/architecture/tendermint-core/adr-022-abci-errors.md rename to docs/references/architecture/tendermint-core/adr-022-abci-errors.md diff --git a/docs/architecture/tendermint-core/adr-023-ABCI-propose-tx.md b/docs/references/architecture/tendermint-core/adr-023-ABCI-propose-tx.md similarity index 100% rename from docs/architecture/tendermint-core/adr-023-ABCI-propose-tx.md rename to docs/references/architecture/tendermint-core/adr-023-ABCI-propose-tx.md diff --git a/docs/architecture/tendermint-core/adr-024-sign-bytes.md b/docs/references/architecture/tendermint-core/adr-024-sign-bytes.md similarity index 100% rename from docs/architecture/tendermint-core/adr-024-sign-bytes.md rename to docs/references/architecture/tendermint-core/adr-024-sign-bytes.md diff --git a/docs/architecture/tendermint-core/adr-025-commit.md b/docs/references/architecture/tendermint-core/adr-025-commit.md similarity index 96% rename from docs/architecture/tendermint-core/adr-025-commit.md rename to docs/references/architecture/tendermint-core/adr-025-commit.md index a23d3803f61..e836dfbcbfe 100644 --- a/docs/architecture/tendermint-core/adr-025-commit.md +++ b/docs/references/architecture/tendermint-core/adr-025-commit.md @@ -33,7 +33,7 @@ continue to be used in the consensus reactor and elsewhere. A primary question is what should be included in the `CommitSig` beyond the signature. One current constraint is that we must include a timestamp, since -this is how we calculuate BFT time, though we may be able to change this [in the +this is how we calculate BFT time, though we may be able to change this [in the future](https://github.com/tendermint/tendermint/issues/2840). Other concerns here include: @@ -89,7 +89,7 @@ BFT time [can be mitigated](https://github.com/tendermint/tendermint/issues/2840#issuecomment-529122431). **ValidatorAddress**: we include it in the `CommitSig` for now. While this -does increase the block size unecessarily (20-bytes per validator), it has some ergonomic and debugging advantages: +does increase the block size unnecessarily (20-bytes per validator), it has some ergonomic and debugging advantages: - `Commit` contains everything necessary to reconstruct `[]Vote`, and doesn't depend on additional access to a `ValidatorSet` - Lite clients can check if they know the validators in a commit without diff --git a/docs/architecture/tendermint-core/adr-026-general-merkle-proof.md b/docs/references/architecture/tendermint-core/adr-026-general-merkle-proof.md similarity index 93% rename from docs/architecture/tendermint-core/adr-026-general-merkle-proof.md rename to docs/references/architecture/tendermint-core/adr-026-general-merkle-proof.md index 5774c10f8db..96dc7da26d4 100644 --- a/docs/architecture/tendermint-core/adr-026-general-merkle-proof.md +++ b/docs/references/architecture/tendermint-core/adr-026-general-merkle-proof.md @@ -2,12 +2,12 @@ ## Context -We are using raw `[]byte` for merkle proofs in `abci.ResponseQuery`. 
It makes hard to handle multilayer merkle proofs and general cases. Here, new interface `ProofOperator` is defined. The users can defines their own Merkle proof format and layer them easily. +We are using raw `[]byte` for merkle proofs in `abci.ResponseQuery`. This makes it hard to handle multilayer merkle proofs and general cases. Here, a new interface `ProofOperator` is defined. Users can define their own Merkle proof format and layer proofs easily. Goals: - Layer Merkle proofs without decoding/reencoding - Provide general way to chain proofs -- Make the proof format extensible, allowing thirdparty proof types +- Make the proof format extensible, allowing third-party proof types ## Decision @@ -40,9 +40,9 @@ Implemented ### Positive - Layering becomes easier (no encoding/decoding at each step) -- Thirdparty proof format is available +- Third-party proof formats are available -### Negative +### Negative - Larger size for abci.ResponseQuery - Unintuitive proof chaining(it is not clear what `Run()` is doing) diff --git a/docs/architecture/tendermint-core/adr-029-check-tx-consensus.md b/docs/references/architecture/tendermint-core/adr-029-check-tx-consensus.md similarity index 98% rename from docs/architecture/tendermint-core/adr-029-check-tx-consensus.md rename to docs/references/architecture/tendermint-core/adr-029-check-tx-consensus.md index 191a0ec8ed0..52c9571c037 100644 --- a/docs/architecture/tendermint-core/adr-029-check-tx-consensus.md +++ b/docs/references/architecture/tendermint-core/adr-029-check-tx-consensus.md @@ -85,7 +85,7 @@ func (app *CounterApplication) CheckBlock(block types.Request_CheckBlock) types. } ``` -In BeginBlock, the app should restore the state to the orignal state before checking the block: +In BeginBlock, the app should restore the state to the original state before checking the block: ``` func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { diff --git a/docs/architecture/tendermint-core/adr-030-consensus-refactor.md b/docs/references/architecture/tendermint-core/adr-030-consensus-refactor.md similarity index 98% rename from docs/architecture/tendermint-core/adr-030-consensus-refactor.md rename to docs/references/architecture/tendermint-core/adr-030-consensus-refactor.md index 5c8c3d75431..52beb61ce97 100644 --- a/docs/architecture/tendermint-core/adr-030-consensus-refactor.md +++ b/docs/references/architecture/tendermint-core/adr-030-consensus-refactor.md @@ -4,7 +4,7 @@ One of the biggest challenges this project faces is to proof that the implementations of the specifications are correct, much like we strive to -formaly verify our alogrithms and protocols we should work towards high +formally verify our algorithms and protocols, we should work towards high confidence about the correctness of our program code. One of those is the core of Tendermint - Consensus - which currently resides in the `consensus` package. Over time there has been high friction making changes to the package due to the @@ -23,10 +23,10 @@ Addresses: ## Decision To remedy these issues we plan a gradual, non-invasive refactoring of the -`consensus` package. Starting of by isolating the consensus alogrithm into +`consensus` package. Starting off by isolating the consensus algorithm into a pure function and a finite state machine to address the most pressuring issue of lack of confidence. Doing so while leaving the rest of the package in tact -and have follow-up optional changes to improve the sepration of concerns.
+and have follow-up optional changes to improve the separation of concerns. ### Implementation changes @@ -111,7 +111,7 @@ func TestConsensusXXX(t *testing.T) { ) for _, e := range events { - sate, msg = Consensus(e.event, state) + state, msg = Consensus(e.event, state) // Test message expectation. if msg != e.want.message { diff --git a/docs/architecture/tendermint-core/adr-033-pubsub.md b/docs/references/architecture/tendermint-core/adr-033-pubsub.md similarity index 100% rename from docs/architecture/tendermint-core/adr-033-pubsub.md rename to docs/references/architecture/tendermint-core/adr-033-pubsub.md diff --git a/docs/architecture/tendermint-core/adr-034-priv-validator-file-structure.md b/docs/references/architecture/tendermint-core/adr-034-priv-validator-file-structure.md similarity index 100% rename from docs/architecture/tendermint-core/adr-034-priv-validator-file-structure.md rename to docs/references/architecture/tendermint-core/adr-034-priv-validator-file-structure.md diff --git a/docs/architecture/tendermint-core/adr-035-documentation.md b/docs/references/architecture/tendermint-core/adr-035-documentation.md similarity index 100% rename from docs/architecture/tendermint-core/adr-035-documentation.md rename to docs/references/architecture/tendermint-core/adr-035-documentation.md diff --git a/docs/architecture/tendermint-core/adr-036-empty-blocks-abci.md b/docs/references/architecture/tendermint-core/adr-036-empty-blocks-abci.md similarity index 100% rename from docs/architecture/tendermint-core/adr-036-empty-blocks-abci.md rename to docs/references/architecture/tendermint-core/adr-036-empty-blocks-abci.md diff --git a/docs/architecture/tendermint-core/adr-037-deliver-block.md b/docs/references/architecture/tendermint-core/adr-037-deliver-block.md similarity index 100% rename from docs/architecture/tendermint-core/adr-037-deliver-block.md rename to docs/references/architecture/tendermint-core/adr-037-deliver-block.md diff --git a/docs/architecture/tendermint-core/adr-038-non-zero-start-height.md b/docs/references/architecture/tendermint-core/adr-038-non-zero-start-height.md similarity index 100% rename from docs/architecture/tendermint-core/adr-038-non-zero-start-height.md rename to docs/references/architecture/tendermint-core/adr-038-non-zero-start-height.md diff --git a/docs/architecture/tendermint-core/adr-039-peer-behaviour.md b/docs/references/architecture/tendermint-core/adr-039-peer-behaviour.md similarity index 100% rename from docs/architecture/tendermint-core/adr-039-peer-behaviour.md rename to docs/references/architecture/tendermint-core/adr-039-peer-behaviour.md diff --git a/docs/architecture/tendermint-core/adr-040-blockchain-reactor-refactor.md b/docs/references/architecture/tendermint-core/adr-040-blockchain-reactor-refactor.md similarity index 100% rename from docs/architecture/tendermint-core/adr-040-blockchain-reactor-refactor.md rename to docs/references/architecture/tendermint-core/adr-040-blockchain-reactor-refactor.md diff --git a/docs/architecture/tendermint-core/adr-041-proposer-selection-via-abci.md b/docs/references/architecture/tendermint-core/adr-041-proposer-selection-via-abci.md similarity index 100% rename from docs/architecture/tendermint-core/adr-041-proposer-selection-via-abci.md rename to docs/references/architecture/tendermint-core/adr-041-proposer-selection-via-abci.md diff --git a/docs/architecture/tendermint-core/adr-042-state-sync.md b/docs/references/architecture/tendermint-core/adr-042-state-sync.md similarity index 94% rename from 
docs/architecture/tendermint-core/adr-042-state-sync.md rename to docs/references/architecture/tendermint-core/adr-042-state-sync.md index a1589318392..32f7ac6f8f7 100644 --- a/docs/architecture/tendermint-core/adr-042-state-sync.md +++ b/docs/references/architecture/tendermint-core/adr-042-state-sync.md @@ -15,7 +15,7 @@ facilitate setting up a new node as quickly as possible. ## Considerations Because Tendermint doesn't know anything about the application state, StateSync will broker messages between nodes and through -the ABCI to an opaque applicaton. The implementation will have multiple +the ABCI to an opaque application. The implementation will have multiple touch points on both the tendermint code base and ABCI application. * A StateSync reactor to facilitate peer communication - Tendermint @@ -60,7 +60,7 @@ optimized for batch read/writes. Additionally the propsosals tend to vary on how they provide safety properties. -**LightClient** Where a client can aquire the merkle root from the block +**LightClient** Where a client can acquire the merkle root from the block headers synchronized from a trusted validator set. Subsets of the application state, called chunks can therefore be validated on receipt to ensure each chunk is part of the merkle root. @@ -85,7 +85,7 @@ happens lazily and in a dynamic way: nodes request key ranges from their peers, and peers respond with some subset of the requested range and with notes on how to request the rest in parallel from other peers. Unlike chunk numbers, keys can be verified directly. And if some keys in the -range are ommitted, proofs for the range will fail to verify. +range are omitted, proofs for the range will fail to verify. This way a node can start by requesting the entire tree from one peer, and that peer can respond with say the first few keys, and the ranges to request from other peers. @@ -100,7 +100,7 @@ design for tendermint was originally tracked in Warp Sync as implemented in OpenEthereum to rapidly download both blocks and state snapshots from peers. Data is carved into ~4MB chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a -manifest file which co-ordinates the state-sync. Obtaining a correct manifest +manifest file which coordinates the state-sync. Obtaining a correct manifest file seems to require an honest majority of peers. This means you may not find out the state is incorrect until you download the whole thing and compare it with a verified block header. @@ -127,19 +127,19 @@ read/write patterns necessitated by serving a snapshot chunk. Specifically, Lazy State Sync performs random reads to the underlying data structure while Eager can optimize for sequential reads. -This distinctin between approaches was demonstrated by Binance's +This distinction between approaches was demonstrated by Binance's [ackratos](https://github.com/ackratos) in their implementation of [Lazy State sync](https://github.com/tendermint/tendermint/pull/3243), The [analysis](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/) of the performance, and follow up implementation of [Warp Sync](http://github.com/tendermint/tendermint/pull/3594). -#### Compairing Security Models +#### Comparing Security Models There are several different security models which have been discussed/proposed in the past but generally fall into two categories. 
Light client validation: In which the node receiving data is expected to -first perform a light client sync and have all the nessesary block +first perform a light client sync and have all the necessary block headers. Within the trusted block header (trusted in terms of from a validator set subject to [weak subjectivity](https://github.com/tendermint/tendermint/pull/3795)) and @@ -169,12 +169,12 @@ giving the block propser enough time to complete the snapshot asynchronousy. ## Proposal: Eager StateSync With Per Chunk Light Client Validation -The conclusion after some concideration of the advantages/disadvances of +The conclusion after some consideration of the advantages/disadvantages of eager/lazy and different security models is to produce a state sync which eagerly produces snapshots and uses light client validation. This approach has the performance advantages of pre-computing efficient snapshots which can streamed to new nodes on demand using sequential IO. -Secondly, by using light client validation we cna validate each chunk on +Secondly, by using light client validation we can validate each chunk on receipt and avoid the potential eclipse attack of majority of peer based security. @@ -214,7 +214,7 @@ will need implement: Proposed -## Concequences +## Consequences ### Neutral diff --git a/docs/architecture/tendermint-core/adr-043-blockchain-riri-org.md b/docs/references/architecture/tendermint-core/adr-043-blockchain-riri-org.md similarity index 99% rename from docs/architecture/tendermint-core/adr-043-blockchain-riri-org.md rename to docs/references/architecture/tendermint-core/adr-043-blockchain-riri-org.md index 6ea46c35847..f340fabb553 100644 --- a/docs/architecture/tendermint-core/adr-043-blockchain-riri-org.md +++ b/docs/references/architecture/tendermint-core/adr-043-blockchain-riri-org.md @@ -160,7 +160,7 @@ func (r *BlockchainReacor) ioRoutine(ioMesgs chan Message, outMsgs chan Message) case scStatusRequestMessage r.sendStatusRequestToPeer(...) case bcPeerError - r.Swtich.StopPeerForError(msg.src) + r.Switch.StopPeerForError(msg.src) ... ... case bcFinished @@ -384,7 +384,7 @@ Implemented ### Positive - Test become deterministic -- Simulation becomes a-termporal: no need wait for a wall-time timeout +- Simulation becomes a-temporal: no need to wait for a wall-time timeout - Peer Selection can be independently tested/simulated - Develop a general approach to refactoring reactors diff --git a/docs/architecture/tendermint-core/adr-044-lite-client-with-weak-subjectivity.md b/docs/references/architecture/tendermint-core/adr-044-lite-client-with-weak-subjectivity.md similarity index 100% rename from docs/architecture/tendermint-core/adr-044-lite-client-with-weak-subjectivity.md rename to docs/references/architecture/tendermint-core/adr-044-lite-client-with-weak-subjectivity.md diff --git a/docs/architecture/tendermint-core/adr-045-abci-evidence.md b/docs/references/architecture/tendermint-core/adr-045-abci-evidence.md similarity index 98% rename from docs/architecture/tendermint-core/adr-045-abci-evidence.md rename to docs/references/architecture/tendermint-core/adr-045-abci-evidence.md index 65a0b688ace..40bf255ab36 100644 --- a/docs/architecture/tendermint-core/adr-045-abci-evidence.md +++ b/docs/references/architecture/tendermint-core/adr-045-abci-evidence.md @@ -46,7 +46,7 @@ Arguments in favor of leaving evidence handling in Tendermint: for the ABCI app to detect it (ie. we don't send all votes we receive during consensus to the app ... ).
-2) Amensia attacks can not be easily detected - they require an interactive +2) Amnesia attacks can not be easily detected - they require an interactive protocol among all the validators to submit justification for their past votes. Our best notion of [how to do this currently](https://github.com/tendermint/tendermint/blob/c67154232ca8be8f5c21dff65d154127adc4f7bb/docs/spec/consensus/fork-detection.md) @@ -57,7 +57,7 @@ Arguments in favor of leaving evidence handling in Tendermint: Validators must submit all the votes they saw for the relevant consensus height to justify their precommits. This is quite specific to the Tendermint protocol and may change if the protocol is upgraded. Hence it would be awkward - to co-ordinate this from the app. + to coordinate this from the app. 3) Evidence gossipping is similar to tx gossipping, but it should be higher priority. Since the mempool does not support any notion of priority yet, diff --git a/docs/architecture/tendermint-core/adr-046-light-client-implementation.md b/docs/references/architecture/tendermint-core/adr-046-light-client-implementation.md similarity index 99% rename from docs/architecture/tendermint-core/adr-046-light-client-implementation.md rename to docs/references/architecture/tendermint-core/adr-046-light-client-implementation.md index 15d77373dc3..3d4351ab7e2 100644 --- a/docs/architecture/tendermint-core/adr-046-light-client-implementation.md +++ b/docs/references/architecture/tendermint-core/adr-046-light-client-implementation.md @@ -143,7 +143,7 @@ the recursive version. There are two major reasons: _Fig. 1: Differences between recursive and non-recursive bisections_ -![Fig. 1](./img/adr-046-fig1.png) +![Fig. 1](img/adr-046-fig1.png) Specification of the non-recursive bisection can be found [here](https://github.com/tendermint/spec/blob/zm_non-recursive-verification/spec/consensus/light-client/non-recursive-verification.md). diff --git a/docs/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md b/docs/references/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md similarity index 98% rename from docs/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md rename to docs/references/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md index b0cac65d82d..dbef4e96ff7 100644 --- a/docs/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md +++ b/docs/references/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md @@ -8,7 +8,7 @@ * 14-08-2020: Introduce light traces (listed now as an alternative approach) * 20-08-2020: Light client produces evidence when detected instead of passing to full node * 16-09-2020: Post-implementation revision -* 15-03-2020: Ammends for the case of a forward lunatic attack +* 15-03-2020: Amends for the case of a forward lunatic attack ### Glossary of Terms @@ -100,7 +100,7 @@ of a different hash then trigger the detection process between the primary and t This begins with verification of the witness's header via skipping verification which is run in tande with locating the Light Bifurcation Point -![](./img/light-client-detector.png) +![](img/light-client-detector.png) This is done with: @@ -137,7 +137,7 @@ divergent header of the primary as it is likely, as seen in the example to the r headers where required in order to verify the divergent one. This trace will be used later (as is also described later in this document). 
-![](./img/bifurcation-point.png) +![](img/bifurcation-point.png) Now, that an attack has been detected, the light client must form evidence to prove it. There are three types of attacks that either the primary or witness could have done to try fool the light client diff --git a/docs/architecture/tendermint-core/adr-050-improved-trusted-peering.md b/docs/references/architecture/tendermint-core/adr-050-improved-trusted-peering.md similarity index 100% rename from docs/architecture/tendermint-core/adr-050-improved-trusted-peering.md rename to docs/references/architecture/tendermint-core/adr-050-improved-trusted-peering.md diff --git a/docs/architecture/tendermint-core/adr-051-double-signing-risk-reduction.md b/docs/references/architecture/tendermint-core/adr-051-double-signing-risk-reduction.md similarity index 100% rename from docs/architecture/tendermint-core/adr-051-double-signing-risk-reduction.md rename to docs/references/architecture/tendermint-core/adr-051-double-signing-risk-reduction.md diff --git a/docs/architecture/tendermint-core/adr-052-tendermint-mode.md b/docs/references/architecture/tendermint-core/adr-052-tendermint-mode.md similarity index 100% rename from docs/architecture/tendermint-core/adr-052-tendermint-mode.md rename to docs/references/architecture/tendermint-core/adr-052-tendermint-mode.md diff --git a/docs/architecture/tendermint-core/adr-053-state-sync-prototype.md b/docs/references/architecture/tendermint-core/adr-053-state-sync-prototype.md similarity index 98% rename from docs/architecture/tendermint-core/adr-053-state-sync-prototype.md rename to docs/references/architecture/tendermint-core/adr-053-state-sync-prototype.md index 2d8c37ad1cb..84e1e1ad220 100644 --- a/docs/architecture/tendermint-core/adr-053-state-sync-prototype.md +++ b/docs/references/architecture/tendermint-core/adr-053-state-sync-prototype.md @@ -2,7 +2,7 @@ State sync is now [merged](https://github.com/tendermint/tendermint/pull/4705). Up-to-date ABCI documentation is [available](https://github.com/tendermint/spec/pull/90), refer to it rather than this ADR for details. -This ADR outlines the plan for an initial state sync prototype, and is subject to change as we gain feedback and experience. It builds on discussions and findings in [ADR-042](./adr-042-state-sync.md), see that for background information. +This ADR outlines the plan for an initial state sync prototype, and is subject to change as we gain feedback and experience. It builds on discussions and findings in [ADR-042](adr-042-state-sync.md), see that for background information. ## Changelog @@ -30,7 +30,7 @@ This ADR outlines the plan for an initial state sync prototype, and is subject t State sync will allow a new node to receive a snapshot of the application state without downloading blocks or going through consensus. This bootstraps the node significantly faster than the current fast sync system, which replays all historical blocks. -Background discussions and justifications are detailed in [ADR-042](./adr-042-state-sync.md). Its recommendations can be summarized as: +Background discussions and justifications are detailed in [ADR-042](adr-042-state-sync.md). Its recommendations can be summarized as: * The application periodically takes full state snapshots (i.e. eager snapshots). 
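For orientation, the snapshot surface this ADR converges on can be sketched in Go; the shapes below are a simplified paraphrase of the ABCI state-sync methods referenced above (`ListSnapshots`, `OfferSnapshot`, `LoadSnapshotChunk`, `ApplySnapshotChunk`), not the exact API — refer to the linked ABCI documentation for the authoritative definitions.

```go
package main

// Snapshot paraphrases the snapshot metadata exchanged during state sync;
// fields are simplified relative to the real ABCI message.
type Snapshot struct {
	Height uint64 // height at which the eager snapshot was taken
	Format uint32 // application-defined serialization format
	Chunks uint32 // number of chunks the snapshot is split into
	Hash   []byte // integrity hash of the snapshot
}

// snapshotApp sketches the two roles described in this ADR: producers
// advertise snapshots and serve chunks; restoring nodes accept an offered
// snapshot and apply its chunks one by one, verifying as they go.
type snapshotApp interface {
	ListSnapshots() ([]Snapshot, error)
	LoadSnapshotChunk(height uint64, format, chunk uint32) ([]byte, error)
	OfferSnapshot(s Snapshot) error
	ApplySnapshotChunk(index uint32, chunk []byte) error
}

func main() {}
```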
@@ -251,4 +251,4 @@ Implemented ## References -* [ADR-042](./adr-042-state-sync.md) and its references +* [ADR-042](adr-042-state-sync.md) and its references diff --git a/docs/architecture/tendermint-core/adr-054-crypto-encoding-2.md b/docs/references/architecture/tendermint-core/adr-054-crypto-encoding-2.md similarity index 88% rename from docs/architecture/tendermint-core/adr-054-crypto-encoding-2.md rename to docs/references/architecture/tendermint-core/adr-054-crypto-encoding-2.md index e58681d155a..998cb503a58 100644 --- a/docs/architecture/tendermint-core/adr-054-crypto-encoding-2.md +++ b/docs/references/architecture/tendermint-core/adr-054-crypto-encoding-2.md @@ -14,7 +14,7 @@ Currently amino encodes keys as ` `. ## Decision Previously Tendermint defined all the key types for use in Tendermint and the Cosmos-SDK. Going forward the Cosmos-SDK will define its own protobuf type for keys. This will allow Tendermint to only define the keys that are being used in the codebase (ed25519). -There is the the opportunity to only define the usage of ed25519 (`bytes`) and not have it be a `oneof`, but this would mean that the `oneof` work is only being postponed to a later date. When using the `oneof` protobuf type we will have to manually switch over the possible key types and then pass them to the interface which is needed. +There is the opportunity to only define the usage of ed25519 (`bytes`) and not have it be a `oneof`, but this would mean that the `oneof` work is only being postponed to a later date. When using the `oneof` protobuf type we will have to manually switch over the possible key types and then pass them to the interface which is needed. The approach that will be taken to minimize headaches for users is one where all encoding of keys will shift to protobuf and where amino encoding is relied on, there will be custom marshal and unmarshal functions. diff --git a/docs/architecture/tendermint-core/adr-055-protobuf-design.md b/docs/references/architecture/tendermint-core/adr-055-protobuf-design.md similarity index 100% rename from docs/architecture/tendermint-core/adr-055-protobuf-design.md rename to docs/references/architecture/tendermint-core/adr-055-protobuf-design.md diff --git a/docs/architecture/tendermint-core/adr-056-light-client-amnesia-attacks.md b/docs/references/architecture/tendermint-core/adr-056-light-client-amnesia-attacks.md similarity index 99% rename from docs/architecture/tendermint-core/adr-056-light-client-amnesia-attacks.md rename to docs/references/architecture/tendermint-core/adr-056-light-client-amnesia-attacks.md index 68ac0f70f80..a4b795c727c 100644 --- a/docs/architecture/tendermint-core/adr-056-light-client-amnesia-attacks.md +++ b/docs/references/architecture/tendermint-core/adr-056-light-client-amnesia-attacks.md @@ -16,7 +16,7 @@ Whilst most created evidence of malicious behavior is self evident such that any The schematic below explains a scenario where an amnesia attack can occur such that two sets of honest nodes, C1 and C2, commit different blocks. -![](./img/tm-amnesia-attack.png) +![](img/tm-amnesia-attack.png) 1. C1 and F send PREVOTE messages for block A. 2. C1 sends PRECOMMIT for round 1 for block A. @@ -140,7 +140,7 @@ type ProofOfLockChange struct { This can be either evidence of +2/3 PREVOTES or PRECOMMITS (either warrants the honest node the right to vote) and is valid, among other checks, so long as the PRECOMMIT vote of the node in V2 came after all the votes in the `ProofOfLockChange` i.e. 
it received +2/3 votes for a block and then voted for that block thereafter (F is unable to prove this). -In the event that an honest node receives `PotentialAmnesiaEvidence` it will first `ValidateBasic()` and `Verify()` it and then will check if it is among the suspected nodes in the evidence. If so, it will retrieve the `ProofOfLockChange` and combine it with `PotentialAmensiaEvidence` to form `AmensiaEvidence`. All honest nodes that are part of the indicted group will have a time, measured in blocks, equal to `ProofTrialPeriod`, the aforementioned evidence paramter, to gossip their `AmnesiaEvidence` with their `ProofOfLockChange` +In the event that an honest node receives `PotentialAmnesiaEvidence`, it will first `ValidateBasic()` and `Verify()` it and then will check if it is among the suspected nodes in the evidence. If so, it will retrieve the `ProofOfLockChange` and combine it with `PotentialAmnesiaEvidence` to form `AmnesiaEvidence`. All honest nodes that are part of the indicted group will have a time, measured in blocks, equal to `ProofTrialPeriod`, the aforementioned evidence parameter, to gossip their `AmnesiaEvidence` with their `ProofOfLockChange` ```golang type AmnesiaEvidence struct { @@ -163,7 +163,7 @@ When, `state.LastBlockHeight > PotentialAmnesiaEvidence.timestamp + ProofTrialPe Other validators will vote `nil` if: - The Amnesia Evidence is not valid -- The Amensia Evidence is not within their own trial period i.e. too soon. +- The Amnesia Evidence is not within their own trial period, i.e. too soon. - They don't have the Amnesia Evidence and it is has an empty polc (each validator needs to run their own trial period of the evidence) - Is of an AmnesiaEvidence that has already been committed to the chain. diff --git a/docs/architecture/tendermint-core/adr-057-RPC.md b/docs/references/architecture/tendermint-core/adr-057-RPC.md similarity index 100% rename from docs/architecture/tendermint-core/adr-057-RPC.md rename to docs/references/architecture/tendermint-core/adr-057-RPC.md diff --git a/docs/architecture/tendermint-core/adr-058-event-hashing.md b/docs/references/architecture/tendermint-core/adr-058-event-hashing.md similarity index 99% rename from docs/architecture/tendermint-core/adr-058-event-hashing.md rename to docs/references/architecture/tendermint-core/adr-058-event-hashing.md index 184b921d5fb..40c80c6c9f7 100644 --- a/docs/architecture/tendermint-core/adr-058-event-hashing.md +++ b/docs/references/architecture/tendermint-core/adr-058-event-hashing.md @@ -109,7 +109,7 @@ and how this ought to ultimately be done by Tendermint.** ## References -- [ADR 021](./adr-021-abci-events.md) +- [ADR 021](adr-021-abci-events.md) - [Indexing transactions](../app-dev/indexing-transactions.md) ## Appendix A.
Alternative proposals diff --git a/docs/architecture/tendermint-core/adr-059-evidence-composition-and-lifecycle.md b/docs/references/architecture/tendermint-core/adr-059-evidence-composition-and-lifecycle.md similarity index 99% rename from docs/architecture/tendermint-core/adr-059-evidence-composition-and-lifecycle.md rename to docs/references/architecture/tendermint-core/adr-059-evidence-composition-and-lifecycle.md index 6e0f3f40cce..3030c3d2103 100644 --- a/docs/architecture/tendermint-core/adr-059-evidence-composition-and-lifecycle.md +++ b/docs/references/architecture/tendermint-core/adr-059-evidence-composition-and-lifecycle.md @@ -4,7 +4,7 @@ - 04/09/2020: Initial Draft (Unabridged) - 07/09/2020: First Version -- 13/03/2021: Ammendment to accomodate forward lunatic attack +- 13/03/2021: Amendment to accommodate forward lunatic attack - 29/06/2021: Add information about ABCI specific fields ## Scope @@ -181,7 +181,7 @@ For `LightClientAttack` - Check that the hashes of the conflicting header and the trusted header are different -- In the case of a forward lunatic attack, where the trusted header height is less than the conflicting header height, the node checks that the time of the trusted header is later than the time of conflicting header. This proves that the conflicting header breaks monotonically increasing time. If the node doesn't have a trusted header with a later time then it is unable to validate the evidence for now. +- In the case of a forward lunatic attack, where the trusted header height is less than the conflicting header height, the node checks that the time of the trusted header is later than the time of conflicting header. This proves that the conflicting header breaks monotonically increasing time. If the node doesn't have a trusted header with a later time then it is unable to validate the evidence for now. - Lastly, for each validator, check the look up table to make sure there already isn't evidence against this validator @@ -271,7 +271,7 @@ The ABCI evidence is then sent via the `BlockExecutor` to the application. To summarize, we can see the lifecycle of evidence as such: -![evidence_lifecycle](./img/evidence_lifecycle.png) +![evidence_lifecycle](img/evidence_lifecycle.png) Evidence is first detected and created in the light client and consensus reactor. It is verified and stored as `EvidenceInfo` and gossiped to the evidence pools in other nodes. The consensus reactor later communicates with the evidence pool to either retrieve evidence to be put into a block, or verify the evidence the consensus reactor has retrieved in a block. Lastly when a block is added to the chain, the block executor sends the committed evidence back to the evidence pool so a pointer to the evidence can be stored in the evidence pool and it can update it's height and time. Finally, it turns the committed evidence into ABCI evidence and through the block executor passes the evidence to the application so the application can handle it. 
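The lifecycle described above can be condensed into a hedged Go sketch; the interface below paraphrases the three interactions named in this ADR (verification and storage, retrieval by consensus, and the committed-evidence callback from the block executor), and its names and signatures are illustrative assumptions rather than the real evidence-pool API.

```go
package main

// Evidence stands in for the concrete evidence types discussed in this ADR.
type Evidence interface {
	Height() int64
}

// evidencePool paraphrases the evidence lifecycle sketched above.
type evidencePool interface {
	// AddEvidence verifies incoming evidence (detected locally or received
	// via gossip) and stores it as pending.
	AddEvidence(ev Evidence) error
	// PendingEvidence is consulted by consensus to fill a proposed block.
	PendingEvidence(maxBytes int64) []Evidence
	// Update is invoked by the block executor after a block commits, so the
	// pool can mark evidence as committed and prune by height and time.
	Update(height int64, committed []Evidence)
}

func main() {}
```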
diff --git a/docs/architecture/tendermint-core/adr-060-go-api-stability.md b/docs/references/architecture/tendermint-core/adr-060-go-api-stability.md similarity index 100% rename from docs/architecture/tendermint-core/adr-060-go-api-stability.md rename to docs/references/architecture/tendermint-core/adr-060-go-api-stability.md diff --git a/docs/architecture/tendermint-core/adr-061-p2p-refactor-scope.md b/docs/references/architecture/tendermint-core/adr-061-p2p-refactor-scope.md similarity index 100% rename from docs/architecture/tendermint-core/adr-061-p2p-refactor-scope.md rename to docs/references/architecture/tendermint-core/adr-061-p2p-refactor-scope.md diff --git a/docs/architecture/tendermint-core/adr-062-p2p-architecture.md b/docs/references/architecture/tendermint-core/adr-062-p2p-architecture.md similarity index 99% rename from docs/architecture/tendermint-core/adr-062-p2p-architecture.md rename to docs/references/architecture/tendermint-core/adr-062-p2p-architecture.md index f3b7d215288..08e31b9b1fc 100644 --- a/docs/architecture/tendermint-core/adr-062-p2p-architecture.md +++ b/docs/references/architecture/tendermint-core/adr-062-p2p-architecture.md @@ -599,7 +599,7 @@ Was partially implemented in v0.35 ([#5670](https://github.com/tendermint/tender ### Negative -* Fully implementing the new design as indended is likely to require breaking changes to the P2P protocol at some point, although the initial implementation shouldn't. +* Fully implementing the new design as intended is likely to require breaking changes to the P2P protocol at some point, although the initial implementation shouldn't. * Gradually migrating the existing stack and maintaining backwards-compatibility will be more labor-intensive than simply replacing the entire stack. diff --git a/docs/architecture/tendermint-core/adr-063-privval-grpc.md b/docs/references/architecture/tendermint-core/adr-063-privval-grpc.md similarity index 100% rename from docs/architecture/tendermint-core/adr-063-privval-grpc.md rename to docs/references/architecture/tendermint-core/adr-063-privval-grpc.md diff --git a/docs/architecture/tendermint-core/adr-064-batch-verification.md b/docs/references/architecture/tendermint-core/adr-064-batch-verification.md similarity index 98% rename from docs/architecture/tendermint-core/adr-064-batch-verification.md rename to docs/references/architecture/tendermint-core/adr-064-batch-verification.md index 13bba25e4fe..b017dc7cd50 100644 --- a/docs/architecture/tendermint-core/adr-064-batch-verification.md +++ b/docs/references/architecture/tendermint-core/adr-064-batch-verification.md @@ -33,7 +33,7 @@ type BatchVerifier interface { - `NewBatchVerifier` creates a new verifier. This verifier will be populated with entries to be verified. - `Add` adds an entry to the Verifier. Add accepts a public key and two slice of bytes (signature and message). -- `Verify` verifies all the entires. At the end of Verify if the underlying API does not reset the Verifier to its initial state (empty), it should be done here. This prevents accidentally reusing the verifier with entries from a previous verification. +- `Verify` verifies all the entries. At the end of Verify if the underlying API does not reset the Verifier to its initial state (empty), it should be done here. This prevents accidentally reusing the verifier with entries from a previous verification. Above there is mention of an entry. An entry can be constructed in many ways depending on the needs of the underlying curve. 
A simple approach would be: diff --git a/docs/architecture/tendermint-core/adr-065-custom-event-indexing.md b/docs/references/architecture/tendermint-core/adr-065-custom-event-indexing.md similarity index 100% rename from docs/architecture/tendermint-core/adr-065-custom-event-indexing.md rename to docs/references/architecture/tendermint-core/adr-065-custom-event-indexing.md diff --git a/docs/architecture/tendermint-core/adr-066-e2e-testing.md b/docs/references/architecture/tendermint-core/adr-066-e2e-testing.md similarity index 100% rename from docs/architecture/tendermint-core/adr-066-e2e-testing.md rename to docs/references/architecture/tendermint-core/adr-066-e2e-testing.md diff --git a/docs/architecture/tendermint-core/adr-067-mempool-refactor.md b/docs/references/architecture/tendermint-core/adr-067-mempool-refactor.md similarity index 99% rename from docs/architecture/tendermint-core/adr-067-mempool-refactor.md rename to docs/references/architecture/tendermint-core/adr-067-mempool-refactor.md index d217b1df1ef..6fa51da5488 100644 --- a/docs/architecture/tendermint-core/adr-067-mempool-refactor.md +++ b/docs/references/architecture/tendermint-core/adr-067-mempool-refactor.md @@ -71,7 +71,7 @@ can be addressed in an easy and extensible manner in the future. ### Current Design -![mempool](./img/mempool-v0.jpeg) +![mempool](img/mempool-v0.jpeg) At the core of the `v0` mempool reactor is a concurrent linked-list. This is the primary data structure that contains `Tx` objects that have passed `CheckTx`. diff --git a/docs/architecture/tendermint-core/adr-068-reverse-sync.md b/docs/references/architecture/tendermint-core/adr-068-reverse-sync.md similarity index 98% rename from docs/architecture/tendermint-core/adr-068-reverse-sync.md rename to docs/references/architecture/tendermint-core/adr-068-reverse-sync.md index d7ca5a9162f..ec1985a98d3 100644 --- a/docs/architecture/tendermint-core/adr-068-reverse-sync.md +++ b/docs/references/architecture/tendermint-core/adr-068-reverse-sync.md @@ -36,7 +36,7 @@ Furthermore this allows for a new light client provider which offers the ability ## Detailed Design -This section will focus first on the reverse sync (here we call it `backfill`) mechanism as a standalone protocol and then look to decribe how it integrates within the state sync reactor and how we define the new p2p light client provider. +This section will focus first on the reverse sync (here we call it `backfill`) mechanism as a standalone protocol and then look to describe how it integrates within the state sync reactor and how we define the new p2p light client provider. ```go // Backfill fetches, verifies, and stores necessary history diff --git a/docs/architecture/tendermint-core/adr-069-flexible-node-initialization.md b/docs/references/architecture/tendermint-core/adr-069-flexible-node-initialization.md similarity index 96% rename from docs/architecture/tendermint-core/adr-069-flexible-node-initialization.md rename to docs/references/architecture/tendermint-core/adr-069-flexible-node-initialization.md index 31082b1059b..14cd4213e8d 100644 --- a/docs/architecture/tendermint-core/adr-069-flexible-node-initialization.md +++ b/docs/references/architecture/tendermint-core/adr-069-flexible-node-initialization.md @@ -1,6 +1,6 @@ # ADR 069: Flexible Node Initialization -## Changlog +## Changelog - 2021-06-09: Initial Draft (@tychoish) @@ -12,13 +12,13 @@ Proposed. 
## Context -In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md), -during the 0.35 development cycle, we have attempted to reduce the the API +In an effort to support [Go-API-Stability](adr-060-go-api-stability.md), +during the 0.35 development cycle, we have attempted to reduce the API surface area by moving most of the interface of the `node` package into unexported functions, as well as moving the reactors to an `internal` package. Having this coincide with the 0.35 release made a lot of sense because these interfaces were _already_ changing as a result of the `p2p` -[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit +[refactor](adr-061-p2p-refactor-scope.md), so it made sense to think a bit more about how tendermint exposes this API. While the interfaces of the P2P layer and most of the node package are already @@ -31,7 +31,7 @@ vendor copy of the code. Adding these features requires rather extensive ADR describes a model for changing the way that tendermint nodes initialize, in service of providing this kind of functionality. -We consider node initialization, because the current implemention +We consider node initialization, because the current implementation provides strong connections between all components, as well as between the components of the node and the RPC layer, and being able to think about the interactions of these components will help enable these @@ -41,7 +41,7 @@ features and help define the requirements of the node package. These alternatives are presented to frame the design space and to contextualize the decision in terms of product requirements. These -ideas are not inherently bad, and may even be possible or desireable +ideas are not inherently bad, and may even be possible or desirable in the (distant) future, and merely provide additional context for how we, in the moment came to our decision(s). @@ -265,4 +265,4 @@ described by the following dependency graph makes replacing some of these components more difficult relative to other reactors or components. -![consensus blockchain dependency graph](./img/consensus_blockchain.png) +![consensus blockchain dependency graph](img/consensus_blockchain.png) diff --git a/docs/architecture/tendermint-core/adr-071-proposer-based-timestamps.md b/docs/references/architecture/tendermint-core/adr-071-proposer-based-timestamps.md similarity index 99% rename from docs/architecture/tendermint-core/adr-071-proposer-based-timestamps.md rename to docs/references/architecture/tendermint-core/adr-071-proposer-based-timestamps.md index e17226cce75..2d1fafd7566 100644 --- a/docs/architecture/tendermint-core/adr-071-proposer-based-timestamps.md +++ b/docs/references/architecture/tendermint-core/adr-071-proposer-based-timestamps.md @@ -135,7 +135,7 @@ A validator will only Prevote a proposal if the proposal timestamp is considered A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator. More specifically, a proposal timestamp is `timely` if `proposalTimestamp - PRECISION ≤ validatorLocalTime ≤ proposalTimestamp + PRECISION + MSGDELAY`. -Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/types/params.proto#L11) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration). 
+Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/types/params.proto#L11) as [durations](https://protobuf.dev/reference/protobuf/google.protobuf/#duration). The consensus parameters will be updated to include this `Synchrony` field as follows: diff --git a/docs/architecture/tendermint-core/adr-072-request-for-comments.md b/docs/references/architecture/tendermint-core/adr-072-request-for-comments.md similarity index 100% rename from docs/architecture/tendermint-core/adr-072-request-for-comments.md rename to docs/references/architecture/tendermint-core/adr-072-request-for-comments.md diff --git a/docs/architecture/tendermint-core/adr-073-libp2p.md b/docs/references/architecture/tendermint-core/adr-073-libp2p.md similarity index 99% rename from docs/architecture/tendermint-core/adr-073-libp2p.md rename to docs/references/architecture/tendermint-core/adr-073-libp2p.md index 080fecbcdf4..549cf64d573 100644 --- a/docs/architecture/tendermint-core/adr-073-libp2p.md +++ b/docs/references/architecture/tendermint-core/adr-073-libp2p.md @@ -230,6 +230,6 @@ the implementation timeline. - [ADR 62: P2P Architecture][adr62] - [P2P Roadmap RFC][rfc] -[adr61]: ./adr-061-p2p-refactor-scope.md -[adr62]: ./adr-062-p2p-architecture.md +[adr61]: adr-061-p2p-refactor-scope.md +[adr62]: adr-062-p2p-architecture.md [rfc]: ../rfc/rfc-000-p2p-roadmap.rst diff --git a/docs/architecture/tendermint-core/adr-074-timeout-params.md b/docs/references/architecture/tendermint-core/adr-074-timeout-params.md similarity index 100% rename from docs/architecture/tendermint-core/adr-074-timeout-params.md rename to docs/references/architecture/tendermint-core/adr-074-timeout-params.md diff --git a/docs/architecture/tendermint-core/adr-075-rpc-subscription.md b/docs/references/architecture/tendermint-core/adr-075-rpc-subscription.md similarity index 98% rename from docs/architecture/tendermint-core/adr-075-rpc-subscription.md rename to docs/references/architecture/tendermint-core/adr-075-rpc-subscription.md index a838f2766a9..8d589c49979 100644 --- a/docs/architecture/tendermint-core/adr-075-rpc-subscription.md +++ b/docs/references/architecture/tendermint-core/adr-075-rpc-subscription.md @@ -308,7 +308,7 @@ type EventParams struct { MaxResults int `json:"max_results"` // Return only items after this cursor. If empty, the limit is just - // before the the beginning of the event log. + // before the beginning of the event log. After string `json:"after"` // Return only items before this cursor. If empty, the limit is just @@ -458,7 +458,7 @@ crashes and connectivity issues: no `before`). 2. If there are more events than the client requested, or if the client needs - to to read older events to recover from a stall or crash, clients will + to read older events to recover from a stall or crash, clients will **page** backward through the event log (by setting `before` and `after`). - While the new API requires explicit polling by the client, it makes better @@ -483,7 +483,7 @@ crashes and connectivity issues: then discard any older than the time window before the latest. To minimize coordination interference between the publisher (the event bus) - and the subcribers (the `events` service handlers), the event log will be + and the subscribers (the `events` service handlers), the event log will be stored as a persistent linear queue with shared structure (a cons list). 
A single reader-writer mutex will guard the "head" of the queue where new items are published: @@ -516,10 +516,10 @@ crashes and connectivity issues: requires (e.g., reducing to 3/4 of the maximum rather than 1/1). The state of the event log before the publisher acquires the lock: - ![Before publish and pruning](./img/adr-075-log-before.png) + ![Before publish and pruning](img/adr-075-log-before.png) After the publisher has added a new item and pruned old ones: - ![After publish and pruning](./img/adr-075-log-after.png) + ![After publish and pruning](img/adr-075-log-after.png) ### Migration Plan @@ -550,7 +550,7 @@ the new API, to remove a disincentive to upgrading. > disruption for users in the v0.36 cycle, I have decided not to do this for > the first phase. > -> If we wind up pushing this design into v0.37, however, we should re-evaulate +> If we wind up pushing this design into v0.37, however, we should re-evaluate > this partial turn-down of the websocket. ### Future Work diff --git a/docs/architecture/tendermint-core/adr-076-combine-spec-repo.md b/docs/references/architecture/tendermint-core/adr-076-combine-spec-repo.md similarity index 100% rename from docs/architecture/tendermint-core/adr-076-combine-spec-repo.md rename to docs/references/architecture/tendermint-core/adr-076-combine-spec-repo.md diff --git a/docs/architecture/tendermint-core/adr-077-block-retention.md b/docs/references/architecture/tendermint-core/adr-077-block-retention.md similarity index 100% rename from docs/architecture/tendermint-core/adr-077-block-retention.md rename to docs/references/architecture/tendermint-core/adr-077-block-retention.md diff --git a/docs/architecture/tendermint-core/adr-078-nonzero-genesis.md b/docs/references/architecture/tendermint-core/adr-078-nonzero-genesis.md similarity index 100% rename from docs/architecture/tendermint-core/adr-078-nonzero-genesis.md rename to docs/references/architecture/tendermint-core/adr-078-nonzero-genesis.md diff --git a/docs/architecture/tendermint-core/adr-079-ed25519-verification.md b/docs/references/architecture/tendermint-core/adr-079-ed25519-verification.md similarity index 100% rename from docs/architecture/tendermint-core/adr-079-ed25519-verification.md rename to docs/references/architecture/tendermint-core/adr-079-ed25519-verification.md diff --git a/docs/architecture/tendermint-core/adr-080-reverse-sync.md b/docs/references/architecture/tendermint-core/adr-080-reverse-sync.md similarity index 100% rename from docs/architecture/tendermint-core/adr-080-reverse-sync.md rename to docs/references/architecture/tendermint-core/adr-080-reverse-sync.md diff --git a/docs/architecture/tendermint-core/adr-081-protobuf-mgmt.md b/docs/references/architecture/tendermint-core/adr-081-protobuf-mgmt.md similarity index 100% rename from docs/architecture/tendermint-core/adr-081-protobuf-mgmt.md rename to docs/references/architecture/tendermint-core/adr-081-protobuf-mgmt.md diff --git a/docs/architecture/tendermint-core/img/adr-046-fig1.png b/docs/references/architecture/tendermint-core/img/adr-046-fig1.png similarity index 100% rename from docs/architecture/tendermint-core/img/adr-046-fig1.png rename to docs/references/architecture/tendermint-core/img/adr-046-fig1.png diff --git a/docs/architecture/tendermint-core/img/adr-062-architecture.svg b/docs/references/architecture/tendermint-core/img/adr-062-architecture.svg similarity index 100% rename from docs/architecture/tendermint-core/img/adr-062-architecture.svg rename to 
docs/references/architecture/tendermint-core/img/adr-062-architecture.svg diff --git a/docs/architecture/tendermint-core/img/adr-075-log-after.png b/docs/references/architecture/tendermint-core/img/adr-075-log-after.png similarity index 100% rename from docs/architecture/tendermint-core/img/adr-075-log-after.png rename to docs/references/architecture/tendermint-core/img/adr-075-log-after.png diff --git a/docs/architecture/tendermint-core/img/adr-075-log-before.png b/docs/references/architecture/tendermint-core/img/adr-075-log-before.png similarity index 100% rename from docs/architecture/tendermint-core/img/adr-075-log-before.png rename to docs/references/architecture/tendermint-core/img/adr-075-log-before.png diff --git a/docs/architecture/tendermint-core/img/bc-reactor-refactor.png b/docs/references/architecture/tendermint-core/img/bc-reactor-refactor.png similarity index 100% rename from docs/architecture/tendermint-core/img/bc-reactor-refactor.png rename to docs/references/architecture/tendermint-core/img/bc-reactor-refactor.png diff --git a/docs/architecture/tendermint-core/img/bc-reactor.png b/docs/references/architecture/tendermint-core/img/bc-reactor.png similarity index 100% rename from docs/architecture/tendermint-core/img/bc-reactor.png rename to docs/references/architecture/tendermint-core/img/bc-reactor.png diff --git a/docs/architecture/tendermint-core/img/bifurcation-point.png b/docs/references/architecture/tendermint-core/img/bifurcation-point.png similarity index 100% rename from docs/architecture/tendermint-core/img/bifurcation-point.png rename to docs/references/architecture/tendermint-core/img/bifurcation-point.png diff --git a/docs/architecture/tendermint-core/img/block-retention.png b/docs/references/architecture/tendermint-core/img/block-retention.png similarity index 100% rename from docs/architecture/tendermint-core/img/block-retention.png rename to docs/references/architecture/tendermint-core/img/block-retention.png diff --git a/docs/architecture/tendermint-core/img/blockchain-reactor-v1.png b/docs/references/architecture/tendermint-core/img/blockchain-reactor-v1.png similarity index 100% rename from docs/architecture/tendermint-core/img/blockchain-reactor-v1.png rename to docs/references/architecture/tendermint-core/img/blockchain-reactor-v1.png diff --git a/docs/architecture/tendermint-core/img/blockchain-reactor-v2.png b/docs/references/architecture/tendermint-core/img/blockchain-reactor-v2.png similarity index 100% rename from docs/architecture/tendermint-core/img/blockchain-reactor-v2.png rename to docs/references/architecture/tendermint-core/img/blockchain-reactor-v2.png diff --git a/docs/architecture/tendermint-core/img/blockchain-v2-channels.png b/docs/references/architecture/tendermint-core/img/blockchain-v2-channels.png similarity index 100% rename from docs/architecture/tendermint-core/img/blockchain-v2-channels.png rename to docs/references/architecture/tendermint-core/img/blockchain-v2-channels.png diff --git a/docs/architecture/tendermint-core/img/consensus_blockchain.png b/docs/references/architecture/tendermint-core/img/consensus_blockchain.png similarity index 100% rename from docs/architecture/tendermint-core/img/consensus_blockchain.png rename to docs/references/architecture/tendermint-core/img/consensus_blockchain.png diff --git a/docs/architecture/tendermint-core/img/evidence_lifecycle.png b/docs/references/architecture/tendermint-core/img/evidence_lifecycle.png similarity index 100% rename from 
docs/architecture/tendermint-core/img/evidence_lifecycle.png rename to docs/references/architecture/tendermint-core/img/evidence_lifecycle.png diff --git a/docs/architecture/tendermint-core/img/formula1.png b/docs/references/architecture/tendermint-core/img/formula1.png similarity index 100% rename from docs/architecture/tendermint-core/img/formula1.png rename to docs/references/architecture/tendermint-core/img/formula1.png diff --git a/docs/architecture/tendermint-core/img/formula2.png b/docs/references/architecture/tendermint-core/img/formula2.png similarity index 100% rename from docs/architecture/tendermint-core/img/formula2.png rename to docs/references/architecture/tendermint-core/img/formula2.png diff --git a/docs/architecture/tendermint-core/img/light-client-detector.png b/docs/references/architecture/tendermint-core/img/light-client-detector.png similarity index 100% rename from docs/architecture/tendermint-core/img/light-client-detector.png rename to docs/references/architecture/tendermint-core/img/light-client-detector.png diff --git a/docs/architecture/tendermint-core/img/mempool-v0.jpeg b/docs/references/architecture/tendermint-core/img/mempool-v0.jpeg similarity index 100% rename from docs/architecture/tendermint-core/img/mempool-v0.jpeg rename to docs/references/architecture/tendermint-core/img/mempool-v0.jpeg diff --git a/docs/architecture/tendermint-core/img/pbts-message.png b/docs/references/architecture/tendermint-core/img/pbts-message.png similarity index 100% rename from docs/architecture/tendermint-core/img/pbts-message.png rename to docs/references/architecture/tendermint-core/img/pbts-message.png diff --git a/docs/architecture/tendermint-core/img/state-sync.png b/docs/references/architecture/tendermint-core/img/state-sync.png similarity index 100% rename from docs/architecture/tendermint-core/img/state-sync.png rename to docs/references/architecture/tendermint-core/img/state-sync.png diff --git a/docs/architecture/tendermint-core/img/tags1.png b/docs/references/architecture/tendermint-core/img/tags1.png similarity index 100% rename from docs/architecture/tendermint-core/img/tags1.png rename to docs/references/architecture/tendermint-core/img/tags1.png diff --git a/docs/architecture/tendermint-core/img/tm-amnesia-attack.png b/docs/references/architecture/tendermint-core/img/tm-amnesia-attack.png similarity index 100% rename from docs/architecture/tendermint-core/img/tm-amnesia-attack.png rename to docs/references/architecture/tendermint-core/img/tm-amnesia-attack.png diff --git a/docs/references/config/README.md b/docs/references/config/README.md new file mode 100644 index 00000000000..c94ed13ddf1 --- /dev/null +++ b/docs/references/config/README.md @@ -0,0 +1,36 @@ +--- +order: 1 +parent: + title: Configuration Manual + description: A comprehensive reference manual for configuring CometBFT + order: false +--- +# CometBFT Configuration Manual + +## Overview +The CometBFT configuration has three distinct parts: +1. The network parameters in [genesis.json](genesis.json.md). +2. The nodeID in [node_key.json](node_key.json.md). +3. The configuration of the node and its services in [config.toml](config.toml.md). + +Validator nodes also require a private/public key-pair to sign consensus messages. 
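+
+For illustration, with the default home directory (see [The HOME folder](#the-home-folder) below) and no overrides, these files live at:
+
+```
+$HOME/.cometbft/config/config.toml             # node and service configuration
+$HOME/.cometbft/config/genesis.json            # network parameters
+$HOME/.cometbft/config/node_key.json           # node ID key
+$HOME/.cometbft/config/priv_validator_key.json # validator key-pair (see below)
+$HOME/.cometbft/data/priv_validator_state.json # last signing state (see below)
+```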
+ +If a Hardware Security Module (HSM) is not available, CometBFT stores an unencrypted key-pair on the file system in the +[priv_validator_key.json](priv_validator_key.json.md) file and the state of the last block signed in +[priv_validator_state.json](priv_validator_state.json.md). + +## The HOME folder +The CometBFT HOME folder contains all configuration (in the `$CMTHOME/config` folder) for CometBFT as well as all the databases (in the `$CMTHOME/data` folder) +used during execution. + +The path to the folder is determined by these steps: +1. The home folder for CometBFT is read from the `CMTHOME` environment variable. +2. If the variable is undefined, the default `$HOME/.cometbft` is assumed. +3. The environment variable is overridden by the `--home` command-line parameter. + +By default, all configuration files are stored under the `$CMTHOME/config` directory. +These can be overridden individually for each file in the `config.toml` file; for example, to +override the `genesis_file` location, change it [here](config.toml.md#genesis_file). + +By default, all databases are stored under the `$CMTHOME/data` directory. +This can be overridden with the [`db_dir`](config.toml.md#db_dir) parameter in the [`config.toml`](config.toml.md) file. diff --git a/docs/references/config/config.toml.md b/docs/references/config/config.toml.md new file mode 100644 index 00000000000..f1bbc29f2bf --- /dev/null +++ b/docs/references/config/config.toml.md @@ -0,0 +1,2282 @@ +--- +order: 1 +parent: + title: config.toml + description: CometBFT general configuration + order: 3 +--- + + + + + +# config.toml +The `config.toml` file is a standard [TOML](https://toml.io/en/v1.0.0) file that configures the basic functionality +of CometBFT, including the configuration of the reactors. + +The default configuration file is created by running the command `cometbft init`. The `config.toml` is created with +all the parameters set to their default values. + +All relative paths in the configuration are relative to `$CMTHOME`. +(See [the HOME folder](./README.md#the-home-folder) for more details.) + +## Base configuration +The root table defines generic node settings. It is implemented in a struct called `BaseConfig`, hence the name. + +### version +The version of the CometBFT binary that created or last modified the config file. +```toml +version = "1.0.0" +``` + +| Value type | string | +|:--------------------|:------------------------| +| **Possible values** | semantic version string | +| | `""` | + +This string validates the configuration file for the binary. The string has to be either a +[valid semver](https://semver.org) string or an empty string. In any other case, the binary halts with an +`ERROR: error in config file: invalid version string` error. + +In the future, the code might make restrictions on what version of the file is compatible with what version of the +binary. There is no such check in place right now. Configuration and binary versions are interchangeable. + +### proxy_app +The TCP or UNIX socket of the ABCI application or the name of an example ABCI application compiled in with the CometBFT +library. +```toml +proxy_app = "tcp://127.0.0.1:26658" +``` + +| Value type | string | +|:--------------------|:--------------------------------------------------------| +| **Possible values** | TCP Stream socket (e.g. `"tcp://127.0.0.1:26658"`) | +| | Unix domain socket (e.g.
`"unix:///var/run/abci.sock"`) | +| | `"kvstore"` | +| | `"persistent_kvstore"` | +| | `"noop"` | + +When the ABCI application is written in a language other than Golang (for example, the +[Nomic binary](https://github.com/nomic-io/nomic) is written in Rust), the application can open a TCP port or create a +UNIX domain socket to communicate with CometBFT, while CometBFT runs as a separate process. + +IP addresses other than `localhost` (IPv4: `127.0.0.1`, IPv6: `::1`) are strongly discouraged. Such a setup has not been tested, and it has significant performance and security implications. +The [abci](#abci) parameter is used in conjunction with this parameter to define the protocol used for communication. + +In other cases (for example in the [Gaia binary](https://github.com/cosmos/gaia)), CometBFT is imported as a library +and the configuration entry is unused. + +For development and testing, the [built-in ABCI application](../../guides/app-dev/abci-cli.md) can be used without additional processes running. + +### moniker +A custom human-readable name for this node. +```toml +moniker = "my.host.name" +``` + +| Value type | string | +|:--------------------|:---------------------------------------------------------| +| **Possible values** | any human-readable string | + +The main use of this entry is to keep track of the different nodes in a local environment. For example, the `/status` RPC +endpoint will return the node moniker in the `.result.moniker` key. + +Monikers do not need to be unique. They are for local administrator use and troubleshooting. + +Nodes on the peer-to-peer network are identified by `nodeID@host:port` as discussed in the +[node_key.json](node_key.json.md) section. + +### db_backend +The chosen database backend for the node. +```toml +db_backend = "pebbledb" +``` + +| Value type | string | dependencies | GitHub | +|:--------------------|:--------------|:--------------|:-------------------------------------------------| +| **Possible values** | `"badgerdb"` | pure Golang | [badger](https://github.com/dgraph-io/badger) | +| | `"goleveldb"` | pure Golang | [goleveldb](https://github.com/syndtr/goleveldb) | +| | `"pebbledb"` | pure Golang | [pebble](https://github.com/cockroachdb/pebble) | +| | `"rocksdb"` | requires gcc | [grocksdb](https://github.com/linxGnu/grocksdb) | + +During the build process, by default, only the `pebbledb` library is built into the binary. +To add support for alternative databases, you need to include them in the build tags. +For example: `go build -tags rocksdb`. + +`goleveldb` is supported by default too, but it is no longer recommended for +production use. + +The RocksDB fork has API changes from the upstream RocksDB implementation. All +other databases claim a stable API. + +The supported databases are part of the [cometbft-db](https://github.com/cometbft/cometbft-db) library +that CometBFT uses as a common database interface to various databases. + +### db_dir +The directory path where the database is stored. +```toml +db_dir = "data" +``` + +| Value type | string | +|:--------------------|:-------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/data`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/data`. + +### log_level +A comma-separated list of `module:level` pairs that describe the log level of each module.
Alternatively, a single word +can be set, which applies that log level to all modules. +```toml +log_level = "info" +``` + +| Value type | string | | +|:---------------|:----------------|----------------------------------------| +| **Modules** | `"main"` | CometBFT main application logs | +| | `"consensus"` | consensus reactor logs | +| | `"p2p"` | p2p reactor logs | +| | `"pex"` | Peer Exchange logs | +| | `"proxy"` | ABCI proxy service (MultiAppConn) logs | +| | `"abci-client"` | ABCI client service logs | +| | `"rpc-server"` | RPC server logs | +| | `"txindex"` | Indexer service logs | +| | `"events"` | Events service logs | +| | `"pubsub"` | PubSub service logs | +| | `"evidence"` | Evidence reactor logs | +| | `"statesync"` | StateSync reactor logs | +| | `"mempool"` | Mempool reactor logs | +| | `"blocksync"` | BlockSync reactor logs | +| | `"state"` | Pruner service logs | +| | `"*"` | All modules | +| **Log levels** | `"debug"` | | +| | `"info"` | | +| | `"error"` | | +| | `"none"` | | + +At the end of a `module:level` list, a default log level can be set for modules with no explicit level, using `*` instead of a +module name. The default is `*:info`. + +Examples: + +Set the consensus reactor to `debug` log level and the `p2p` reactor to `none`. Everything else should be set to `error`: +```toml +log_level = "consensus:debug,p2p:none,*:error" +``` +Set RPC server logs to `debug` and leave everything else at `info`: +```toml +log_level = "rpc-server:debug" +``` + +#### Stripping debug log messages at compile-time + +Logging debug messages can lead to significant memory allocations, especially when outputting variable values. In Go, +even if `log_level` is not set to `debug`, these allocations can still occur because the program evaluates the debug +statements regardless of the log level. + +To prevent unnecessary memory usage, you can strip out all debug-level code from the binary at compile time using +build flags. This approach improves the performance of CometBFT by excluding debug messages entirely, even when `log_level` +is set to `debug`. This technique is ideal for production environments that prioritize performance optimization over debug logging. + +To build a binary with all debug log messages (e.g. `log.Debug()`) stripped out, use the `nodebug` tag: +``` +COMETBFT_BUILD_OPTIONS=nodebug make install +``` + +> Note: Compiling CometBFT with this method will completely disable all debug messages. If you require debug output, +> avoid compiling the binary with the `nodebug` build tag. + +### log_format + +Define the output format of the logs. + +```toml +log_format = "plain" +``` + +| Value type | string | +|:--------------------|:----------| +| **Possible values** | `"plain"` | +| | `"json"` | + +`plain` provides ANSI plain-text logs, by default color-coded (can be changed using [`log_colors`](#log_colors)).
+ +`json` provides JSON objects (one per line, not prettified) using the following (incomplete) schema: + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://cometbft.com/log.schema.json", + "title": "JSON log", + "description": "A log entry in JSON object format", + "type": "object", + "properties": { + "level": { + "description": "log level", + "type": "string" + }, + "ts": { + "description": "timestamp in RFC3339Nano format; the trailing zeroes are removed from the seconds field", + "type": "string" + }, + "_msg": { + "description": "core log message", + "type": "string" + }, + "module": { + "description": "module name that emitted the log", + "type": "string" + }, + "impl": { + "description": "some modules point out specific areas or tasks in log entries", + "type": "string" + }, + "msg": { + "description": "some entries have more granular messages than just the core _msg", + "type": "string" + }, + "height": { + "description": "some entries happen at a specific height", + "type": "integer", + "exclusiveMinimum": 0 + }, + "app_hash": { + "description": "some entries happen at a specific app_hash", + "type": "string" + } + }, + "required": [ "level", "ts", "_msg", "module" ] +} +``` +> Note: The list of properties is not exhaustive. When implementing log parsing, check your logs and update the schema. + + + +### log_colors + +Define whether the log output should be colored. +Only relevant when [`log_format`](#log_format) is `plain`. + +```toml +log_colors = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +The default is `true` when [`log_format`](#log_format) is `plain`. + +### genesis_file +Path to the JSON file containing the initial conditions for a CometBFT blockchain and the initial state of the application (more details [here](./genesis.json.md)). +```toml +genesis_file = "config/genesis.json" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/config/genesis.json`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/config/genesis.json`. + +### priv_validator_key_file +Path to the JSON file containing the private key to use as a validator in the consensus protocol (more details [here](./priv_validator_key.json.md)). +```toml +priv_validator_key_file = "config/priv_validator_key.json" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/config/priv_validator_key.json`. In case `$CMTHOME` is unset, it +defaults to `$HOME/.cometbft/config/priv_validator_key.json`. + + +### priv_validator_state_file +Path to the JSON file containing the last sign state of a validator (more details [here](./priv_validator_state.json.md)). +```toml +priv_validator_state_file = "data/priv_validator_state.json" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/data/priv_validator_state.json`. 
In case `$CMTHOME` is unset, it +defaults to `$HOME/.cometbft/data/priv_validator_state.json`. + +### priv_validator_laddr +TCP or UNIX socket listen address for CometBFT that allows external consensus signing processes to connect. +```toml +priv_validator_laddr = "" +``` + +| Value type | string | +|:--------------------|:-----------------------------------------------------------| +| **Possible values** | TCP Stream socket (e.g. `"tcp://127.0.0.1:26665"`) | +| | Unix domain socket (e.g. `"unix:///var/run/privval.sock"`) | + +When consensus signing is outsourced from CometBFT (typically to a Hardware Security Module, like a +[YubiHSM](https://www.yubico.com/product/yubihsm-2) device), this address is opened by CometBFT for incoming connections +from the signing service. + +Make sure the port is available on the host machine and firewalls allow the signing service to connect to it. + +More information on a supported signing service can be found in the [TMKMS](https://github.com/iqlusioninc/tmkms) +documentation. + +### node_key_file +Path to the JSON file containing the private key to use for node authentication in the p2p protocol (more details [here](./node_key.json.md)). +```toml +node_key_file = "config/node_key.json" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/config/node_key.json`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/config/node_key.json`. + +### abci +The mechanism used to connect to the ABCI application. +````toml +abci = "socket" +```` + +| Value type | string | +|:--------------------|:-----------| +| **Possible values** | `"socket"` | +| | `"grpc"` | +| | `""` | + +This mechanism is used when connecting to the ABCI application over the [proxy_app](#proxy_app) socket. + +### filter_peers +When connecting to a new peer, filter the connection through an ABCI query to decide if the connection should be kept. +```toml +filter_peers = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +When this setting is `true`, the ABCI application has to implement a query that decides whether +the connection should be kept or dropped. + +This feature will likely be deprecated. + +## RPC Server +These configuration options change the behaviour of the built-in RPC server. + +The RPC server is exposed without any kind of security control or authentication. Do NOT expose this server +on the public Internet without appropriate precautions. Make sure it is secured, load-balanced, etc. + +### rpc.laddr +TCP or UNIX socket address for the RPC server to listen on. +```toml +laddr = "tcp://127.0.0.1:26657" +``` + +| Value type | string | +|:--------------------|:--------------------------------------------------| +| **Possible values** | TCP Stream socket (e.g. `"tcp://127.0.0.1:26657"`) | +| | Unix domain socket (e.g. `"unix:///var/run/rpc.sock"`) | + +The RPC server endpoints have OpenAPI specification definitions through [Swagger UI](../../rpc). + + +Please refer to the [RPC documentation](https://docs.cometbft.com/v1.0/rpc/) for more information. + +### rpc.cors_allowed_origins +A list of origins a cross-domain request can be executed from.
+```toml +cors_allowed_origins = [] +``` + +| Value type | array of string | | +|:--------------------|:-------------------------------------------|----------------------| +| **Possible values** | `[]` | disable CORS support | +| | `["*"]` | allow any origin | +| | array of strings containing domain origins | | + +Domain origins are fully qualified domain names with the protocol prefixed, for example `"https://cometbft.com"`, or +they can contain exactly one wildcard to extend to multiple subdomains, for example: `"https://*.myapis.com"`. + +Example: + +Allow only some subdomains for CORS requests: +```toml +cors_allowed_origins = ["https://www.cometbft.com", "https://*.apis.cometbft.com"] +``` + +### rpc.cors_allowed_methods +A list of methods the client is allowed to use with cross-domain requests. +```toml +cors_allowed_methods = ["HEAD", "GET", "POST", ] +``` + +| Value type | array of string | +|:----------------------------------------|:----------------| +| **Possible string values in the array** | `"HEAD"` | +| | `"GET"` | +| | `"POST"` | + +You can read more about the methods in the +[Mozilla CORS documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS). + +### rpc.cors_allowed_headers +A list of headers the client is allowed to use with cross-domain requests. +```toml +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] +``` + +| Value type | array of string | +|:----------------------------------------|:---------------------| +| **Possible string values in the array** | `"Accept"` | +| | `"Accept-Language"` | +| | `"Content-Language"` | +| | `"Content-Type"` | +| | `"Range"` | + +The list of possible values is from the [Fetch spec](https://fetch.spec.whatwg.org/#cors-safelisted-request-header), +which defines `Origin` as a forbidden value. Read the +[Mozilla CORS documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) and do your own tests if you want +to use this parameter. + + + +### rpc.unsafe +Activate unsafe RPC endpoints. +```toml +unsafe = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +| Unsafe RPC endpoints | Description | +|:------------------------|---------------------------------------------------------------------------------------| +| `/dial_seeds` | dials the given seeds (comma-separated id@IP:port) | +| `/dial_peers` | dials the given peers (comma-separated id@IP:port), optionally making them persistent | +| `/unsafe_flush_mempool` | removes all transactions from the mempool | + +Keep this `false` on production systems. + +### rpc.max_open_connections +Maximum number of simultaneous open connections. This includes WebSocket connections. +```toml +max_open_connections = 900 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | > 0 | + +If you want to accept a larger number of connections than the default 900, make sure that you increase the maximum +number of open connections in the operating system. Usually, the `ulimit` command can help with that. + +This value can be estimated by the following calculation: +``` +$(ulimit -Sn) - {p2p.max_num_inbound_peers} - {p2p.max_num_outbound_peers} - {number of WAL, DB and other open files} +``` + +Estimating the number of WAL, DB and other files at `50`, and using the default soft limit of Debian Linux (`1024`): +``` +1024 - 40 - 10 - 50 = 924 (~900) +``` + +Note that macOS has a default soft limit of `256`.
Make sure you calculate this value for the operating system CometBFT +runs on. + +### rpc.max_subscription_clients +Maximum number of unique clientIDs that can subscribe to events at the `/subscribe` RPC endpoint. +```toml +max_subscription_clients = 100 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +### rpc.max_subscriptions_per_client +Maximum number of unique queries a given client can subscribe to at the `/subscribe` RPC endpoint. +```toml +max_subscriptions_per_client = 5 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +### rpc.experimental_subscription_buffer_size +> EXPERIMENTAL parameter! + +Experimental parameter to specify the maximum number of events a node will buffer, per subscription, before returning +an error and closing the subscription. +```toml +experimental_subscription_buffer_size = 200 +``` + +| Value type | integer | +|:--------------------|:----------| +| **Possible values** | >= 100 | + +Higher values will accommodate higher event throughput rates (and will use more memory). + +### rpc.experimental_websocket_write_buffer_size +> EXPERIMENTAL parameter! + +Experimental parameter to specify the maximum number of events that can be buffered per WebSocket client. +```toml +experimental_websocket_write_buffer_size = 200 +``` + +| Value type | integer | +|:--------------------|:------------------------------------------------| +| **Possible values** | >= rpc.experimental_subscription_buffer_size | + +If clients cannot read from the WebSocket endpoint fast enough, they will be disconnected, so increasing this parameter +may reduce the chances of them being disconnected (but will cause the node to use more memory). + +If set lower than `rpc.experimental_subscription_buffer_size`, connections could be dropped unnecessarily. This value +should ideally be somewhat higher to accommodate non-subscription-related RPC responses. + +### rpc.experimental_close_on_slow_client +> EXPERIMENTAL parameter! + +Close the WebSocket client in case it cannot read events fast enough. This allows greater predictability in subscription +behaviour. +```toml +experimental_close_on_slow_client = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +The default behaviour for WebSocket clients is to silently drop events if they cannot read them fast enough. This does +not cause an error and creates unpredictability. +Enabling this setting creates a predictable outcome by closing the WebSocket connection in case it cannot read events +fast enough. + +### rpc.timeout_broadcast_tx_commit +Timeout waiting for a transaction to be committed when using the `/broadcast_tx_commit` RPC endpoint. +```toml +timeout_broadcast_tx_commit = "10s" +``` + +| Value type | string (duration) | +|:--------------------|:---------------------------| +| **Possible values** | > `"0s"`; <= `"10s"` | + +Using a value larger than `"10s"` will increase the global HTTP write timeout, which applies to all connections +and endpoints. There is an old developer discussion about this [here](https://github.com/tendermint/tendermint/issues/3435). + +> Note: It is generally recommended *not* to use the `broadcast_tx_commit` method in production, and instead prefer `/broadcast_tx_sync`. + +### rpc.max_request_batch_size +Maximum number of requests that can be sent in a JSON-RPC batch request.
+```toml +max_request_batch_size = 10 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +If the number of requests sent in a JSON-RPC batch exceeds the maximum batch size configured, an error will be returned. + +The default value is `10`, which limits the number of requests to 10 per JSON-RPC batch request. + +If you don't want to enforce a maximum number of requests for a batch request, set this value to `0`. + +Reference: https://www.jsonrpc.org/specification#batch + +### rpc.max_body_bytes +Maximum size of request body, in bytes. +```toml +max_body_bytes = 1000000 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +### rpc.max_header_bytes +Maximum size of request header, in bytes. +```toml +max_header_bytes = 1048576 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +### rpc.tls_cert_file +TLS certificates file path for HTTPS server use. +```toml +tls_cert_file = "" +``` + +| Value type | string | +|:--------------------|:-------------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME/config` | +| | absolute directory path | +| | `""` | + +The default relative path translates to `$CMTHOME/config`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/config`. + +If the certificate is signed by a certificate authority, the certificate file should be the concatenation of the +server certificate, any intermediate certificates, and the Certificate Authority certificate. + +The [rpc.tls_key_file](#rpctls_key_file) property also has to be set with the matching private key. + +If this property is not set, the server will use the HTTP protocol by default. + +### rpc.tls_key_file +TLS private key file path for HTTPS server use. +```toml +tls_key_file = "" +``` + +| Value type | string | +|:--------------------|:-------------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME/config` | +| | absolute directory path | +| | `""` | + +The default relative path translates to `$CMTHOME/config`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/config`. + +The [rpc.tls_cert_file](#rpctls_cert_file) property also has to be set with the matching server certificate. + +If this property is not set, the server will use the HTTP protocol by default. + +### rpc.pprof_laddr +Listen address and port for profiling data, without a protocol prefix. +```toml +pprof_laddr = "" +``` + +| Value type | string | +|:--------------------|:-----------------------------| +| **Possible values** | IP:port (`"127.0.0.1:6060"`) | +| | :port (`":6060"`) | +| | `""` | + +HTTP is always assumed as the protocol. + +See the Golang [profiling](https://golang.org/pkg/net/http/pprof) documentation for more information. + +## gRPC Server +These configuration options change the behaviour of the built-in gRPC server. + +Each gRPC service can be turned on/off, and in some cases configured, individually. +If the gRPC server is not enabled, all individual services' configurations are ignored. + +The gRPC server is exposed without any kind of security control or authentication. Do NOT expose this server +on the public Internet without appropriate precautions. Make sure it is secured, authenticated, load-balanced, etc. + +### grpc.laddr +TCP or UNIX socket address for the gRPC server to listen on.
+```toml +laddr = "" +``` + +| Value type | string | +|:--------------------|:--------------------------------------------------------| +| **Possible values** | TCP Stream socket (e.g. `"tcp://127.0.0.1:26661"`) | +| | Unix domain socket (e.g. `"unix:///var/run/abci.sock"`) | +| | `""` | + +If not specified, the gRPC server will be disabled. + +### grpc.version_service.enabled +The gRPC version service provides version information about the node and the protocols it uses. +```toml +enabled = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +If [`grpc.laddr`](#grpcladdr) is empty, this setting is ignored and the service is not enabled. + +### grpc.block_service.enabled +The gRPC block service returns block information. +```toml +enabled = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +If [`grpc.laddr`](#grpcladdr) is empty, this setting is ignored and the service is not enabled. + +### grpc.block_results_service.enabled +The gRPC block results service returns block results for a given height. If no height is given, it will return the block +results from the latest height. +```toml +enabled = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +If [`grpc.laddr`](#grpcladdr) is empty, this setting is ignored and the service is not enabled. + +### grpc.privileged.laddr +Configuration for privileged gRPC endpoints, which should **never** be exposed to the public internet. +```toml +laddr = "" +``` + +| Value type | string | +|:--------------------|:--------------------------------------------------------| +| **Possible values** | TCP Stream socket (e.g. `"tcp://127.0.0.1:26662"`) | +| | Unix domain socket (e.g. `"unix:///var/run/abci.sock"`) | +| | `""` | + +If not specified, the gRPC privileged endpoints will be disabled. + +### grpc.privileged.pruning_service +Configuration specifically for the gRPC pruning service, which is considered a privileged service. +```toml +enabled = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +This setting only controls whether the pruning service is accessible via the gRPC API, not whether a previously set pruning service +retain height is honored by the node. See the [storage.pruning](#storagepruninginterval) section for control over pruning. + +If [`grpc.laddr`](#grpcladdr) is empty, this setting is ignored and the service is not enabled. + +## Peer-to-peer + +These configuration options change the behaviour of the peer-to-peer protocol. + +### p2p.laddr + +TCP socket address for the P2P service to listen on and accept connections. +```toml +laddr = "tcp://0.0.0.0:26656" +``` + +| Value type | string | +|:--------------------|:--------------------------------------------------| +| **Possible values** | TCP Stream socket (e.g. `"tcp://0.0.0.0:26656"`) | + +### p2p.external_address + +TCP address that peers should use in order to connect to the node. +This is the address that the node advertises to peers. +If not set, the [`p2p.laddr`](#p2pladdr) is advertised. + +Useful when the node is running on a non-routable address or when the +node does not have the capabilities to figure out its public IP address. +For example, this is useful when running from a cloud service (e.g., AWS, Digital Ocean).
+In these scenarios, `p2p.external_address` should be set to the node's +public or external address, while INADDR_ANY (i.e., `0.0.0.0`) should be used as +the listen address ([`p2p.laddr`](#p2pladdr)). + +```toml +external_address = "" +``` + +| Value type | string | +|:--------------------|:----------------------------| +| **Possible values** | IP:port (`"1.2.3.4:26656"`) | +| | `""` | + +The port has to point to the node's P2P port. + +Example with a node on a NATed non-routable network: +- Node has local or private IP address `10.10.10.10` and uses port `10000` for + P2P communication: set this address as the [listen address](#p2pladdr) (`p2p.laddr`). +- The network gateway has the public IP `1.2.3.4` and we want to use publicly + open port `26656` on the IP address. In this case, a redirection has to be + set up from `1.2.3.4:26656` to `10.10.10.10:10000` in the gateway implementing NAT; +- Or the node has an associated public or external IP `1.2.3.4` + that is mapped to its local or private IP. +- Set `p2p.external_address` to `1.2.3.4:26656`. + +### p2p.seeds + +Comma-separated list of seed nodes. + +```toml +seeds = "" +``` + +| Value type | string (comma-separated list) | +|:----------------------------------|:----------------------------------------| +| **Possible values within commas** | nodeID@IP:port (`"abcd@1.2.3.4:26656"`) | +| | `""` | + +The node will try to connect to any of the configured seed nodes when it needs +addresses of potential peers to connect to. +If a node already has enough peer addresses in its address book, it may never +need to dial the configured seed nodes. + +Example: +```toml +seeds = "abcd@1.2.3.4:26656,deadbeef@5.6.7.8:10000" +``` + +### p2p.persistent_peers + +Comma-separated list of nodes to keep persistent connections to. + +```toml +persistent_peers = "" +``` + +| Value type | string (comma-separated list) | +|:----------------------------------|:----------------------------------------| +| **Possible values within commas** | nodeID@IP:port (`"abcd@1.2.3.4:26656"`) | +| | `""` | + +The node will attempt to establish connections to all configured persistent peers. +This in particular means that persistent peers do not count towards +the configured [`p2p.max_num_outbound_peers`](#p2pmax_num_outbound_peers) +(refer to [issue 1304](https://github.com/cometbft/cometbft/issues/1304) for more details). +Moreover, if a connection to a persistent peer is lost, the node will attempt +reconnecting to that peer. + +Attempts to reconnect to a node configured as a persistent peer are performed +first at regular intervals, with up to 20 connection attempts, then at +exponentially increasing intervals, with 10 additional connection attempts. +The first phase uses a fixed interval of `5s` with up to `3s` of random jitter +between attempts; +in the second phase intervals are exponential with a base of `3s`, also with +random jitter of up to `3s`. +As a result, the node will attempt reconnecting to a persistent peer for a +total period of around 8 hours before giving up. + +Once connected to a persistent peer, the node will request addresses of +potential peers. +This means that when persistent peers are configured the node may not need to +rely on potential peers provided by [seed nodes](#p2pseeds). + +Example: +```toml +persistent_peers = "fedcba@11.22.33.44:26656,beefdead@55.66.77.88:20000" +``` + +### p2p.persistent_peers_max_dial_period + +Maximum pause between successive attempts when dialing a persistent peer.
+ +```toml +persistent_peers_max_dial_period = "0s" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0s"` | + +When set to `"0s"`, an exponential backoff is applied when re-dialing the +persistent peer, in the same way the node does with ordinary peers. +If it is set to a non-zero value, the configured value becomes the maximum interval +between attempts to connect to a node configured as a persistent peer. + +### p2p.addr_book_file + +Path to the address book file. + +```toml +addr_book_file = "config/addrbook.json" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/config/addrbook.json`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/config/addrbook.json`. + +The node periodically persists the content of its address book (addresses of +potential peers and information regarding connected peers) to the address book file. +If the node is started with a non-empty address book file, it may not need to +rely on potential peers provided by [seed nodes](#p2pseeds). + +### p2p.addr_book_strict + +Strict address routability rules disallow non-routable IP addresses in the address book. When `false`, private network +IP addresses can be stored in the address book and dialed. + +```toml +addr_book_strict = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +Set it to `false` for testing on a private network. Most production nodes can keep it at `true`. + +### p2p.max_num_inbound_peers + +Maximum number of inbound peers, +that is, peers from which the node accepts connections. + +```toml +max_num_inbound_peers = 40 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +The [`p2p.max_num_inbound_peers`](#p2pmax_num_inbound_peers) and +[`p2p.max_num_outbound_peers`](#p2pmax_num_outbound_peers) values +work together to define how many P2P connections the node will +maintain at maximum capacity. + +Nodes configured as [unconditional peers](#p2punconditional_peer_ids) do not count towards the +configured `p2p.max_num_inbound_peers` limit. + +The connections are bidirectional, so any connection can send or receive messages, blocks, and other data. The separation into +inbound and outbound settings only distinguishes the initial setup of the connection: outbound connections are initiated +by the node, while inbound connections are initiated by a remote party. + +Nodes on non-routable networks have to set their gateway to port-forward the P2P port for inbound connections to reach +the node. Inbound connections can be accepted as long as the node has an address accessible from the Internet (using NAT or other methods). +Refer to the [p2p.external_address](#p2pexternal_address) configuration for details. + +### p2p.max_num_outbound_peers + +Maximum number of outbound peers, +that is, peers that the node dials and establishes connections to.
+ +```toml +max_num_outbound_peers = 10 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +The [`p2p.max_num_inbound_peers`](#p2pmax_num_inbound_peers) and +[`p2p.max_num_outbound_peers`](#p2pmax_num_outbound_peers) values +work together to define how many P2P connections the node will +maintain at maximum capacity. + +The `p2p.max_num_outbound_peers` configuration should be seen as the target +number of outbound connections that a node is expected to establish. +Until the configured maximum number of outbound connections is reached, +the node will attempt to establish connections to potential peers. + +This configuration only has an effect if the [PEX reactor](#p2ppex) is enabled. +Nodes configured as [persistent peers](#p2ppersistent_peers) do not count towards the +configured `p2p.max_num_outbound_peers` limit +(refer to [issue 1304](https://github.com/cometbft/cometbft/issues/1304) for more details). + +The connections are bidirectional, so any connection can send or receive messages, blocks, and other data. The separation into +inbound and outbound settings only distinguishes the initial setup of the connection: outbound connections are initiated +by the node, while inbound connections are initiated by a remote party. + +Nodes on non-routable networks have to set their gateway to port-forward the P2P port for inbound connections to reach +the node. Outbound connections can only be initiated to peers that have addresses accessible from the Internet (using NAT or other methods). +Refer to the [p2p.external_address](#p2pexternal_address) configuration for details. + +### p2p.unconditional_peer_ids + +List of node IDs that are allowed to connect to the node even when connection limits are exceeded. + +```toml +unconditional_peer_ids = "" +``` + +| Value type | string (comma-separated) | +|:--------------------|:---------------------------------| +| **Possible values** | comma-separated list of node IDs | +| | `""` | + +If a peer listed in this property establishes a connection to the node, it will be accepted even if the +configured [`p2p.max_num_inbound_peers`](#p2pmax_num_inbound_peers) limit has been reached. +Peers on this list also do not count towards the +configured [`p2p.max_num_outbound_peers`](#p2pmax_num_outbound_peers) limit. + +Unlike other settings, only the node ID has to be defined here, not the IP:port of the remote node. + +### p2p.flush_throttle_timeout + +Time to wait before flushing messages out on a connection. + +```toml +flush_throttle_timeout = "10ms" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0ms"` | + +The flush operation writes any buffered data to the connection. The flush is throttled, so if multiple triggers come in within the +configured timeout, only one flush is executed. + +Setting the value to `0ms` makes flushing messages out on a connection immediate. +While this might reduce latency, it may degrade throughput as batching +outstanding messages is essentially disabled. + +### p2p.max_packet_msg_payload_size + +Maximum size of a packet payload, in bytes. + +```toml +max_packet_msg_payload_size = 1024 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | > 0 | + +Messages exchanged via P2P connections are split into packets. +Packets contain some metadata and message data (payload). +The value configures the maximum size in bytes of the payload +included in a packet.
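+ +As an illustration of how the two parameters above interact, the sketch below trades some throughput for lower latency by flushing buffered data more aggressively; the values are examples only, not recommended settings: +```toml +# Illustrative values only: a lower flush_throttle_timeout reduces batching +# (and thus latency) at the cost of throughput; the payload size is left at +# its default. +flush_throttle_timeout = "2ms" +max_packet_msg_payload_size = 1024 +```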
+ +### p2p.send_rate + +Rate at which packets can be sent, in bytes/second. + +```toml +send_rate = 5120000 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | > 0 | + +The value represents the amount of packet bytes that can be sent per second +by each P2P connection. + +### p2p.recv_rate + +Rate at which packets can be received, in bytes/second. + +```toml +recv_rate = 5120000 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | > 0 | + +The value represents the amount of packet bytes that can be received per second +by each P2P connection. + +### p2p.pex + +Enable the peer exchange (PEX) reactor. + +```toml +pex = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +The peer exchange reactor is responsible for exchanging addresses of potential +peers among nodes. +If the PEX reactor is disabled, the node can only connect to +addresses configured as [persistent peers](#p2ppersistent_peers). + +In the [Sentry Node Architecture](https://forum.cosmos.network/t/sentry-node-architecture-overview/454) on the Cosmos Hub, +validator nodes should have the PEX reactor disabled, +as their connections are manually configured via [persistent peers](#p2ppersistent_peers). +Public nodes, such as sentry nodes, should have the PEX reactor enabled, +as this allows them to discover and connect to public peers in the network. + +### p2p.seed_mode + +In seed mode, the node crawls the network and looks for peers. + +```toml +seed_mode = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +In seed mode, the node becomes an online address book. Any incoming connections +can receive a sample of the gathered addresses but no other information (for +example blocks or consensus data) is provided. The node simply disconnects +from the peer after sending the addresses. + +Nodes operating in seed mode should be configured as [seeds](#p2pseeds) for other +nodes in the network. + +The [`p2p.pex`](#p2ppex) option has to be set to `true` for the seed mode to work. + +### p2p.private_peer_ids + +Comma-separated list of peer IDs to keep private; they will not be gossiped to other peers. + +```toml +private_peer_ids = "" +``` + +| Value type | string (comma-separated list) | +|:----------------------------------|:----------------------------------| +| **Possible values within commas** | nodeID (`"abcdef0123456789abcd"`) | +| | `""` | + +The addresses with the listed node IDs will not be sent to other peers when the PEX reactor +([`p2p.pex`](#p2ppex)) is enabled. This allows a more granular setting instead of completely disabling the peer exchange +reactor. + +For example, sentry nodes in the +[Sentry Node Architecture](https://forum.cosmos.network/t/sentry-node-architecture-overview/454) on the Cosmos Hub can +use this setting to make sure they do not gossip the node ID of the validator node, while they can still accept node +addresses from the Internet. A configuration sketch for this architecture is shown below, after +[`p2p.allow_duplicate_ip`](#p2pallow_duplicate_ip). + +### p2p.allow_duplicate_ip + +Toggle to disable the guard against multiple peers connecting from the same IP address. + +```toml +allow_duplicate_ip = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +When this setting is set to `true`, multiple connections are allowed from the same IP address (for example, on different +ports).
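+ +The sketch below ties the PEX-related settings above together for the Sentry Node Architecture; all node IDs and addresses are placeholders: +```toml +# Validator node: peer exchange disabled, connects only to its own sentries. +# "sentrynodeid1"/"sentrynodeid2" and the addresses are placeholders. +pex = false +persistent_peers = "sentrynodeid1@10.0.0.1:26656,sentrynodeid2@10.0.0.2:26656" +``` +On each sentry node, discovery stays enabled while the validator's node ID is kept private: +```toml +# Sentry node: discover public peers, but never gossip the validator's node ID. +# "validatornodeid" and the address are placeholders. +pex = true +private_peer_ids = "validatornodeid" +persistent_peers = "validatornodeid@10.0.0.3:26656" +```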
+ +### p2p.handshake_timeout + +Timeout duration for protocol handshake (or secret connection negotiation). + +```toml +handshake_timeout = "20s" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0s"` | + +This high-level timeout value is applied when the TCP connection has been +established with a peer, and the node and peer are negotiating the connection's upgrade into +a secret authenticated connection. + +The value `"0s"` is undefined, and it can lead to unexpected behaviour. + +### p2p.dial_timeout + +Timeout duration for the low-level dialer that connects to the remote address on the TCP network. + +```toml +dial_timeout = "3s" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0s"` | + +This parameter is the timeout value for dialing on TCP networks. If a hostname is used instead of an IP address and the +hostname resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is given an +appropriate fraction of the time to connect. + +Setting the value to `"0s"` disables the timeout. + +## Mempool +The mempool allows gathering and broadcasting uncommitted transactions among nodes. + +The **mempool** is storage for uncommitted transactions; the **mempool cache** is internal storage within the +mempool for seen transactions. The mempool cache provides a list of transactions already received to filter out +incoming duplicate transactions and prevent duplicate full transaction validations. + +### mempool.type +The type of mempool this node will use. +```toml +type = "flood" +``` + +| Value type | string | +|:--------------------|:----------| +| **Possible values** | `"flood"` | +| | `"nop"` | + +`"flood"` is the original mempool implemented for CometBFT. It is a concurrent linked list with a flooding gossip +protocol. + +`"nop"` is a "no operation" or disabled mempool, where the ABCI application is responsible for storing, disseminating and +proposing transactions. Note that it requires empty blocks to be created: +[`consensus.create_empty_blocks = true`](#consensuscreate_empty_blocks) has to be set. + +### mempool.recheck +Validity check of transactions already in the mempool when a block is finalized. +```toml +recheck = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +Committing a block affects the application state, hence the remaining transactions in the mempool after a block commit +might become invalid. Setting `recheck = true` will go through the remaining transactions and remove invalid ones. + +If your application may remove transactions passed by CometBFT to your `PrepareProposal` handler, +you probably want to set this configuration to `true` to avoid possible leaks in your mempool +(transactions staying in the mempool until the node is next restarted). + +### mempool.recheck_timeout +Time to wait for the application to return CheckTx responses after all recheck requests have been +sent. Responses that arrive after the timeout expires are discarded. +```toml +recheck_timeout = "1000ms" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"1000ms"` | + +This setting only applies to non-local ABCI clients and when `recheck` is enabled. + +The ideal value will strongly depend on the application.
It could roughly be estimated as the +average size of the mempool multiplied by the average time it takes the application to validate one +transaction. We assume that the ABCI application runs in the same location as the CometBFT binary +(see [`proxy_app`](#proxy_app)) so that the recheck duration is not affected by network delays when +making requests and receiving responses. + +### mempool.broadcast +Broadcast the mempool content (uncommitted transactions) to other nodes. +```toml +broadcast = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `true` | +| | `false` | + +This ensures that uncommitted transactions have a chance to reach multiple validators and get committed by one of them. + +Setting this to `false` will stop the mempool from relaying transactions to other peers. +Validators behind sentry nodes typically set this to `false`, +as their sentry nodes take care of disseminating transactions to the rest of the network. + +### mempool.wal_dir +Mempool write-ahead log folder path. +```toml +wal_dir = "" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | +| | `""` | + +In case `$CMTHOME` is unset, it defaults to `$HOME/.cometbft`. + +This value is unused by CometBFT. It was not hooked up to the mempool reactor. + +The mempool implementation does not persist any transaction data to disk (unlike evidence). + +### mempool.size +Maximum number of transactions in the mempool. +```toml +size = 5000 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +If the mempool is full, incoming transactions are dropped. + +The value `0` is undefined. + +### mempool.max_tx_bytes +Maximum size in bytes of a single transaction accepted into the mempool. +```toml +max_tx_bytes = 1048576 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +Transactions bigger than the maximum configured size are rejected by the mempool. +This applies both to transactions submitted by clients via RPC endpoints and to +transactions received from peers via the mempool protocol. + +### mempool.max_txs_bytes +The maximum size in bytes of all transactions stored in the mempool. +```toml +max_txs_bytes = 67108864 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +This is the raw, total size in bytes of all transactions in the mempool. For example, given 1MB +transactions and a 5MB maximum mempool byte size, the mempool will +only accept five transactions. + +The maximum mempool byte size should be a multiple of the network's maximum block size +(which is a [consensus parameter](https://docs.cometbft.com/v1.0/spec/abci/abci++_app_requirements#blockparamsmaxbytes)). +The rationale is to consider how many blocks have to be produced in order to +drain all transactions stored in a full mempool. + +When the mempool is full, incoming transactions are dropped. + +The default value is 64 MiB (2^26 bytes). +This is roughly equivalent to 16 blocks of 4 MiB. + +### mempool.cache_size +Mempool internal cache size for already seen transactions. +```toml +cache_size = 10000 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +The mempool cache is an internal store for transactions that the local node has already seen.
Storing these transactions helps filter incoming duplicate +transactions: incoming transactions can be compared to already seen transactions and filtered out without going +through the process of validating them. + +### mempool.keep-invalid-txs-in-cache +Invalid transactions might become valid in the future, hence they are not added to the mempool cache by default. +Turning this setting on will add an incoming transaction to the cache even if it is deemed invalid by the application (via `CheckTx`). +```toml +keep-invalid-txs-in-cache = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +If this setting is set to `true`, the mempool cache will add incoming transactions even if they are invalid. It is useful in cases when +invalid transactions can never become valid again. + +This setting can be used by operators to lower the impact of some spam transactions: when a large number of duplicate +spam transactions are noted on the network, temporarily turning this setting to `true` will filter out the duplicates +quicker than validating each transaction one-by-one. Note, however, that it will also filter out transactions that are supposed to become +valid at a later date. + +### mempool.experimental_max_gossip_connections_to_persistent_peers +> EXPERIMENTAL parameter! + +Limit the number of persistent peer nodes that get mempool transaction broadcasts. +```toml +experimental_max_gossip_connections_to_persistent_peers = 0 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +When set to `0`, the mempool broadcasts to all the nodes listed in the +[`p2p.persistent_peers`](#p2ppersistent_peers) list. If the number is above `0`, the number of nodes that get broadcasts +will be limited to this setting. + +Unconditional peers and peers not listed in the [`p2p.persistent_peers`](#p2ppersistent_peers) list are not affected by +this parameter. + +See +[`mempool.experimental_max_gossip_connections_to_non_persistent_peers`](#mempoolexperimental_max_gossip_connections_to_non_persistent_peers) +to limit mempool broadcasts to nodes that are not in the [`p2p.persistent_peers`](#p2ppersistent_peers) list. + +### mempool.experimental_max_gossip_connections_to_non_persistent_peers +> EXPERIMENTAL parameter! + +Limit the number of peer nodes that get mempool transaction broadcasts. This parameter does not limit nodes that are +in the [`p2p.persistent_peers`](#p2ppersistent_peers) list. +```toml +experimental_max_gossip_connections_to_non_persistent_peers = 0 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +When set to `0`, the mempool broadcasts to all the nodes. If the number is above `0`, the number of nodes that get +broadcasts will be limited to this setting. + +Unconditional peers and peers listed in the [`p2p.persistent_peers`](#p2ppersistent_peers) list are not affected by +this parameter. + +See +[`mempool.experimental_max_gossip_connections_to_persistent_peers`](#mempoolexperimental_max_gossip_connections_to_persistent_peers) +to limit broadcasts to persistent peer nodes. + +For non-persistent peers, if enabled, a value of 10 is recommended based on experimental performance results using the +default P2P configuration. + +## State synchronization +State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine snapshot from peers +instead of fetching and replaying historical blocks.
It requires some peers in the network to take and serve state +machine snapshots. State sync is not attempted if the starting node has any local state (i.e., it is recovering). + +The node will have a truncated block history, starting from the height of the snapshot. + +### statesync.enable +Enable state synchronization. +```toml +enable = false +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +Enable state synchronization on first start. + +### statesync.rpc_servers +Comma-separated list of RPC servers for light client verification of the synced state machine, +and retrieval of state data for node bootstrapping. +```toml +rpc_servers = "" +``` + +| Value type | string (comma-separated list) | +|:----------------------------------|:-----------------------------------| +| **Possible values within commas** | IP:port (`"1.2.3.4:26657"`) | +| | `""` | + +At least two RPC servers have to be defined for state synchronization to work. + +### statesync.trust_height +The height of the trusted header hash. +```toml +trust_height = 0 +``` + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +`0` is only allowed when state synchronization is disabled. + +### statesync.trust_hash +Header hash obtained from a trusted source. +```toml +trust_hash = "" +``` + +| Value type | string | +|:--------------------|:-------------------| +| **Possible values** | hex-encoded hash | +| | `""` | + +`""` is only allowed when state synchronization is disabled. + +This is the header hash value obtained from the trusted source at height +[statesync.trust_height](#statesynctrust_height). + +### statesync.trust_period +The period during which validators can be trusted. +```toml +trust_period = "168h0m0s" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0s"` | + +For Cosmos SDK-based chains, `statesync.trust_period` should usually be about 2/3 of the unbonding period +(about 2 weeks), during which validators can be financially punished (slashed) for misbehavior. + +### statesync.max_discovery_time +Time to spend discovering snapshots before switching to blocksync. +```toml +max_discovery_time = "2m" +``` + +If `max_discovery_time` is zero, the node will keep trying to discover snapshots indefinitely. + +If `max_discovery_time` is greater than zero, the node will broadcast the "snapshot request" message to its peers and then wait for 5 seconds. If no snapshot data has been received after that period, the node will retry: it will broadcast the "snapshot request" message again and wait for another 5 seconds, and so on, until `max_discovery_time` is reached, after which the node will switch to blocksync. + +### statesync.temp_dir +Temporary directory for state sync snapshot chunks. +```toml +temp_dir = "" +``` + +| Value type | string | +|:--------------------|:------------------------| +| **Possible values** | undefined | + +This value is unused by CometBFT. It was not hooked up to the state sync reactor. + +The codebase will always revert to `/tmp/` for state snapshot chunks. Make sure you have enough space on +the drive that holds `/tmp`. + +### statesync.chunk_request_timeout +The timeout duration before re-requesting a chunk, possibly from a different peer.
+```toml +chunk_request_timeout = "10s" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"5s"` | + +If a smaller duration is set when state syncing is enabled, an error message is raised. + +### statesync.chunk_fetchers +The number of concurrent chunk fetchers to run. + +| Value type | integer | +|:--------------------|:--------| +| **Possible values** | >= 0 | + +`0` is only allowed when state synchronization is disabled. + +## Block synchronization +Block synchronization configuration is limited to defining a version of block synchronization to use. + +### blocksync.version +Block Sync version to use. +```toml +version = "v0" +``` + +| Value type | string | +|:--------------------|:--------| +| **Possible values** | `"v0"` | + +All other versions are deprecated. Further versions may be added in future releases. + +## Consensus + +Consensus parameters define how the consensus protocol should behave. + +### consensus.wal_file + +Location of the consensus Write-Ahead Log (WAL) file. + +```toml +wal_file = "data/cs.wal/wal" +``` + +| Value type | string | +|:--------------------|:------------------------------------------------| +| **Possible values** | relative directory path, appended to `$CMTHOME` | +| | absolute directory path | + +The default relative path translates to `$CMTHOME/data/cs.wal/wal`. In case `$CMTHOME` is unset, it defaults to +`$HOME/.cometbft/data/cs.wal/wal`. + +The consensus WAL stores all consensus messages received and broadcast by a +node, as well as some important consensus events (e.g., new height and new round step). +The goal of this log is to enable a node that crashes and later recovers +to re-join consensus with the same state it had before crashing. +Recovering nodes that "forget" the actions taken before crashing are faulty +nodes that are likely to present Byzantine behavior (e.g., double signing). + +## Consensus timeouts + +In this section we describe the consensus timeout parameters. For a more detailed explanation +of these timeout parameters, please refer to the [Consensus timeouts explained](#consensus-timeouts-explained) +section below. + +### consensus.timeout_propose + +How long a node waits for the proposal block before prevoting nil. + +```toml +timeout_propose = "3s" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0s"` | + +The proposal block of a round of consensus is broadcast by the proposer of that round. +The `timeout_propose` should be large enough to encompass the common-case +propagation delay of a `Proposal` and one or more `BlockPart` (depending on the +proposed block size) messages from any validator to the node. + +If the proposed block is not received within `timeout_propose`, validators +issue a prevote for nil, indicating that they have not received, and +therefore are unable to vote for, the block proposed in that round. + +Setting `timeout_propose` to `0s` means that the validator does not wait at all +for the proposal block and always prevotes nil. +This has obvious liveness implications since this validator will never prevote +for proposed blocks. + +### consensus.timeout_propose_delta + +How much `timeout_propose` increases with each round. + +```toml +timeout_propose_delta = "500ms" +``` + +| Value type | string (duration) | +|:--------------------|:------------------| +| **Possible values** | >= `"0ms"` | + +Consensus timeouts are adaptive.
+This means that when a round of consensus fails to commit a block, the next
+round of consensus will adopt increased timeout durations.
+Timeouts increase linearly over rounds, so that the `timeout_propose` adopted
+in round `r` is `timeout_propose + r * timeout_propose_delta`.
+
+### consensus.timeout_vote
+
+How long a node waits, after receiving +2/3 conflicting prevotes/precommits, before precommitting nil or moving to the next round.
+
+```toml
+timeout_vote = "1s"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0s"`         |
+
+#### Prevotes
+
+A validator that receives +2/3 prevotes for a block precommits that block.
+If it receives +2/3 prevotes for nil, it precommits nil.
+If, however, prevotes are received from +2/3 validators but they do not
+match (e.g., they are for different blocks, or for blocks and nil), the
+validator waits for `timeout_vote` before precommitting nil.
+This gives the validator a chance to wait for additional prevotes and to
+possibly observe +2/3 prevotes for a block.
+
+#### Precommits
+
+A node that receives +2/3 precommits for a block commits that block.
+This is a successful consensus round.
+If no block gathers +2/3 precommits, the node cannot commit.
+This is an unsuccessful consensus round, and the node will start an additional
+round of consensus.
+Before starting the next round, the node waits for `timeout_vote` time.
+This gives the node a chance to wait for additional precommits and to possibly
+observe +2/3 precommits for a block, which would allow the node to commit that
+block in the current round.
+
+#### Warning
+
+Setting `timeout_vote` to `0s` means that the validator will not wait for
+additional prevotes/precommits (other than the mandatory +2/3) before
+precommitting nil or moving to the next round. This has important liveness
+implications and should be avoided.
+
+### consensus.timeout_vote_delta
+
+How much `timeout_vote` increases with each round.
+
+```toml
+timeout_vote_delta = "500ms"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0ms"`        |
+
+Consensus timeouts are adaptive.
+This means that when a round of consensus fails to commit a block, the next
+round of consensus will adopt increased timeout durations.
+Timeouts increase linearly over rounds, so that the `timeout_vote` adopted
+in round `r` is `timeout_vote + r * timeout_vote_delta`.
+
+### consensus.timeout_commit
+
+How long a node waits after committing a block before starting the next height.
+
+```toml
+timeout_commit = "1s"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0s"`         |
+
+The `timeout_commit` represents the minimum interval between the commit of a
+block and the start of the next height of consensus.
+It gives the node a chance to gather additional precommits for the committed
+block, beyond the mandatory +2/3 precommits required to commit it.
+The more precommits are gathered for a block, the greater the safety
+guarantees and the easier it is to detect misbehaving validators.
+
+The `timeout_commit` is not a required component of the consensus algorithm,
+meaning that there are no liveness implications if it is set to `0s`.
+But it may have implications in the way the application rewards validators.
+
+Notice also that the minimum interval defined with `timeout_commit` includes
+the time that both CometBFT and the application take to process the committed block.
+
+Setting `timeout_commit` to `0s` means that the node will start the next height
+as soon as it gathers all the mandatory +2/3 precommits for a block.
+
+**Notice** that the `timeout_commit` configuration flag is **deprecated** from v1.0.
+It is now up to the application to return a `next_block_delay` value upon
+[`FinalizeBlock`](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#finalizeblock)
+to define how long CometBFT should wait before starting the next height.
+
+### consensus.double_sign_check_height
+
+How many blocks to look back to check the existence of the node's consensus votes before joining consensus.
+
+```toml
+double_sign_check_height = 0
+```
+
+| Value type          | integer |
+|:--------------------|:--------|
+| **Possible values** | >= 0    |
+
+When non-zero, the validator will panic upon restart if the validator's current
+consensus key was used to sign any precommit message for the last
+`double_sign_check_height` blocks.
+If this happens, the operator should stop the node, wait for some
+blocks, and only then restart it.
+
+### consensus.create_empty_blocks
+
+Propose empty blocks if the validator's mempool does not have any transactions.
+
+```toml
+create_empty_blocks = true
+```
+
+| Value type          | boolean |
+|:--------------------|:--------|
+| **Possible values** | `true`  |
+|                     | `false` |
+
+When set to `true`, empty blocks are produced and proposed to indicate that the
+chain is still operative.
+
+When set to `false`, blocks are not produced or proposed while there are no
+transactions in the validator's mempool. In this setting, blocks are only
+created when transactions are received.
+
+Notice that empty blocks are still proposed whenever the application hash
+(`app_hash`) has been updated.
+
+Note that after block H, CometBFT creates a so-called "proof block" at height
+H+1, but only if the application hash has changed. The reason is to support
+proofs. If a transaction in block H changes the state to X, the new application
+hash will only be included in block H+1. If, after your transaction is
+committed, you want to get a light-client proof for the new state (X), you need
+block H+1 to be committed, because that block carries the new application hash
+for the state X. That is why CometBFT makes a new (empty) block if the
+application hash changes; otherwise, you would not be able to make a proof for
+the new state.
+
+In addition, if you set `create_empty_blocks_interval` to something other than the
+default (`0`), CometBFT will create empty blocks every `create_empty_blocks_interval`
+even in the absence of transactions. For instance, with
+`create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`,
+CometBFT will only create blocks if there are transactions, or after waiting
+30 seconds without receiving any transactions.
+
+Setting it to `false` is most relevant for networks with a low volume of transactions.
+
+### consensus.create_empty_blocks_interval
+
+How long a validator should wait before proposing an empty block.
+
+```toml
+create_empty_blocks_interval = "0s"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0s"`         |
+
+If there are no transactions in the validator's mempool, the validator
+waits for `create_empty_blocks_interval` before producing and proposing an
+empty block (with no transactions).
+
+If [`create_empty_blocks`](#consensuscreate_empty_blocks) is set to `false` and
+`create_empty_blocks_interval` is set to `0s`, the validator will wait
+indefinitely until a transaction is available in its mempool,
+and only then produce and propose a block.
+
+Notice that empty blocks are still proposed without waiting for `create_empty_blocks_interval`
+whenever the application hash
+(`app_hash`) has been updated.
+
+### consensus.peer_gossip_sleep_duration
+
+Consensus reactor internal sleep duration when there is no message to send to a peer.
+
+```toml
+peer_gossip_sleep_duration = "100ms"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0ms"`        |
+
+The consensus reactor gossips consensus messages by sending or forwarding them
+to peers.
+When there are no messages to be sent to a peer, each reactor routine waits for
+`peer_gossip_sleep_duration` before checking if there are new messages to
+be sent to that peer, or if the peer's state has meanwhile been updated.
+
+This generic sleep duration allows other reactor routines to run when a reactor
+routine has no work to do.
+
+### consensus.peer_gossip_intraloop_sleep_duration
+
+Consensus reactor upper bound for a random sleep duration.
+
+```toml
+peer_gossip_intraloop_sleep_duration = "0s"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0s"`         |
+
+The consensus reactor gossips consensus messages by sending or forwarding them
+to peers.
+
+If `peer_gossip_intraloop_sleep_duration` is set to a non-zero value, random
+sleeps are inserted in the reactor routines when the node is waiting
+for `HasProposalBlockPart` messages or `HasVote` messages.
+The goal is to reduce the number of `BlockPart` and `Vote` messages sent.
+The value of this parameter is the upper bound for the random duration that is
+used by the sleep commands inserted in each loop of the reactor routines.
+
+### consensus.peer_query_maj23_sleep_duration
+
+Consensus reactor interval between querying peers for +2/3 vote majorities.
+
+```toml
+peer_query_maj23_sleep_duration = "2s"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0s"`         |
+
+The consensus reactor gossips consensus messages by sending or forwarding them
+to peers.
+
+The `VoteSetMaj23` message is used by the consensus reactor to query peers
+regarding vote messages (prevotes or precommits) they have for a specific
+block.
+These queries are only triggered when +2/3 votes are observed.
+
+The value of `peer_query_maj23_sleep_duration` is the interval between sending
+those queries to a peer.
+
+## Storage
+In production environments, configuring storage parameters accurately is essential, as they can greatly impact the amount
+of disk space utilized.
+
+CometBFT supports storage pruning to delete data indicated as not needed by the application or the data companion.
+Other than the pruning interval and compaction options, the configuration parameters in this section refer to the data
+companion.
The application's pruning configuration is communicated to CometBFT via ABCI.
+
+Note that for some databases (e.g., GoLevelDB), the data often does not get physically removed from storage, because the DB backend
+does not trigger compaction. In these cases, it is necessary to enable forced compaction and set the compaction interval accordingly.
+
+### storage.discard_abci_responses
+Discard ABCI responses from the state store, which can save a considerable amount of disk space.
+```toml
+discard_abci_responses = false
+```
+
+| Value type          | boolean |
+|:--------------------|:--------|
+| **Possible values** | `false` |
+|                     | `true`  |
+
+If set to `false`, ABCI responses are retained; if set to `true`, they are discarded.
+
+ABCI responses are required for the `/block_results` RPC queries.
+
+### storage.experimental_db_key_layout
+
+The representation of keys in the database. The current representation of keys in Comet's stores is considered to be `v1`.
+
+Users can experiment with a different layout by setting this field to `v2`. Note that this is an experimental feature
+and switching back from `v2` to `v1` is not supported by CometBFT.
+
+If the database was initially created with `v1`, it is necessary to migrate the DB before switching to `v2`. The migration
+is not done automatically.
+
+```toml
+experimental_db_key_layout = 'v1'
+```
+
+| Value type          | string |
+|:--------------------|:-------|
+| **Possible values** | `v1`   |
+|                     | `v2`   |
+
+- `v1` - The legacy layout, existing in Comet prior to v1.
+- `v2` - An order-preserving representation that orders entries by height.
+
+If not specified, the default value `v1` will be used.
+
+### storage.compact
+
+If set to `true`, CometBFT will force compaction for databases that support this feature, saving storage space.
+
+Setting this to `true` is most beneficial in combination with pruning, as it physically deletes the entries marked for deletion.
+
+```toml
+compact = false
+```
+
+| Value type          | boolean |
+|:--------------------|:--------|
+| **Possible values** | `false` |
+|                     | `true`  |
+
+`false` is the default value (forced compaction is disabled).
+
+### storage.compaction_interval
+
+To avoid forcing compaction too often, this parameter instructs CometBFT to wait until the given number of blocks has been
+pruned before triggering compaction.
+
+It should be tuned to the workload. If your retain height is 1 block, attempting compaction at every block is too much
+overhead; but the interval should also not be a very large multiple of your retain height, as that might incur
+bigger overheads.
+
+| Value type          | string (# blocks) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0"`          |
+
+```toml
+compaction_interval = '1000'
+```
+
+### storage.pruning.interval
+The time period between automated background pruning operations.
+```toml
+interval = "10s"
+```
+
+| Value type          | string (duration) |
+|:--------------------|:------------------|
+| **Possible values** | >= `"0s"`         |
+
+### storage.pruning.data_companion.enabled
+Tell the automatic pruning function to respect values set by the data companion.
+
+```toml
+enabled = false
+```
+
+| Value type          | boolean |
+|:--------------------|:--------|
+| **Possible values** | `false` |
+|                     | `true`  |
+
+If disabled, only the application retain height will influence block pruning (but not block results pruning).
+
+Enabling this only at a later stage may mean that blocks below the application-set retain height at that
+time will no longer be available to the data companion.
+
+### storage.pruning.data_companion.initial_block_retain_height
+The initial value for the data companion block retain height if the data companion has not yet explicitly set one.
+If the data companion has already set a block retain height, this is ignored.
+```toml
+initial_block_retain_height = 0
+```
+
+| Value type          | integer |
+|:--------------------|:--------|
+| **Possible values** | >= 0    |
+
+### storage.pruning.data_companion.initial_block_results_retain_height
+The initial value for the data companion block results retain height if the data companion has not yet explicitly set
+one. If the data companion has already set a block results retain height, this is ignored.
+```toml
+initial_block_results_retain_height = 0
+```
+
+| Value type          | integer |
+|:--------------------|:--------|
+| **Possible values** | >= 0    |
+
+## Transaction indexer
+Transaction indexer settings.
+
+The application decides which transactions to index.
+In some cases, a node operator can also choose which transactions to index, depending on the configuration set by the application.
+
+### tx_index.indexer
+What indexer to use for transactions.
+```toml
+indexer = "kv"
+```
+
+| Value type          | string   |
+|:--------------------|:---------|
+| **Possible values** | `"kv"`   |
+|                     | `"null"` |
+|                     | `"psql"` |
+
+The `"null"` indexer disables indexing.
+
+`"kv"` is the simplest possible indexer, backed by key-value storage.
+The key-value storage database backend is defined in [`db_backend`](#db_backend).
+
+The `"psql"` indexer is backed by an external PostgreSQL server.
+The server connection string is defined in [`tx_index.psql-conn`](#tx_indexpsql-conn).
+
+The transaction height and transaction hash are always indexed, except with the `"null"` indexer.
+
+### tx_index.psql-conn
+The PostgreSQL connection configuration.
+```toml
+psql-conn = ""
+```
+
+| Value type          | string                                                       |
+|:--------------------|:-------------------------------------------------------------|
+| **Possible values** | `"postgresql://<user>:<password>@<host>:<port>/<db>?<opts>"` |
+|                     | `""`                                                          |
+
+### tx_index.table_*
+Table names used by the PostgreSQL-backed indexer.
+
+This setting is optional and only applies when `indexer` is set to `psql`.
+
+| Field                | Default value  |
+|:---------------------|:---------------|
+| `"table_blocks"`     | `"blocks"`     |
+| `"table_tx_results"` | `"tx_results"` |
+| `"table_events"`     | `"events"`     |
+| `"table_attributes"` | `"attributes"` |
+
+## Prometheus Instrumentation
+An extensive set of Prometheus metrics is built into CometBFT.
+
+### instrumentation.prometheus
+Enable or disable presenting the Prometheus metrics at an endpoint.
+```toml
+prometheus = false
+```
+
+| Value type          | boolean |
+|:--------------------|:--------|
+| **Possible values** | `false` |
+|                     | `true`  |
+
+When enabled, metrics are served under the `/metrics` endpoint on the
+[instrumentation.prometheus_listen_addr](#instrumentationprometheus_listen_addr) address.
+
+### instrumentation.prometheus_listen_addr
+Address to listen for Prometheus collector(s) connections.
+```toml
+prometheus_listen_addr = ":26660"
+```
+
+| Value type          | string                                |
+|:--------------------|:---------------------------------------|
+| **Possible values** | Network address (`"127.0.0.1:26660"`)  |
+
+If the IP address is omitted (as in the default value), the listening socket is bound to INADDR_ANY (`0.0.0.0`).
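+
+For example, here is a minimal Go sketch (an illustrative example, assuming a locally running node with `instrumentation.prometheus = true` and the default listen address) that fetches the metrics:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// Metrics are served under the /metrics endpoint on the address set in
+	// prometheus_listen_addr (":26660" by default, i.e., all interfaces).
+	resp, err := http.Get("http://localhost:26660/metrics")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(body)) // Prometheus text-format metrics
+}
+```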
+
+The metrics endpoint only supports HTTP.
+
+### instrumentation.max_open_connections
+Maximum number of simultaneous connections.
+```toml
+max_open_connections = 3
+```
+
+| Value type          | integer |
+|:--------------------|:--------|
+| **Possible values** | >= 0    |
+
+`0` allows unlimited connections.
+
+### instrumentation.namespace
+Instrumentation namespace for Prometheus metrics.
+```toml
+namespace = "cometbft"
+```
+
+| Value type          | string                    |
+|:--------------------|:--------------------------|
+| **Possible values** | Prometheus namespace name |
+
+## Consensus timeouts explained
+
+There's a variety of information about timeouts in [Running in
+production](../../explanation/core/running-in-production.md#configuration-parameters).
+
+You can also find a more detailed explanation in the paper describing
+the Tendermint consensus algorithm, adopted by CometBFT: [The latest
+gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
+
+```toml
+[consensus]
+...
+
+timeout_propose = "3s"
+timeout_propose_delta = "500ms"
+timeout_vote = "1s"
+timeout_vote_delta = "500ms"
+timeout_commit = "1s"
+```
+
+Note that in a successful round, the only timeout that is always fully waited out, no
+matter what, is `timeout_commit`.
+
+Here's a brief summary of the timeouts:
+
+- `timeout_propose` = how long a validator should wait for a proposal block before prevoting nil
+- `timeout_propose_delta` = how much `timeout_propose` increases with each round
+- `timeout_vote` = how long a validator should wait after receiving +2/3 prevotes or precommits for
+  anything (i.e., not for a single block or nil)
+- `timeout_vote_delta` = how much `timeout_vote` increases with each round
+- `timeout_commit` = how long a validator should wait after committing a block, before starting
+  on the new height (this gives us a chance to receive some more precommits,
+  even though we already have +2/3)
+
+### The adverse effect of using inconsistent `timeout_propose` in a network
+
+Here's an interesting question. What happens if a particular validator sets a
+very small `timeout_propose`, as compared to the rest of the network?
+
+Imagine there are only two validators in your network: Alice and Bob. Bob sets
+`timeout_propose` to 0s. Alice uses the default value of 3s. Let's say they
+both have equal voting power. Given that the proposer selection algorithm is a
+weighted round-robin, you may expect Alice and Bob to take turns proposing
+blocks, with a result like:
+
+```
+#1 block - Alice
+#2 block - Bob
+#3 block - Alice
+#4 block - Bob
+...
+```
+
+What happens in reality is, however, a little bit different:
+
+```
+#1 block - Bob
+#2 block - Bob
+#3 block - Bob
+#4 block - Bob
+```
+
+That's because Bob doesn't wait for a proposal from Alice (he prevotes `nil`
+immediately). This leaves Alice no chance to commit a block. Note that every
+block Bob creates needs a vote from Alice to constitute +2/3. Bob always gets
+one because Alice has `timeout_propose` set to 3s. Alice never gets one because
+Bob has it set to 0s.
+
+Imagine now there are ten geographically distributed validators. One of them
+(Bob) sets `timeout_propose` to 0s. Others have it set to 3s.
Now, Bob won't be
+able to move at his own speed, because he still needs +2/3 votes from the other
+validators, and it takes time to propagate those. That is, the network moves at
+the speed at which +2/3 of the votes (prevotes and precommits) accumulate, not at
+the speed of the fastest proposer.
+
+> Isn't block production determined by voting power?
+
+If it were determined solely by voting power, it wouldn't be possible to ensure
+liveness. Timeouts exist because the network can't rely on a single proposer
+being available and must move on when it is not responding.
+
+> How can we address situations where someone arbitrarily adjusts their block
+> production time to gain an advantage?
+
+The impact shown above is negligible in a sufficiently decentralized network.
+
+### The adverse effect of using inconsistent `timeout_commit` in a network
+
+Let's look at the same scenario as before. There are ten geographically
+distributed validators. One of them (Bob) sets `timeout_commit` to 0s. Others
+have it set to 1s (the default value). Now, Bob will be the fastest producer
+because he doesn't wait for additional precommits after creating a block. If
+waiting for precommits (`timeout_commit`) is not incentivized, Bob will accrue
+more rewards than the other 9 validators.
+
+This is because Bob has the advantage of broadcasting his proposal early (1
+second earlier than the others). But it also makes it possible for Bob to miss
+a proposal from another validator and prevote `nil`, since he starts
+`timeout_propose` earlier. That is, if Bob's `timeout_commit` is too low compared
+to the other validators, he might miss some proposals and get slashed for
+inactivity.
+
+**Notice** that the `timeout_commit` configuration flag is **deprecated** from v1.0.
+It is now up to the application to return a `next_block_delay` value upon
+[`FinalizeBlock`](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#finalizeblock)
+to define how long CometBFT should wait before starting the next height.
diff --git a/docs/references/config/genesis.json.md b/docs/references/config/genesis.json.md
new file mode 100644
index 00000000000..7a64495db73
--- /dev/null
+++ b/docs/references/config/genesis.json.md
@@ -0,0 +1,135 @@
+---
+order: 1
+parent:
+  title: genesis.json
+  description: The network genesis file
+  order: 1
+---
+# genesis.json
+It is **crucial** that all nodes in a network have _exactly_ the same contents in their `genesis.json` file.
+
+On first start, the network parameters are read from the `genesis.json` file.
+On subsequent starts (node recovery), the `genesis.json` file is ignored.
+
+### Example
+```json
+{
+  "genesis_time": "2024-03-01T20:22:57.532998Z",
+  "chain_id": "test-chain-HfdKnD",
+  "initial_height": "0",
+  "consensus_params": {
+    "block": {
+      "max_bytes": "4194304",
+      "max_gas": "10000000"
+    },
+    "evidence": {
+      "max_age_num_blocks": "100000",
+      "max_age_duration": "172800000000000",
+      "max_bytes": "1048576"
+    },
+    "validator": {
+      "pub_key_types": [
+        "ed25519"
+      ]
+    },
+    "version": {
+      "app": "0"
+    },
+    "feature": {
+      "vote_extensions_enable_height": "1",
+      "pbts_enable_height": "1"
+    }
+  },
+  "validators": [
+    {
+      "address": "E74FBE24164CFC4F88E311C3AC92E63D0DC310D8",
+      "pub_key": {
+        "type": "tendermint/PubKeyEd25519",
+        "value": "UjxDQgVTlHJOZ7axpMl/iczMIJXiQpFxCFjwKGvzYqE="
+      },
+      "power": "10",
+      "name": ""
+    }
+  ],
+  "app_hash": ""
+}
+```
+
+For a production example, you can see the history of genesis files for the Cosmos Hub network
+[here](https://github.com/cosmos/mainnet/tree/master/genesis).
+
+## genesis_time
+Timestamp of the genesis file creation.
+
+| Value type          | string                     |
+|:--------------------|:---------------------------|
+| **Possible values** | RFC3339-formatted datetime |
+
+RFC3339 allows multiple representations; the one used here has 6 digits of sub-second precision.
+
+## chain_id
+The chain ID of the blockchain network.
+
+| Value type          | string                            |
+|:--------------------|:----------------------------------|
+| **Possible values** | usually in `"name-number"` format |
+
+Cannot be empty.
+
+Can be at most 50 UTF-8-encoded characters.
+
+The `number` part is typically a revision number of the blockchain, starting at `1` and incrementing each time the network
+undergoes a hard fork.
+
+## initial_height
+Initial height at genesis.
+
+| Value type          | string   |
+|:--------------------|:---------|
+| **Possible values** | >= `"0"` |
+
+When a hard fork happens, a new chain can start from a higher initial height by setting this parameter.
+
+> Notes:
+>
+> - If height `"0"` is specified in `initial_height`, CometBFT will change the initial height parameter to `"1"`
+>   during genesis file validation.
+> - A height in CometBFT is an `int64` integer, therefore its maximum value is `9223372036854775807`.
+
+## consensus_params
+
+The initial values for the consensus parameters.
+Consensus parameters are global parameters that apply to all nodes in the network.
+
+Please refer to the
+[specification](https://docs.cometbft.com/v1.0/spec/abci/abci++_app_requirements#consensus-parameters)
+for details on the existing consensus parameters and their default and valid values.
+
+## validators
+List of initial validators for consensus.
+
+| Value type                              | array of objects |                                                                               |
+|:-----------------------------------------|:-----------------|:------------------------------------------------------------------------------|
+| **Mandatory keys of each array object**  | address          | See [address](priv_validator_key.json.md#address) in priv_validator_key.json   |
+|                                           | pub_key          | See [pub_key.type](priv_validator_key.json.md#pub_keytype) and [pub_key.value](priv_validator_key.json.md#pub_keyvalue) in priv_validator_key.json |
+|                                           | power            | > `"0"`                                                                         |
+|                                           | name             | string or `""`                                                                  |
+
+## app_hash
+The initial AppHash, representing the application state embedded in the genesis file.
+
+| Value type          | string             |
+|:--------------------|:-------------------|
+| **Possible values** | hex-encoded number |
+|                     | ""                 |
+
+## app_state
+A raw JSON value that encodes the application state.
+
+| Value type          | string                 |
+|:--------------------|:-----------------------|
+| **Possible values** | raw bytes JSON-encoded |
+|                     | ""                     |
diff --git a/docs/references/config/node_key.json.md b/docs/references/config/node_key.json.md
new file mode 100644
index 00000000000..6091a53864a
--- /dev/null
+++ b/docs/references/config/node_key.json.md
@@ -0,0 +1,93 @@
+---
+order: 1
+parent:
+  title: node_key.json
+  description: Description and usage of the node ID
+  order: 2
+---
+
+# The `node_key.json` file
+The node ID, the host address and the P2P port together identify a node in a CometBFT network: `nodeID@host:port`.
+
+The easiest way to get the `nodeID` is to run the `cometbft show-node-id` command.
+
+The `node_key.json` file resides at `$CMTHOME/config/node_key.json`. This can be overridden with the
+[node_key_file](config.toml.md#node_key_file) parameter in the [`config.toml`](config.toml.md) file.
+
+The file contains a private key (in [priv_key.value](#priv_keyvalue)) for an asymmetric algorithm
+(specified in [priv_key.type](#priv_keytype)).
+
+The node ID is calculated by hashing the public key with the SHA256 algorithm and taking the first 20 bytes of the
+result.
+
+### priv_key.type
+The type of the key defined under [`priv_key.value`](#priv_keyvalue).
+
+Default example in context:
+```json
+{
+  "priv_key": {
+    "type": "tendermint/PrivKeyEd25519",
+    "value": "jxG2ywUkVPiF4XDW1Dwa5ZfcrC0rEa4iM1y4O5qCMpYxdiypykyf9yp7C81cJTZHKMOvrnGcZiqxlMfyQsaUUA=="
+  }
+}
+```
+
+| Value type          | string (crypto package asymmetric encryption algorithms) |
+|:--------------------|:----------------------------------------------------------|
+| **Possible values** | `"tendermint/PrivKeyEd25519"`                              |
+|                     | `"tendermint/PrivKeySecp256k1"`                            |
+
+The string values are derived from the asymmetric cryptographic implementations defined in the `crypto` package.
+
+CometBFT will always generate an Ed25519 key-pair for the node ID when using the `cometbft init` or the `cometbft gen-node-key`
+commands. Other types of encryption keys have to be created manually. (See examples under
+[priv_key.value](#priv_keyvalue).)
+
+### priv_key.value
+Base64-encoded bytes, the private key of an asymmetric encryption algorithm.
+The type of encryption is defined in [priv_key.type](#priv_keytype).
+
+Default example in context:
+```json
+{
+  "priv_key": {
+    "type": "tendermint/PrivKeyEd25519",
+    "value": "jxG2ywUkVPiF4XDW1Dwa5ZfcrC0rEa4iM1y4O5qCMpYxdiypykyf9yp7C81cJTZHKMOvrnGcZiqxlMfyQsaUUA=="
+  }
+}
+```
+
+| Value type          | string (base64-encoded bytes)                        |
+|:--------------------|:-----------------------------------------------------|
+| **Possible values** | base64-encoded Ed25519 private key **+ public key**  |
+|                     | base64-encoded Secp256k1 private key                 |
+
+CometBFT will always generate an Ed25519 key-pair for the node ID when using the `cometbft init` or the `cometbft gen-node-key`
+commands. Other types of encryption keys have to be created manually. (See examples below.)
+
+The Ed25519 encryption implementation requires the public key to be concatenated to the private key in the value. The implementation ignores the
+private key and uses the stored public key to generate the node ID. In the example below, we zeroed out the private key,
+but the resultant concatenated bytes still produce a valid node ID. Other algorithms generate the public key from the
+private key.
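+
+To illustrate the derivation described above, here is a minimal Go sketch (an illustrative example, not CometBFT's actual implementation) that computes a node ID from a base64-encoded Ed25519 `priv_key.value`, using only its trailing 32 bytes (the public key):
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+)
+
+func main() {
+	// Example priv_key.value from this page; the last 32 of its 64 bytes
+	// are the Ed25519 public key.
+	keyValue := "jxG2ywUkVPiF4XDW1Dwa5ZfcrC0rEa4iM1y4O5qCMpYxdiypykyf9yp7C81cJTZHKMOvrnGcZiqxlMfyQsaUUA=="
+
+	raw, err := base64.StdEncoding.DecodeString(keyValue)
+	if err != nil {
+		panic(err)
+	}
+	pubKey := raw[len(raw)-32:]
+
+	// Node ID: first 20 bytes of SHA256(public key), hex-encoded.
+	sum := sha256.Sum256(pubKey)
+	fmt.Println(hex.EncodeToString(sum[:20]))
+}
+```
+
+Zeroing out the first 32 bytes of the value (the private key part) would not change the output, which is consistent with the zeroed-out example below.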
+
+Examples:
+
+Ed25519:
+```json
+{
+  "priv_key": {
+    "type": "tendermint/PrivKeyEd25519",
+    "value": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxdiypykyf9yp7C81cJTZHKMOvrnGcZiqxlMfyQsaUUA=="
+  }
+}
+```
+Secp256k1:
+```json
+{
+  "priv_key": {
+    "type": "tendermint/PrivKeySecp256k1",
+    "value": "2swJ5TwUhhqjJW+CvVbbSnTGxqpYmb2yvib+MHyDJIU="
+  }
+}
+```
diff --git a/docs/references/config/priv_validator_key.json.md b/docs/references/config/priv_validator_key.json.md
new file mode 100644
index 00000000000..3772e8a80cd
--- /dev/null
+++ b/docs/references/config/priv_validator_key.json.md
@@ -0,0 +1,115 @@
+---
+order: 1
+parent:
+  title: priv_validator_key.json
+  description: Private/public key-pair for signing consensus
+  order: 4
+---
+# priv_validator_key.json
+CometBFT supports different key signing methods. The default method is storing the consensus (or signing) key
+unencrypted on the file system in the `priv_validator_key.json` file.
+
+The file is located at `$CMTHOME/config/priv_validator_key.json`. If `$CMTHOME` is unset, it defaults to
+`$HOME/.cometbft`.
+
+The file contains a [private key](#priv_keyvalue) and its corresponding [public key](#pub_keyvalue).
+
+A [wallet address](#address) is derived from the public key.
+
+### Examples
+Ed25519:
+```json
+{
+  "address": "E74FBE24164CFC4F88E311C3AC92E63D0DC310D8",
+  "pub_key": {
+    "type": "tendermint/PubKeyEd25519",
+    "value": "UjxDQgVTlHJOZ7axpMl/iczMIJXiQpFxCFjwKGvzYqE="
+  },
+  "priv_key": {
+    "type": "tendermint/PrivKeyEd25519",
+    "value": "9giFjwnmAKCAI95l4Q32kXsau+itGrbsvz84CTLxGnJSPENCBVOUck5ntrGkyX+JzMwgleJCkXEIWPAoa/NioQ=="
+  }
+}
+```
+
+Secp256k1:
+```json
+{
+  "address": "E5B4F106D46A46820308C49B5F92DC22D9F9ACFA",
+  "pub_key": {
+    "type": "tendermint/PubKeySecp256k1",
+    "value": "AhRzbjoZaiyrbCE/yJ6gwIBXjwzl8+H7W8KMAphJVUzt"
+  },
+  "priv_key": {
+    "type": "tendermint/PrivKeySecp256k1",
+    "value": "Lfa2uW//4KGvzLXhtoHGfI5Yd2DA2gC7pOfHSkFheGg="
+  }
+}
+```
+
+Do NOT use these examples in production systems unless you are planning to give away your tokens.
+
+You can generate random keys with the `cometbft gen-validator` command.
+
+## address
+The wallet address derived from the consensus public key.
+
+The wallet address is calculated by hashing the public key with the SHA256 algorithm and taking the first 20 bytes of
+the result.
+
+## pub_key.type
+The type of the key defined under [`pub_key.value`](#pub_keyvalue).
+
+| Value type          | string (crypto package asymmetric encryption algorithms) |
+|:--------------------|:----------------------------------------------------------|
+| **Possible values** | `"tendermint/PubKeyEd25519"`                               |
+|                     | `"tendermint/PubKeySecp256k1"`                             |
+|                     | `"tendermint/PubKeyBls12_381"`                             |
+
+The string values are derived from the asymmetric cryptographic implementations defined in the `crypto` package.
+
+CometBFT will generate an Ed25519 key-pair for the consensus key by default when using the `cometbft init` or the
+`cometbft gen-validator` commands. Use the `--key-type` (`-k`) flag to create a consensus key of a different type.
+
+## pub_key.value
+Base64-encoded bytes, the public key of an asymmetric encryption algorithm.
+The type of encryption is defined in [pub_key.type](#pub_keytype).
+
+| Value type          | string (base64-encoded bytes)       |
+|:--------------------|:------------------------------------|
+| **Possible values** | base64-encoded Ed25519 public key   |
+|                     | base64-encoded Secp256k1 public key |
+|                     | base64-encoded BLS12-381 public key |
+
+CometBFT will generate an Ed25519 key-pair for the consensus key by default when using the `cometbft init` or the
+`cometbft gen-validator` commands. Use the `--key-type` (`-k`) flag to create a consensus key of a different type.
+
+## priv_key.type
+The type of the key defined under [`priv_key.value`](#priv_keyvalue).
+
+| Value type          | string (crypto package asymmetric encryption algorithms) |
+|:--------------------|:----------------------------------------------------------|
+| **Possible values** | `"tendermint/PrivKeyEd25519"`                              |
+|                     | `"tendermint/PrivKeySecp256k1"`                            |
+|                     | `"tendermint/PrivKeyBls12_381"`                            |
+
+The string values are derived from the asymmetric cryptographic implementations defined in the `crypto` package.
+
+CometBFT will generate an Ed25519 key-pair for the consensus key by default when using the `cometbft init` or the
+`cometbft gen-validator` commands. Use the `--key-type` (`-k`) flag to create a consensus key of a different type.
+
+## priv_key.value
+Base64-encoded bytes, the private key of an asymmetric encryption algorithm.
+The type of encryption is defined in [priv_key.type](#priv_keytype).
+
+| Value type          | string (base64-encoded bytes)                        |
+|:--------------------|:-----------------------------------------------------|
+| **Possible values** | base64-encoded Ed25519 private key **+ public key**  |
+|                     | base64-encoded Secp256k1 private key                 |
+|                     | base64-encoded BLS12-381 private key                 |
+
+CometBFT will generate an Ed25519 key-pair for the consensus key by default when using the `cometbft init` or the
+`cometbft gen-validator` commands. Use the `--key-type` (`-k`) flag to create a consensus key of a different type.
+
+The Ed25519 encryption implementation requires the public key to be concatenated to the private key in the value.
diff --git a/docs/references/config/priv_validator_state.json.md b/docs/references/config/priv_validator_state.json.md
new file mode 100644
index 00000000000..12c991c33b2
--- /dev/null
+++ b/docs/references/config/priv_validator_state.json.md
@@ -0,0 +1,72 @@
+---
+order: 1
+parent:
+  title: priv_validator_state.json
+  description: Details of last signed block
+  order: 5
+---
+# priv_validator_state.json
+When CometBFT is run as a validator and a local private validator (`PrivVal`) is used, it uses this file to keep data about the last signed consensus messages.
+
+This file is only updated when a local private validator is used (i.e., when [priv_validator_laddr](config.toml.md#priv_validator_laddr) is not set).
+
+### Examples
+```json
+{
+  "height": "0",
+  "round": 0,
+  "step": 0
+}
+```
+
+```json
+{
+  "height": "36",
+  "round": 0,
+  "step": 3,
+  "signature": "N813twXq5yC84wKGrD85X79iXPwtVytGdD3j8btwZ5ZyAAHSkNt6NBWvrTJUcMLqefPfG3SBdPHdfOedieeYCg==",
+  "signbytes": "76080211240000000000000022480A20D1823B950D1A0FD7335B4E63D2B65CF9D0CEAC13DF4E9E2DFB4765D2C69C74D0122408011220DB69B3B750BBCEAB4BC86BB1847D3E0DDB342EFAFE5731605C61A828265E09802A0C08CDF288AF0610A88CA8FE023211746573742D636861696E2D4866644B6E44"
+}
+```
+## height
+Set to the last height that was signed.
+
+| Value type          | string   |
+|:--------------------|:---------|
+| **Possible values** | >= `"0"` |
+
+Height, a number, is represented as a string so arbitrarily high numbers can be used without the limitation of the integer
+maximum.
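+
+Since height is a string while round and step are plain integers, a parser must treat the fields differently. Here is a minimal Go sketch (with hypothetical type names, not CometBFT's own) that reads these fields:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// pvState mirrors the fields shown in the examples above.
+type pvState struct {
+	Height string `json:"height"` // string: allows arbitrarily high values
+	Round  int    `json:"round"`
+	Step   int    `json:"step"`
+}
+
+func main() {
+	raw := `{"height": "36", "round": 0, "step": 3}`
+	var s pvState
+	if err := json.Unmarshal([]byte(raw), &s); err != nil {
+		panic(err)
+	}
+	fmt.Printf("height=%s round=%d step=%d\n", s.Height, s.Round, s.Step)
+}
+```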
+
+## round
+Set to the last round that was signed.
+
+| Value type          | integer |
+|:--------------------|:--------|
+| **Possible values** | >= 0    |
+
+## step
+Set to the last round step that was signed.
+
+| Value type          | integer |
+|:--------------------|:--------|
+| **Possible values** | >= 0    |
+
+## signature
+The last signature produced, at the [height/round/step](#height) above.
+
+| Value type          | string               |
+|:--------------------|:---------------------|
+| **Possible values** | base64-encoded bytes |
+|                     | `""`                 |
+
+## signbytes
+The proto-encoding of the latest consensus message signed. It is used to compare incoming signing requests and, if possible, reuse the
+previous signature provided in [signature](#signature).
+
+| Value type          | string            |
+|:--------------------|:------------------|
+| **Possible values** | hex-encoded bytes |
+|                     | `""`              |
+
diff --git a/docs/qa/CometBFT-QA-34.md b/docs/references/qa/CometBFT-QA-34.md
similarity index 79%
rename from docs/qa/CometBFT-QA-34.md
rename to docs/references/qa/CometBFT-QA-34.md
index 22db683bfe7..be462165aa8 100644
--- a/docs/qa/CometBFT-QA-34.md
+++ b/docs/references/qa/CometBFT-QA-34.md
@@ -8,6 +8,55 @@ parent:

# CometBFT QA Results v0.34.x

+## Table of Contents
+- [v0.34.x - From Tendermint Core to CometBFT](#v034x---from-tendermint-core-to-cometbft)
+- [Configuration and Results](#configuration-and-results)
+  - [Saturation Point](#saturation-point)
+  - [Experiments](#experiments)
+- [Examining latencies](#examining-latencies)
+  - [CometBFT Homogeneous network](#cometbft-homogeneous-network)
+  - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft)
+  - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft)
+  - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft)
+- [Prometheus Metrics](#prometheus-metrics)
+  - [Mempool Size](#mempool-size)
+    - [Baseline](#baseline)
+    - [CometBFT Homogeneous network](#cometbft-homogeneous-network-1)
+    - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft-1)
+    - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft-1)
+    - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft-1)
+  - [Consensus Rounds per Height](#consensus-rounds-per-height)
+    - [Baseline](#baseline-1)
+    - [CometBFT Homogeneous network](#cometbft-homogeneous-network-2)
+    - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft-2)
+    - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft-2)
+    - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft-2)
+  - [Peers](#peers)
+    - [Baseline](#baseline-2)
+    - [CometBFT Homogeneous network](#cometbft-homogeneous-network-3)
+    - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft-3)
+    - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft-3)
+    - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft-3)
+  - [Blocks Produced per Minute, Transactions Processed per Minute](#blocks-produced-per-minute-transactions-processed-per-minute)
+    - [Baseline](#baseline-3)
+    - [CometBFT Homogeneous network](#cometbft-homogeneous-network-4)
+    - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft-4)
+    - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft-4)
+    - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft-4)
+  - [Memory Resident Set Size](#memory-resident-set-size)
+    - [Baseline](#baseline-4)
+    - [CometBFT Homogeneous network](#cometbft-homogeneous-network-5)
+    - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft-5)
+    - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft-5)
+    - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft-5)
+  - [CPU utilization](#cpu-utilization)
+    - [Baseline](#baseline-5)
+    - [CometBFT Homogeneous network](#cometbft-homogeneous-network-6)
+    - [1/2 Tendermint Core - 1/2 CometBFT](#12-tendermint-core---12-cometbft-6)
+    - [1/3 Tendermint Core - 2/3 CometBFT](#13-tendermint-core---23-cometbft-6)
+    - [2/3 Tendermint Core - 1/3 CometBFT](#23-tendermint-core---13-cometbft-6)
+- [Test Results](#test-results)
+
## v0.34.x - From Tendermint Core to CometBFT

This section reports on the QA process we followed before releasing the first `v0.34.x` version

@@ -114,7 +163,7 @@ This section reports on the key Prometheus metrics extracted from the following
* Mixed network, 1/3 Tendermint Core `v0.34.26` and 2/3 running CometBFT: experiment with UUID starting with `fc5e`.
* Mixed network, 2/3 Tendermint Core `v0.34.26` and 1/3 running CometBFT: experiment with UUID starting with `4759`.

-We make explicit comparisons between the baseline and the homogenous setups, but refrain from
+We make explicit comparisons between the baseline and the homogeneous setups, but refrain from
commenting on the mixed network experiment unless they show some exceptional results.

@@ -197,7 +246,7 @@ The thick red dashed line represents the moving average over a sliding window of

#### Baseline

-The following graph shows the that the number of peers was stable throughout the experiment.
+The following graph shows that the number of peers was stable throughout the experiment.
Seed nodes typically have a higher number of peers.
The fact that non-seed nodes reach more than 50 peers is due to [#9548](https://github.com/tendermint/tendermint/issues/9548).

@@ -236,7 +285,7 @@ The thick red dashed line show the rates' moving averages.

#### Baseline

-The average number of blocks/minute oscilate between 10 and 40.
+The average number of blocks/minute oscillates between 10 and 40.

![heights](img34/baseline/block_rate_regular.png)

@@ -327,7 +376,7 @@ command, and their average value.

#### CometBFT Homogeneous network

-The load in the homogenous network is, similarly to the baseline case, below 5 and, therefore, normal.
+The load in the homogeneous network is, similarly to the baseline case, below 5 and, therefore, normal.

![load1-homogeneous](img34/homogeneous/cpu.png)

@@ -358,7 +407,7 @@ As expected, the average plot also looks similar.

The comparison of the baseline results and the homogeneous case show that both scenarios had similar numbers and are therefore equivalent.
The mixed nodes cases show that networks operate normally with a mix of compatible Tendermint Core and CometBFT versions.

-Although not the main goal, a comparison of metric numbers with the homogenous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces not performance degradation.
+Although not the main goal, a comparison of metric numbers with the homogeneous case and the baseline scenarios shows similar results, and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces no performance degradation.

A conclusion of these tests is shown in the following table, along with the commit versions used in the experiments.
diff --git a/docs/qa/CometBFT-QA-37.md b/docs/references/qa/CometBFT-QA-37.md similarity index 91% rename from docs/qa/CometBFT-QA-37.md rename to docs/references/qa/CometBFT-QA-37.md index 1717ecf3ecd..4235b370b5c 100644 --- a/docs/qa/CometBFT-QA-37.md +++ b/docs/references/qa/CometBFT-QA-37.md @@ -12,6 +12,19 @@ This iteration of the QA was run on CometBFT `v0.37.0-alpha3`, the first `v0.37. The changes with respect to the baseline, `TM v0.37.x` as of Oct 12, 2022 (Commit: 1cf9d8e276afe8595cba960b51cd056514965fd1), include the rebranding of our fork of Tendermint Core to CometBFT and several improvements, described in the CometBFT [CHANGELOG](https://github.com/cometbft/cometbft/blob/v0.37.0-alpha.3/CHANGELOG.md). +## Table of Contents +- [Testbed](#testbed) + - [Saturation point](#saturation-point) +- [Examining latencies](#examining-latencies) +- [Prometheus Metrics on the Chosen Experiment](#prometheus-metrics-on-the-chosen-experiment) + - [Mempool Size](#mempool-size) + - [Peers](#peers) + - [Consensus Rounds per Height](#consensus-rounds-per-height) + - [Blocks Produced per Minute, Transactions Processed per Minute](#blocks-produced-per-minute-transactions-processed-per-minute) + - [Memory Resident Set Size](#memory-resident-set-size) + - [CPU utilization](#cpu-utilization) +- [Test Results](#test-results) + ## Testbed As in other iterations of our QA process, we have used a 200-node network as testbed, plus nodes to introduce load and collect metrics. diff --git a/docs/qa/CometBFT-QA-38.md b/docs/references/qa/CometBFT-QA-38.md similarity index 92% rename from docs/qa/CometBFT-QA-38.md rename to docs/references/qa/CometBFT-QA-38.md index 591cce884e6..17c6f78416f 100644 --- a/docs/qa/CometBFT-QA-38.md +++ b/docs/references/qa/CometBFT-QA-38.md @@ -17,6 +17,42 @@ range of ABCI++ functionality (ABCI 2.0), and other several improvements described in the [CHANGELOG](https://github.com/cometbft/cometbft/blob/v0.38.0-alpha.2/CHANGELOG.md). 
+## Table of Contents +- [CometBFT QA Results v0.38.x](#cometbft-qa-results-v038x) + - [Table of Contents](#table-of-contents) + - [Issues discovered](#issues-discovered) + - [200 Node Testnet](#200-node-testnet) + - [Saturation point](#saturation-point) + - [Latencies](#latencies) + - [Prometheus Metrics on the Chosen Experiment](#prometheus-metrics-on-the-chosen-experiment) + - [Mempool Size](#mempool-size) + - [Peers](#peers) + - [Consensus Rounds per Height](#consensus-rounds-per-height) + - [Blocks Produced per Minute, Transactions Processed per Minute](#blocks-produced-per-minute-transactions-processed-per-minute) + - [Memory Resident Set Size](#memory-resident-set-size) + - [CPU utilization](#cpu-utilization) + - [Comparison to baseline](#comparison-to-baseline) + - [Impact of vote extension signature verification](#impact-of-vote-extension-signature-verification) + - [Test Results](#test-results) + - [Rotating Node Testnet](#rotating-node-testnet) + - [Latencies](#latencies-1) + - [Prometheus Metrics](#prometheus-metrics) + - [Blocks and Transactions per minute](#blocks-and-transactions-per-minute) + - [Peers](#peers-1) + - [Memory Resident Set Size](#memory-resident-set-size-1) + - [CPU utilization](#cpu-utilization-1) + - [Test Result](#test-result) + - [Vote Extensions Testbed](#vote-extensions-testbed) + - [Latency](#latency) + - [Blocks and Transactions per minute](#blocks-and-transactions-per-minute-1) + - [Overview](#overview) + - [First run](#first-run) + - [Number of rounds](#number-of-rounds) + - [CPU](#cpu) + - [Resident Memory](#resident-memory) + - [Mempool size](#mempool-size-1) + - [Results](#results) + ## Issues discovered * (critical, fixed) [\#539] and [\#546] - This bug causes the proposer to crash in @@ -37,7 +73,7 @@ load on which the system begins to show a degraded performance. Then we run the experiments with the system subjected to a load slightly under the saturation point. The method to identify the saturation point is explained [here](CometBFT-QA-34.md#saturation-point) and its application to the baseline -is described [here](TMCore-QA-37.md#finding-the-saturation-point). +is described [here](TMCore-QA-37.md#finding-the-saturation-point). The following table summarizes the results for the different experiments (extracted from @@ -56,7 +92,7 @@ second. We can observe in the table that the system is saturated beyond the diagonal defined by the entries `c=1,r=400` and `c=2,r=200`. Entries in the diagonal have the same amount of transaction load, so we can consider them equivalent. For the -chosen diagonal, the expected number of processed transactions is `1 * 400 tx/s * 89 s = 35600`. +chosen diagonal, the expected number of processed transactions is `1 * 400 tx/s * 89 s = 35600`. (Note that we use 89 out of 90 seconds of the experiment because the last transaction batch coincides with the end of the experiment and is thus not sent.) The experiments in the diagonal below expect double that number, that is, `1 * 800 tx/s * 89 s = 71200`, but the @@ -91,7 +127,7 @@ configuration `c=1,r=400`. ![latency-1-400](img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png). For reference, the following figure shows the latencies of one of the -experiments for `c=2,r=200` in the baseline. +experiments for `c=2,r=200` in the baseline. ![latency-2-200-37](img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png) @@ -271,7 +307,7 @@ The plot of all latencies can be seen here. Which is similar to the baseline. 
-![rotating-all-latencies](img37/200nodes_tm037/v037_rotating_latencies.png)
+![rotating-all-latencies-bl](img37/200nodes_tm037/v037_rotating_latencies.png)

The average increase of about 1 second with respect to the baseline is due to the higher transaction load produced (remember the baseline was affected by [\#9539], whereby most transactions

@@ -301,7 +337,7 @@ blocksyncing as that metric was implemented afterwards.

![rotating-heights-ephe-bl](img37/rotating/rotating_eph_heights.png)

-We seen that heights follow a similar pattern in both plots: they grow in length as the experiment advances.
+We see that heights follow a similar pattern in both plots: they grow in length as the experiment advances.

The following plot shows the transactions processed per minute.

@@ -327,7 +363,7 @@ This is the baseline plot, for comparison.

The plotted values and their evolution are comparable in both plots.

-For further details on these plots, see the [this section](./TMCore-QA-34.md#peers-1).
+For further details on these plots, see [this section](TMCore-QA-34.md#peers-1).

#### Memory Resident Set Size

diff --git a/docs/references/qa/CometBFT-QA-v1.md b/docs/references/qa/CometBFT-QA-v1.md
new file mode 100644
index 00000000000..9284c31dc92
--- /dev/null
+++ b/docs/references/qa/CometBFT-QA-v1.md
@@ -0,0 +1,477 @@
+---
+order: 1
+parent:
+  title: QA results for CometBFT v1.x
+  description: This is a report on the results obtained when running CometBFT v1.x on testnets
+  order: 5
+---
+
+## Table of Contents
+- [QA results for CometBFT v1.x](#qa-results-for-cometbft-v1x)
+  - [Latency emulation (LE)](#latency-emulation-le)
+  - [Storage optimizations](#storage-optimizations)
+  - [Saturation point](#saturation-point)
+    - [With latency emulation](#with-latency-emulation)
+  - [200-nodes test](#200-nodes-test)
+    - [Latencies](#latencies)
+    - [Metrics](#metrics)
+      - [Mempool size](#mempool-size)
+      - [Peers](#peers)
+      - [Consensus rounds](#consensus-rounds)
+      - [Blocks produced per minute and transactions processed per minute](#blocks-produced-per-minute-and-transactions-processed-per-minute)
+      - [Memory resident set size](#memory-resident-set-size)
+      - [CPU utilization](#cpu-utilization)
+    - [Test Results](#test-results)
+    - [Test results with latency emulation](#test-results-with-latency-emulation)
+  - [Rotating Nodes Testnet](#rotating-nodes-testnet)
+    - [Latencies](#latencies-1)
+    - [Prometheus Metrics](#prometheus-metrics)
+      - [Blocks and Transactions per minute](#blocks-and-transactions-per-minute)
+      - [Peers](#peers-1)
+      - [Memory Resident Set Size](#memory-resident-set-size-1)
+      - [CPU utilization](#cpu-utilization-1)
+    - [Test Result](#test-result)
+
+# QA results for CometBFT v1.x
+
+We ran this iteration of the [Quality Assurance (QA)][qa] process on CometBFT `v1.0.0-alpha.2`, the
+second tag of the backport branch `v1.x` from the CometBFT repository. The previous QA tests were
+performed on `v0.38.0-alpha.2` from May 21, 2023, which we use here as a baseline for comparison.
+There are many changes with respect to the baseline. In particular, new features that can affect
+performance include improvements to bandwidth consumption and proposer-based timestamps (PBTS). For
+the full list of changes, check out the
+[CHANGELOG](https://github.com/cometbft/cometbft/blob/v1.0.0-alpha.2/CHANGELOG.md).
+
+The primary objective of the QA process is to ensure that no significant regressions have occurred
+compared to the previous version.
We consider that a regression is present if there is a variance
+greater than 10% in the results. After having performed the experiments, we have determined that no
+notable differences exist when compared to the baseline. Consequently, version `v1.0.0-alpha.2` has
+successfully passed the QA tests.
+
+In the remainder of this document we present and analyse the results obtained. The main steps of the
+QA process are the following:
+- [Saturation point](#saturation-point): On a network with 200 nodes, identify its saturation point,
+  that is, the transaction load where system performance begins to degrade. Subsequent QA
+  experiments will subject the system to a load slightly below this saturation point.
+- [200-nodes test](#200-nodes-test): Apply a consistent transaction load to the 200-nodes network
+  for a fixed duration. Then, gather metrics and block data to calculate latencies and compare them
+  with the baseline results.
+- Rotating-nodes test: Initially, deploy 10 validators and 3 seed nodes. Start the same load
+  (saturation point) used for the previous tests. Then, launch 25 full nodes, wait until they are
+  caught up to the latest height (minus 100) using Block Sync, and then stop them and wipe out their
+  data on disk. Repeat this process until the chain reaches height 3000. Then, stop the load and
+  wait for the full nodes to catch up one more time.
+
+## Latency emulation (LE)
+
+For the first time in the QA process, we are introducing latency emulation (LE) into our experiments.
+We typically deploy all the testnet nodes within the same region of a DigitalOcean data center to
+keep the costs low. However, this setup creates unrealistic communication between nodes due to
+minimal latency. To address this, while still deploying the testnet in a single region, we can now
+emulate latency by adding artificial delays to outgoing messages.
+
+Here's how we emulate latency:
+- Reference real latency data: We utilize [a table][aws-latencies] containing real data collected
+  from AWS, which includes average latencies between different AWS data centers worldwide.
+- Assign zones: When defining the testnet, each node is randomly assigned a "zone", corresponding
+  to one of the regions listed in the latency table.
+- Set delays: Prior to launching CometBFT on each node, we execute a script to configure added
+  delays between the current node and every other zone, as specified in the latency table. This
+  script utilizes the `tc` utility to control network traffic at the kernel level.
+
+Up until now, all QA results were obtained without latency emulation. To ensure fair comparisons, we
+will conduct a two-step analysis. First, we will compare the QA results of `v0.38` (the baseline)
+with those of `v1` without latency emulation. Second, we will compare the results of `v1` with and
+without latency emulation.
+
+It's important to note that the results with latency emulation in this report are not used to assess
+whether `v1.0.0-alpha.2` passes the QA tests. Instead, they serve as a baseline for future QA tests to
+be conducted for upcoming releases.
+
+## Storage optimizations
+
+We have conducted several experiments aimed at addressing concerns regarding the storage efficiency and
+performance of CometBFT. These experiments focused on various aspects, including the effectiveness
+of the pruning mechanism, the impact of different database key layouts, and the performance of
+alternative database engines like PebbleDB.
For a comprehensive overview, you can access the full
+report [here](../storage/README.md).
+
+The experiments were performed on different versions of CometBFT. Of particular relevance for this
+report are those where we targeted a version based on `v1.0.0-alpha.1`. The main difference with
+`v1.0.0-alpha.2` is PBTS, which does not impact storage performance. Hence, we consider the results
+obtained equally applicable to `v1.0.0-alpha.2`. In particular, both versions include the data
+companion API, background pruning, compaction, and support for different key layouts.
+
+To summarize the findings relevant to `v1`:
+- Pruning does not negatively affect node performance, though it proved ineffective at
+  controlling storage growth on its own. However, combining pruning with forced compaction and the new key
+  layout proved to be an effective strategy, which we recommend adopting.
+- Experiments reveal mixed results regarding the impact of different database key layouts on
+  performance. While some scenarios exhibit improvements in block processing times and storage
+  efficiency, particularly with the new key layout, further analysis suggests that the benefits of
+  the new layout were not consistently realized across different environments. Consequently, we've
+  released the new key layout as purely experimental.
+- Tests with PebbleDB showed promising performance improvements, especially when paired with the new
+  key layout. PebbleDB exhibits superior handling of compaction, without the need for manual
+  intervention.
+
+## Saturation point
+
+The initial phase of our QA process involves identifying the saturation point within the testnet. As
+in previous iterations, our testbed comprises 200 nodes (175 validator nodes, 20 full nodes, and 5
+seed nodes), along with one node dedicated to sending transaction load, and another for metric
+collection. All nodes use the same, default configuration. The experiment entails multiple
+iterations, each lasting 90 seconds, with varied load configurations. A configuration is
+characterized by:
+- `c`, denoting the number of connections from the load runner process to the target node, and
+- `r`, indicating the rate or frequency of transactions submitted per second. Each connection
+  dispatches `r` transactions per second.
+
+All transactions are 1024 bytes long.
+For more details on the methodology to identify the saturation point, see
+[here](method.md#running-the-test).
+
+The figure below shows the values obtained for v1 and v0.38 (the baseline). It's important to note
+that configurations with the same total transaction load are regarded as equivalent.
+For example, `c=1,r=400` and `c=2,r=200` are plotted at the same x-axis value, corresponding to their
+total rate of 400 tx/s.
+
+![saturation-plot](imgs/v1/saturation/saturation_v1_v038.png)
+
+In the figure, we observe that up to a rate of 400 tx/s, the obtained values closely match or are
+equal to the expected number of processed transactions, which is 35600 txs. However, beyond this
+point, the system becomes overloaded and cannot process all incoming transactions, resulting in
+dropped transactions. This state indicates that the system is saturated. The expected number of
+processed transactions is calculated as `c * r * 89 s = 35600 txs`. It's worth noting that we
+utilize 89 out of 90 seconds of the experiment duration, as the final transaction batch coincides
+with the end of the experiment and is thus not sent.
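+
+To make the arithmetic concrete, here is a small Go sketch (illustrative only) that computes the expected transaction counts for the equivalent configurations on the saturation diagonal:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	// Effective load duration: 89 of the 90 seconds, since the final
+	// transaction batch is never sent.
+	const effectiveSeconds = 89
+	for _, cfg := range []struct{ c, r int }{{1, 400}, {2, 200}} {
+		expected := cfg.c * cfg.r * effectiveSeconds
+		fmt.Printf("c=%d,r=%d => %d expected txs\n", cfg.c, cfg.r, expected)
+	}
+}
+```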
+
+The complete results from which the figure was generated can be found in the file
+[`v1_report_tabbed.txt`](imgs/v1/200nodes/v1_report_tabbed.txt). The following table summarizes the
+values plotted in the figure, that is, the number of transactions processed. We can see the
+saturation point in the diagonal defined by `c=1,r=400` and `c=2,r=200`:
+
+| r    | c=1       | c=2       | c=4   |
+| ---: | --------: | --------: | ----: |
+| 200  | 17800     | **34600** | 50464 |
+| 400  | **31200** | 54706     | 49463 |
+| 800  | 51146     | 51917     | 41376 |
+| 1600 | 50889     | 47732     | 45530 |
+
+For comparison, this is the table obtained on the baseline version, with the same saturation point:
+
+| r    | c=1       | c=2       | c=4   |
+| ---: | --------: | --------: | ----: |
+| 200  | 17800     | **33259** | 33259 |
+| 400  | **35600** | 41565     | 41384 |
+| 800  | 36831     | 38686     | 40816 |
+| 1600 | 40600     | 45034     | 39830 |
+
+In conclusion, we chose `c=1,r=400` as the transaction load to be used in the rest of the QA
+process. This is the same value used in the previous QA tests.
+
+### With latency emulation
+
+To compare where the network starts to saturate in `v1` with and without latency emulation, we ran a
+new set of experiments with different transaction load configurations: we used only one connection
+and a transaction rate ranging from 100 to 1000 tx/s, in intervals of 100. The figure depicts six
+instances of these experiments in total, three with latency emulation and three without.
+
+![v1_saturation](imgs/v1/saturation/saturation_v1_LE.png)
+
+Up to 300 tx/s, the throughput is optimal for both configurations. However, when the load increases
+beyond this threshold, not all transactions are processed. Given the limited number of experiments
+conducted, it's challenging to conclusively determine which configuration offers better throughput.
+Nevertheless, we can still say that there are no big discrepancies between the values obtained in
+the two scenarios.
+
+## 200-nodes test
+
+This experiment consists of running the 200-nodes network, injecting a load of 400 tx/s
+(`c=1,r=400`) for 90 seconds, and collecting the metrics. The network composition is the same as
+used for finding the saturation point.
+
+For the experiments with latency emulation, we set a duration of 180 seconds instead of 90.
+
+### Latencies
+
+The following figures show the latencies observed during the experiment. Each dot represents a
+block: the time at which it was created (x-axis) and the average latency of its transactions
+(y-axis).
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![latency-1-400-v38](img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png) | ![latency-1-400-v1](imgs/v1/200nodes/latencies/e_8e4e1e81-c171-4879-b86f-bce96ee2e861.png)
+| | ![latency-1-400-v1-le](imgs/v1/200nodes_with_latency_emulation/latencies/e_8190e83a-9135-444b-92fb-4efaeaaf2b52.png)
+
+In both cases, most latencies are around or below 4 seconds. On `v0.38` there are peaks reaching 10
+seconds, while on `v1` (without LE) the only peak reaches 8 seconds. Although the two images are
+similar, it's crucial to note that `v0.38` implements [BFT time][bft-time], while the experiments on
+`v1` were performed with [PBTS][pbts]. The implication is that PBTS tends to produce slightly
+smaller latencies. The reason is that, in PBTS, the block's timestamp is the proposer's wallclock
+value at the moment the block was created, whereas with BFT time, the block's timestamp is the
+median of the timestamps of all precommits in the previous height.
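+
+A toy numerical illustration of the two timestamping rules (all values are hypothetical wallclock
+times, in seconds):
+
+```python
+from statistics import median
+
+tx_submitted = 10.0   # load runner sends the transaction
+block_created = 11.5  # proposer's wallclock when building the block
+# Wallclock times at which each validator sent its precommit for that block.
+precommit_times = [11.9, 12.0, 12.1, 12.3, 12.6]
+
+pbts_timestamp = block_created                 # PBTS: the proposer's clock
+bft_time_timestamp = median(precommit_times)   # BFT time: median of precommits
+
+print(f"latency under PBTS:     {pbts_timestamp - tx_submitted:.1f} s")      # 1.5 s
+print(f"latency under BFT time: {bft_time_timestamp - tx_submitted:.1f} s")  # 2.1 s
+```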
+
+Each of these precommit timestamps reflects the validator's wallclock time at the moment the
+precommit is sent. Consequently, latencies calculated when BFT time is active tend to contain one
+extra network propagation delay: the one whereby precommits are disseminated.
+
+With these considerations in mind, and taking into account that this is a small experiment, we infer
+that the latencies measured on `v1` are not worse than those of the baseline. With latency
+emulation, the latencies are considerably higher, as expected.
+
+### Metrics
+
+In this section we analyse key metrics extracted from Prometheus data on the 200-nodes experiment
+with configuration `c=1,r=400`.
+
+#### Mempool size
+
+The following figures show the evolution of the average and maximum mempool size over all full
+nodes.
+
+**Average size** On `v1`, the average mempool size mostly stays below 1000 outstanding transactions,
+except for a peak above 2000, coinciding with the moment the system reached round number 1 (see
+below). For these particular runs, this result is better than the baseline, which oscillates between
+1000 and 2500.
+
+With latency emulation, the average mempool size stays mostly above 2000 outstanding transactions,
+with peaks almost reaching the maximum mempool size of 5000 transactions.
+
+| v0.38 | v1 (without LE / with LE)
+| :--------------:|:--------------:|
+| ![mempool-avg-baseline](img38/200nodes/avg_mempool_size_ylim.png) | ![mempool-avg](imgs/v1/200nodes/metrics/avg_mempool_size.png)
+| | ![mempool-avg-le](imgs/v1/200nodes_with_latency_emulation/metrics/avg_mempool_size.png)
+
+**Maximum size** The maximum mempool size indicates when one or more nodes have reached their
+maximum capacity in terms of the number of transactions they can hold. In version `v0.38`, it's
+apparent that, most of the time, at least one node is dropping incoming transactions.
+Conversely, in `v1`, this happens less often, especially after reaching round 1 (as detailed below).
+
+However, when we introduce latency emulation into `v1`, there is consistently at least one node with
+a saturated mempool.
+
+| v0.38 | v1 (without LE / with LE)
+| :--------------:|:--------------:|
+| ![mempool-cumulative-baseline](img38/200nodes/mempool_size_max.png) | ![mempool-cumulative](imgs/v1/200nodes/metrics/mempool_size_max.png)
+| | ![mempool-cumulative-le](imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size_max.png)
+
+#### Peers
+
+In all experiments, the number of peers was stable on all nodes. As expected, the seed nodes have
+more peers (around 125) than the rest (between 20 and 70 for most nodes). The red dashed line
+denotes the average value. Just as in the baseline, the fact that non-seed nodes reach more than 50
+peers is due to [\#486].
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![peers](img38/200nodes/peers.png) | ![peers](imgs/v1/200nodes/metrics/peers.png)
+| | ![peers](imgs/v1/200nodes_with_latency_emulation/metrics/peers.png)
+
+#### Consensus rounds
+
+On both versions, most blocks took just one round to reach consensus, except for a few cases when
+a second round was needed. On these two particular runs, we observe that `v0.38` required an extra
+round on more occasions than `v1`.
+
+With latency emulation, the performance is notably worse: the consensus module requires an extra
+round more often, at times needing four rounds to finalise a block. This indicates that the values
+of the consensus timeouts should be increased, so that they reflect the actual delays in the
+network.
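+
+For reference, these timeouts are set in each node's `config.toml`. The sketch below shows the kind
+of adjustment meant here, using the timeout key names of the `v0.38.x`/`v1` configuration file; the
+values are purely illustrative, not recommendations:
+
+```toml
+[consensus]
+# Illustrative values only, scaled up from the defaults so that the first
+# round can absorb the emulated wide-area delays.
+timeout_propose = "5s"
+timeout_propose_delta = "500ms"
+timeout_prevote = "2s"
+timeout_prevote_delta = "500ms"
+timeout_precommit = "2s"
+timeout_precommit_delta = "500ms"
+```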
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![rounds](img38/200nodes/rounds_ylim.png) | ![rounds](imgs/v1/200nodes/metrics/rounds.png)
+| | ![rounds](imgs/v1/200nodes_with_latency_emulation/metrics/rounds.png)
+
+#### Blocks produced per minute and transactions processed per minute
+
+These figures show the rate at which blocks were created from the point of view of each node,
+indicating when each node learned that a new block had been agreed upon. Throughout much of the load
+application period, the majority of nodes maintained a rate of between 20 and 40 blocks per minute.
+A spike to more than 100 blocks per minute occurred due to a slower node catching up. In the
+baseline scenario, most nodes had a somewhat slower rate of approximately 10 to 30 blocks per
+minute.
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![heights-baseline](img38/200nodes/block_rate.png) | ![heights](imgs/v1/200nodes/metrics/block_rate.png)
+| | ![heights](imgs/v1/200nodes_with_latency_emulation/metrics/block_rate.png)
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![total-txs-baseline](img38/200nodes/total_txs_rate_ylim.png) | ![total-txs](imgs/v1/200nodes/metrics/total_txs_rate.png)
+| | ![total-txs](imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate.png)
+
+The collective spike on the right of the graph marks the end of the load injection, when blocks
+become smaller (empty) and impose less strain on the network.
+
+With latency emulation (LE), there is a noticeable degradation in throughput. The block generation
+rate drops from approximately 30 blocks per minute (without LE) to around 10 blocks per minute.
+Since the rates of transaction processing are similar, this means that more transactions are
+included in each block, as shown in the following images. The most probable reason is that, since
+block latency is higher with latency emulation, more transactions are available at the proposer when
+it is ready to propose a new block. Note that the maximum block size is 4 MB.
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![block-size-v038](img38/200nodes/block_size_bytes.png) | ![block-size-v1](imgs/v1/200nodes/metrics/block_size_bytes.png)
+| | ![block-size-v1-le](imgs/v1/200nodes_with_latency_emulation/metrics/block_size_bytes.png)
+
+#### Memory resident set size
+
+The following graphs show the Resident Set Size (RSS) of all monitored processes. Most nodes use
+less than 0.9 GB of memory, with a maximum of 1.3 GB. In all cases, the memory usage in `v1` is
+lower than in the baseline. On all processes, the memory usage went down as the load was removed,
+showing no signs of unconstrained growth. With latency emulation, the results are comparable.
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+|![rss](img38/200nodes/memory_ylim.png) | ![rss](imgs/v1/200nodes/metrics/memory.png)
+| | ![rss](imgs/v1/200nodes_with_latency_emulation/metrics/memory.png)
+
+#### CPU utilization
+
+The most reliable metric from Prometheus for assessing CPU utilization on a Unix machine is `load1`,
+commonly found in the [output of
+`top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). In these scenarios,
+the load remains consistently below 4 on the majority of nodes, with the baseline exhibiting a
+similar pattern.
+With latency emulation, the CPU load reaches 5 on some nodes, probably due to
+having a busier mempool and processing more rounds.
+
+| v0.38 | v1 (without LE / with LE)
+|:--------------:|:--------------:|
+| ![load1-baseline](img38/200nodes/cpu.png) | ![load1](imgs/v1/200nodes/metrics/cpu.png)
+| | ![load1](imgs/v1/200nodes_with_latency_emulation/metrics/cpu.png)
+
+### Test Results
+
+We have demonstrated that there are no regressions when comparing CometBFT `v1.0.0-alpha.2` against
+the results obtained for `v0.38`. In fact, the observed results are identical to, or occasionally
+even slightly better than, those of the baseline. We therefore conclude that this version of
+CometBFT has passed the test.
+
+| Scenario  | Date       | Version                                                    | Result |
+| --------- | ---------- | ---------------------------------------------------------- | ------ |
+| 200-nodes | 2024-03-21 | v1.0.0-alpha.2 (4ced46d3d742bdc6093050bd67d9bbde830b6df2)  | Pass   |
+
+#### Test results with latency emulation
+
+As expected, the introduction of emulated latencies into the network results in a degradation of the
+performance of `v1` compared to `v1` without latency emulation, although not by an order of
+magnitude. Moving forward with the next QA tests, it may be prudent to consider adjusting the
+saturation point to a slightly lower value. Determining this adjustment will require conducting new
+experiments on the network with latency emulation.
+
+## Rotating Nodes Testnet
+
+As done in past releases, we use `c=1,r=400` as the load, since the saturation point in the 200-node
+test has not changed from `v0.38.x` (see the corresponding [section](#saturation-point) above).
+Further, although latency emulation is now available, we decided to run this test case
+without latency emulation (LE); this choice may change in future releases.
+
+The baseline considered in this test case is `v0.38.0-alpha.2`, as described
+in the [introduction](#qa-results-for-cometbft-v1x) above.
+
+### Latencies
+
+The following two plots show latencies for the whole experiment,
+with the baseline (`v0.38.0-alpha.2`) on the left, and the current version
+on the right.
+
+We can appreciate that most latencies are under 4 seconds in both cases,
+and both graphs have a comparable number of outliers between 4 and 11 seconds.
+
+| v0.38.0 | v1.0.0 (without LE) |
+| :-----------------------------------------------------------------: | :-------------------------------------------------------: |
+| ![rotating-all-latencies-bl](img38/rotating/rotating_latencies.png) | ![rotating-all-latencies](imgs/v1/rotating/latencies.png) |
+
+
+### Prometheus Metrics
+
+This section shows relevant metrics both on `v1.0.0` and the baseline (`v0.38.0`).
+In general, all metrics roughly match those seen on the baseline
+for the rotating node experiment.
+
+#### Blocks and Transactions per minute
+
+The following two plots show the blocks produced per minute. In both graphs, most nodes stabilize
+around 40 blocks per minute.
+
+| v0.38.0 | v1.0.0 (without LE) |
+| :------------------------------------------------------------: | :-------------------------------------------------------------------: |
+| ![rotating-heights-bl](img38/rotating/rotating_block_rate.png) | ![rotating-heights](imgs/v1/rotating/metrics/rotating_block_rate.png) |
+
+The following plots show only the heights reported by ephemeral nodes, both when they were
+blocksyncing and when they were running consensus.
+
+| v0.38.0 | v1.0.0 (without LE) |
+| :------------------------------------------------------------------: | :----------------------------------------------------------------: |
+| ![rotating-heights-ephe-bl](img38/rotating/rotating_eph_heights.png) | ![rotating-heights-ephe](imgs/v1/rotating/metrics/rotating_eph_heights.png) |
+
+In both cases, we see the main loop of the `rotating` test case repeat itself a number of times.
+Ephemeral nodes are stopped, their persisted state is wiped out, their config is transferred over
+from the orchestrating node, they are started, we wait for all of them to catch up via blocksync,
+and the whole cycle starts over. All these steps are carried out via `ansible` playbooks.
+
+We see that there are fewer cycles in `v1.0.0`. The reason is the following.
+All `ansible` steps are currently run from the orchestrating node (i.e., the engineer's laptop).
+When running the rotating-nodes test case for `v1.0.0`, the orchestrating node
+was on a network connection that was slower than when the equivalent test was run for `v0.38.x`.
+This caused the steps reconfiguring ephemeral nodes at the end of each cycle to be somewhat slower.
+This can be noticed in the graphs when comparing the width (in x-axis terms) of the gaps without
+metrics from the end of a cycle to the beginning of the next one.
+
+If we focus on the _width_ of the periods when ephemeral nodes are blocksyncing, we see that they
+are slightly narrower in `v1.0.0`. This is likely due to the improvements introduced as part of the
+issues
+[#1283](https://github.com/cometbft/cometbft/issues/1283),
+[#2379](https://github.com/cometbft/cometbft/issues/2379), and
+[#2465](https://github.com/cometbft/cometbft/issues/2465).
+
+The following plots show the transactions processed per minute.
+
+| v0.38.0 | v1.0.0 (without LE) |
+| :------------------------------------------------------------: | :-------------------------------------------------------------------: |
+| ![rotating-total-txs-bl](img38/rotating/rotating_txs_rate.png) | ![rotating-total-txs](imgs/v1/rotating/metrics/rotating_txs_rate.png) |
+
+They seem similar, except for an outlier in the `v1.0.0` plot.
+
+#### Peers
+
+The plots below show the evolution of the number of peers throughout the experiment.
+
+| v0.38.0 | v1.0.0 (without LE) |
+| :-----------------------------------------------------: | :------------------------------------------------------------: |
+| ![rotating-peers-bl](img38/rotating/rotating_peers.png) | ![rotating-peers](imgs/v1/rotating/metrics/rotating_peers.png) |
+
+The plotted values and their evolution show the same dynamics in both plots.
+Nevertheless, all nodes seem to acquire more peers when ephemeral nodes are catching up in the
+`v1.0.0` experiment.
+
+For further explanations of these plots, see [this section](TMCore-QA-34.md#peers-1).
+
+#### Memory Resident Set Size
+
+These plots show the average Resident Set Size (RSS) over all processes.
+They are comparable in both releases.
+
+| v0.38.0 | v1.0.0 (without LE) |
+| :------------------------------------------------------------: | :-------------------------------------------------------------------: |
+| ![rotating-rss-avg-bl](img38/rotating/rotating_avg_memory.png) | ![rotating-rss-avg](imgs/v1/rotating/metrics/rotating_avg_memory.png) |
+
+#### CPU utilization
+
+The plots below show the metric `load1` for all nodes, for `v1.0.0-alpha.2` and for the baseline
+(`v0.38.0`).
+ +| v0.38.0 | v1.0.0 (without LE) | +| :---------------------------------------------------: | :-------------------------------------------------: | +| ![rotating-load1-bl](img38/rotating/rotating_cpu.png) | ![rotating-load1](imgs/v1/rotating/metrics/rotating_cpu.png) | + +In both cases, it is contained under 5 most of the time, which is considered normal load. + +### Test Result + +| Scenario | Date | Version | Result | +| -------- | ---------- | --------------------------------------------------------- | ------ | +| Rotating | 2024-04-03 | v1.0.0-alpha.2 (e42f62b681a2d0b05607a61d834afea90f73d366) | Pass | + +[qa]: README.md#cometbft-quality-assurance +[aws-latencies]: https://github.com/cometbft/cometbft/blob/v1.0.0-alpha.2/test/e2e/pkg/latency/aws-latencies.csv +[latency-emulator-script]: https://github.com/cometbft/cometbft/blob/v1.0.0-alpha.2/test/e2e/pkg/latency/latency-setter.py +[bft-time]: ../../../spec/consensus/bft-time.md +[pbts]: ../../../spec/consensus/proposer-based-timestamp/README.md +[\#486]: https://github.com/cometbft/cometbft/issues/486 +[end-to-end]: https://github.com/cometbft/cometbft/tree/main/test/e2e diff --git a/docs/qa/README.md b/docs/references/qa/README.md similarity index 87% rename from docs/qa/README.md rename to docs/references/qa/README.md index 52071d4003a..78610688f46 100644 --- a/docs/qa/README.md +++ b/docs/references/qa/README.md @@ -3,7 +3,7 @@ order: 1 parent: title: CometBFT Quality Assurance description: This is a report on the process followed and results obtained when running v0.34.x on testnets - order: 2 + order: 7 --- # CometBFT Quality Assurance @@ -14,7 +14,7 @@ This directory is to live in multiple branches. On each release branch, the contents of this directory reflect the status of the process at the time the Quality Assurance process was applied for that release. -File [method](./method.md) keeps track of the process followed to obtain the results +File [method](method.md) keeps track of the process followed to obtain the results used to decide if a release is passing the Quality Assurance process. The results obtained in each release are stored in their own directory. The following releases have undergone the Quality Assurance process, and the corresponding reports include detailed information on tests and comparison with the baseline. @@ -24,3 +24,4 @@ The following releases have undergone the Quality Assurance process, and the cor * [TM v0.37.x](TMCore-QA-37.md) - Tested prior to releasing TM v0.37.x, using TM v0.34.x results as baseline. * [v0.37.x](CometBFT-QA-37.md) - Tested on CometBFT v0.37.0-alpha3, using TM v0.37.x results as baseline. * [v0.38.x](CometBFT-QA-38.md) - Tested on v0.38.0-alpha.2, using v0.37.x results as baseline. +* [v1.x](CometBFT-QA-v1.md) - Tested on v1.0.0-alpha.2, using v0.38.x results as baseline. 
diff --git a/docs/qa/TMCore-QA-34.md b/docs/references/qa/TMCore-QA-34.md similarity index 91% rename from docs/qa/TMCore-QA-34.md rename to docs/references/qa/TMCore-QA-34.md index e5764611c06..fbf85401b58 100644 --- a/docs/qa/TMCore-QA-34.md +++ b/docs/references/qa/TMCore-QA-34.md @@ -8,6 +8,27 @@ parent: # Tendermint Core QA Results v0.34.x +## Table of Contents +- [200 Node Testnet](#200-node-testnet) + - [Finding the Saturation Point](#finding-the-saturation-point) + - [Examining latencies](#examining-latencies) + - [Prometheus Metrics on the Chosen Experiment](#prometheus-metrics-on-the-chosen-experiment) + - [Mempool Size](#mempool-size) + - [Peers](#peers) + - [Consensus Rounds per Height](#consensus-rounds-per-height) + - [Blocks Produced per Minute, Transactions Processed per Minute](#blocks-produced-per-minute-transactions-processed-per-minute) + - [Memory Resident Set Size](#memory-resident-set-size) + - [CPU utilization](#cpu-utilization) + - [Test Result](#test-result) +- [Rotating Node Testnet](#rotating-node-testnet) + - [Latencies](#latencies) + - [Prometheus Metrics](#prometheus-metrics) + - [Blocks and Transactions per minute](#blocks-and-transactions-per-minute) + - [Peers](#peers-1) + - [Memory Resident Set Size](#memory-resident-set-size-1) + - [CPU utilization](#cpu-utilization-1) + - [Test Result](#test-result-1) + ## 200 Node Testnet ### Finding the Saturation Point diff --git a/docs/qa/TMCore-QA-37.md b/docs/references/qa/TMCore-QA-37.md similarity index 91% rename from docs/qa/TMCore-QA-37.md rename to docs/references/qa/TMCore-QA-37.md index edff57b0276..624c6704dd9 100644 --- a/docs/qa/TMCore-QA-37.md +++ b/docs/references/qa/TMCore-QA-37.md @@ -8,6 +8,28 @@ parent: # Tendermint Core QA Results v0.37.x +## Table of Contents +- [Issues discovered](#issues-discovered) +- [200 Node Testnet](#200-node-testnet) + - [Finding the Saturation Point](#finding-the-saturation-point) + - [Examining latencies](#examining-latencies) + - [Prometheus Metrics on the Chosen Experiment](#prometheus-metrics-on-the-chosen-experiment) + - [Mempool Size](#mempool-size) + - [Peers](#peers) + - [Consensus Rounds per Height](#consensus-rounds-per-height) + - [Blocks Produced per Minute, Transactions Processed per Minute](#blocks-produced-per-minute-transactions-processed-per-minute) + - [Memory Resident Set Size](#memory-resident-set-size) + - [CPU utilization](#cpu-utilization) + - [Test Result](#test-result) +- [Rotating Node Testnet](#rotating-node-testnet) + - [Latencies](#latencies) + - [Prometheus Metrics](#prometheus-metrics) + - [Blocks and Transactions per minute](#blocks-and-transactions-per-minute) + - [Peers](#peers-1) + - [Memory Resident Set Size](#memory-resident-set-size-1) + - [CPU utilization](#cpu-utilization-1) + - [Test Result](#test-result-1) + ## Issues discovered During this iteration of the QA process, the following issues were found: diff --git a/docs/qa/img34/baseline/avg_cpu.png b/docs/references/qa/img34/baseline/avg_cpu.png similarity index 100% rename from docs/qa/img34/baseline/avg_cpu.png rename to docs/references/qa/img34/baseline/avg_cpu.png diff --git a/docs/qa/img34/baseline/avg_memory.png b/docs/references/qa/img34/baseline/avg_memory.png similarity index 100% rename from docs/qa/img34/baseline/avg_memory.png rename to docs/references/qa/img34/baseline/avg_memory.png diff --git a/docs/qa/img34/baseline/avg_mempool_size.png b/docs/references/qa/img34/baseline/avg_mempool_size.png similarity index 100% rename from 
docs/qa/img34/baseline/avg_mempool_size.png rename to docs/references/qa/img34/baseline/avg_mempool_size.png diff --git a/docs/qa/img34/baseline/block_rate_regular.png b/docs/references/qa/img34/baseline/block_rate_regular.png similarity index 100% rename from docs/qa/img34/baseline/block_rate_regular.png rename to docs/references/qa/img34/baseline/block_rate_regular.png diff --git a/docs/qa/img34/baseline/cpu.png b/docs/references/qa/img34/baseline/cpu.png similarity index 100% rename from docs/qa/img34/baseline/cpu.png rename to docs/references/qa/img34/baseline/cpu.png diff --git a/docs/qa/img34/baseline/memory.png b/docs/references/qa/img34/baseline/memory.png similarity index 100% rename from docs/qa/img34/baseline/memory.png rename to docs/references/qa/img34/baseline/memory.png diff --git a/docs/qa/img34/baseline/mempool_size.png b/docs/references/qa/img34/baseline/mempool_size.png similarity index 100% rename from docs/qa/img34/baseline/mempool_size.png rename to docs/references/qa/img34/baseline/mempool_size.png diff --git a/docs/qa/img34/baseline/peers.png b/docs/references/qa/img34/baseline/peers.png similarity index 100% rename from docs/qa/img34/baseline/peers.png rename to docs/references/qa/img34/baseline/peers.png diff --git a/docs/qa/img34/baseline/rounds.png b/docs/references/qa/img34/baseline/rounds.png similarity index 100% rename from docs/qa/img34/baseline/rounds.png rename to docs/references/qa/img34/baseline/rounds.png diff --git a/docs/qa/img34/baseline/total_txs_rate_regular.png b/docs/references/qa/img34/baseline/total_txs_rate_regular.png similarity index 100% rename from docs/qa/img34/baseline/total_txs_rate_regular.png rename to docs/references/qa/img34/baseline/total_txs_rate_regular.png diff --git a/docs/qa/img34/cmt1tm1/all_experiments.png b/docs/references/qa/img34/cmt1tm1/all_experiments.png similarity index 100% rename from docs/qa/img34/cmt1tm1/all_experiments.png rename to docs/references/qa/img34/cmt1tm1/all_experiments.png diff --git a/docs/qa/img34/cmt1tm1/avg_cpu.png b/docs/references/qa/img34/cmt1tm1/avg_cpu.png similarity index 100% rename from docs/qa/img34/cmt1tm1/avg_cpu.png rename to docs/references/qa/img34/cmt1tm1/avg_cpu.png diff --git a/docs/qa/img34/cmt1tm1/avg_memory.png b/docs/references/qa/img34/cmt1tm1/avg_memory.png similarity index 100% rename from docs/qa/img34/cmt1tm1/avg_memory.png rename to docs/references/qa/img34/cmt1tm1/avg_memory.png diff --git a/docs/qa/img34/cmt1tm1/avg_mempool_size.png b/docs/references/qa/img34/cmt1tm1/avg_mempool_size.png similarity index 100% rename from docs/qa/img34/cmt1tm1/avg_mempool_size.png rename to docs/references/qa/img34/cmt1tm1/avg_mempool_size.png diff --git a/docs/qa/img34/cmt1tm1/block_rate_regular.png b/docs/references/qa/img34/cmt1tm1/block_rate_regular.png similarity index 100% rename from docs/qa/img34/cmt1tm1/block_rate_regular.png rename to docs/references/qa/img34/cmt1tm1/block_rate_regular.png diff --git a/docs/qa/img34/cmt1tm1/cpu.png b/docs/references/qa/img34/cmt1tm1/cpu.png similarity index 100% rename from docs/qa/img34/cmt1tm1/cpu.png rename to docs/references/qa/img34/cmt1tm1/cpu.png diff --git a/docs/qa/img34/cmt1tm1/memory.png b/docs/references/qa/img34/cmt1tm1/memory.png similarity index 100% rename from docs/qa/img34/cmt1tm1/memory.png rename to docs/references/qa/img34/cmt1tm1/memory.png diff --git a/docs/qa/img34/cmt1tm1/mempool_size.png b/docs/references/qa/img34/cmt1tm1/mempool_size.png similarity index 100% rename from docs/qa/img34/cmt1tm1/mempool_size.png rename 
to docs/references/qa/img34/cmt1tm1/mempool_size.png diff --git a/docs/qa/img34/cmt1tm1/peers.png b/docs/references/qa/img34/cmt1tm1/peers.png similarity index 100% rename from docs/qa/img34/cmt1tm1/peers.png rename to docs/references/qa/img34/cmt1tm1/peers.png diff --git a/docs/qa/img34/cmt1tm1/rounds.png b/docs/references/qa/img34/cmt1tm1/rounds.png similarity index 100% rename from docs/qa/img34/cmt1tm1/rounds.png rename to docs/references/qa/img34/cmt1tm1/rounds.png diff --git a/docs/qa/img34/cmt1tm1/total_txs_rate_regular.png b/docs/references/qa/img34/cmt1tm1/total_txs_rate_regular.png similarity index 100% rename from docs/qa/img34/cmt1tm1/total_txs_rate_regular.png rename to docs/references/qa/img34/cmt1tm1/total_txs_rate_regular.png diff --git a/docs/qa/img34/cmt2tm1/all_experiments.png b/docs/references/qa/img34/cmt2tm1/all_experiments.png similarity index 100% rename from docs/qa/img34/cmt2tm1/all_experiments.png rename to docs/references/qa/img34/cmt2tm1/all_experiments.png diff --git a/docs/qa/img34/cmt2tm1/avg_cpu.png b/docs/references/qa/img34/cmt2tm1/avg_cpu.png similarity index 100% rename from docs/qa/img34/cmt2tm1/avg_cpu.png rename to docs/references/qa/img34/cmt2tm1/avg_cpu.png diff --git a/docs/qa/img34/cmt2tm1/avg_memory.png b/docs/references/qa/img34/cmt2tm1/avg_memory.png similarity index 100% rename from docs/qa/img34/cmt2tm1/avg_memory.png rename to docs/references/qa/img34/cmt2tm1/avg_memory.png diff --git a/docs/qa/img34/cmt2tm1/avg_mempool_size.png b/docs/references/qa/img34/cmt2tm1/avg_mempool_size.png similarity index 100% rename from docs/qa/img34/cmt2tm1/avg_mempool_size.png rename to docs/references/qa/img34/cmt2tm1/avg_mempool_size.png diff --git a/docs/qa/img34/cmt2tm1/block_rate_regular.png b/docs/references/qa/img34/cmt2tm1/block_rate_regular.png similarity index 100% rename from docs/qa/img34/cmt2tm1/block_rate_regular.png rename to docs/references/qa/img34/cmt2tm1/block_rate_regular.png diff --git a/docs/qa/img34/cmt2tm1/cpu.png b/docs/references/qa/img34/cmt2tm1/cpu.png similarity index 100% rename from docs/qa/img34/cmt2tm1/cpu.png rename to docs/references/qa/img34/cmt2tm1/cpu.png diff --git a/docs/qa/img34/cmt2tm1/memory.png b/docs/references/qa/img34/cmt2tm1/memory.png similarity index 100% rename from docs/qa/img34/cmt2tm1/memory.png rename to docs/references/qa/img34/cmt2tm1/memory.png diff --git a/docs/qa/img34/cmt2tm1/mempool_size.png b/docs/references/qa/img34/cmt2tm1/mempool_size.png similarity index 100% rename from docs/qa/img34/cmt2tm1/mempool_size.png rename to docs/references/qa/img34/cmt2tm1/mempool_size.png diff --git a/docs/qa/img34/cmt2tm1/peers.png b/docs/references/qa/img34/cmt2tm1/peers.png similarity index 100% rename from docs/qa/img34/cmt2tm1/peers.png rename to docs/references/qa/img34/cmt2tm1/peers.png diff --git a/docs/qa/img34/cmt2tm1/rounds.png b/docs/references/qa/img34/cmt2tm1/rounds.png similarity index 100% rename from docs/qa/img34/cmt2tm1/rounds.png rename to docs/references/qa/img34/cmt2tm1/rounds.png diff --git a/docs/qa/img34/cmt2tm1/total_txs_rate_regular.png b/docs/references/qa/img34/cmt2tm1/total_txs_rate_regular.png similarity index 100% rename from docs/qa/img34/cmt2tm1/total_txs_rate_regular.png rename to docs/references/qa/img34/cmt2tm1/total_txs_rate_regular.png diff --git a/docs/qa/img34/homogeneous/all_experiments.png b/docs/references/qa/img34/homogeneous/all_experiments.png similarity index 100% rename from docs/qa/img34/homogeneous/all_experiments.png rename to 
docs/references/qa/img34/homogeneous/all_experiments.png diff --git a/docs/qa/img34/homogeneous/avg_cpu.png b/docs/references/qa/img34/homogeneous/avg_cpu.png similarity index 100% rename from docs/qa/img34/homogeneous/avg_cpu.png rename to docs/references/qa/img34/homogeneous/avg_cpu.png diff --git a/docs/qa/img34/homogeneous/avg_memory.png b/docs/references/qa/img34/homogeneous/avg_memory.png similarity index 100% rename from docs/qa/img34/homogeneous/avg_memory.png rename to docs/references/qa/img34/homogeneous/avg_memory.png diff --git a/docs/qa/img34/homogeneous/avg_mempool_size.png b/docs/references/qa/img34/homogeneous/avg_mempool_size.png similarity index 100% rename from docs/qa/img34/homogeneous/avg_mempool_size.png rename to docs/references/qa/img34/homogeneous/avg_mempool_size.png diff --git a/docs/qa/img34/homogeneous/block_rate_regular.png b/docs/references/qa/img34/homogeneous/block_rate_regular.png similarity index 100% rename from docs/qa/img34/homogeneous/block_rate_regular.png rename to docs/references/qa/img34/homogeneous/block_rate_regular.png diff --git a/docs/qa/img34/homogeneous/cpu.png b/docs/references/qa/img34/homogeneous/cpu.png similarity index 100% rename from docs/qa/img34/homogeneous/cpu.png rename to docs/references/qa/img34/homogeneous/cpu.png diff --git a/docs/qa/img34/homogeneous/memory.png b/docs/references/qa/img34/homogeneous/memory.png similarity index 100% rename from docs/qa/img34/homogeneous/memory.png rename to docs/references/qa/img34/homogeneous/memory.png diff --git a/docs/qa/img34/homogeneous/mempool_size.png b/docs/references/qa/img34/homogeneous/mempool_size.png similarity index 100% rename from docs/qa/img34/homogeneous/mempool_size.png rename to docs/references/qa/img34/homogeneous/mempool_size.png diff --git a/docs/qa/img34/homogeneous/peers.png b/docs/references/qa/img34/homogeneous/peers.png similarity index 100% rename from docs/qa/img34/homogeneous/peers.png rename to docs/references/qa/img34/homogeneous/peers.png diff --git a/docs/qa/img34/homogeneous/rounds.png b/docs/references/qa/img34/homogeneous/rounds.png similarity index 100% rename from docs/qa/img34/homogeneous/rounds.png rename to docs/references/qa/img34/homogeneous/rounds.png diff --git a/docs/qa/img34/homogeneous/total_txs_rate_regular.png b/docs/references/qa/img34/homogeneous/total_txs_rate_regular.png similarity index 100% rename from docs/qa/img34/homogeneous/total_txs_rate_regular.png rename to docs/references/qa/img34/homogeneous/total_txs_rate_regular.png diff --git a/docs/qa/img34/v034_200node_latencies.png b/docs/references/qa/img34/v034_200node_latencies.png similarity index 100% rename from docs/qa/img34/v034_200node_latencies.png rename to docs/references/qa/img34/v034_200node_latencies.png diff --git a/docs/qa/img34/v034_200node_latencies_zoomed.png b/docs/references/qa/img34/v034_200node_latencies_zoomed.png similarity index 100% rename from docs/qa/img34/v034_200node_latencies_zoomed.png rename to docs/references/qa/img34/v034_200node_latencies_zoomed.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png b/docs/references/qa/img34/v034_200node_tm2cmt1/all_experiments.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/all_experiments.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png b/docs/references/qa/img34/v034_200node_tm2cmt1/avg_cpu.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png rename 
to docs/references/qa/img34/v034_200node_tm2cmt1/avg_cpu.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png b/docs/references/qa/img34/v034_200node_tm2cmt1/avg_memory.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/avg_memory.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png b/docs/references/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png b/docs/references/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png b/docs/references/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/cpu.png b/docs/references/qa/img34/v034_200node_tm2cmt1/cpu.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/cpu.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/cpu.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/memory.png b/docs/references/qa/img34/v034_200node_tm2cmt1/memory.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/memory.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/memory.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png b/docs/references/qa/img34/v034_200node_tm2cmt1/mempool_size.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/mempool_size.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/peers.png b/docs/references/qa/img34/v034_200node_tm2cmt1/peers.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/peers.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/peers.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/rounds.png b/docs/references/qa/img34/v034_200node_tm2cmt1/rounds.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/rounds.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/rounds.png diff --git a/docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png b/docs/references/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png similarity index 100% rename from docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png rename to docs/references/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png diff --git a/docs/qa/img34/v034_latency_throughput.png b/docs/references/qa/img34/v034_latency_throughput.png similarity index 100% rename from docs/qa/img34/v034_latency_throughput.png rename to docs/references/qa/img34/v034_latency_throughput.png diff --git a/docs/qa/img34/v034_r200c2_heights.png b/docs/references/qa/img34/v034_r200c2_heights.png similarity index 100% rename from docs/qa/img34/v034_r200c2_heights.png rename to docs/references/qa/img34/v034_r200c2_heights.png diff --git a/docs/qa/img34/v034_r200c2_load-runner.png b/docs/references/qa/img34/v034_r200c2_load-runner.png similarity index 100% rename from 
docs/qa/img34/v034_r200c2_load-runner.png rename to docs/references/qa/img34/v034_r200c2_load-runner.png diff --git a/docs/qa/img34/v034_r200c2_load1.png b/docs/references/qa/img34/v034_r200c2_load1.png similarity index 100% rename from docs/qa/img34/v034_r200c2_load1.png rename to docs/references/qa/img34/v034_r200c2_load1.png diff --git a/docs/qa/img34/v034_r200c2_mempool_size.png b/docs/references/qa/img34/v034_r200c2_mempool_size.png similarity index 100% rename from docs/qa/img34/v034_r200c2_mempool_size.png rename to docs/references/qa/img34/v034_r200c2_mempool_size.png diff --git a/docs/qa/img34/v034_r200c2_mempool_size_avg.png b/docs/references/qa/img34/v034_r200c2_mempool_size_avg.png similarity index 100% rename from docs/qa/img34/v034_r200c2_mempool_size_avg.png rename to docs/references/qa/img34/v034_r200c2_mempool_size_avg.png diff --git a/docs/qa/img34/v034_r200c2_peers.png b/docs/references/qa/img34/v034_r200c2_peers.png similarity index 100% rename from docs/qa/img34/v034_r200c2_peers.png rename to docs/references/qa/img34/v034_r200c2_peers.png diff --git a/docs/qa/img34/v034_r200c2_rounds.png b/docs/references/qa/img34/v034_r200c2_rounds.png similarity index 100% rename from docs/qa/img34/v034_r200c2_rounds.png rename to docs/references/qa/img34/v034_r200c2_rounds.png diff --git a/docs/qa/img34/v034_r200c2_rss.png b/docs/references/qa/img34/v034_r200c2_rss.png similarity index 100% rename from docs/qa/img34/v034_r200c2_rss.png rename to docs/references/qa/img34/v034_r200c2_rss.png diff --git a/docs/qa/img34/v034_r200c2_rss_avg.png b/docs/references/qa/img34/v034_r200c2_rss_avg.png similarity index 100% rename from docs/qa/img34/v034_r200c2_rss_avg.png rename to docs/references/qa/img34/v034_r200c2_rss_avg.png diff --git a/docs/qa/img34/v034_r200c2_total-txs.png b/docs/references/qa/img34/v034_r200c2_total-txs.png similarity index 100% rename from docs/qa/img34/v034_r200c2_total-txs.png rename to docs/references/qa/img34/v034_r200c2_total-txs.png diff --git a/docs/qa/img34/v034_report_tabbed.txt b/docs/references/qa/img34/v034_report_tabbed.txt similarity index 100% rename from docs/qa/img34/v034_report_tabbed.txt rename to docs/references/qa/img34/v034_report_tabbed.txt diff --git a/docs/qa/img34/v034_rotating_heights.png b/docs/references/qa/img34/v034_rotating_heights.png similarity index 100% rename from docs/qa/img34/v034_rotating_heights.png rename to docs/references/qa/img34/v034_rotating_heights.png diff --git a/docs/qa/img34/v034_rotating_heights_ephe.png b/docs/references/qa/img34/v034_rotating_heights_ephe.png similarity index 100% rename from docs/qa/img34/v034_rotating_heights_ephe.png rename to docs/references/qa/img34/v034_rotating_heights_ephe.png diff --git a/docs/qa/img34/v034_rotating_latencies.png b/docs/references/qa/img34/v034_rotating_latencies.png similarity index 100% rename from docs/qa/img34/v034_rotating_latencies.png rename to docs/references/qa/img34/v034_rotating_latencies.png diff --git a/docs/qa/img34/v034_rotating_latencies_uniq.png b/docs/references/qa/img34/v034_rotating_latencies_uniq.png similarity index 100% rename from docs/qa/img34/v034_rotating_latencies_uniq.png rename to docs/references/qa/img34/v034_rotating_latencies_uniq.png diff --git a/docs/qa/img34/v034_rotating_load1.png b/docs/references/qa/img34/v034_rotating_load1.png similarity index 100% rename from docs/qa/img34/v034_rotating_load1.png rename to docs/references/qa/img34/v034_rotating_load1.png diff --git a/docs/qa/img34/v034_rotating_peers.png 
b/docs/references/qa/img34/v034_rotating_peers.png similarity index 100% rename from docs/qa/img34/v034_rotating_peers.png rename to docs/references/qa/img34/v034_rotating_peers.png diff --git a/docs/qa/img34/v034_rotating_rss_avg.png b/docs/references/qa/img34/v034_rotating_rss_avg.png similarity index 100% rename from docs/qa/img34/v034_rotating_rss_avg.png rename to docs/references/qa/img34/v034_rotating_rss_avg.png diff --git a/docs/qa/img34/v034_rotating_total-txs.png b/docs/references/qa/img34/v034_rotating_total-txs.png similarity index 100% rename from docs/qa/img34/v034_rotating_total-txs.png rename to docs/references/qa/img34/v034_rotating_total-txs.png diff --git a/docs/qa/img37/200nodes_cmt037/all_experiments.png b/docs/references/qa/img37/200nodes_cmt037/all_experiments.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/all_experiments.png rename to docs/references/qa/img37/200nodes_cmt037/all_experiments.png diff --git a/docs/qa/img37/200nodes_cmt037/avg_mempool_size.png b/docs/references/qa/img37/200nodes_cmt037/avg_mempool_size.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/avg_mempool_size.png rename to docs/references/qa/img37/200nodes_cmt037/avg_mempool_size.png diff --git a/docs/qa/img37/200nodes_cmt037/block_rate.png b/docs/references/qa/img37/200nodes_cmt037/block_rate.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/block_rate.png rename to docs/references/qa/img37/200nodes_cmt037/block_rate.png diff --git a/docs/qa/img37/200nodes_cmt037/cpu.png b/docs/references/qa/img37/200nodes_cmt037/cpu.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/cpu.png rename to docs/references/qa/img37/200nodes_cmt037/cpu.png diff --git a/docs/qa/img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png b/docs/references/qa/img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png rename to docs/references/qa/img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png diff --git a/docs/qa/img37/200nodes_cmt037/memory.png b/docs/references/qa/img37/200nodes_cmt037/memory.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/memory.png rename to docs/references/qa/img37/200nodes_cmt037/memory.png diff --git a/docs/qa/img37/200nodes_cmt037/mempool_size.png b/docs/references/qa/img37/200nodes_cmt037/mempool_size.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/mempool_size.png rename to docs/references/qa/img37/200nodes_cmt037/mempool_size.png diff --git a/docs/qa/img37/200nodes_cmt037/peers.png b/docs/references/qa/img37/200nodes_cmt037/peers.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/peers.png rename to docs/references/qa/img37/200nodes_cmt037/peers.png diff --git a/docs/qa/img37/200nodes_cmt037/rounds.png b/docs/references/qa/img37/200nodes_cmt037/rounds.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/rounds.png rename to docs/references/qa/img37/200nodes_cmt037/rounds.png diff --git a/docs/qa/img37/200nodes_cmt037/total_txs_rate.png b/docs/references/qa/img37/200nodes_cmt037/total_txs_rate.png similarity index 100% rename from docs/qa/img37/200nodes_cmt037/total_txs_rate.png rename to docs/references/qa/img37/200nodes_cmt037/total_txs_rate.png diff --git a/docs/qa/img37/200nodes_tm037/avg_mempool_size.png b/docs/references/qa/img37/200nodes_tm037/avg_mempool_size.png similarity index 100% rename from 
docs/qa/img37/200nodes_tm037/avg_mempool_size.png rename to docs/references/qa/img37/200nodes_tm037/avg_mempool_size.png diff --git a/docs/qa/img37/200nodes_tm037/block_rate_regular.png b/docs/references/qa/img37/200nodes_tm037/block_rate_regular.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/block_rate_regular.png rename to docs/references/qa/img37/200nodes_tm037/block_rate_regular.png diff --git a/docs/qa/img37/200nodes_tm037/cpu.png b/docs/references/qa/img37/200nodes_tm037/cpu.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/cpu.png rename to docs/references/qa/img37/200nodes_tm037/cpu.png diff --git a/docs/qa/img37/200nodes_tm037/memory.png b/docs/references/qa/img37/200nodes_tm037/memory.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/memory.png rename to docs/references/qa/img37/200nodes_tm037/memory.png diff --git a/docs/qa/img37/200nodes_tm037/mempool_size.png b/docs/references/qa/img37/200nodes_tm037/mempool_size.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/mempool_size.png rename to docs/references/qa/img37/200nodes_tm037/mempool_size.png diff --git a/docs/qa/img37/200nodes_tm037/peers.png b/docs/references/qa/img37/200nodes_tm037/peers.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/peers.png rename to docs/references/qa/img37/200nodes_tm037/peers.png diff --git a/docs/qa/img37/200nodes_tm037/rounds.png b/docs/references/qa/img37/200nodes_tm037/rounds.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/rounds.png rename to docs/references/qa/img37/200nodes_tm037/rounds.png diff --git a/docs/qa/img37/200nodes_tm037/total_txs_rate_regular.png b/docs/references/qa/img37/200nodes_tm037/total_txs_rate_regular.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/total_txs_rate_regular.png rename to docs/references/qa/img37/200nodes_tm037/total_txs_rate_regular.png diff --git a/docs/qa/img37/200nodes_tm037/v037_200node_latencies.png b/docs/references/qa/img37/200nodes_tm037/v037_200node_latencies.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_200node_latencies.png rename to docs/references/qa/img37/200nodes_tm037/v037_200node_latencies.png diff --git a/docs/qa/img37/200nodes_tm037/v037_latency_throughput.png b/docs/references/qa/img37/200nodes_tm037/v037_latency_throughput.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_latency_throughput.png rename to docs/references/qa/img37/200nodes_tm037/v037_latency_throughput.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_heights.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_heights.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_heights.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_heights.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_load1.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_load1.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_load1.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_load1.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_mempool_size.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_mempool_size.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png 
b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_peers.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_peers.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_peers.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_peers.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_rounds.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_rounds.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_rounds.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_rounds.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_rss.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_rss.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_rss.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_rss.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_rss_avg.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_rss_avg.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_rss_avg.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_rss_avg.png diff --git a/docs/qa/img37/200nodes_tm037/v037_r200c2_total-txs.png b/docs/references/qa/img37/200nodes_tm037/v037_r200c2_total-txs.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_r200c2_total-txs.png rename to docs/references/qa/img37/200nodes_tm037/v037_r200c2_total-txs.png diff --git a/docs/qa/img37/200nodes_tm037/v037_report_tabbed.txt b/docs/references/qa/img37/200nodes_tm037/v037_report_tabbed.txt similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_report_tabbed.txt rename to docs/references/qa/img37/200nodes_tm037/v037_report_tabbed.txt diff --git a/docs/qa/img37/200nodes_tm037/v037_rotating_heights.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_heights.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_heights.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_heights.png diff --git a/docs/qa/img37/200nodes_tm037/v037_rotating_heights_ephe.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_heights_ephe.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_heights_ephe.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_heights_ephe.png diff --git a/docs/qa/img37/200nodes_tm037/v037_rotating_latencies.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_latencies.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_latencies.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_latencies.png diff --git a/docs/qa/img37/200nodes_tm037/v037_rotating_load1.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_load1.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_load1.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_load1.png diff --git a/docs/qa/img37/200nodes_tm037/v037_rotating_peers.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_peers.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_peers.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_peers.png diff --git 
a/docs/qa/img37/200nodes_tm037/v037_rotating_rss_avg.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_rss_avg.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_rss_avg.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_rss_avg.png diff --git a/docs/qa/img37/200nodes_tm037/v037_rotating_total-txs.png b/docs/references/qa/img37/200nodes_tm037/v037_rotating_total-txs.png similarity index 100% rename from docs/qa/img37/200nodes_tm037/v037_rotating_total-txs.png rename to docs/references/qa/img37/200nodes_tm037/v037_rotating_total-txs.png diff --git a/docs/qa/img37/rotating/rotating_avg_memory.png b/docs/references/qa/img37/rotating/rotating_avg_memory.png similarity index 100% rename from docs/qa/img37/rotating/rotating_avg_memory.png rename to docs/references/qa/img37/rotating/rotating_avg_memory.png diff --git a/docs/qa/img37/rotating/rotating_block_rate.png b/docs/references/qa/img37/rotating/rotating_block_rate.png similarity index 100% rename from docs/qa/img37/rotating/rotating_block_rate.png rename to docs/references/qa/img37/rotating/rotating_block_rate.png diff --git a/docs/qa/img37/rotating/rotating_cpu.png b/docs/references/qa/img37/rotating/rotating_cpu.png similarity index 100% rename from docs/qa/img37/rotating/rotating_cpu.png rename to docs/references/qa/img37/rotating/rotating_cpu.png diff --git a/docs/qa/img37/rotating/rotating_eph_heights.png b/docs/references/qa/img37/rotating/rotating_eph_heights.png similarity index 100% rename from docs/qa/img37/rotating/rotating_eph_heights.png rename to docs/references/qa/img37/rotating/rotating_eph_heights.png diff --git a/docs/qa/img37/rotating/rotating_peers.png b/docs/references/qa/img37/rotating/rotating_peers.png similarity index 100% rename from docs/qa/img37/rotating/rotating_peers.png rename to docs/references/qa/img37/rotating/rotating_peers.png diff --git a/docs/qa/img37/rotating/rotating_txs_rate.png b/docs/references/qa/img37/rotating/rotating_txs_rate.png similarity index 100% rename from docs/qa/img37/rotating/rotating_txs_rate.png rename to docs/references/qa/img37/rotating/rotating_txs_rate.png diff --git a/docs/qa/img38/200nodes/avg_mempool_size.png b/docs/references/qa/img38/200nodes/avg_mempool_size.png similarity index 100% rename from docs/qa/img38/200nodes/avg_mempool_size.png rename to docs/references/qa/img38/200nodes/avg_mempool_size.png diff --git a/docs/references/qa/img38/200nodes/avg_mempool_size_ylim.png b/docs/references/qa/img38/200nodes/avg_mempool_size_ylim.png new file mode 100644 index 00000000000..e1c3108b72e Binary files /dev/null and b/docs/references/qa/img38/200nodes/avg_mempool_size_ylim.png differ diff --git a/docs/qa/img38/200nodes/block_rate.png b/docs/references/qa/img38/200nodes/block_rate.png similarity index 100% rename from docs/qa/img38/200nodes/block_rate.png rename to docs/references/qa/img38/200nodes/block_rate.png diff --git a/docs/references/qa/img38/200nodes/block_size_bytes.png b/docs/references/qa/img38/200nodes/block_size_bytes.png new file mode 100644 index 00000000000..32bb4bffdd7 Binary files /dev/null and b/docs/references/qa/img38/200nodes/block_size_bytes.png differ diff --git a/docs/qa/img38/200nodes/c1r400.png b/docs/references/qa/img38/200nodes/c1r400.png similarity index 100% rename from docs/qa/img38/200nodes/c1r400.png rename to docs/references/qa/img38/200nodes/c1r400.png diff --git a/docs/qa/img38/200nodes/cpu.png b/docs/references/qa/img38/200nodes/cpu.png similarity index 100% rename from 
docs/qa/img38/200nodes/cpu.png rename to docs/references/qa/img38/200nodes/cpu.png diff --git a/docs/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png b/docs/references/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png similarity index 100% rename from docs/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png rename to docs/references/qa/img38/200nodes/e_de676ecf-038e-443f-a26a-27915f29e312.png diff --git a/docs/qa/img38/200nodes/memory.png b/docs/references/qa/img38/200nodes/memory.png similarity index 100% rename from docs/qa/img38/200nodes/memory.png rename to docs/references/qa/img38/200nodes/memory.png diff --git a/docs/references/qa/img38/200nodes/memory_ylim.png b/docs/references/qa/img38/200nodes/memory_ylim.png new file mode 100644 index 00000000000..a9e25eb2007 Binary files /dev/null and b/docs/references/qa/img38/200nodes/memory_ylim.png differ diff --git a/docs/qa/img38/200nodes/mempool_size.png b/docs/references/qa/img38/200nodes/mempool_size.png similarity index 100% rename from docs/qa/img38/200nodes/mempool_size.png rename to docs/references/qa/img38/200nodes/mempool_size.png diff --git a/docs/references/qa/img38/200nodes/mempool_size_max.png b/docs/references/qa/img38/200nodes/mempool_size_max.png new file mode 100644 index 00000000000..e28d6724979 Binary files /dev/null and b/docs/references/qa/img38/200nodes/mempool_size_max.png differ diff --git a/docs/qa/img38/200nodes/peers.png b/docs/references/qa/img38/200nodes/peers.png similarity index 100% rename from docs/qa/img38/200nodes/peers.png rename to docs/references/qa/img38/200nodes/peers.png diff --git a/docs/qa/img38/200nodes/rounds.png b/docs/references/qa/img38/200nodes/rounds.png similarity index 100% rename from docs/qa/img38/200nodes/rounds.png rename to docs/references/qa/img38/200nodes/rounds.png diff --git a/docs/references/qa/img38/200nodes/rounds_ylim.png b/docs/references/qa/img38/200nodes/rounds_ylim.png new file mode 100644 index 00000000000..1c769d9c7a0 Binary files /dev/null and b/docs/references/qa/img38/200nodes/rounds_ylim.png differ diff --git a/docs/qa/img38/200nodes/total_txs_rate.png b/docs/references/qa/img38/200nodes/total_txs_rate.png similarity index 100% rename from docs/qa/img38/200nodes/total_txs_rate.png rename to docs/references/qa/img38/200nodes/total_txs_rate.png diff --git a/docs/references/qa/img38/200nodes/total_txs_rate_ylim.png b/docs/references/qa/img38/200nodes/total_txs_rate_ylim.png new file mode 100644 index 00000000000..83900aef29f Binary files /dev/null and b/docs/references/qa/img38/200nodes/total_txs_rate_ylim.png differ diff --git a/docs/qa/img38/200nodes/v038_report_tabbed.txt b/docs/references/qa/img38/200nodes/v038_report_tabbed.txt similarity index 100% rename from docs/qa/img38/200nodes/v038_report_tabbed.txt rename to docs/references/qa/img38/200nodes/v038_report_tabbed.txt diff --git a/docs/qa/img38/rotating/rotating_avg_memory.png b/docs/references/qa/img38/rotating/rotating_avg_memory.png similarity index 100% rename from docs/qa/img38/rotating/rotating_avg_memory.png rename to docs/references/qa/img38/rotating/rotating_avg_memory.png diff --git a/docs/qa/img38/rotating/rotating_block_rate.png b/docs/references/qa/img38/rotating/rotating_block_rate.png similarity index 100% rename from docs/qa/img38/rotating/rotating_block_rate.png rename to docs/references/qa/img38/rotating/rotating_block_rate.png diff --git a/docs/qa/img38/rotating/rotating_cpu.png b/docs/references/qa/img38/rotating/rotating_cpu.png similarity index 100% rename 
from docs/qa/img38/rotating/rotating_cpu.png rename to docs/references/qa/img38/rotating/rotating_cpu.png diff --git a/docs/qa/img38/rotating/rotating_eph_heights.png b/docs/references/qa/img38/rotating/rotating_eph_heights.png similarity index 100% rename from docs/qa/img38/rotating/rotating_eph_heights.png rename to docs/references/qa/img38/rotating/rotating_eph_heights.png diff --git a/docs/qa/img38/rotating/rotating_latencies.png b/docs/references/qa/img38/rotating/rotating_latencies.png similarity index 100% rename from docs/qa/img38/rotating/rotating_latencies.png rename to docs/references/qa/img38/rotating/rotating_latencies.png diff --git a/docs/qa/img38/rotating/rotating_peers.png b/docs/references/qa/img38/rotating/rotating_peers.png similarity index 100% rename from docs/qa/img38/rotating/rotating_peers.png rename to docs/references/qa/img38/rotating/rotating_peers.png diff --git a/docs/qa/img38/rotating/rotating_txs_rate.png b/docs/references/qa/img38/rotating/rotating_txs_rate.png similarity index 100% rename from docs/qa/img38/rotating/rotating_txs_rate.png rename to docs/references/qa/img38/rotating/rotating_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/02k_1_block_rate.png b/docs/references/qa/img38/voteExtensions/02k_1_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_1_block_rate.png rename to docs/references/qa/img38/voteExtensions/02k_1_block_rate.png diff --git a/docs/qa/img38/voteExtensions/02k_1_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/02k_1_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_1_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/02k_1_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/02k_avg_cpu.png b/docs/references/qa/img38/voteExtensions/02k_avg_cpu.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_avg_cpu.png rename to docs/references/qa/img38/voteExtensions/02k_avg_cpu.png diff --git a/docs/qa/img38/voteExtensions/02k_avg_memory.png b/docs/references/qa/img38/voteExtensions/02k_avg_memory.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_avg_memory.png rename to docs/references/qa/img38/voteExtensions/02k_avg_memory.png diff --git a/docs/qa/img38/voteExtensions/02k_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/02k_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/02k_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/02k_block_rate.png b/docs/references/qa/img38/voteExtensions/02k_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_block_rate.png rename to docs/references/qa/img38/voteExtensions/02k_block_rate.png diff --git a/docs/qa/img38/voteExtensions/02k_rounds.png b/docs/references/qa/img38/voteExtensions/02k_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_rounds.png rename to docs/references/qa/img38/voteExtensions/02k_rounds.png diff --git a/docs/qa/img38/voteExtensions/02k_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/02k_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/02k_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/02k_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/04k_1_block_rate.png b/docs/references/qa/img38/voteExtensions/04k_1_block_rate.png similarity index 100% rename from 
docs/qa/img38/voteExtensions/04k_1_block_rate.png rename to docs/references/qa/img38/voteExtensions/04k_1_block_rate.png diff --git a/docs/qa/img38/voteExtensions/04k_1_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/04k_1_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_1_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/04k_1_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/04k_avg_cpu.png b/docs/references/qa/img38/voteExtensions/04k_avg_cpu.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_avg_cpu.png rename to docs/references/qa/img38/voteExtensions/04k_avg_cpu.png diff --git a/docs/qa/img38/voteExtensions/04k_avg_memory.png b/docs/references/qa/img38/voteExtensions/04k_avg_memory.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_avg_memory.png rename to docs/references/qa/img38/voteExtensions/04k_avg_memory.png diff --git a/docs/qa/img38/voteExtensions/04k_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/04k_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/04k_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/04k_block_rate.png b/docs/references/qa/img38/voteExtensions/04k_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_block_rate.png rename to docs/references/qa/img38/voteExtensions/04k_block_rate.png diff --git a/docs/qa/img38/voteExtensions/04k_rounds.png b/docs/references/qa/img38/voteExtensions/04k_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_rounds.png rename to docs/references/qa/img38/voteExtensions/04k_rounds.png diff --git a/docs/qa/img38/voteExtensions/04k_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/04k_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/04k_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/04k_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/08k_1_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/08k_1_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_1_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/08k_1_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/08k_1_block_rate.png b/docs/references/qa/img38/voteExtensions/08k_1_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_1_block_rate.png rename to docs/references/qa/img38/voteExtensions/08k_1_block_rate.png diff --git a/docs/qa/img38/voteExtensions/08k_1_rounds.png b/docs/references/qa/img38/voteExtensions/08k_1_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_1_rounds.png rename to docs/references/qa/img38/voteExtensions/08k_1_rounds.png diff --git a/docs/qa/img38/voteExtensions/08k_1_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/08k_1_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_1_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/08k_1_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/08k_avg_cpu.png b/docs/references/qa/img38/voteExtensions/08k_avg_cpu.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_avg_cpu.png rename to docs/references/qa/img38/voteExtensions/08k_avg_cpu.png diff --git a/docs/qa/img38/voteExtensions/08k_avg_memory.png 
b/docs/references/qa/img38/voteExtensions/08k_avg_memory.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_avg_memory.png rename to docs/references/qa/img38/voteExtensions/08k_avg_memory.png diff --git a/docs/qa/img38/voteExtensions/08k_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/08k_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/08k_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/08k_rounds.png b/docs/references/qa/img38/voteExtensions/08k_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_rounds.png rename to docs/references/qa/img38/voteExtensions/08k_rounds.png diff --git a/docs/qa/img38/voteExtensions/08k_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/08k_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/08k_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/08k_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/16k_1_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/16k_1_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_1_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/16k_1_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/16k_1_block_rate.png b/docs/references/qa/img38/voteExtensions/16k_1_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_1_block_rate.png rename to docs/references/qa/img38/voteExtensions/16k_1_block_rate.png diff --git a/docs/qa/img38/voteExtensions/16k_1_rounds.png b/docs/references/qa/img38/voteExtensions/16k_1_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_1_rounds.png rename to docs/references/qa/img38/voteExtensions/16k_1_rounds.png diff --git a/docs/qa/img38/voteExtensions/16k_1_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/16k_1_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_1_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/16k_1_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/16k_avg_cpu.png b/docs/references/qa/img38/voteExtensions/16k_avg_cpu.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_avg_cpu.png rename to docs/references/qa/img38/voteExtensions/16k_avg_cpu.png diff --git a/docs/qa/img38/voteExtensions/16k_avg_memory.png b/docs/references/qa/img38/voteExtensions/16k_avg_memory.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_avg_memory.png rename to docs/references/qa/img38/voteExtensions/16k_avg_memory.png diff --git a/docs/qa/img38/voteExtensions/16k_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/16k_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/16k_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/16k_block_rate.png b/docs/references/qa/img38/voteExtensions/16k_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_block_rate.png rename to docs/references/qa/img38/voteExtensions/16k_block_rate.png diff --git a/docs/qa/img38/voteExtensions/16k_rounds.png b/docs/references/qa/img38/voteExtensions/16k_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_rounds.png rename to 
docs/references/qa/img38/voteExtensions/16k_rounds.png diff --git a/docs/qa/img38/voteExtensions/16k_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/16k_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/16k_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/16k_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/32k_1_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/32k_1_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_1_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/32k_1_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/32k_1_block_rate.png b/docs/references/qa/img38/voteExtensions/32k_1_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_1_block_rate.png rename to docs/references/qa/img38/voteExtensions/32k_1_block_rate.png diff --git a/docs/qa/img38/voteExtensions/32k_1_rounds.png b/docs/references/qa/img38/voteExtensions/32k_1_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_1_rounds.png rename to docs/references/qa/img38/voteExtensions/32k_1_rounds.png diff --git a/docs/qa/img38/voteExtensions/32k_1_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/32k_1_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_1_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/32k_1_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/32k_avg_cpu.png b/docs/references/qa/img38/voteExtensions/32k_avg_cpu.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_avg_cpu.png rename to docs/references/qa/img38/voteExtensions/32k_avg_cpu.png diff --git a/docs/qa/img38/voteExtensions/32k_avg_memory.png b/docs/references/qa/img38/voteExtensions/32k_avg_memory.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_avg_memory.png rename to docs/references/qa/img38/voteExtensions/32k_avg_memory.png diff --git a/docs/qa/img38/voteExtensions/32k_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/32k_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/32k_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/32k_block_rate.png b/docs/references/qa/img38/voteExtensions/32k_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_block_rate.png rename to docs/references/qa/img38/voteExtensions/32k_block_rate.png diff --git a/docs/qa/img38/voteExtensions/32k_rounds.png b/docs/references/qa/img38/voteExtensions/32k_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_rounds.png rename to docs/references/qa/img38/voteExtensions/32k_rounds.png diff --git a/docs/qa/img38/voteExtensions/32k_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/32k_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/32k_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/32k_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/8k_block_rate.png b/docs/references/qa/img38/voteExtensions/8k_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/8k_block_rate.png rename to docs/references/qa/img38/voteExtensions/8k_block_rate.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_16k.png b/docs/references/qa/img38/voteExtensions/all_c1r400_16k.png similarity index 100% 
rename from docs/qa/img38/voteExtensions/all_c1r400_16k.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_16k.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_2k.png b/docs/references/qa/img38/voteExtensions/all_c1r400_2k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_c1r400_2k.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_2k.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_32k.png b/docs/references/qa/img38/voteExtensions/all_c1r400_32k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_c1r400_32k.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_32k.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_4k.png b/docs/references/qa/img38/voteExtensions/all_c1r400_4k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_c1r400_4k.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_4k.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_64k.png b/docs/references/qa/img38/voteExtensions/all_c1r400_64k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_c1r400_64k.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_64k.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_8k.png b/docs/references/qa/img38/voteExtensions/all_c1r400_8k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_c1r400_8k.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_8k.png diff --git a/docs/qa/img38/voteExtensions/all_c1r400_baseline.png b/docs/references/qa/img38/voteExtensions/all_c1r400_baseline.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_c1r400_baseline.png rename to docs/references/qa/img38/voteExtensions/all_c1r400_baseline.png diff --git a/docs/qa/img38/voteExtensions/all_experiments_16k.png b/docs/references/qa/img38/voteExtensions/all_experiments_16k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_16k.png rename to docs/references/qa/img38/voteExtensions/all_experiments_16k.png diff --git a/docs/qa/img38/voteExtensions/all_experiments_2k.png b/docs/references/qa/img38/voteExtensions/all_experiments_2k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_2k.png rename to docs/references/qa/img38/voteExtensions/all_experiments_2k.png diff --git a/docs/qa/img38/voteExtensions/all_experiments_32k.png b/docs/references/qa/img38/voteExtensions/all_experiments_32k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_32k.png rename to docs/references/qa/img38/voteExtensions/all_experiments_32k.png diff --git a/docs/qa/img38/voteExtensions/all_experiments_4k.png b/docs/references/qa/img38/voteExtensions/all_experiments_4k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_4k.png rename to docs/references/qa/img38/voteExtensions/all_experiments_4k.png diff --git a/docs/qa/img38/voteExtensions/all_experiments_64k.png b/docs/references/qa/img38/voteExtensions/all_experiments_64k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_64k.png rename to docs/references/qa/img38/voteExtensions/all_experiments_64k.png diff --git a/docs/qa/img38/voteExtensions/all_experiments_8k.png b/docs/references/qa/img38/voteExtensions/all_experiments_8k.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_8k.png rename to docs/references/qa/img38/voteExtensions/all_experiments_8k.png diff --git 
a/docs/qa/img38/voteExtensions/all_experiments_baseline.png b/docs/references/qa/img38/voteExtensions/all_experiments_baseline.png similarity index 100% rename from docs/qa/img38/voteExtensions/all_experiments_baseline.png rename to docs/references/qa/img38/voteExtensions/all_experiments_baseline.png diff --git a/docs/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/baseline_1_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/baseline_1_block_rate.png b/docs/references/qa/img38/voteExtensions/baseline_1_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_1_block_rate.png rename to docs/references/qa/img38/voteExtensions/baseline_1_block_rate.png diff --git a/docs/qa/img38/voteExtensions/baseline_1_rounds.png b/docs/references/qa/img38/voteExtensions/baseline_1_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_1_rounds.png rename to docs/references/qa/img38/voteExtensions/baseline_1_rounds.png diff --git a/docs/qa/img38/voteExtensions/baseline_1_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/baseline_1_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_1_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/baseline_1_total_txs_rate.png diff --git a/docs/qa/img38/voteExtensions/baseline_avg_cpu.png b/docs/references/qa/img38/voteExtensions/baseline_avg_cpu.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_avg_cpu.png rename to docs/references/qa/img38/voteExtensions/baseline_avg_cpu.png diff --git a/docs/qa/img38/voteExtensions/baseline_avg_memory.png b/docs/references/qa/img38/voteExtensions/baseline_avg_memory.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_avg_memory.png rename to docs/references/qa/img38/voteExtensions/baseline_avg_memory.png diff --git a/docs/qa/img38/voteExtensions/baseline_avg_mempool_size.png b/docs/references/qa/img38/voteExtensions/baseline_avg_mempool_size.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_avg_mempool_size.png rename to docs/references/qa/img38/voteExtensions/baseline_avg_mempool_size.png diff --git a/docs/qa/img38/voteExtensions/baseline_block_rate.png b/docs/references/qa/img38/voteExtensions/baseline_block_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_block_rate.png rename to docs/references/qa/img38/voteExtensions/baseline_block_rate.png diff --git a/docs/qa/img38/voteExtensions/baseline_rounds.png b/docs/references/qa/img38/voteExtensions/baseline_rounds.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_rounds.png rename to docs/references/qa/img38/voteExtensions/baseline_rounds.png diff --git a/docs/qa/img38/voteExtensions/baseline_total_txs_rate.png b/docs/references/qa/img38/voteExtensions/baseline_total_txs_rate.png similarity index 100% rename from docs/qa/img38/voteExtensions/baseline_total_txs_rate.png rename to docs/references/qa/img38/voteExtensions/baseline_total_txs_rate.png diff --git a/docs/references/qa/imgs/v1/200nodes/latencies/e_8e4e1e81-c171-4879-b86f-bce96ee2e861.png b/docs/references/qa/imgs/v1/200nodes/latencies/e_8e4e1e81-c171-4879-b86f-bce96ee2e861.png new file mode 100644 index 00000000000..8b636364df3 Binary 
files /dev/null and b/docs/references/qa/imgs/v1/200nodes/latencies/e_8e4e1e81-c171-4879-b86f-bce96ee2e861.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/avg_cpu.png b/docs/references/qa/imgs/v1/200nodes/metrics/avg_cpu.png new file mode 100644 index 00000000000..9606bfecdd1 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/avg_cpu.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/avg_memory.png b/docs/references/qa/imgs/v1/200nodes/metrics/avg_memory.png new file mode 100644 index 00000000000..ac50a161519 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/avg_memory.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/avg_mempool_size.png b/docs/references/qa/imgs/v1/200nodes/metrics/avg_mempool_size.png new file mode 100644 index 00000000000..ab22f0c8ded Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/avg_mempool_size.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/block_rate.png b/docs/references/qa/imgs/v1/200nodes/metrics/block_rate.png new file mode 100644 index 00000000000..ab8d4226604 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/block_rate.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/block_rate_regular.png b/docs/references/qa/imgs/v1/200nodes/metrics/block_rate_regular.png new file mode 100644 index 00000000000..34298d9c4f0 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/block_rate_regular.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/block_size_bytes.png b/docs/references/qa/imgs/v1/200nodes/metrics/block_size_bytes.png new file mode 100644 index 00000000000..64ca4cd06f1 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/block_size_bytes.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/blocks.png b/docs/references/qa/imgs/v1/200nodes/metrics/blocks.png new file mode 100644 index 00000000000..b57e006b3f6 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/blocks.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/cpu.png b/docs/references/qa/imgs/v1/200nodes/metrics/cpu.png new file mode 100644 index 00000000000..4dff228ade7 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/cpu.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/memory.png b/docs/references/qa/imgs/v1/200nodes/metrics/memory.png new file mode 100644 index 00000000000..c1598ed1e75 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/memory.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/mempool_size.png b/docs/references/qa/imgs/v1/200nodes/metrics/mempool_size.png new file mode 100644 index 00000000000..4f1fd20dbaf Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/mempool_size.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/mempool_size_max.png b/docs/references/qa/imgs/v1/200nodes/metrics/mempool_size_max.png new file mode 100644 index 00000000000..5f81a480f70 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/mempool_size_max.png differ diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/peers.png b/docs/references/qa/imgs/v1/200nodes/metrics/peers.png new file mode 100644 index 00000000000..d83174e2b34 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/peers.png differ diff --git 
a/docs/references/qa/imgs/v1/200nodes/metrics/rounds.png b/docs/references/qa/imgs/v1/200nodes/metrics/rounds.png
new file mode 100644
index 00000000000..eef946e4545
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/rounds.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/total_txs.png b/docs/references/qa/imgs/v1/200nodes/metrics/total_txs.png
new file mode 100644
index 00000000000..7996975997d
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/total_txs.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/total_txs_rate.png b/docs/references/qa/imgs/v1/200nodes/metrics/total_txs_rate.png
new file mode 100644
index 00000000000..a1efeea2b85
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/total_txs_rate.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes/metrics/total_txs_rate_regular.png b/docs/references/qa/imgs/v1/200nodes/metrics/total_txs_rate_regular.png
new file mode 100644
index 00000000000..3890ebf3ab7
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes/metrics/total_txs_rate_regular.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes/v1_report_tabbed.txt b/docs/references/qa/imgs/v1/200nodes/v1_report_tabbed.txt
new file mode 100644
index 00000000000..be4b2ee85d3
--- /dev/null
+++ b/docs/references/qa/imgs/v1/200nodes/v1_report_tabbed.txt
@@ -0,0 +1,40 @@
+Experiment ID: 73d1ecf6-2dbe-4288-bd84-72e286d939be    Experiment ID: b3447e8e-f85e-48be-bb05-bf1b35b9e8b6    Experiment ID: 7b76c980-4882-4be7-898f-7ba7c4c950a2
+    Connections: 1    Connections: 2    Connections: 4
+    Rate: 200    Rate: 200    Rate: 200
+    Size: 1024    Size: 1024    Size: 1024
+    Total Valid Tx: 17800    Total Valid Tx: 34600    Total Valid Tx: 50464
+    Total Negative Latencies: 0    Total Negative Latencies: 0    Total Negative Latencies: 0
+    Minimum Latency: 1.710409731s    Minimum Latency: 1.934148332s    Minimum Latency: 3.207030208s
+    Maximum Latency: 8.977271598s    Maximum Latency: 19.921012538s    Maximum Latency: 22.695517951s
+    Average Latency: 3.873914787s    Average Latency: 6.759146915s    Average Latency: 9.394390517s
+    Standard Deviation: 1.80382447s    Standard Deviation: 4.158131769s    Standard Deviation: 4.907778924s
+Experiment ID: 240abfc5-2e9f-4096-8d25-87b8890b419f    Experiment ID: 99e88ddf-f0bd-4d44-98ac-cee52e2a74a6    Experiment ID: 88664d81-9d37-4820-a60e-8c0d2a9b2d63
+    Connections: 1    Connections: 2    Connections: 4
+    Rate: 400    Rate: 400    Rate: 400
+    Size: 1024    Size: 1024    Size: 1024
+    Total Valid Tx: 31200    Total Valid Tx: 54706    Total Valid Tx: 49463
+    Total Negative Latencies: 0    Total Negative Latencies: 0    Total Negative Latencies: 0
+    Minimum Latency: 2.050980308s    Minimum Latency: 2.74995384s    Minimum Latency: 4.469911766s
+    Maximum Latency: 26.195522089s    Maximum Latency: 23.182542187s    Maximum Latency: 16.480603178s
+    Average Latency: 9.280762294s    Average Latency: 9.360818846s    Average Latency: 9.976733037s
+    Standard Deviation: 7.166791513s    Standard Deviation: 4.378492426s    Standard Deviation: 3.223167612s
+Experiment ID: 3efdd2a0-8bef-43d0-a0f0-6dce2b63825f    Experiment ID: 3a49bc80-f63f-4d97-9663-3a02838fe1e8    Experiment ID: 1c2492ff-b82b-48a2-a975-bb252365521a
+    Connections: 1    Connections: 2    Connections: 4
+    Rate: 800    Rate: 800    Rate: 800
+    Size: 1024    Size: 1024    Size: 1024
+    Total Valid Tx: 51146    Total Valid Tx: 51917    Total Valid Tx: 41376
+    Total Negative Latencies: 0    Total Negative Latencies: 0    Total Negative Latencies: 0
+    Minimum Latency: 3.026020902s    Minimum Latency: 4.617306731s    Minimum Latency: 4.400844549s
+    Maximum Latency: 21.969169815s    Maximum Latency: 29.138788503s    Maximum Latency: 39.972301945s
+    Average Latency: 9.039266773s    Average Latency: 11.676386139s    Average Latency: 16.583749953s
+    Standard Deviation: 4.734959842s    Standard Deviation: 6.190789791s    Standard Deviation: 8.729665317s
+Experiment ID: 685cca77-ce3b-483f-9af8-6eafbdb03f7f    Experiment ID: ba5cd52a-e3e6-4e35-86e3-5b391395650b    Experiment ID: d27c4154-7c64-4379-91f9-0dc9b5f4c1d0
+    Connections: 1    Connections: 2    Connections: 4
+    Rate: 1600    Rate: 1600    Rate: 1600
+    Size: 1024    Size: 1024    Size: 1024
+    Total Valid Tx: 50889    Total Valid Tx: 47732    Total Valid Tx: 45530
+    Total Negative Latencies: 0    Total Negative Latencies: 0    Total Negative Latencies: 0
+    Minimum Latency: 4.411916099s    Minimum Latency: 4.734922576s    Minimum Latency: 5.500813279s
+    Maximum Latency: 24.512517023s    Maximum Latency: 21.733104885s    Maximum Latency: 30.120411703s
+    Average Latency: 11.022462182s    Average Latency: 12.420967288s    Average Latency: 13.648996358s
+    Standard Deviation: 4.882919113s    Standard Deviation: 4.803231316s    Standard Deviation: 6.370448765s
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/latencies/e_8190e83a-9135-444b-92fb-4efaeaaf2b52.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/latencies/e_8190e83a-9135-444b-92fb-4efaeaaf2b52.png
new file mode 100644
index 00000000000..3845545b6a6
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/latencies/e_8190e83a-9135-444b-92fb-4efaeaaf2b52.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_cpu.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_cpu.png
new file mode 100644
index 00000000000..0de9421dfb0
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_cpu.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_memory.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_memory.png
new file mode 100644
index 00000000000..8fba818cb0b
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_memory.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_mempool_size.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_mempool_size.png
new file mode 100644
index 00000000000..ec2f37acee5
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/avg_mempool_size.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_rate.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_rate.png
new file mode 100644
index 00000000000..3cad284563f
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_rate.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_rate_regular.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_rate_regular.png
new file mode 100644
index 00000000000..d051e062460
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_rate_regular.png differ
diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_size_bytes.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_size_bytes.png
new file mode 100644
index 00000000000..14b2cb8a970
Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/block_size_bytes.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/blocks.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/blocks.png new file mode 100644 index 00000000000..6fcacbb1dc0 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/blocks.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/cpu.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/cpu.png new file mode 100644 index 00000000000..abce498e933 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/cpu.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/memory.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/memory.png new file mode 100644 index 00000000000..2e7bc154f66 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/memory.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size.png new file mode 100644 index 00000000000..f00dd24b882 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size_max.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size_max.png new file mode 100644 index 00000000000..fcf3526d12b Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/mempool_size_max.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/peers.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/peers.png new file mode 100644 index 00000000000..44045259063 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/peers.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/rounds.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/rounds.png new file mode 100644 index 00000000000..e80bfc91473 Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/rounds.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs.png new file mode 100644 index 00000000000..03ff5a3aa1f Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate.png new file mode 100644 index 00000000000..308fd362e9a Binary files /dev/null and b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate.png differ diff --git a/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate_regular.png b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate_regular.png new file mode 100644 index 00000000000..b6a4fe443e6 Binary files /dev/null and 
b/docs/references/qa/imgs/v1/200nodes_with_latency_emulation/metrics/total_txs_rate_regular.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/latencies.png b/docs/references/qa/imgs/v1/rotating/latencies.png
new file mode 100644
index 00000000000..581672f7e48
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/latencies.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/metrics/rotating_avg_memory.png b/docs/references/qa/imgs/v1/rotating/metrics/rotating_avg_memory.png
new file mode 100644
index 00000000000..2b2696e093d
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/metrics/rotating_avg_memory.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/metrics/rotating_block_rate.png b/docs/references/qa/imgs/v1/rotating/metrics/rotating_block_rate.png
new file mode 100644
index 00000000000..d1a42aee7af
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/metrics/rotating_block_rate.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/metrics/rotating_cpu.png b/docs/references/qa/imgs/v1/rotating/metrics/rotating_cpu.png
new file mode 100644
index 00000000000..35da6294486
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/metrics/rotating_cpu.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/metrics/rotating_eph_heights.png b/docs/references/qa/imgs/v1/rotating/metrics/rotating_eph_heights.png
new file mode 100644
index 00000000000..09a49c6616b
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/metrics/rotating_eph_heights.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/metrics/rotating_peers.png b/docs/references/qa/imgs/v1/rotating/metrics/rotating_peers.png
new file mode 100644
index 00000000000..119f1c12947
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/metrics/rotating_peers.png differ
diff --git a/docs/references/qa/imgs/v1/rotating/metrics/rotating_txs_rate.png b/docs/references/qa/imgs/v1/rotating/metrics/rotating_txs_rate.png
new file mode 100644
index 00000000000..e165d41c6dc
Binary files /dev/null and b/docs/references/qa/imgs/v1/rotating/metrics/rotating_txs_rate.png differ
diff --git a/docs/references/qa/imgs/v1/saturation/saturation-plotter-LE.py b/docs/references/qa/imgs/v1/saturation/saturation-plotter-LE.py
new file mode 100755
index 00000000000..26f4bc50526
--- /dev/null
+++ b/docs/references/qa/imgs/v1/saturation/saturation-plotter-LE.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+"""Plotter for comparing saturation results on v1 with and without Latency Emulation.
+
+This script generates an image with the number of processed transactions for different load
+configurations (tx rate and number of connections). The purpose is to find the saturation point of
+the network and to compare the results between different CometBFT versions.
+
+Quick setup before running:
+```
+python3 -m venv .venv && source .venv/bin/activate
+pip install matplotlib
+```
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+# Transaction rate (x axis)
+rates = np.arange(100, 1100, 100)
+rates2 = [200, 400, 800, 1600]
+
+# expected values
+expected = [(i+1) * 100 * 89 for i in range(10)]
+
+# v1 without LE
+d1 = [8900,17800,26053,28800,32513,30455,33077,32191,30688,32395] # experiments/2024-03-26-13_47_51N/validator174
+d2 = [8900,17800,26300,25400,31371,31063,31603,32886,24521,25211] # experiments/2024-03-26-11_17_58N/validator174
+d3 = [8900,17800,26700,35600,38500,40502,51962,48328,50713,42361] # experiments/2024-03-26-20_20_33N/validator174
+
+# v1 with LE
+le1 = [8900,17800,26700,35600,34504,42169,38916,38004,34332,36948] # experiments/2024-03-25-17_41_09N/validator174
+le2 = [17800, 33800, 34644, 43464] # experiments/2024-03-25-12_17_12N/validator174
+le3 = [8900,17800,26700,33200,37665,51771,38631,49290,51526,46902] # experiments/2024-03-26-22_21_31N/validator174
+
+fig, ax = plt.subplots(figsize=(10, 5))
+ax.plot(rates, expected, linestyle='dotted', marker=',', color='g', label='expected')
+ax.plot(rates, d1, linestyle='solid', marker='o', color='b', label='without LE #1')
+ax.plot(rates, d2, linestyle='solid', marker='o', color='violet', label='without LE #2')
+ax.plot(rates, d3, linestyle='solid', marker='o', color='grey', label='without LE #3')
+ax.plot(rates, le1, linestyle='dashed', marker='s', color='r', label='with LE #1')
+ax.plot(rates2, le2, linestyle='dashed', marker='s', color='orange', label='with LE #2')
+ax.plot(rates, le3, linestyle='dashed', marker='s', color='brown', label='with LE #3')
+
+plt.title('saturation point for v1.0.0-alpha.2, c=1')
+plt.xlabel("rate (txs/s)")
+plt.ylabel("txs processed in 90 seconds")
+plt.xticks(np.arange(0, 1100, 200).tolist())
+ax.set_xlim([0, 1100])
+ax.set_ylim([0, 60000])
+ax.grid()
+ax.legend()
+
+fig.tight_layout()
+fig.savefig("saturation_v1_LE.png")
+plt.show()
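A minimal way to run the plotter above, following the setup given in its own docstring (a sketch only; the output filename is hard-coded in the script):

```sh
# Setup as per the script's docstring; run from the directory containing the script.
python3 -m venv .venv && source .venv/bin/activate
pip install matplotlib
python3 saturation-plotter-LE.py   # writes saturation_v1_LE.png
```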
diff --git a/docs/references/qa/imgs/v1/saturation/saturation-plotter.py b/docs/references/qa/imgs/v1/saturation/saturation-plotter.py
new file mode 100755
index 00000000000..ee9dfbd0fa8
--- /dev/null
+++ b/docs/references/qa/imgs/v1/saturation/saturation-plotter.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+"""Plotter for comparing saturation results on v1 and v0.38.
+
+This script generates an image with the number of processed transactions for different load
+configurations (tx rate and number of connections). The purpose is to find the saturation point of
+the network and to compare the results between different CometBFT versions.
+
+Quick setup before running:
+```
+python3 -m venv .venv && source .venv/bin/activate
+pip install matplotlib
+```
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+# Expected values of processed transactions for a given transaction rate.
+rates0 = [0, 3600]
+expected = [r * 89 for r in rates0]
+
+# Transaction rate (x axis)
+rates1 = [200, 400, 800, 1600]
+rates2 = [r*2 for r in rates1]
+rates4 = [r*2 for r in rates2]
+
+# v1 (without latency emulation), for number of connections c in [1,2,4]
+c1 = [17800,31200,51146,50889]
+c2 = [34600,54706,51917,47732]
+c4 = [50464,49463,41376,45530]
+
+# v0.38, for number of connections c in [1,2,4]
+d1 = [17800,35600,36831,40600]
+d2 = [33259,41565,38686,45034]
+d4 = [33259,41384,40816,39830]
+
+fig, ax = plt.subplots(figsize=(12, 5))
+ax.plot(rates0, expected, linestyle='dotted', marker=',', color='g', label='expected')
+ax.plot(rates1, c1, linestyle='solid', marker='s', color='red', label='v1 c=1')
+ax.plot(rates2, c2, linestyle='solid', marker='s', color='salmon', label='v1 c=2')
+ax.plot(rates4, c4, linestyle='solid', marker='s', color='orange', label='v1 c=4')
+ax.plot(rates1, d1, linestyle='dashed', marker='o', color='blue', label='v0.38 c=1')
+ax.plot(rates2, d2, linestyle='dashed', marker='o', color='violet', label='v0.38 c=2')
+ax.plot(rates4, d4, linestyle='dashed', marker='o', color='purple', label='v0.38 c=4')
+
+plt.title('finding the saturation point')
+plt.xlabel("total rate over all connections (txs/s)")
+plt.ylabel("txs processed in 90 seconds")
+plt.xticks(np.arange(0, 3600, 200).tolist())
+ax.set_xlim([0, 3600])
+ax.set_ylim([0, 60000])
+ax.grid()
+ax.legend()
+
+fig.tight_layout()
+fig.savefig("saturation_v1_v038.png")
+plt.show()
diff --git a/docs/references/qa/imgs/v1/saturation/saturation_v1_LE.png b/docs/references/qa/imgs/v1/saturation/saturation_v1_LE.png
new file mode 100644
index 00000000000..bc9aa01bc10
Binary files /dev/null and b/docs/references/qa/imgs/v1/saturation/saturation_v1_LE.png differ
diff --git a/docs/references/qa/imgs/v1/saturation/saturation_v1_v038.png b/docs/references/qa/imgs/v1/saturation/saturation_v1_v038.png
new file mode 100644
index 00000000000..812bf62945c
Binary files /dev/null and b/docs/references/qa/imgs/v1/saturation/saturation_v1_v038.png differ
diff --git a/docs/references/qa/imgs/v1/saturation/throughput_vs_latencies_v1_with_LE.png b/docs/references/qa/imgs/v1/saturation/throughput_vs_latencies_v1_with_LE.png
new file mode 100644
index 00000000000..bc2f4adbae9
Binary files /dev/null and b/docs/references/qa/imgs/v1/saturation/throughput_vs_latencies_v1_with_LE.png differ
diff --git a/docs/references/qa/imgs/v1/saturation/throughput_vs_latencies_v1_without_LE.png b/docs/references/qa/imgs/v1/saturation/throughput_vs_latencies_v1_without_LE.png
new file mode 100644
index 00000000000..95b2faf42be
Binary files /dev/null and b/docs/references/qa/imgs/v1/saturation/throughput_vs_latencies_v1_without_LE.png differ
diff --git a/docs/references/qa/method.md b/docs/references/qa/method.md
new file mode 100644
index 00000000000..b906e4726f5
--- /dev/null
+++ b/docs/references/qa/method.md
@@ -0,0 +1,319 @@
+---
+order: 1
+parent:
+  title: Method
+  order: 1
+---
+
+# Method
+
+This document provides a detailed description of the QA process.
+It is intended to be used by engineers reproducing the experimental setup for future tests of CometBFT.
+
+The (first iteration of the) QA process as described [in the RELEASES.md document][releases]
+was applied to version v0.34.x in order to have a set of results acting as a benchmarking baseline.
+This baseline is then compared with results obtained in later versions.
+See [RELEASES.md][releases] for a description of the tests that we run in the QA process.
+
+Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them:
+the _200 Node Test_ and the _Rotating Nodes Test_.
+
+[releases]: https://github.com/cometbft/cometbft/blob/main/RELEASES.md#large-scale-testnets
+
+## Table of Contents
+
+- [Method](#method)
+  - [Table of Contents](#table-of-contents)
+  - [Software Dependencies](#software-dependencies)
+    - [Infrastructure Requirements to Run the Tests](#infrastructure-requirements-to-run-the-tests)
+    - [Requirements for Result Extraction](#requirements-for-result-extraction)
+  - [200 Node Testnet](#200-node-testnet)
+    - [Running the test](#running-the-test)
+    - [Result Extraction](#result-extraction)
+      - [Saturation point](#saturation-point)
+      - [Latencies](#latencies)
+      - [Prometheus metrics](#prometheus-metrics)
+  - [Rotating Node Testnet](#rotating-node-testnet)
+    - [Running the test](#running-the-test-1)
+    - [Result Extraction](#result-extraction-1)
+  - [Vote Extensions Testnet](#vote-extensions-testnet)
+    - [Running the test](#running-the-test-2)
+    - [Result Extraction](#result-extraction-2)
+
+## Software Dependencies
+
+### Infrastructure Requirements to Run the Tests
+
+* An account at Digital Ocean (DO), with a high droplet limit (>202)
+* The machine to orchestrate the tests should have the following installed:
+  * A clone of the [testnet repository][testnet-repo]
+    * This repository contains all the scripts mentioned in the remainder of this section
+  * [Digital Ocean CLI][doctl]
+  * [Terraform CLI][Terraform]
+  * [Ansible CLI][Ansible]
+
+[testnet-repo]: https://github.com/cometbft/qa-infra
+[Ansible]: https://docs.ansible.com/ansible/latest/index.html
+[Terraform]: https://www.terraform.io/docs
+[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/
+
+### Requirements for Result Extraction
+
+* [Prometheus DB][prometheus] to collect metrics from nodes
+* Prometheus DB to process queries (may be a different node from the previous one)
+* blockstore DB of one of the full nodes in the testnet
+
+[prometheus]: https://prometheus.io/
+
+## 200 Node Testnet
+
+This test consists of spinning up 200 nodes (175 validators + 20 full nodes + 5 seed nodes) and
+performing two experiments:
+- First we find the [saturation point][saturation] of the network by running the script
+  [200-node-loadscript.sh][200-node-loadscript.sh].
+- Then we run the testnet several times at the saturation point to collect data.
+
+The script [200-node-loadscript.sh] runs multiple transaction load instances with all possible
+combinations of the following parameters (a rough sketch of its loop is shown below):
+- number of transactions sent per second (the rate): 200, 400, 800, and 1600.
+- number of connections to the target node: 1, 2, and 4.
+
+Additionally:
+- The size of each transaction is 1024 bytes.
+- The duration of each test is 90 seconds.
+- There is one target node (a validator) that receives all the load.
+- After each test iteration, it waits until the mempool is empty and then waits `120 + rate / 60`
+  seconds more.
+
+[200-node-loadscript.sh]: https://github.com/cometbft/qa-infra/blob/main/ansible/scripts/200-node-loadscript.sh
+[saturation]: CometBFT-QA-34.md#saturation-point
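+
+For orientation, that loop can be sketched as follows. This is illustrative only: the command and
+helper names are placeholders, not real tools, and the actual logic lives in [200-node-loadscript.sh].
+
+```sh
+# Illustrative sketch; see 200-node-loadscript.sh for the real implementation.
+for c in 1 2 4; do
+  for rate in 200 400 800 1600; do
+    send_load --connections "$c" --rate "$rate" --size 1024 --time 90  # placeholder command
+    wait_until_mempool_is_empty                                        # placeholder helper
+    sleep $((120 + rate / 60))  # extra wait after each iteration
+  done
+done
+```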
+
+### Running the test
+
+This section explains how the tests were carried out for reproducibility purposes.
+
+1. [If you haven't done it before]
+   Follow steps 1-5 of the `README.md` at the top of the testnet repository to configure Terraform and the DigitalOcean CLI (`doctl`).
+2. In the `experiment.mk` file, set the following variables (do NOT commit these changes):
+   1. Set `MANIFEST` to point to the file `testnets/200-nodes-with-zones.toml`.
+   2. Set `VERSION_TAG` to the git hash that is to be tested.
+      * If you are running the base test, which implies a homogeneous network (all nodes are running the same version),
+        then make sure the makefile variable `VERSION2_WEIGHT` is set to 0.
+      * If you are running a mixed network, set the variable `VERSION2_TAG` to the other version you want deployed
+        in the network.
+        Then adjust the weight variables `VERSION_WEIGHT` and `VERSION2_WEIGHT` to configure the
+        desired proportion of nodes running each of the two configured versions.
+3. Follow steps 5-11 of the `README.md` to configure and start the 200 node testnet.
+   * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9).
+4. As a sanity check, connect to the Prometheus node's web interface (port 9090)
+   and check the graph for the `cometbft_consensus_height` metric. All nodes
+   should be increasing their heights.
+   * Run `ansible --list-hosts prometheus` to obtain the Prometheus node's IP address.
+   * The following URL will display the metrics `cometbft_consensus_height` and `cometbft_mempool_size`
+     (`<PROMETHEUS-NODE-IP>` is the address obtained above):
+
+     ```
+     http://<PROMETHEUS-NODE-IP>:9090/classic/graph?g0.range_input=1h&g0.expr=cometbft_consensus_height&g0.tab=0&g1.range_input=1h&g1.expr=cometbft_mempool_size&g1.tab=0
+     ```
+
+5. Discover the saturation point of the network. If you already know it, skip this step.
+   * Run `make loadrunners-init`, in case the load runner is not yet initialised. This will copy the
+     loader scripts to the `testnet-load-runner` node and install the load tool.
+   * Run `ansible --list-hosts loadrunners` to find the IP address of the `testnet-load-runner` node.
+   * `ssh` into `testnet-load-runner`.
+     * We will run a script that takes about 40 mins to complete, so it is suggested to first run `tmux` in case the ssh session breaks.
+     * `tmux` quick cheat sheet: `ctrl-b a` to attach to an existing session; `ctrl-b %` to split the current pane vertically; `ctrl-b ;` to toggle to the last active pane.
+   * Find the *internal* IP address of a full node (for example, `validator000`). This node will receive all transactions from the load runner node.
+   * Run `/root/200-node-loadscript.sh <INTERNAL-IP>` from the load runner node, where `<INTERNAL-IP>` is the internal IP address of a full node.
+     * The script runs 90-seconds-long experiments in a loop with different load values.
+   * Follow the steps of the [Result Extraction](#result-extraction) section below to obtain the file `report_tabbed.txt`.
+6. Run several transaction load instances (typically 5), each of 90 seconds, using a load somewhat below the saturation point.
+   * Set the Makefile variables `LOAD_CONNECTIONS` and `LOAD_TX_RATE` to values that will produce the desired transaction load (see the example after this list).
+   * Set `LOAD_TOTAL_TIME` to 91 (seconds). The extra second is because the last transaction batch
+     coincides with the end of the experiment and is thus not sent.
+   * Run `make runload` and wait for it to complete. You may want to run this several times so the data from different runs can be compared.
+7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine.
+   * Alternatively, you may want to run `make retrieve-prometheus-data` and `make retrieve-blockstore` separately.
+     The end result will be the same.
+   * `make retrieve-blockstore` accepts the following values in the makefile variable `RETRIEVE_TARGET_HOST`:
+     * `any` (the default): picks a full node and retrieves the blockstore from that node only.
+     * `all`: retrieves the blockstore from all full nodes; this is extremely slow and consumes plenty of bandwidth,
+       so use it with care.
+     * the name of a particular full node (e.g., `validator01`): retrieves the blockstore from that node only.
+8. Verify that the data was collected without errors:
+   * at least one blockstore DB for a CometBFT validator;
+   * the Prometheus database from the Prometheus node;
+   * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s).
+9. **Run `make terraform-destroy`**
+   * Don't forget to type `yes`! Otherwise you're in trouble.
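+
+As an example of the load configuration in step 6, an invocation with one connection at 400 tx/s
+might look as follows (the values are only illustrative; pick yours below the saturation point
+found in step 5):
+
+```sh
+# Example values only; choose a load below the saturation point.
+make runload LOAD_CONNECTIONS=1 LOAD_TX_RATE=400 LOAD_TOTAL_TIME=91
+```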
+
+### Result Extraction
+
+The method for extracting the results described here is highly manual (and exploratory) at this stage.
+The CometBFT team should improve it at every iteration to increase the amount of automation.
+
+#### Saturation point
+
+For identifying the saturation point, run from the `qa-infra` repository:
+```sh
+./script/reports/saturation-gen-table.sh <results-dir>
+```
+where `<results-dir>` is the directory where the results of the experiments were downloaded.
+This directory should contain the file `blockstore.db.zip`. The script will automatically:
+1. Unzip `blockstore.db.zip`, if it is not already unzipped.
+2. Run the tool `test/loadtime/cmd/report` to extract data for all instances with different
+   transaction load.
+   - This will generate an intermediate file `report.txt` that contains an unordered list of
+     experiment results with varying concurrent connections and transaction rates.
+3. Generate the files:
+   - `report_tabbed.txt`, with results formatted as a matrix, where rows are a particular tx rate and
+     columns are a particular number of websocket connections.
+   - `saturation_table.tsv`, which just contains columns with the number of processed transactions;
+     this is handy to create a Markdown table for the report.
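+
+For reference, the extraction this script performs is roughly equivalent to running the `report`
+tool by hand, from a checkout of the CometBFT repository, over the unzipped blockstore. The flags
+below are indicative; check the tool's own help for its exact interface.
+
+```sh
+# Indicative only: saturation-gen-table.sh drives this tool for you.
+unzip blockstore.db.zip
+go run ./test/loadtime/cmd/report --database-type goleveldb --data-dir ./ > report.txt
+```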
+
+#### Latencies
+
+For generating images on latency, run from the `qa-infra` repository:
+```sh
+./script/reports/latencies-gen-images.sh <results-dir>
+```
+As above, `<results-dir>` should contain the file `blockstore.db.zip`.
+The script will automatically:
+1. Unzip `blockstore.db.zip`, if it is not already unzipped.
+2. Generate a file with raw results `results/raw.csv` using the tool `test/loadtime/cmd/report`.
+3. Set up a Python virtual environment and install the dependencies required for running the scripts
+   in the steps below.
+4. Generate latency vs. throughput images, using [`latency_throughput.py`]. This plot is useful to
+   visualize the saturation point.
+5. Generate a series of images with the average latency of each block for each experiment instance
+   and configuration, using [`latency_plotter.py`]. These plots may help with visualizing latency vs.
+   throughput variation.
+
+[`latency_throughput.py`]: ../../../scripts/qa/reporting/README.md#Latency-vs-Throughput-Plotting
+[`latency_plotter.py`]: ../../../scripts/qa/reporting/README.md#Latency-vs-Throughput-Plotting-version-2
+
+#### Prometheus metrics
+
+1. From the `qa-infra` repository, run:
+   ```sh
+   ./script/reports/prometheus-start-local.sh <results-dir>
+   ```
+   where `<results-dir>` is the directory where the results of the experiments were
+   downloaded. This directory should contain the file `blockstore.db.zip`.
+   This script will:
+   - kill any running Prometheus server,
+   - unzip the Prometheus database retrieved from the testnet, and
+   - start a Prometheus server on the default `localhost:9090`, bootstrapping the downloaded data
+     as database.
+2. Identify the time window you want to plot in your graphs. In particular, search for the start
+   time and duration of the window.
+3. Run:
+   ```sh
+   ./script/reports/prometheus-gen-images.sh <start-time> <duration> [<test-case>] [<version>]
+   ```
+   where `<start-time>` is in the format `'%Y-%m-%dT%H:%M:%SZ'` and `<duration>` is in seconds.
+   This will download, set up a Python virtual environment with required dependencies, and execute
+   the script [`prometheus_plotter.py`]. The optional parameter `<test-case>` is one of `200_nodes`
+   (default), `rotating`, and `vote_extensions`; `<version>` is just for putting in the title
+   of the plot.
+
+[`prometheus_plotter.py`]: ../../../scripts/qa/reporting/README.md#prometheus-metrics
+
+## Rotating Node Testnet
+
+### Running the test
+
+This section explains how the tests were carried out for reproducibility purposes.
+
+1. [If you haven't done it before]
+   Follow the [set up][qa-setup] steps of the `README.md` at the top of the testnet repository to
+   configure Terraform and `doctl`.
+2. In the `experiment.mk` file, set the following variables (do NOT commit these changes):
+   * Set `MANIFEST` to point to the file `testnets/rotating.toml`.
+   * Set `VERSION_TAG` to the git hash that is to be tested.
+   * Set `EPHEMERAL_SIZE` to 25.
+3. Follow the [testnet starting][qa-start] steps of the `README.md` to configure and start the
+   rotating node testnet.
+   * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests.
+4. As a sanity check, connect to the Prometheus node's web interface and check the graph for the
+   `cometbft_consensus_height` metric. All nodes should be increasing their heights
+   (a command-line alternative is sketched after this list).
+5. In a different shell:
+   * run `make loadrunners-init` to initialize the load runner.
+   * run `make runload ITERATIONS=1 LOAD_CONNECTIONS=X LOAD_TX_RATE=Y LOAD_TOTAL_TIME=Z`
+     * `X` and `Y` should reflect a load below the saturation point (see, e.g.,
+       [this paragraph](CometBFT-QA-38.md#saturation-point) for further info).
+     * `Z` (in seconds) should be big enough to keep running throughout the test, until we manually stop it in step 7.
+       In principle, a good value for `Z` is `7200` (2 hours).
+6. Run `make rotate` to start the script that creates the ephemeral nodes and kills them once they have caught up.
+   * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for the full length
+     of the experiment.
+   * [This][rotating-prometheus] is an example Prometheus URL you can use to monitor the test case's progress.
+7. When the height of the chain reaches 3000, stop the `make runload` script.
+8. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice)
+   after height 3000 was reached, stop `make rotate`.
+9. Run `make stop-network`.
+10. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine.
+11. Verify that the data was collected without errors:
+    * at least one blockstore DB for a CometBFT validator;
+    * the Prometheus database from the Prometheus node;
+    * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s).
+12. **Run `make terraform-destroy`**
+
+Steps 8 to 10 are highly manual at the moment and will be improved in future iterations.
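+
+For the sanity check in step 4, or to watch the height approach 3000 (step 7) without the web UI,
+the same metric can also be queried through the standard Prometheus HTTP API, where
+`PROMETHEUS-NODE-IP` is a placeholder as in the [example Prometheus URL][rotating-prometheus]:
+
+```sh
+# Query the current consensus height of all nodes via the Prometheus HTTP API.
+curl -s 'http://PROMETHEUS-NODE-IP:9090/api/v1/query?query=cometbft_consensus_height'
+```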
+
+### Result Extraction
+
+In order to obtain a latency plot, follow the instructions above for the 200 node experiment,
+but note that the `results.txt` file contains only one experiment.
+
+As for Prometheus, the same method as for the 200 node experiment can be applied.
+
+## Vote Extensions Testnet
+
+### Running the test
+
+This section explains how the tests were carried out for reproducibility purposes.
+
+1. [If you haven't done it before]
+   Follow the [set up][qa-setup] steps of the `README.md` at the top of the testnet repository to
+   configure Terraform and `doctl`.
+2. In the `experiment.mk` file, set the following variables (do NOT commit these changes):
+   1. Set `MANIFEST` to point to the file `testnets/varyVESize.toml`.
+   2. Set `VERSION_TAG` to the git hash that is to be tested.
+3. Follow the [testnet starting][qa-start] steps of the `README.md` to configure and start
+   the testnet.
+   * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests.
+4. Configure the load runner to produce the desired transaction load:
+   * set the makefile variables `ROTATE_CONNECTIONS` and `ROTATE_TX_RATE` to values that will produce the desired transaction load.
+   * set `ROTATE_TOTAL_TIME` to 150 (seconds).
+   * set `ITERATIONS` to the number of iterations that each configuration should run for.
+5. Execute the [testnet starting][qa-start] steps of the `README.md` file at the testnet repository.
+6. Repeat the following steps for each desired `vote_extension_size`:
+   1. Update the configuration (you can skip this step if you didn't change the `vote_extension_size`):
+      * Update the `vote_extensions_size` in the `testnet.toml` to the desired value.
+      * `make configgen`
+      * `ANSIBLE_SSH_RETRIES=10 ansible-playbook ./ansible/re-init-testapp.yaml -u root -i ./ansible/hosts --limit=validators -e "testnet_dir=testnet" -f 20`
+      * `make restart`
+   2. Run the test:
+      * `make runload`
+        This will repeat the tests `ITERATIONS` times every time it is invoked.
+   3. Collect your data:
+      * `make retrieve-data`
+        Gathers all relevant data from the testnet into the orchestrating machine, inside the folder `experiments`.
+        Two subfolders are created: one with the blockstore DB of a CometBFT validator and one with the Prometheus DB data.
+      * Verify that the data was collected without errors with `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s).
+7. Clean up your setup.
+   * `make terraform-destroy`; don't forget that you need to type **yes** for it to complete.
+
+
+### Result Extraction
+
+In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but:
+
+* The `results.txt` file contains only one experiment.
+* Therefore, there is no need for any `for` loops (a single invocation, sketched below, suffices).
+
+As for Prometheus, the same method as for the 200 node experiment can be applied.
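+
+For reference, the single-experiment extraction can be done with one invocation of the `report`
+tool mentioned in the 200 node section. This is a minimal sketch; it assumes the retrieved
+blockstore was unzipped to `./blockstore.db` and uses the flags documented in `test/loadtime`:
+```sh
+# Print the latency statistics for the only experiment in the blockstore, and
+# dump the raw per-transaction latencies to a CSV file for plotting.
+go run github.com/cometbft/cometbft/test/loadtime/cmd/report \
+  --database-type goleveldb --data-dir ./blockstore.db --csv results/raw.csv > report.txt
+```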
+ +[qa-setup]: https://github.com/cometbft/qa-infra/blob/main/README.md#setup +[qa-start]: https://github.com/cometbft/qa-infra/blob/main/README.md#start-the-network +[rotating-prometheus]: http://PROMETHEUS-NODE-IP:9090/classic/graph?g0.expr=cometbft_consensus_height%7Bjob%3D~%22ephemeral.*%22%7D%3Ecometbft_blocksync_latest_block_height%7Bjob%3D~%22ephemeral.*%22%7D%20or%20cometbft_blocksync_latest_block_height%7Bjob%3D~%22ephemeral.*%22%7D&g0.tab=0&g0.display_mode=lines&g0.show_exemplars=0&g0.range_input=1h40m&g1.expr=cometbft_mempool_size%7Bjob!~%22ephemeral.*%22%7D&g1.tab=0&g1.display_mode=lines&g1.show_exemplars=0&g1.range_input=1h40m&g2.expr=cometbft_consensus_num_txs%7Bjob!~%22ephemeral.*%22%7D&g2.tab=0&g2.display_mode=lines&g2.show_exemplars=0&g2.range_input=1h40m diff --git a/docs/rfc/README.md b/docs/references/rfc/README.md similarity index 68% rename from docs/rfc/README.md rename to docs/references/rfc/README.md index 4f7addb6500..f4bb42feabb 100644 --- a/docs/rfc/README.md +++ b/docs/references/rfc/README.md @@ -15,7 +15,7 @@ discussion that might otherwise only be recorded in an ad-hoc way (for example, via gists or Google docs) that are difficult to discover for someone after the fact. An RFC _may_ give rise to more specific architectural _decisions_ for CometBFT, but those decisions must be recorded separately in -[Architecture Decision Records (ADR)](../architecture/). +[Architecture Decision Records (ADR)](./../architecture/README.md). As a rule of thumb, if you can articulate a specific question that needs to be answered, write an ADR. If you need to explore the topic and get input from @@ -32,7 +32,7 @@ An RFC should provide: substance of the discussion (links to other documents are fine here). - The **discussion**, the primary content of the document. -The [rfc-template.md](./rfc-template.md) file includes placeholders for these +The [rfc-template.md](rfc-template.md) file includes placeholders for these sections. ## Table of Contents @@ -42,10 +42,11 @@ relating to Tendermint Core prior to forking, please see [this list](./tendermint-core/). 
-- [RFC-100: ABCI Vote Extension Propagation](./rfc-100-abci-vote-extension-propag.md) -- [RFC-101: Banning peers based on ResponseCheckTx](./rfc-101-p2p-bad-peers-checktx.md) -- [RFC-102: Improve forward compatibility of proto-generated Rust code](./rfc-102-rust-gen-builders.md) -- [RFC-103: Incoming transactions when node is catching up](./rfc-103-incoming-txs-when-catching-up.md) -- [RFC-104: Internal messaging using the actor model](./rfc-104-actor-model.md) -- [RFC-105: Allowing Non-Determinism in `ProcessProposal`](./rfc-105-non-det-process-proposal.md) -- [RFC-106: Separation of non-idempotent methods in data companion API](./rfc-106-separate-stateful-methods.md) +- [RFC-100: ABCI Vote Extension Propagation](rfc-100-abci-vote-extension-propag.md) +- [RFC-101: Banning peers based on ResponseCheckTx](rfc-101-p2p-bad-peers-checktx.md) +- [RFC-102: Improve forward compatibility of proto-generated Rust code](rfc-102-rust-gen-builders.md) +- [RFC-103: Incoming transactions when node is catching up](rfc-103-incoming-txs-when-catching-up.md) +- [RFC-104: Internal messaging using the actor model](rfc-104-actor-model.md) +- [RFC-105: Allowing Non-Determinism in `ProcessProposal`](rfc-105-non-det-process-proposal.md) +- [RFC-106: Separation of non-idempotent methods in data companion API](rfc-106-separate-stateful-methods.md) +- [RFC-107: Internal signalling using event observers](rfc-107-event-observer.md) diff --git a/docs/rfc/images/rfc-103-optimization-comparison.png b/docs/references/rfc/images/rfc-103-optimization-comparison.png similarity index 100% rename from docs/rfc/images/rfc-103-optimization-comparison.png rename to docs/references/rfc/images/rfc-103-optimization-comparison.png diff --git a/docs/rfc/rfc-100-abci-vote-extension-propag.md b/docs/references/rfc/rfc-100-abci-vote-extension-propag.md similarity index 99% rename from docs/rfc/rfc-100-abci-vote-extension-propag.md rename to docs/references/rfc/rfc-100-abci-vote-extension-propag.md index 450556ff573..f57cc250263 100644 --- a/docs/rfc/rfc-100-abci-vote-extension-propag.md +++ b/docs/references/rfc/rfc-100-abci-vote-extension-propag.md @@ -156,7 +156,7 @@ discussions and need to be addressed. They are (roughly) ordered from easiest to If sets *valseth* and *valseth+1* are disjoint, more than *2nh/3* of validators in height *h* should - have actively participated in conensus in *h*. So, as of height *h*, only a minority of validators + have actively participated in consensus in *h*. So, as of height *h*, only a minority of validators in *h* can be lagging behind, although they could all lag behind from *h+1* on, as they are no longer validators, only full nodes. This situation falls under the assumptions of case (h) below. diff --git a/docs/rfc/rfc-101-p2p-bad-peers-checktx.md b/docs/references/rfc/rfc-101-p2p-bad-peers-checktx.md similarity index 84% rename from docs/rfc/rfc-101-p2p-bad-peers-checktx.md rename to docs/references/rfc/rfc-101-p2p-bad-peers-checktx.md index 80e52480b5a..019fdb64db5 100644 --- a/docs/rfc/rfc-101-p2p-bad-peers-checktx.md +++ b/docs/references/rfc/rfc-101-p2p-bad-peers-checktx.md @@ -10,23 +10,23 @@ ## Abstract In CometBFT, nodes receive transactions either from external clients via RPC, -or from their peers via p2p. Upon receiving a transaction, a node runs `CheckTx` on it. This is +or from their peers via p2p. Upon receiving a transaction, a node runs `CheckTx` on it. 
This is
an application-specific check whose return code with a zero value indicates the transaction has passed
this check, and can be added into the mempool. Any non-zero code indicates the transaction is not valid.
Thus, the main role of `CheckTx` is to, as early as possible, prevent invalid transactions
-from entering the mempool.
+from entering the mempool. `CheckTx` will never place a transaction failing the check into the mempool.

But there are scenarios where a, once valid, transaction can become invalid. And there are
valid, non-malicious, scenarios in which a transaction will not pass this check (including nodes
getting different transactions at different times, meaning some
-of them might be obsolete at the time of the check; state changes upon block execution etc.).
+of them might be obsolete at the time of the check; state changes upon block execution, etc.).

However, CometBFT users observed that there are transactions
that can never have been or will never be valid. They thus propose to introduce a special response
code for `CheckTx` to indicate this behaviour, and ban the peers who gossip such transactions.
Additionally, users expressed a need for banning peers who
-repeatedly send transactions failing `CheckTx`.
+repeatedly send transactions failing `CheckTx`.

-The main goal of this document is to analyse the cases where peers could be banned when they send
+The main goal of this document is to analyse the cases where peers could be banned when they send
transactions failing `CheckTx`, and provide the exact conditions that a peer and transaction
have to satisfy in order to mark the peer as bad.

@@ -38,121 +38,121 @@ potential changes to the existing mempool logic and implementation.

This work was triggered by issue [#7918](https://github.com/tendermint/tendermint/issues/7918) and
a related discussion in [#2185](https://github.com/tendermint/tendermint/issues/2185). Additionally,
-there was a [proposal](https://github.com/tendermint/tendermint/issues/6523)
-to disconnect from peers after they send us transactions that constantly fail `CheckTx`. While
-the actual implementation of an additional response code for `CheckTx` is straight forward there
-are certain correctness aspects to consider. The questions to answer, along with identified risks will be outlined in
-the discussion.
+there was a [proposal](https://github.com/tendermint/tendermint/issues/6523)
+to disconnect from peers after they send us transactions that constantly fail `CheckTx`. While
+the actual implementation of an additional response code for `CheckTx` is straightforward, there
+are certain correctness aspects to consider. The questions to answer, along with the identified risks, will be outlined in
+the discussion.

### Existing issues and concerns

-Before diving into the details, we collected a set of issues opened by various users, arguing for
-this behaviour and explaining their needs.
+Before diving into the details, we collected a set of issues opened by various users, arguing for
+this behaviour and explaining their needs.

- Celestia: [blacklisting peers that repeatedly send bad tx](https://github.com/celestiaorg/celestia-core/issues/867)
and investigating how [Tendermint treats large Txs](https://github.com/celestiaorg/celestia-core/issues/243)
- BigChainDb: [Handling proposers who propose bad blocks](https://github.com/bigchaindb/BEPs/issues/84)
- IBC relayers: Nodes allow transactions with a wrong `minGas` and gossip them, and other nodes keep rejecting them.
Problem seen with relayers where some nodes would have the `minGas` [parameter](https://github.com/cosmos/gaia/blob/5db8fcc9a229730f5115bed82d0f85b6db7184b4/contrib/testnets/test_platform/templates/app.toml#L8-L11) misconfigured.
- Gaia reported an [issue](https://github.com/cosmos/gaia/issues/2073) where misconfigured non-validator nodes were spamming the validators with
transactions that were too big. The debugging was long and it was not easy to realize what was going on.

**Acceptable duplicate transactions**

-Banning peers was also mentioned within [IBC-go](https://github.com/cosmos/ibc-go/issues/853#issuecomment-1032211020). However,the crux of the issue is preventing transactions with duplicate payload. While this is indeed undesired behaviour, this is not considered behaviour that should lead to banning a peer or even disconnecting from him. Duplicate transactions are in this case prevented using an application-specific solution.
+Banning peers was also mentioned within [IBC-go](https://github.com/cosmos/ibc-go/issues/853#issuecomment-1032211020). However, the crux of the issue is preventing transactions with duplicate payload. While this is indeed undesired behaviour, this is not considered behaviour that should lead to banning a peer or even disconnecting from it. Duplicate transactions are in this case prevented using an application-specific solution.

### Current state of mempool/p2p interaction

-Transactions received from a peer are handled within the [`Receive`](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L158) routine.
+Transactions received from a peer are handled within the [`Receive`](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L158) routine.
Currently, the mempool triggers a disconnect from a peer in the case of the following errors:

- [Unknown message type](https://github.com/cometbft/cometbft/blob/main/mempool/reactor.go#L119)

-However, disconnecting from a peer is not the same as banning the peer. The p2p layer will close the connecton but
-the peer can reconnect without any penalty, and if the peer it is connecting to is configured to be its persistent peer,
+However, disconnecting from a peer is not the same as banning the peer. The p2p layer will close the connection but
+the peer can reconnect without any penalty, and if the peer it is connecting to is configured to be its persistent peer,
a reconnect will be initiated
-from the node.
+from the node.

### Current support for peer banning

The p2p layer implements banning peers by marking them
-as bad and removing them from the list of peers to connect to for *at least* a predefined amount of time. This is done by calling the
+as bad and removing them from the list of peers to connect to for *at least* a predefined amount of time. This is done by calling the
[`MarkBad`](https://github.com/cometbft/cometbft/blob/main/spec/p2p/implementation/addressbook.md#bad-peers) routine implemented by the `Switch`.
-If the node does not set the amount of time to be banned, a default value is used.
-Note that the timing parameter sets the lower bound for when a peer will be unbanned.
+If the node does not set the amount of time to be banned, a default value is used.
+Note that the timing parameter sets the lower bound for when a peer will be unbanned.
But the p2p layer will only try to connect to banned peers if the node is not sufficiently connected.
Thus the node has no explicit control on when a reconnect attempt will be triggered.

-The application can blacklist peers via ABCI if the
-[`filterPeers`](../../spec/abci/abci%2B%2B_app_requirements.md#peer-filtering)
-config flag is set, by providing a set of peers to ban to CometBFT.
+The application can blacklist peers via ABCI if the
+[`filterPeers`](../../../spec/abci/abci++_app_requirements.md#peer-filtering)
+config flag is set, by providing a set of peers to ban to CometBFT.

If the discussion in this RFC deems a different banning mechanism is needed, the actual implementation
and design of this mechanism will be discussed in a separate RFC.
This mechanism should be generic, designed within the p2p layer and simply provide an interface for
reactors to indicate peers to ban and for how long. It should not involve any mempool
-specific design considerations.
+specific design considerations.

## Discussion

If this feature is to be implemented we need to clearly define the following:

1. What does banning a peer mean:
-   1. A peer can be simply disconnected from.
+   1. A peer can be simply disconnected from.
   2. Peer is disconnected from and banned.
   3. Conditions for the peer to be banned.
-2. If `CheckTx` signals a peer should be banned, retrieve the ID of peers to ban.
-3. Are there possible attack scenarios or unexpected behaviours by allowing this.
+2. If `CheckTx` signals a peer should be banned, retrieve the ID of the peers to ban.
+3. Are there possible attack scenarios or unexpected behaviours by allowing this?

-Any further mentions of `banning` will be agnostic to the actual way banning is implemented by the p2p layer.
+Any further mentions of `banning` will be agnostic to the actual way banning is implemented by the p2p layer.

-### 1. What does banning a peer mean
+### 1. What does banning a peer mean

-CometBFT recognizes that peers can accept transactions into their mempool as valid but then when the state changes, they can become invalid.
-There are also transactions that are received that could never have been valid (for examle due to misconfiguration on one node).
-We thus differentiate two scenarios - a) where `CheckTx` fails due to reasons already
+CometBFT recognizes that peers can accept transactions into their mempool as valid but that, when the state changes, they can become invalid.
+There are also transactions that are received that could never have been valid (for example due to misconfiguration on one node).
+We thus differentiate two scenarios - a) where `CheckTx` fails due to reasons already
known and b) where `CheckTx` deems a transaction could never have been valid.
-For the sake of simplicity , in the remainder of the text we will distinguish the failures due to a) as failures
-signaled with `ResponseCheckTx.code = 1` and the failures described in b), failures with `ResponseCheckTx.code > 1`, even though
+For the sake of simplicity, in the remainder of the text we will distinguish the failures due to a) as failures
+signaled with `ResponseCheckTx.code = 1` and the failures described in b), failures with `ResponseCheckTx.code > 1`, even though
the way we actually mark them in the end might differ.

-For a), a peer sends transactions that **repeatedly** fail CheckTx with `ResponseCheckTx.code = 1`, and is banned or disconnected from to avoid this.
-In this case we need to define what repeatedly means.
+For a), a peer sends transactions that **repeatedly** fail CheckTx with `ResponseCheckTx.code = 1`, and is banned or disconnected from to avoid this.
+In this case we need to define what repeatedly means.

-For b) we need to understand what is the potential reason a transaction could never have been valid on one node, but passes `CheckTx` on another node.
+For b), we need to understand the potential reasons why a transaction could never have been valid on one node, but passes `CheckTx` on another node.
We need to understand all the possible scenarios in which this can happen:

-1. What happens if a node is misconfigured and allows, for example, very large transactions into the mempool.
-This node would then gossip these transactions and they would always fail on other nodes.
+1. What happens if a node is misconfigured and allows, for example, very large transactions into the mempool.
+This node would then gossip these transactions and they would always fail on other nodes.
Is this a scenario where we want nodes to disconnect from this peer and ban it but do not consider it malicious?
2. Are all other reasons for this to happen a sign of malicious behaviour where a node explicitly lies?
How can `CheckTx` pass on a valid node, but fail on another valid node with a `ResponseCheckTx.code > 1`?
-If such behaviour is only possible when a peer is malicious, should this peer be punished or banned forever? Note that
+If such behaviour is only possible when a peer is malicious, should this peer be punished or banned forever? Note that
we cannot know whether a node is a validator in order for it to be punished. Gossiping this behaviour to other peers pro-actively
-also entails a different set of problems with it - how do we know we can trust peers who tell us to ban other peers. For these reasons, understanding the actual reason for these failures can be left for future work.
+also entails a different set of problems - how do we know we can trust peers who tell us to ban other peers? For these reasons, understanding the actual reason for these failures can be left for future work.

-For now, we will disconnect and ban the peer regardless of the exact reason a transaction
-is considered to never be valid.
+For now, we will disconnect and ban the peer regardless of the exact reason a transaction
+is considered to never be valid.

#### **Banning for frequent CheckTx failures**

If a node sends transactions that fail `CheckTx` but could be valid at some point, a peer should not be banned the first time this happens.
-Only if this happens frequently enough should this be considered as spam. To define this behaviour we keep track how many times (`numFailures`) a peer
-sent us invalid transactions within a time interval (`lastFailure`). This time interval should be reset every `failureResetInterval`.
+Only if this happens frequently enough should this be considered as spam. To define this behaviour we keep track of how many times (`numFailures`) a peer
+sent us invalid transactions within a time interval (`lastFailure`). This time interval should be reset every `failureResetInterval`.
For each peer, we should have a separate `numFailures` and `lastFailure` variable. There is no need to have one per transaction.

- Whenever a transaction fails, if the `now - lastFailure <= failureResetInterval`, we increment the `numFailures` for this particular peer and set the `lastFailure` to `now`.
- Otherwise, we set `lastFailure` to `now` and set `numFailures` to 1.
- Once the value for `numFailures` for a peer reaches `maxAllowedFailures`, the peer is disconnected from and banned.
+ Whenever a transaction fails, if `now - lastFailure <= failureResetInterval`, we increment the `numFailures` for this particular peer and set the `lastFailure` to `now`.
+ Otherwise, we set `lastFailure` to `now` and set `numFailures` to 1.
+ Once the value for `numFailures` for a peer reaches `maxAllowedFailures`, the peer is disconnected from and banned.

The reason for this logic is as follows: We deem it acceptable if every now and then a peer sends us an invalid transaction.
- But if this happens very frequently, then this behaviour can be considered as spamming and we want to disconnect from the peer.
-
+ But if this happens very frequently, then this behaviour can be considered as spamming and we want to disconnect from the peer.
+
**Discussion**

- The problem with supporting this scenario is the definition of the above mentioned parameters. It is very hard to estimate, at the CometBFT level, what these parameters should be. A possible solution is
- to allow the application to set these parameters. What is unclear, how will the application know that these parameters are not well set if, due to a bug or network problems, transactions start to fail?
+ The problem with supporting this scenario is the definition of the above-mentioned parameters. It is very hard to estimate, at the CometBFT level, what these parameters should be. A possible solution is
+ to allow the application to set these parameters. What is unclear is how the application will know that these parameters are not well set if, due to a bug or network problems, transactions start to fail.
The network could end up with all nodes banning everyone. How would an application developer know to debug this, what to look for?
A possible solution is to ban peers temporarily. In addition to the question of how long temporarily is, setting specific time limits for banning on a peer basis

@@ -160,29 +160,29 @@ is currently not supported by the p2p layer.

*Banning a peer in case of duplicate transactions*

-Currently, a peer can send the same valid (or invalid) transaction [multiple times](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/clist_mempool.go#L247). Peers do not
-gossip transactions to peers that have sent them that same transaction. But there is no check on whether
+Currently, a peer can send the same valid (or invalid) transaction [multiple times](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/clist_mempool.go#L247). Peers do not
+gossip transactions to peers that have sent them that same transaction. But there is no check on whether
a node has already sent the same transaction to this peer before. There is also no check whether the
transaction that is being gossiped is currently valid or not (assuming that invalid transactions could become valid).
-The transaction broadcast logic simply loops through the mempool and tries to send the transactions currently in the pool.
+The transaction broadcast logic simply loops through the mempool and tries to send the transactions currently in the pool.

-If we want to ban peers based on duplicate transactions, we should either add additional checks for the cases above, or
-not ban peers for this behaviour at the moment. It would be useful to gather metrics on how often a peer gossips the same
-transaction and whether this is cause of significant traffic.
+If we want to ban peers based on duplicate transactions, we should either add additional checks for the cases above, or
+not ban peers for this behaviour at the moment. It would be useful to gather metrics on how often a peer gossips the same
+transaction and whether this is a cause of significant traffic.

#### **Banning for sending *never-valid* transactions**

-If a transaction fails since it could never have been valid, `CheckTx` returns a `ResponseCheckTx.code`
-value greater than 1. In this case, the peer should be disconnected from and banned immediately without keeping count on how often
-this has happened.
+If a transaction fails since it could never have been valid, `CheckTx` returns a `ResponseCheckTx.code`
+value greater than 1. In this case, the peer should be disconnected from and banned immediately without keeping count of how often
+this has happened.

-The question is whether this transaction should be kept track of in the cache? We can still store it in
+The question is whether this transaction should be kept track of in the cache. We can still store it in
the cache so that we don't run `CheckTx` on it again, but if this peer is immediately banned, maybe there is no need to store its information.

Now, if we want to differentiate further reasons of why this transaction is sent to a node (whether it is a sign of malice or not),
+we might need more information on the actual reason for rejection. This could be done by an additional set of response codes provided by the application.

### 2. Choosing the peer to ban

@@ -190,20 +190,20 @@ Each transaction gossiped contains the ID of the peer that sent that transaction
a node saves the peer ID of the peer(s) that have sent it. As each peer had to have run `CheckTx` on this
transaction before adding it to its own mempool, we can assume this peer can be held
accountable for the validity of transactions it gossips. Invalid transactions are kept only in the mempool
-cache and thus not gossiped.
+cache and thus not gossiped.

As nodes have to complete a cryptographic handshake at the p2p layer, CometBFT guarantees that a malicious peer
-cannot lie about who the sender of the transaction is.
+cannot lie about who the sender of the transaction is.

*Transactions received from users*

-For transactions submitted via `broadcastTxCommit`, the `SenderID` field is empty.
+For transactions submitted via `broadcastTxCommit`, the `SenderID` field is empty.

**Question** Do we have mechanisms in place to handle cases when `broadcastTxCommit` submits failing transactions (can this be a form of attack)?

**From PR discussion**
-At the moment there is no particular defense mechanism beyond rate limiting as for any RPC endpoint (which is not done internally by CometBFT).
-An alternative would be to indeed internally make sure we do not get spammed with bad transaction using this endpoint.
+At the moment there is no particular defense mechanism beyond rate limiting as for any RPC endpoint (which is not done internally by CometBFT).
+An alternative would indeed be to internally make sure we do not get spammed with bad transactions using this endpoint.

### 3. Attack scenarios

@@ -211,22 +211,22 @@ While an attack by simply banning peers on failing `CheckTx` is hard to imagine,

Should we keep transactions that could never have been valid in the cache?
Assuming that receiving such transactions is rare, and the peer that sent them is banned, do we need to occupy space in the mempool cache with these transactions?

-- What if nodes run different versions of CometBFT and banning is not supported in one of the versions?
+- What if nodes run different versions of CometBFT and banning is not supported in one of the versions?
+
+- Reserving response codes can be problematic for existing applications that may have reserved these codes for internal purposes without being aware that this now causes a ban.
-- Reserving response codes can be problematic for existing applications that may have reserved these codes for internal purposes withtou being aware that this causes a ban now.
-
## Implementation considerations

**Indicating a new type of `CheckTx` failure**

-The initial proposal is to reserve a special response code to indicate that the transaction could never have been valid.
+The initial proposal is to reserve a special response code to indicate that the transaction could never have been valid.
Due to concerns of this being a breaking change for applications that have already reserved this code for internal
-purposes, there is an alternative implementation: expanding `ResponseCheckTx` with an additional field.
+purposes, there is an alternative implementation: expanding `ResponseCheckTx` with an additional field.
This field `neverValidTx` would be `false` by default. If a transaction could never have been valid,
-in addition to indicating this with a non-zero response code from `CheckTx`, the application would set this field value.
+in addition to indicating this with a non-zero response code from `CheckTx`, the application would set this field value.

-Another proposal is to expand this, by allowing the application to explicitly instruct CometBFT on whether to ban a peer or not.
+Another proposal is to expand this by allowing the application to explicitly instruct CometBFT on whether to ban a peer or not.
This requires adding yet another field to `CheckTx`: `banPeer`. The field can have the following values:
- `0`(default): do not ban peer
- `1`: decrement peer reputation (if such a mechanism exists in the p2p layer)

@@ -234,23 +234,23 @@ This requires adding yet another field to `CheckTx`: `banPeer`. The field can ha

**Adding support for peer banning**

-When a transaction fails `CheckTx`, it is not stored in the mempool but **can** be stored in the cache. If it is in the cache, it cannot be resubmitted again (as it will be discovered in the cache and not checked again). These two scenarios require a different implementation of banning in case `CheckTx` failed.
+When a transaction fails `CheckTx`, it is not stored in the mempool but **can** be stored in the cache. If it is in the cache, it cannot be resubmitted again (as it will be discovered in the cache and not checked again). These two scenarios require a different implementation of banning in case `CheckTx` fails.

-In both cases we need to keep track of the peers that sent invalid transactions.
If invalid transactions are cached,
+we also need to keep track of the `CheckTx` response code for each transaction. Currently the `ResponseCheckTx` code is checked in `resCbFirstTime` of the mempool.
+If invalid transactions are kept in the cache, the check is run only when a transaction is
+seen for the first time. Afterwards, the transaction is cached, to avoid running `CheckTx` on transactions already checked.
Thus when a transaction is received from a peer, if it is in the cache,
-`CheckTx` is not ran again, but the peers' ID is addded to the list of peers who sent this particular transaction.
-These transactions are rechecked once a block is committed to verify that they are still valid.
+`CheckTx` is not run again, but the peer's ID is added to the list of peers who sent this particular transaction.
+These transactions are rechecked once a block is committed to verify that they are still valid.

-If invalid transactions are not kept in the cache, they can be resubmitted multiple times, and `CheckTx` will be executed on them upon submission.
+If invalid transactions are not kept in the cache, they can be resubmitted multiple times, and `CheckTx` will be executed on them upon submission.
Therefore we do not need to remember the previous response codes for these transactions.

In summary, if we want to support banning peers based on the frequency with which they submit invalid transactions, we need to
have **additional datastructures**:
1. One to keep track of past invalid transactions
2. A datastructure to differentiate between valid and invalid *cached* transactions. If the `KeepInvalidTxsInCache` configuration parameter is not set, this datastructure
- is not needed.
+ is not needed.

We propose two ways to implement peer banning based on the result of `CheckTx`:

@@ -261,7 +261,7 @@ We propose two ways to implement peer banning based on the result of `CheckTx`:

**Peer banning when transactions are received**

-If a transaction fails `CheckTx` the
+If a transaction fails `CheckTx` the
[first time it is seen](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/clist_mempool.go#L409),
the peer can be banned right there:

@@ -292,11 +292,11 @@ if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
 }
}

-```
+```
The `KeepInvalidTxsInCache` configuration parameter defines whether an invalid transaction stays in cache.
For *never-valid* transactions, we could apply a different approach based on what we deem to be the bigger gain:
- As we do not expect to receive it frequently and from many peers, and we ban the peer that sent it immediately, we do not store it in the cache to save space. This would mean
-that if we did see it again, we'd ran `CheckTx` on it again.
+that if we did see it again, we'd run `CheckTx` on it again.

```golang
if !mem.config.KeepInvalidTxsInCache || r.CheckTx.Code == abci.NeverValid {
@@ -305,20 +305,20 @@ if !mem.config.KeepInvalidTxsInCache || r.CheckTx.Code == abci.NeverValid {
 }
 ```

-- We do keep it in the cache as long as possible to avoid running `CheckTX` on it because we know, for sure, that it will never be valid. As it is rare enough, it
-might not take that much space.
+- We do keep it in the cache as long as possible to avoid running `CheckTx` on it because we know, for sure, that it will never be valid. As it is rare enough, it
+might not take that much space.
In this case though, as we ban the sending peer immediately, we can save space by not storing peer information for this transaction.
The question is which one is more costly, doing `CheckTx` more than once, or keeping an extra entry in the cache?

-As said, this code will [never be executed](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/clist_mempool.go#L239)
+As said, this code will [never be executed](https://github.com/cometbft/cometbft/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/clist_mempool.go#L239)
for transactions whose hash is found
-in the cache.
+in the cache.

Instead of remembering the cached transactions, we could have had a valid/invalid bit per transaction within the cache.
As transactions themselves do not store such information and we expect this scenario to be unlikely, instead of increasing the footprint of all transactions in the cache,
-we opted to keep a map of transaction signature if the transaction is in the cache, but is invalid. Alternatively, the cache could keep two lists, one for valid, and one for invalid transactions.
-This modifies the following pieces of code as follows (this is just a prototype and does not include
+we opted to keep a map of transaction signatures if the transaction is in the cache, but is invalid. Alternatively, the cache could keep two lists, one for valid, and one for invalid transactions.
+This modifies the following pieces of code as follows (this is just a prototype and does not include
some obvious sanity checks):

```golang
@@ -359,7 +359,7 @@ if !mem.cache.Push(tx) { // if the transaction already exists in the cache
 // If transaction was invalid, we need to remember the peer information
 if _, ok := mem.cache.invalidCachedTx.Load(tx.Key); ok {
-  mem.banPeer(peerID)
+  mem.banPeer(peerID)
 }
 return mempool.ErrTxInCache
}
@@ -375,15 +375,15 @@ func (mem* ClistMempool) banPeer(peerID NodeID) {
 if val, ok := mem.peerFailureMap[peerID]; ok {
  lastFailureT := val.lastFailure
  numFailures = val.numFails
-  // if the failure was recent enough, update the number of failures and
+  // if the failure was recent enough, update the number of failures and
  // ban peer if applicable
  if time.Since(lastFailureT) <= failureResetInterval {
   if numFailures == maxAllowedFailures - 1 {
-    // Send Ban request to p2p
+    // Send Ban request to p2p
   }
  }
 }
- // Update the time of the last failure
+ // Update the time of the last failure and increment the number of failures
 mem.peerFailureMap[peerID] = { time.Now(), numFailures + 1}
}
```

@@ -396,85 +396,85 @@ As it is the mempool **reactor** that has access to the p2p layer, not the actua

**Implementing peer banning on recheck**

-Currently the recheck logic confirmes whether once **valid** transactions,
-, where `ResponseCheckTx.code == 0`, are still valid.
+Currently the recheck logic confirms whether once **valid** transactions,
+where `ResponseCheckTx.code == 0`, are still valid.

-As this logic loops through the transactions in any case, we can leverage it to check whether we can ban peers.
+As this logic loops through the transactions in any case, we can leverage it to check whether we can ban peers.
However, this approach has several downsides:

-- It is not timely enough. Recheck is executed after a block is committed, leaving room for a bad peer to send
-us transactions the entire time between two blocks.
-If we want to keep track of when peers sent us a traansaction and punish them only if the misbehaviour happens
-frequently enough, this approach makes it hard to keep track of when exactly was a transaction submitted.
-Rechecking if optional and node operators can disable it.
+- It is not timely enough. Recheck is executed after a block is committed, leaving room for a bad peer to send
+us transactions the entire time between two blocks.
+- If we want to keep track of when peers sent us a transaction and punish them only if the misbehaviour happens
+frequently enough, this approach makes it hard to keep track of when exactly a transaction was submitted.
+- Rechecking is optional and node operators can disable it.
- Furthermore, rechecking is a node local configuration parameter. This means that some nodes might be performing this check,
- while others will be unaware of this.
+ while others will be unaware of this.

On the plus side this would avoid adding new logic to the mempool caching mechanism and keeping additional information
about transaction validity. But we would still have to keep the information on peers and the frequency at which they send us bad transactions.

-Transactions that became invalid on recheck should not be cause for peer banning as they have not been gossiped as invalid transactions.
+Transactions that became invalid on recheck should not be cause for peer banning as they have not been gossiped as invalid transactions.

#### `PreCheck` and `PostCheck`

-The `PreCheck` and `PostCheck` functions are optional functions that can be executed before or after `CheckTx`.
-Following the design outlined in this RFC, their responses are not considered for banning.
+The `PreCheck` and `PostCheck` functions are optional functions that can be executed before or after `CheckTx`.
+Following the design outlined in this RFC, their responses are not considered for banning.

#### Checks outside `CheckTx`

-There are a number of checks that the mempool performs on the transaction, that are not part of `CheckTx` itself.
+There are a number of checks that the mempool performs on the transaction that are not part of `CheckTx` itself.
Those checks have been mentioned in the user issues described at the beginning of this document:

- Transaction size
- Proto checks
- Receiving unknown messages via the mempool channel

-The previous code snippets do not incroporate these in peer banning. If we adopt those as valid reasons for banning, we should put the corresponding logic in place.
+The previous code snippets do not incorporate these in peer banning. If we adopt those as valid reasons for banning, we should put the corresponding logic in place.

### Impacted mempool functionalities

-- Mempool caching: remembering failed transactions and whether they come from banned peers; Removal of transactions from
+- Mempool caching: remembering failed transactions and whether they come from banned peers; removal of transactions from
`invalidCachedTx` when a transaction is removed from cache.
- Handling of transactions failing `CheckTx`: Keeping track of how often transactions from a particular peer have failed and banning them
if the conditions for a ban are met.

-### Impact on ABCI
+### Impact on ABCI

-- Introduction of new response codes for CheckTx.
As previously noted, this might break existing applications if they reserved codes for internal purposes.
- Altering the specification to reflect this change

### GitHub discussion summary

-The main concern that arose from the dicussion on github is whether banning peers based on the return code of `CheckTx`
-can lead to unwanted side effects, such as partitioning the network or influencing the behaviour of other nodes.
+The main concern that arose from the discussion on GitHub is whether banning peers based on the return code of `CheckTx`
+can lead to unwanted side effects, such as partitioning the network or influencing the behaviour of other nodes.

#### *Potential failure scenarios*

-* Assume we have a network that is configured in such a way that there exists an overlay network, and a node can only influence its direct connections.
+* Assume we have a network that is configured in such a way that there exists an overlay network, and a node can only influence its direct connections.
The node can force a peer to disconnect from it forever if, say, it wanted to lower the number of ways it has of getting messages to the rest of the network.
However, that could already have been done by just disconnecting or by dropping its messages.
* A bug in `CheckTx` causes the rejection of all transactions and all nodes disconnect; how do we ensure the operator knows what has happened?
* An attacker discovers a particular transaction that they know would be accepted, and therefore propagated, by >1/3 of the voting power on the network,
- but rejected by the rest. This would result in halting the network for the period for which we blacklist "misconfigured" peers,
- because >1/3 of the voting power would be blacklisted by the remaining peers. This means that if >1/3 of the voting power on a network has,
- for example, a minimum transaction fee requirement much lower than the remaining nodes, and application developers return a `neverValidTx=true`
+ but rejected by the rest. This would result in halting the network for the period for which we blacklist "misconfigured" peers,
+ because >1/3 of the voting power would be blacklisted by the remaining peers. This means that if >1/3 of the voting power on a network has,
+ for example, a minimum transaction fee requirement much lower than the remaining nodes, and application developers return a `neverValidTx=true`
value from `CheckTx` here, they could halt their network.

#### *Decisions*

-The uncertainties are higher in the case of banning based on the *frequency* of the failures. This option has therefore been **dismissed**.
+The uncertainties are higher in the case of banning based on the *frequency* of the failures. This option has therefore been **dismissed**.
As for the banning based on the return code from the application, due to the lack of strong use cases and potential unwanted side-effects,
-it will not be implemented at the moment of writing the final version of this RFC (March 2023).
+it will not be implemented as of the time of writing the final version of this RFC (March 2023).

An alternative is being proposed at the moment due to feedback we received when debugging the Gaia issue mentioned above.
Namely, they found that having these peers banned or even a log message about this failure would have significantly shortened the debugging
-time.
+time.

-Additionally, as input from Osmosis, we got a potential security-related use case for the implementation of banning.
+Additionally, as input from Osmosis, we got a potential security-related use case for the implementation of banning.
It was therefore proposed to first implement a log message stating that the transaction could never have been valid, and even send this
message to the sender of the transaction, warning the node that it sent a transaction that failed `CheckTx`. But this should
not be sent on every `CheckTx` failure as it would create a lot of noise (we mentioned the valid reasons for `CheckTx` failures).
We would indeed require adding a special code and/or the `neverValidTx` flag

@@ -482,16 +482,16 @@ to `ResponseCheckTx`, and logging this warning only if the application sets thes

This would facilitate debugging and pinpointing the problem for operators of the nodes receiving these warnings.

-Then, once we progress with the p2p specification and understand all possible implications of banning, actual peer banning can be implemented.
+Then, once we progress with the p2p specification and understand all possible implications of banning, actual peer banning can be implemented.

#### *Discussion on minor implementation details*

-For completeness, and to make sure the information is not lost, there were a few discussions on minor implementation details.
+For completeness, and to make sure the information is not lost, there were a few discussions on minor implementation details.

*Keeping transactions failing `CheckTx` with a special code in the cache*

-Without any change to the current logic, these transactions are kept in the cache, as long as they are not evicted.
-Users argued for these transactions to be rare enough, that they can safely be discared in the case a peer is actually banned after sending them.
+Without any change to the current logic, these transactions are kept in the cache, as long as they are not evicted.
+Users argued that these transactions are rare enough that they can safely be discarded in case a peer is actually banned after sending them.

*Banning based on IP or nodeID*

@@ -506,7 +506,7 @@ Other than avoiding relying solely on the response code values, there are no imm

- Most of the relevant links are in the [existing issues and concerns section](#existing-issues-and-concerns)

-- [`CheckTx` function description](./../../spec/abci/abci%2B%2B_methods.md#checktx)
+- [`CheckTx` function description](../../../spec/abci/abci++_methods.md#checktx)

- GitHub discussions on this RFC:
  - [CometBFT repo - PR \#78](https://github.com/cometbft/cometbft/pull/78)
diff --git a/docs/rfc/rfc-102-rust-gen-builders.md b/docs/references/rfc/rfc-102-rust-gen-builders.md
similarity index 100%
rename from docs/rfc/rfc-102-rust-gen-builders.md
rename to docs/references/rfc/rfc-102-rust-gen-builders.md
diff --git a/docs/rfc/rfc-103-incoming-txs-when-catching-up.md b/docs/references/rfc/rfc-103-incoming-txs-when-catching-up.md
similarity index 97%
rename from docs/rfc/rfc-103-incoming-txs-when-catching-up.md
rename to docs/references/rfc/rfc-103-incoming-txs-when-catching-up.md
index 4da2e4ee61e..161d0ff4990 100644
--- a/docs/rfc/rfc-103-incoming-txs-when-catching-up.md
+++ b/docs/references/rfc/rfc-103-incoming-txs-when-catching-up.md
@@ -79,7 +79,7 @@ transaction is invalid. This can occur in three cases.

 1. The first time a transaction is received and then rejected by `CheckTx`.
 2. When the block executor [updates][update] the mempool, right after
 finalizing and committing a block: if there was an error while executing a transaction
- against the application, then it is removed from the cache.
+ against the application, then it is removed from the cache.
 3. When all the transactions in the mempool need to be rechecked after a new
 block has been delivered to the application. Each transaction will be
 validated again with `CheckTx` and removed from the cache if deemed invalid.
@@ -148,10 +148,10 @@ if peerState.GetHeight() < memTx.Height()-1 {
 continue
 }
 ```
-where:
+where:
- `peerState` is the local state of the peer (updated with the information
 received by the consensus reactor in `PeerState.ApplyNewRoundStepMessage`
- messages),
+ messages),
- `memTx.Height()` is the height at which transaction `memTx.tx` was
 validated (set during the handling of `CheckTx` responses), and
- `PeerCatchupSleepIntervalMS` is fixed to `100`.
@@ -174,14 +174,14 @@ shows the results of an experiment with four interconnected nodes.
On the left we see the collected metrics when we run the nodes without the
optimization. On the right we see the results of running the nodes with the
optimization, that is, without modifying the code.
-![rfc-103-comparison](./images/rfc-103-optimization-comparison.png) The node in
-orange called _validator04_ joins the network at arount height 100 and starts
+![rfc-103-comparison](images/rfc-103-optimization-comparison.png) The node in
+orange called _validator04_ joins the network at around height 100 and starts
performing block sync. In the graph at the bottom we can see the height of all
nodes and in particular how this node starts from height 0 and catches up with
the other nodes. Also we can observe that, when the optimization is disabled
(left side), while the orange node is catching up, both its mempool size (top
graph) and the number of rejected transactions (middle graph) increase
-significantly compared to the optimizated code (right side).
+significantly compared to the optimized code (right side).

__Decision__ The results presented above indicate that the optimization is
effectively improving the system's performance and should be kept for now. In
diff --git a/docs/rfc/rfc-104-actor-model.md b/docs/references/rfc/rfc-104-actor-model.md
similarity index 100%
rename from docs/rfc/rfc-104-actor-model.md
rename to docs/references/rfc/rfc-104-actor-model.md
diff --git a/docs/rfc/rfc-105-non-det-process-proposal.md b/docs/references/rfc/rfc-105-non-det-process-proposal.md
similarity index 100%
rename from docs/rfc/rfc-105-non-det-process-proposal.md
rename to docs/references/rfc/rfc-105-non-det-process-proposal.md
diff --git a/docs/rfc/rfc-106-separate-stateful-methods.md b/docs/references/rfc/rfc-106-separate-stateful-methods.md
similarity index 100%
rename from docs/rfc/rfc-106-separate-stateful-methods.md
rename to docs/references/rfc/rfc-106-separate-stateful-methods.md
diff --git a/docs/references/rfc/rfc-107-event-observer.md b/docs/references/rfc/rfc-107-event-observer.md
new file mode 100644
index 00000000000..98599d02443
--- /dev/null
+++ b/docs/references/rfc/rfc-107-event-observer.md
@@ -0,0 +1,240 @@
+# RFC 107: Internal Signalling Using Event Observers
+
+## Changelog
+
+- 2023-07-24: First draft (@thanethomson)
+
+## Abstract
+
+The overall problem follows from that discussed in [\#1055] and [RFC 104]:
+CometBFT is difficult to reason about and change in part due to the complexity
+of the internal interaction between different reactors/services.
+
+RFC 104 explored and ruled out the possibility of employing a loosely coupled
+model like the [Actor] model.
This RFC explores the possibility of a simpler, +more tightly coupled model with a range of benefits compared to the actor model +and what is currently implemented. It is possible that this would also help +simplify testing, since the tests in CometBFT are the biggest users (by volume) +of the current event bus subsystem. + +## Background + +Various design patterns are potentially useful in addressing the problem of +coupling of concurrent components in a system like CometBFT. The [Observer +Pattern], for instance, is implicitly implemented in the [`EventBus`] in +CometBFT, but in a very loosely coupled manner analogous to how it is +implemented in the Actor model. Such loosely coupled approaches are generally +better suited for cases where coupling between components needs to adapt at +runtime, but this is not the case for CometBFT - all impactful coupling happens +at compile time. This points to the possibility that this pattern is +inappropriately applied, except in the case of the WebSocket-based event +subscriptions. + +Another alternative is possible within CometBFT if one wants to access +information from other reactors/services: the [`Switch`] allows developers to +look up reactors, at runtime, and access methods directly on those reactors. +This is again an inappropriate pattern because all lookups are hard-coded, and +reactors/services are not dynamically created/destroyed at runtime. + +This suggests that a different approach is necessary for cross-component +interaction - ideally one which provides more robust compile-time guarantees +than the current ones. + +## Discussion + +A more type-safe, understandable Observer pattern is proposed here than what +currently exists in CometBFT. For example, in the consensus state there are many +places where events are published via the event bus, e.g.: + +- +- +- +- etc. + +All of these event publishing methods, not only for consensus state but for +other types of event publishers, are defined on this central `EventBus` type, +which seems to signal that its functionality should be decentralized. + +### Strongly Typed Event Observer + +A simple alternative pattern here would be to define an **event observer** +interface for every major component of the system capable of producing events. +For consensus state, this may end up looking something like: + +```golang +package consensus + +// StateObserver is specific to the consensus.State struct, and all of its +// methods are called from within consensus.State instead of using an "event +// bus". This allows for greater compile-time guarantees through doing away with +// the generic pub/sub mechanism in the event bus. +// +// Note how all methods are infallible (i.e. they do not return any errors). +// This is functionally equivalent to the fire-and-forget pattern implemented by +// the event bus. +// +// Also note how method names are prefixed by the name of the relevant producer +// of events (in this case "ConsensusState", corresponding to the +// consensus.State struct). This is intentional to allow composition of +// observers of multiple different components without function names clashing. +// +// Finally, given that this is just straightforward Go, it is up to either the +// caller or the callee to decide how to handle the concurrency of certain +// events. The event bus approach, by contrast, is always concurrent and relies +// on Go channels, which could end up filling up and causing back-pressure into +// the caller (already observed in slow WebSocket subscribers). 
+type StateObserver interface { + ConsensusStateNewRoundStep(ev EventDataRoundState) + ConsensusStateTimeoutPropose(ev EventDataRoundState) + ConsensusStateTimeoutWait(ev EventDataRoundState) + // ... +} +``` + +And on `consensus.State` one could easily either supply such an observer in the +constructor, or define a new method that allows one to set the observer, e.g.: + +```golang +package consensus + +func (cs *State) SetObserver(obs StateObserver) { + cs.observer = obs +} +``` + +Then, instead of publishing events via the event bus, one would simply do the +following: + +```diff +- if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { +- cs.Logger.Error("failed publishing new round step", "err", err) +- } ++ // Notify the observer ++ cs.observer.ConsensusStateNewRoundStep(rs) +``` + +### Comparing "Subscription" Interfaces + +The `EventBus` offers two slightly different subscription mechanisms for events, +both of which rely on Go channels under the hood for their implementation. + +- [`Subscribe`] +- [`SubscribeUnbuffered`] + +The observer pattern proposed in this RFC can facilitate both patterns and more, +given that one has the option of intervening synchronously in a blocking way +when the "publisher" calls the observer event handler. Using the proposed +observer pattern, it is up to either the publisher or the observer to implement +their own concurrency. + +### Fanout Observers + +How then does one simulate the same sort of pub/sub functionality that the +`EventBus` provides using this approach? Where this sort of behaviour is +absolutely necessary, it is trivial to build a "fanout" observer that implements +this interface, e.g.: + +```golang +type StateFanoutObserver struct { + observers []StateObserver +} + +func NewStateFanoutObserver(observers ...StateObserver) *StateFanoutObserver { + return &StateFanoutObserver{ + observers: observers, + } +} + +func (o *StateFanoutObserver) ConsensusStateNewRoundStep(ev EventDataRoundState) { + for _, obs := range o.observers { + obs.ConsensusStateNewRoundStep(ev) + } +} + +// ... +``` + +### Testing + +Many tests in the CometBFT codebase rely heavily on subscribing to specific +events directly via the event bus. This can easily be accomplished using the +approach described in the [Fanout Observers](#fanout-observers) section: + +```golang +state.SetObserver( + NewStateFanoutObserver( + // An observer specifically for use during testing. + newTestStateObserver(), + // ... other observers here that would be used in production + ), +) +``` + +One could easily also define an ergonomic observer type that would allow inline +definition and overriding of only specific event handlers: + +```golang +type testObserver struct { + newRoundStep func(EventDataRoundState) + timeoutPropose func(EventDataRoundState) + // ... +} + +func (o *testObserver) ConsensusStateNewRoundStep(ev EventDataRoundState) { + if o.newRoundStep != nil { + o.newRoundStep(ev) + } +} + +// ... + +func TestCustomObserver(t *testing.T) { + testObs := &testObserver{} + testObs.newRoundStep = func(ev EventDataRoundState) { + // Custom code here called upon new round step + } + // ... +} +``` + +### Event Subscription + +The current WebSocket-based event subscription mechanism is envisaged to go away +at some point in future, and there is no other mechanism by which external +observers can subscribe to events. + +[ADR 101], however, provides a more general alternative through which +integrators can gain access to event data from outside of the node.
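+
+In the meantime, nothing about the proposed pattern precludes building such a
+bridge: a channel-backed subscription facade could be implemented as just
+another observer. The following is a purely illustrative sketch (the
+`bufferedStateObserver` name and its drop-on-overflow policy are hypothetical,
+not part of this proposal), reusing the `StateObserver` interface and
+`EventDataRoundState` type from the sketches above:
+
+```golang
+// bufferedStateObserver forwards events into a buffered channel, recovering
+// Subscribe-like semantics on top of the observer pattern. The handler never
+// blocks the caller: when the buffer is full the event is dropped, making the
+// back-pressure policy explicit and local to the observer.
+type bufferedStateObserver struct {
+	events chan EventDataRoundState
+}
+
+func newBufferedStateObserver(capacity int) *bufferedStateObserver {
+	return &bufferedStateObserver{
+		events: make(chan EventDataRoundState, capacity),
+	}
+}
+
+func (o *bufferedStateObserver) ConsensusStateNewRoundStep(ev EventDataRoundState) {
+	select {
+	case o.events <- ev:
+	default: // buffer full: drop rather than back-pressure consensus
+	}
+}
+
+// Events exposes the channel to a consumer, e.g. an RPC-facing bridge.
+func (o *bufferedStateObserver) Events() <-chan EventDataRoundState {
+	return o.events
+}
+
+// ... remaining StateObserver methods would follow the same pattern.
+```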
Once ADR 101 +has been implemented, the whole WebSocket-based interface could be removed. + +### Pros and Cons + +The benefits of the proposed approach include: + +- Greater compile-time correctness guarantees +- Code becomes easier to reason about, since one can easily follow the call + chain for certain events using one's IDE instead of needing to search the + codebase for subscriptions to certain events +- Easier to test +- Does away with needing to access internal/private `eventBus` variables within + reactors/state from tests ([example][test-eventbus-access]) +- Splits event generation and handling out into a per-package responsibility, + more cleanly separating and modularizing the codebase + +The drawbacks of the proposed approach include: + +- Potentially involves writing more code (volume-wise) than what is currently + present, although the new code would be simpler +- Concurrency concerns need to be reasoned about carefully, as back-pressure is + still possible depending on how observers are implemented + +[\#1055]: https://github.com/cometbft/cometbft/issues/1055 +[RFC 104]: rfc-104-actor-model.md +[Actor]: https://en.wikipedia.org/wiki/Actor_model +[Observer Pattern]: https://en.wikipedia.org/wiki/Observer_pattern +[`EventBus`]: https://github.com/cometbft/cometbft/blob/b23ef56f8e6d8a7015a7f816a61f2e53b0b07b0d/types/event_bus.go#L33 +[`Switch`]: https://github.com/cometbft/cometbft/blob/b23ef56f8e6d8a7015a7f816a61f2e53b0b07b0d/p2p/switch.go#L70 +[test-eventbus-access]: https://github.com/cometbft/cometbft/blob/091a1f312e5f2f4b183fab1d57d729a6c478ff1f/consensus/mempool_test.go#L40 +[ADR 101]: https://github.com/cometbft/cometbft/issues/574 +[`Subscribe`]: https://github.com/cometbft/cometbft/blob/a9deb305e51278c25ad92b249caa092d24c5fc29/types/event_bus.go#L75 +[`SubscribeUnbuffered`]: https://github.com/cometbft/cometbft/blob/a9deb305e51278c25ad92b249caa092d24c5fc29/types/event_bus.go#L86 diff --git a/docs/rfc/rfc-template.md b/docs/references/rfc/rfc-template.md similarity index 100% rename from docs/rfc/rfc-template.md rename to docs/references/rfc/rfc-template.md diff --git a/docs/references/rfc/tendermint-core/README.md b/docs/references/rfc/tendermint-core/README.md new file mode 100644 index 00000000000..014f04d9ad8 --- /dev/null +++ b/docs/references/rfc/tendermint-core/README.md @@ -0,0 +1,42 @@ +--- +order: 1 +parent: + order: false +--- + +# Tendermint Core Requests for Comments + +This document serves as a historical reference for all RFCs that were logged +during the development of Tendermint Core. + +This list is frozen as-is, and new RFCs should be added [here](../). 
+ +## Table of Contents + +- [RFC-000: P2P Roadmap](rfc-000-p2p-roadmap.rst) +- [RFC-001: Storage Engines](rfc-001-storage-engine.rst) +- [RFC-002: Interprocess Communication](rfc-002-ipc-ecosystem.md) +- [RFC-003: Performance Taxonomy](rfc-003-performance-questions.md) +- [RFC-004: E2E Test Framework Enhancements](rfc-004-e2e-framework.rst) +- [RFC-005: Event System](rfc-005-event-system.rst) +- [RFC-006: Event Subscription](rfc-006-event-subscription.md) +- [RFC-007: Deterministic Proto Byte Serialization](rfc-007-deterministic-proto-bytes.md) +- [RFC-008: Don't Panic](rfc-008-do-not-panic.md) +- [RFC-009: Consensus Parameter Upgrades](rfc-009-consensus-parameter-upgrades.md) +- [RFC-010: P2P Light Client](rfc-010-p2p-light-client.rst) +- [RFC-011: Delete Gas](rfc-011-delete-gas.md) +- [RFC-012: Event Indexing Revisited](rfc-012-custom-indexing.md) +- [RFC-013: ABCI++](rfc-013-abci++.md) +- [RFC-014: Semantic Versioning](rfc-014-semantic-versioning.md) +- [RFC-015: ABCI++ Tx Mutation](rfc-015-abci++-tx-mutation.md) +- [RFC-016: Node Architecture](rfc-016-node-architecture.md) +- [RFC-017: ABCI++ Vote Extension Propagation](rfc-017-abci++-vote-extension-propag.md) +- [RFC-018: BLS Signature Aggregation Exploration](rfc-018-bls-agg-exploration.md) +- [RFC-019: Configuration File Versioning](rfc-019-config-version.md) +- [RFC-020: Onboarding Projects](rfc-020-onboarding-projects.rst) +- [RFC-021: The Future of the Socket Protocol](rfc-021-socket-protocol.md) +- [RFC-023: Semi-permanent Testnet](rfc-023-semi-permanent-testnet.md) +- [RFC-024: Block Structure Consolidation](rfc-024-block-structure-consolidation.md) +- [RFC-025: Application Defined Transaction Storage](rfc-025-support-app-side-mempool.md) +- [RFC-026: Banning peers based on ResponseCheckTx](rfc-026-p2p-bad-peers-checktx.md) +- [RFC-027: P2P Message Bandwidth Report](rfc-027-p2p-message-bandwidth-report.md) diff --git a/docs/rfc/tendermint-core/images/abci++.png b/docs/references/rfc/tendermint-core/images/abci++.png similarity index 100% rename from docs/rfc/tendermint-core/images/abci++.png rename to docs/references/rfc/tendermint-core/images/abci++.png diff --git a/docs/rfc/tendermint-core/images/abci.png b/docs/references/rfc/tendermint-core/images/abci.png similarity index 100% rename from docs/rfc/tendermint-core/images/abci.png rename to docs/references/rfc/tendermint-core/images/abci.png diff --git a/docs/rfc/tendermint-core/images/node-dependency-tree.svg b/docs/references/rfc/tendermint-core/images/node-dependency-tree.svg similarity index 100% rename from docs/rfc/tendermint-core/images/node-dependency-tree.svg rename to docs/references/rfc/tendermint-core/images/node-dependency-tree.svg diff --git a/docs/rfc/tendermint-core/images/receive-rate-all.png b/docs/references/rfc/tendermint-core/images/receive-rate-all.png similarity index 100% rename from docs/rfc/tendermint-core/images/receive-rate-all.png rename to docs/references/rfc/tendermint-core/images/receive-rate-all.png diff --git a/docs/rfc/tendermint-core/images/send-rate-all.png b/docs/references/rfc/tendermint-core/images/send-rate-all.png similarity index 100% rename from docs/rfc/tendermint-core/images/send-rate-all.png rename to docs/references/rfc/tendermint-core/images/send-rate-all.png diff --git a/docs/rfc/tendermint-core/images/top-3-percent-receive.png b/docs/references/rfc/tendermint-core/images/top-3-percent-receive.png similarity index 100% rename from docs/rfc/tendermint-core/images/top-3-percent-receive.png rename to 
docs/references/rfc/tendermint-core/images/top-3-percent-receive.png diff --git a/docs/rfc/tendermint-core/images/top-3-percent-send.png b/docs/references/rfc/tendermint-core/images/top-3-percent-send.png similarity index 100% rename from docs/rfc/tendermint-core/images/top-3-percent-send.png rename to docs/references/rfc/tendermint-core/images/top-3-percent-send.png diff --git a/docs/rfc/tendermint-core/rfc-000-p2p-roadmap.rst b/docs/references/rfc/tendermint-core/rfc-000-p2p-roadmap.rst similarity index 100% rename from docs/rfc/tendermint-core/rfc-000-p2p-roadmap.rst rename to docs/references/rfc/tendermint-core/rfc-000-p2p-roadmap.rst diff --git a/docs/rfc/tendermint-core/rfc-001-storage-engine.rst b/docs/references/rfc/tendermint-core/rfc-001-storage-engine.rst similarity index 100% rename from docs/rfc/tendermint-core/rfc-001-storage-engine.rst rename to docs/references/rfc/tendermint-core/rfc-001-storage-engine.rst diff --git a/docs/rfc/tendermint-core/rfc-002-ipc-ecosystem.md b/docs/references/rfc/tendermint-core/rfc-002-ipc-ecosystem.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-002-ipc-ecosystem.md rename to docs/references/rfc/tendermint-core/rfc-002-ipc-ecosystem.md diff --git a/docs/rfc/tendermint-core/rfc-003-performance-questions.md b/docs/references/rfc/tendermint-core/rfc-003-performance-questions.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-003-performance-questions.md rename to docs/references/rfc/tendermint-core/rfc-003-performance-questions.md diff --git a/docs/rfc/tendermint-core/rfc-004-e2e-framework.rst b/docs/references/rfc/tendermint-core/rfc-004-e2e-framework.rst similarity index 99% rename from docs/rfc/tendermint-core/rfc-004-e2e-framework.rst rename to docs/references/rfc/tendermint-core/rfc-004-e2e-framework.rst index 2046645a0c5..bd525718e9b 100644 --- a/docs/rfc/tendermint-core/rfc-004-e2e-framework.rst +++ b/docs/references/rfc/tendermint-core/rfc-004-e2e-framework.rst @@ -199,7 +199,7 @@ interesting to engage with. These could include dimensions, such as: - As a flavor or mult-version testing, include upgrade testing, to build confidence in migration code and procedures. -- Additional test applications, particularly practical-type applciations +- Additional test applications, particularly practical-type applications including some that use gaiad and/or the cosmos-sdk. Test-only applications that simulate other kinds of applications (e.g. variable application operation latency.) diff --git a/docs/rfc/tendermint-core/rfc-005-event-system.rst b/docs/references/rfc/tendermint-core/rfc-005-event-system.rst similarity index 98% rename from docs/rfc/tendermint-core/rfc-005-event-system.rst rename to docs/references/rfc/tendermint-core/rfc-005-event-system.rst index b70ee6c05e2..2eb302dbaf3 100644 --- a/docs/rfc/tendermint-core/rfc-005-event-system.rst +++ b/docs/references/rfc/tendermint-core/rfc-005-event-system.rst @@ -71,7 +71,7 @@ Changes to Published Events As part of this process, the Tendermint team should do a study of the existing event types and ensure that there are viable production use cases for subscriptions to all event types. Instinctively it seems plausible that some -of the events may not be useable outside of tendermint, (e.g. ``TimeoutWait`` +of the events may not be usable outside of tendermint (e.g. ``TimeoutWait`` or ``NewRoundStep``) and it might make sense to remove them. Certainly, it would be good to make sure that we don't maintain infrastructure for unused or un-useful message indefinitely.
diff --git a/docs/rfc/tendermint-core/rfc-006-event-subscription.md b/docs/references/rfc/tendermint-core/rfc-006-event-subscription.md similarity index 99% rename from docs/rfc/tendermint-core/rfc-006-event-subscription.md rename to docs/references/rfc/tendermint-core/rfc-006-event-subscription.md index 0e03c119120..1da41520268 100644 --- a/docs/rfc/tendermint-core/rfc-006-event-subscription.md +++ b/docs/references/rfc/tendermint-core/rfc-006-event-subscription.md @@ -196,8 +196,8 @@ mutually exclusive. [rpc-service]: https://docs.tendermint.com/v0.34/rpc/ [rpc-methods]: https://github.com/tendermint/tendermint/blob/main/rpc/core/routes.go#L12 -[events]: ./rfc-005-event-system.rst -[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport +[events]: rfc-005-event-system.rst +[rpc-transport]: rfc-002-ipc-ecosystem.md#rpc-transport [ws]: https://datatracker.ietf.org/doc/html/rfc6455 [json-response]: https://www.jsonrpc.org/specification#response_object [json-notify]: https://www.jsonrpc.org/specification#notification diff --git a/docs/rfc/tendermint-core/rfc-007-deterministic-proto-bytes.md b/docs/references/rfc/tendermint-core/rfc-007-deterministic-proto-bytes.md similarity index 99% rename from docs/rfc/tendermint-core/rfc-007-deterministic-proto-bytes.md rename to docs/references/rfc/tendermint-core/rfc-007-deterministic-proto-bytes.md index c1521753bc1..3c7f11c2541 100644 --- a/docs/rfc/tendermint-core/rfc-007-deterministic-proto-bytes.md +++ b/docs/references/rfc/tendermint-core/rfc-007-deterministic-proto-bytes.md @@ -97,7 +97,7 @@ data structure that the digital signature signed using the process's local data. 2. Reordered all message fields to be in tag-sorted order. Tag-sorting top-level fields will place all fields of the same tag in a adjacent -to eachother within the serialized representation. +to each other within the serialized representation. 3. Reordered the contents of all `repeated` fields to be in lexicographically sorted order. 
diff --git a/docs/rfc/tendermint-core/rfc-008-do-not-panic.md b/docs/references/rfc/tendermint-core/rfc-008-do-not-panic.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-008-do-not-panic.md rename to docs/references/rfc/tendermint-core/rfc-008-do-not-panic.md diff --git a/docs/rfc/tendermint-core/rfc-009-consensus-parameter-upgrades.md b/docs/references/rfc/tendermint-core/rfc-009-consensus-parameter-upgrades.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-009-consensus-parameter-upgrades.md rename to docs/references/rfc/tendermint-core/rfc-009-consensus-parameter-upgrades.md diff --git a/docs/rfc/tendermint-core/rfc-010-p2p-light-client.rst b/docs/references/rfc/tendermint-core/rfc-010-p2p-light-client.rst similarity index 100% rename from docs/rfc/tendermint-core/rfc-010-p2p-light-client.rst rename to docs/references/rfc/tendermint-core/rfc-010-p2p-light-client.rst diff --git a/docs/rfc/tendermint-core/rfc-011-delete-gas.md b/docs/references/rfc/tendermint-core/rfc-011-delete-gas.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-011-delete-gas.md rename to docs/references/rfc/tendermint-core/rfc-011-delete-gas.md diff --git a/docs/rfc/tendermint-core/rfc-012-custom-indexing.md b/docs/references/rfc/tendermint-core/rfc-012-custom-indexing.md similarity index 98% rename from docs/rfc/tendermint-core/rfc-012-custom-indexing.md rename to docs/references/rfc/tendermint-core/rfc-012-custom-indexing.md index 489bcccc1d2..83d03a32207 100644 --- a/docs/rfc/tendermint-core/rfc-012-custom-indexing.md +++ b/docs/references/rfc/tendermint-core/rfc-012-custom-indexing.md @@ -56,7 +56,7 @@ a datum published to or received from the pubsub bus, and **ABCI event** or **Indexing** in this context means recording the association between certain ABCI metadata and the blocks or transactions they're attached to. The ABCI metadata typically carry application-specific details like sender and recipient -addresses, catgory tags, and so forth, that are not part of consensus but are +addresses, category tags, and so forth, that are not part of consensus but are used by UI tools to find and display transactions of interest. The consensus node records the blocks and transactions as part of its block @@ -128,7 +128,7 @@ proprietary indexer. These include: are reported to a custom indexer. - The interface requires the implementation to define methods for the legacy - search and query API. This requirement comes from the integation with the + search and query API. This requirement comes from the integration with the [event subscription RPC API][event-rpc], but actually supporting these methods is not trivial. @@ -199,7 +199,7 @@ Inevitably, a question will arise whether we could implement both strategies and toggle between them with a flag. That would be a worst-case scenario, requiring us to maintain the complexity of two very-different operational concerns. If our goal is that Tendermint should be as simple, efficient, and -trustworthy as posible, there is not a strong case for making these options +trustworthy as possible, there is not a strong case for making these options configurable: We should pick a side and commit to it. 
### Design Principles diff --git a/docs/rfc/tendermint-core/rfc-013-abci++.md b/docs/references/rfc/tendermint-core/rfc-013-abci++.md similarity index 98% rename from docs/rfc/tendermint-core/rfc-013-abci++.md rename to docs/references/rfc/tendermint-core/rfc-013-abci++.md index 3f4e3c82d12..aef486b1c35 100644 --- a/docs/rfc/tendermint-core/rfc-013-abci++.md +++ b/docs/references/rfc/tendermint-core/rfc-013-abci++.md @@ -182,7 +182,7 @@ In most configurations, we expect that the consensus engine and the application This memory model conversion is typically considered negligible, as delay here is measured on the order of microseconds at most, whereas we face millisecond delays due to cryptography and network overheads. Thus we ignore the overhead in the case of linked libraries. -In the case where the consensus engine and the application are ran in separate processes, and thus communicate with a form of Inter-process communication (IPC), the delays can easily become on the order of miliseconds based upon the data sent. Thus its important to consider whats happening here. +In the case where the consensus engine and the application are run in separate processes, and thus communicate with a form of Inter-process communication (IPC), the delays can easily become on the order of milliseconds based upon the data sent. Thus it's important to consider what's happening here. We go through this phase by phase. ##### Prepare proposal IPC overhead @@ -228,7 +228,7 @@ Proposed - Finalize Block - Can black-box call BeginBlock, DeliverTx, EndBlock given the cached block data - Vote Extensions adds more complexity to core Tendermint Data Structures -- Allowing alternate alternate execution models will lead to a proliferation of new ways for applications to violate expected guarantees. +- Allowing alternate execution models will lead to a proliferation of new ways for applications to violate expected guarantees. ### Neutral diff --git a/docs/rfc/tendermint-core/rfc-014-semantic-versioning.md b/docs/references/rfc/tendermint-core/rfc-014-semantic-versioning.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-014-semantic-versioning.md rename to docs/references/rfc/tendermint-core/rfc-014-semantic-versioning.md diff --git a/docs/rfc/tendermint-core/rfc-015-abci++-tx-mutation.md b/docs/references/rfc/tendermint-core/rfc-015-abci++-tx-mutation.md similarity index 98% rename from docs/rfc/tendermint-core/rfc-015-abci++-tx-mutation.md rename to docs/references/rfc/tendermint-core/rfc-015-abci++-tx-mutation.md index 92d9ed66877..5ae7dc02bf2 100644 --- a/docs/rfc/tendermint-core/rfc-015-abci++-tx-mutation.md +++ b/docs/references/rfc/tendermint-core/rfc-015-abci++-tx-mutation.md @@ -74,7 +74,7 @@ from the mempool, so this would be a pretty straightforward change. ### What value may be added to Tendermint by introducing transaction replacement? -Transaction replacement would would enable applications to aggregate or disaggregate transactions. +Transaction replacement would enable applications to aggregate or disaggregate transactions. For aggregation, a set of transactions that all related work, such as transferring tokens between the same two accounts, could be replaced with a single transaction, @@ -132,7 +132,7 @@ Malicious nodes will be granted a new vector for censoring transactions. There is no guarantee that a replaced transactions is actually executed at all. A malicious node could censor a transaction by simply listing it as replaced.
Honest nodes seeing the replacement would flush the transaction from their mempool -and not execute or propose it it in later blocks. +and not execute or propose it in later blocks. ### Transaction tracking implementations diff --git a/docs/rfc/tendermint-core/rfc-016-node-architecture.md b/docs/references/rfc/tendermint-core/rfc-016-node-architecture.md similarity index 90% rename from docs/rfc/tendermint-core/rfc-016-node-architecture.md rename to docs/references/rfc/tendermint-core/rfc-016-node-architecture.md index 80212370e25..b43e16237f2 100644 --- a/docs/rfc/tendermint-core/rfc-016-node-architecture.md +++ b/docs/references/rfc/tendermint-core/rfc-016-node-architecture.md @@ -7,7 +7,7 @@ ## Abstract -The `node` package is the entry point into the Tendermint codebase, used both by the command line and programatically to create the nodes that make up a network. The package has suffered the most from the evolution of the codebase, becoming bloated as developers clipped on their bits of code here and there to get whatever feature they wanted working. +The `node` package is the entry point into the Tendermint codebase, used both by the command line and programmatically to create the nodes that make up a network. The package has suffered the most from the evolution of the codebase, becoming bloated as developers clipped on their bits of code here and there to get whatever feature they wanted working. The decisions made at the node level have the biggest impact to simplifying the protocols within them, unlocking better internal designs and making Tendermint more intuitive to use and easier to understand from the outside. Work, in minor increments, has already begun on this section of the codebase. This document exists to spark forth the necessary discourse in a few related areas that will help the team to converge on the long term makeup of the node. @@ -19,7 +19,7 @@ The following is a list of points of discussion around the architecture of the n The node object is currently stuffed with every component that possibly exists within Tendermint. In the constructor, all objects are built and interlaid with one another in some awkward dance. My guiding principle is that the node should only be made up of the components that it wants to have direct control of throughout its life. The node is a service which currently has the purpose of starting other services up in a particular order and stopping them all when commanded to do so. However, there are many services which are not direct dependents i.e. the mempool and evidence services should only be working when the consensus service is running. I propose to form more of a hierarchical structure of dependents which forces us to be clear about the relations that one component has to the other. More concretely, I propose the following dependency tree: -![node dependency tree](./images/node-dependency-tree.svg) +![node dependency tree](images/node-dependency-tree.svg) Many of the further discussion topics circle back to this representation of the node. @@ -34,7 +34,7 @@ In a decentralized message passing system, individual services make their decisi Both centralized and decentralized systems rely on the communication of the nodes current height and a judgement on the height of the head of the chain. The latter, working out the head of the chain, is quite a difficult challenge as their is nothing preventing the node from acting maliciously and providing a different height. 
Currently both blocksync, consensus (and to a certain degree statesync), have parallel systems where peers communicate their height. This could be streamlined with the consensus (or even the p2p layer), broadcasting peer heights and either the node or the other state advancing mechanisms acting accordingly. -Currently, when a node starts, it turns on every service that it is attached to. This means that while a node is syncing up by requesting blocks, it is also receiving transactions and votes, as well as snapshot and block requests. This is a needless use of bandwidth. An implementation of an orchestrator, regardless of whether the system is heirachical or not, should look to be able to open and close channels dynamically and effectively broadcast which services it is running. Integrating this with service discovery may also lead to a better serivce to peers. +Currently, when a node starts, it turns on every service that it is attached to. This means that while a node is syncing up by requesting blocks, it is also receiving transactions and votes, as well as snapshot and block requests. This is a needless use of bandwidth. An implementation of an orchestrator, regardless of whether the system is hierarchical or not, should look to be able to open and close channels dynamically and effectively broadcast which services it is running. Integrating this with service discovery may also lead to a better service to peers. The orchestrator allows for some deal of variablity in how a node is constructed. Does it just run blocksync, shadowing the head of the chain and be highly available for querying. Does it rely on state sync at all? An important question that arises from this dynamicism is we ideally want to encourage nodes to provide as much of their resources as possible so that their is a healthy amount of providers to consumers. Do we make all services compulsory or allow for them to be disabled? Arguably it's possible that a user forks the codebase and rips out the blocksync code because they want to reduce bandwidth so this is more a question of how easy do we want to make this for users. @@ -48,7 +48,7 @@ The block executor is an important component that is currently used by both cons ### The Interprocess communication systems: RPC, P2P, ABCI, and Events The schematic supplied above shows the relations between the different services, the node, the block executor, and the storage layer. Represented as colored dots are the components responsible for different roles of interprocess communication (IPC). These components permeate throughout the code base, seeping into most services. What can provide powerful functionality on one hand can also become a twisted vine, creating messy corner cases and convoluting the protocols themselves. A lot of the thinking around -how we want our IPC systens to function has been summarised in this [RFC](./rfc-002-ipc-ecosystem.md). In this section, I'd like to focus the reader on the relation between the IPC and the node structure. An issue that has frequently risen is that the RPC has control of the components where it strikes me as being more logical for the component to dictate the information that is emitted/available and the knobs it wishes to expose. The RPC is also inextricably tied to the node instance and has situations where it is passed pointers directly to the storage engine and other components. +how we want our IPC systems to function has been summarised in this [RFC](rfc-002-ipc-ecosystem.md). 
In this section, I'd like to focus the reader on the relation between the IPC and the node structure. An issue that has frequently risen is that the RPC has control of the components where it strikes me as being more logical for the component to dictate the information that is emitted/available and the knobs it wishes to expose. The RPC is also inextricably tied to the node instance and has situations where it is passed pointers directly to the storage engine and other components. I am currently convinced of the approach that the p2p layer takes and would like to see other IPC components follow suit. This would mean that the RPC and events system would be constructed in the node yet would pass the adequate methods to register endpoints and topics to the sub components. For example, diff --git a/docs/rfc/tendermint-core/rfc-017-abci++-vote-extension-propag.md b/docs/references/rfc/tendermint-core/rfc-017-abci++-vote-extension-propag.md similarity index 99% rename from docs/rfc/tendermint-core/rfc-017-abci++-vote-extension-propag.md rename to docs/references/rfc/tendermint-core/rfc-017-abci++-vote-extension-propag.md index 3892bfd67c9..fe6c2ee37e9 100644 --- a/docs/rfc/tendermint-core/rfc-017-abci++-vote-extension-propag.md +++ b/docs/references/rfc/tendermint-core/rfc-017-abci++-vote-extension-propag.md @@ -31,7 +31,7 @@ In the [Discussion](#discussion) section, subsection [Solutions Proposed](#solut worded abstracting away from implementation details, whilst subsections [Feasibility of the Proposed Solutions](#feasibility-of-the-proposed-solutions) and [Current Limitations and Possible Implementations](#current-limitations-and-possible-implementations) -analize the viability of one of the proposed solutions in the context of Tendermint's architecture +analyze the viability of one of the proposed solutions in the context of Tendermint's architecture based on reactors. Finally, [Formalization Work](#formalization-work) briefly discusses the work still needed demonstrate the correctness of the chosen solution. @@ -149,7 +149,7 @@ discussions and need to be addressed. They are (roughly) ordered from easiest to If sets *valseth* and *valseth+1* are disjoint, more than *2nh/3* of validators in height *h* should - have actively participated in conensus in *h*. So, as of height *h*, only a minority of validators + have actively participated in consensus in *h*. So, as of height *h*, only a minority of validators in *h* can be lagging behind, although they could all lag behind from *h+1* on, as they are no longer validators, only full nodes. This situation falls under the assumptions of case (h) below. @@ -369,7 +369,7 @@ These are the solutions proposed in discussions leading up to this RFC. At this point, *all* full nodes, including all validators in *valseth+1*, have advanced to height *h+1* believing they are late, and so, expecting the *hypothetical* leading majority of - validators in *valseth+1* to propose for *h+1*. As a result, the blockhain + validators in *valseth+1* to propose for *h+1*. As a result, the blockchain grinds to a halt. 
A (rather complex) ad-hoc mechanism would need to be carried out by node operators to roll back all validators to the precommit step of height *h*, round *r*, so that they can regenerate @@ -530,7 +530,7 @@ The two main drawbacks of this base implementation are: #### Possible Optimization: Pruning the Extended Commit History If we cannot switch from the consensus reactor back to the blocksync reactor we cannot prune the extended commit backlog in the block store without sacrificing the implementation's correctness. The asynchronous -nature of our distributed system model allows a process to fall behing an arbitrary number of +nature of our distributed system model allows a process to fall behind an arbitrary number of heights, and thus all extended commits need to be kept *just in case* a node that late had previously switched to the consensus reactor. @@ -538,7 +538,7 @@ However, there is a possibility to optimize the base implementation. Every time we could prune from the block store all extended commits that are more than *d* heights in the past. Then, we need to handle two new situations, roughly equivalent to cases (h.1) and (h.2) described above. -- (h.1) A node starts from scratch or recovers after a crash. In thisy case, we need to modify the +- (h.1) A node starts from scratch or recovers after a crash. In this case, we need to modify the blocksync reactor's base implementation. - when receiving a `BlockResponse` message, it MUST accept that the extended commit set to `nil`, - when sending a `BlockResponse` message, if the block store contains the extended commit for that diff --git a/docs/rfc/tendermint-core/rfc-018-bls-agg-exploration.md b/docs/references/rfc/tendermint-core/rfc-018-bls-agg-exploration.md similarity index 99% rename from docs/rfc/tendermint-core/rfc-018-bls-agg-exploration.md rename to docs/references/rfc/tendermint-core/rfc-018-bls-agg-exploration.md index 6de7510ab47..de06e09f7ac 100644 --- a/docs/rfc/tendermint-core/rfc-018-bls-agg-exploration.md +++ b/docs/references/rfc/tendermint-core/rfc-018-bls-agg-exploration.md @@ -60,7 +60,7 @@ elliptic curves over a finite field. With some original curve, you can define tw `G1` and `G2` which are points of the original curve _modulo_ different values. Finally, you define a third group `Gt`, where points from `G1` and `G2` satisfy the property of bilinearity with `Gt`. In this scheme, the function `e` takes -as inputs points in `G1` and `G2` and outputs values in `Gt`. Succintly, given +as inputs points in `G1` and `G2` and outputs values in `Gt`. Succinctly, given some point `P` in `G1` and some point `Q` in `G1`, `e(P, Q) = C` where `C` is in `Gt`. You can efficiently compute the mapping of points in `G1` and `G2` into `Gt`, but you cannot efficiently determine what points were summed and paired to @@ -102,7 +102,7 @@ BLS signatures have already gained traction within several popular projects. Gossip could be updated to aggregate vote signatures during a consensus round. This appears to be of frankly little utility. Creating an aggregated signature incurs overhead, so frequently re-aggregating may incur a significant -overhead. How costly this is is still subject to further investigation and +overhead. How costly this is remains subject to further investigation and performance testing.
Even if vote signatures were aggregated before gossip, each validator would still @@ -179,7 +179,7 @@ number of operations used to verify a signature does not grow at all with the number of signatures included in the aggregate signature (as long as the signers signed over the same message data as is the case in Tendermint). -It is worth noting that this would also represent a _degredation_ in signature +It is worth noting that this would also represent a _degradation_ in signature verification time for chains with small validator sets. When batch verifying only 32 signatures, our ed25519 library takes .57 milliseconds, whereas BLS would still require the same 1.5 milliseconds. @@ -232,7 +232,7 @@ instead of the full list of multi-signatures as we have them now. Aggregation requires a specific signature algorithm, and our legacy signing schemes cannot be aggregated. In practice, this means that aggregated signatures could be created for a subset of validators using BLS signatures, and validators -with other key types (such as Ed25519) would still have to be be separately +with other key types (such as Ed25519) would still have to be separately propagated in blocks and votes. #### Many HSMs do not support aggregated signatures diff --git a/docs/rfc/tendermint-core/rfc-019-config-version.md b/docs/references/rfc/tendermint-core/rfc-019-config-version.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-019-config-version.md rename to docs/references/rfc/tendermint-core/rfc-019-config-version.md diff --git a/docs/rfc/tendermint-core/rfc-020-onboarding-projects.rst b/docs/references/rfc/tendermint-core/rfc-020-onboarding-projects.rst similarity index 98% rename from docs/rfc/tendermint-core/rfc-020-onboarding-projects.rst rename to docs/references/rfc/tendermint-core/rfc-020-onboarding-projects.rst index 4bee7aa84b4..7a3b3102cb1 100644 --- a/docs/rfc/tendermint-core/rfc-020-onboarding-projects.rst +++ b/docs/references/rfc/tendermint-core/rfc-020-onboarding-projects.rst @@ -164,7 +164,7 @@ covers some of the background and approach. While the changes are in this project are relatively rote, this will provide exposure to lots of different areas of the codebase as well as insight into -how different areas of the codebase interact with eachother, as well as +how different areas of the codebase interact with each other, as well as experience with the test suites and infrastructure. Implement more Expressive ABCI Applications @@ -216,7 +216,7 @@ messages that might be (e.g. increment a counter for certain kinds of errors) One approach might be to look at various logging statements, particularly debug statements or errors that are logged but not returned, and see if -they're convertable to counters or other metrics. +they're convertible to counters or other metrics. Expose Metrics to Tests +++++++++++++++++++++++ diff --git a/docs/rfc/tendermint-core/rfc-021-socket-protocol.md b/docs/references/rfc/tendermint-core/rfc-021-socket-protocol.md similarity index 99% rename from docs/rfc/tendermint-core/rfc-021-socket-protocol.md rename to docs/references/rfc/tendermint-core/rfc-021-socket-protocol.md index 4b8fd2ab6d3..cfc714d2283 100644 --- a/docs/rfc/tendermint-core/rfc-021-socket-protocol.md +++ b/docs/references/rfc/tendermint-core/rfc-021-socket-protocol.md @@ -235,7 +235,7 @@ design. essential to avoid it. This is a sound principle, but conflates protocol errors with "mechanical" - errors such as timeouts, resoures exhaustion, failed connections, and so on. 
+ errors such as timeouts, resource exhaustion, failed connections, and so on. Because the protocol has no way to distinguish these conditions, the only way for an application to report an error is to panic or crash. diff --git a/docs/rfc/tendermint-core/rfc-023-semi-permanent-testnet.md b/docs/references/rfc/tendermint-core/rfc-023-semi-permanent-testnet.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-023-semi-permanent-testnet.md rename to docs/references/rfc/tendermint-core/rfc-023-semi-permanent-testnet.md diff --git a/docs/rfc/tendermint-core/rfc-024-block-structure-consolidation.md b/docs/references/rfc/tendermint-core/rfc-024-block-structure-consolidation.md similarity index 99% rename from docs/rfc/tendermint-core/rfc-024-block-structure-consolidation.md rename to docs/references/rfc/tendermint-core/rfc-024-block-structure-consolidation.md index 91dec2d63f5..49f91ed1088 100644 --- a/docs/rfc/tendermint-core/rfc-024-block-structure-consolidation.md +++ b/docs/references/rfc/tendermint-core/rfc-024-block-structure-consolidation.md @@ -304,7 +304,7 @@ _each_ block. We could easily save the value and the height at which the value was updated and construct each block using the data that existed at the time. This document does not make any specific recommendations around storage since -that is likely to change with upcoming improvements to to the database infrastructure. +that is likely to change with upcoming improvements to the database infrastructure. However, it's important to note that removing fields from the block for the purposes of 'saving space' may not be that meaningful. We should instead focus our attention of removing fields from the block that are no longer needed diff --git a/docs/rfc/tendermint-core/rfc-025-support-app-side-mempool.md b/docs/references/rfc/tendermint-core/rfc-025-support-app-side-mempool.md similarity index 100% rename from docs/rfc/tendermint-core/rfc-025-support-app-side-mempool.md rename to docs/references/rfc/tendermint-core/rfc-025-support-app-side-mempool.md diff --git a/docs/rfc/tendermint-core/rfc-026-p2p-bad-peers-checktx.md b/docs/references/rfc/tendermint-core/rfc-026-p2p-bad-peers-checktx.md similarity index 98% rename from docs/rfc/tendermint-core/rfc-026-p2p-bad-peers-checktx.md rename to docs/references/rfc/tendermint-core/rfc-026-p2p-bad-peers-checktx.md index ad5a27f0c05..8ca6386bd1f 100644 --- a/docs/rfc/tendermint-core/rfc-026-p2p-bad-peers-checktx.md +++ b/docs/references/rfc/tendermint-core/rfc-026-p2p-bad-peers-checktx.md @@ -64,7 +64,7 @@ Currently, the mempool triggers a disconnect from a peer in the case of the foll - [Unknown message type](https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L184) -However, disconnecting from a peer is not the same as banning the peer. The p2p layer will close the connecton but +However, disconnecting from a peer is not the same as banning the peer. The p2p layer will close the connection but the peer can reconnect without any penalty, and if it as a persistent peer, a reconnect will be initiated from the node. @@ -102,7 +102,7 @@ Any further mentions of `banning` will be agnostic to the actual way banning is ### 1. What does banning a peer mean Tendermint recognizes that peers can accept transactions into their mempool as valid but then when the state changes, they can become invalid. -There are also transactions that are received that could never have been valid (for examle due to misconfiguration on one node).
+There are also transactions that are received that could never have been valid (for example due to misconfiguration on one node). +We thus differentiate two scenarios - a) where `CheckTx` fails due to reasons already known and b) where `CheckTx` deems a transaction could never have been valid. @@ -223,7 +223,7 @@ we also need to keep track of the `CheckTx` response code for each transaction. If invalid transactions are kept in the cache, the check is ran only when a transaction is seen for the first time. Afterwards, the transaction is cached, to avoid running `CheckTx` on transactions already checked. Thus when a transaction is received from a peer, if it is in the cache, -`CheckTx` is not ran again, but the peers' ID is addded to the list of peers who sent this particular transaction. +`CheckTx` is not run again, but the peer's ID is added to the list of peers who sent this particular transaction. These transactions are rechecked once a block is committed to verify that they are still valid. If invalid transactions are not kept in the cache, they can be resubmitted multiple times, and `CheckTx` will be executed on them upon submission. @@ -408,7 +408,7 @@ Those checks, have been mentioned in the user issues described at the beginning - Proto checks - Receiving unknown messages via the mempool channel -The previous code snippets do not incroporate these in peer banning. If we adopt those as valid reasons for banning, we should put the corresponding logic in place. +The previous code snippets do not incorporate these in peer banning. If we adopt those as valid reasons for banning, we should put the corresponding logic in place. ### Impacted mempool functionalities diff --git a/docs/rfc/tendermint-core/rfc-027-p2p-message-bandwidth-report.md b/docs/references/rfc/tendermint-core/rfc-027-p2p-message-bandwidth-report.md similarity index 98% rename from docs/rfc/tendermint-core/rfc-027-p2p-message-bandwidth-report.md rename to docs/references/rfc/tendermint-core/rfc-027-p2p-message-bandwidth-report.md index eaa99cdef4a..19199f6af5c 100644 --- a/docs/rfc/tendermint-core/rfc-027-p2p-message-bandwidth-report.md +++ b/docs/references/rfc/tendermint-core/rfc-027-p2p-message-bandwidth-report.md @@ -54,25 +54,25 @@ The image below of p2p data collected from the Blockpane validator illustrate the total bandwidth consumption of these three message types. -#### Send: +#### Send: -##### Top 3 Percent: +##### Top 3 Percent: -![](./images/top-3-percent-send.png) +![](images/top-3-percent-send.png) -##### Rate For All Messages: +##### Rate For All Messages: -![](./images/send-rate-all.png) +![](images/send-rate-all.png) -#### Receive: +#### Receive: -##### Top 3 Percent: +##### Top 3 Percent: -![](./images/top-3-percent-receive.png) +![](images/top-3-percent-receive.png) -##### Rate For All Messages: +##### Rate For All Messages: -![](./images/receive-rate-all.png) +![](images/receive-rate-all.png) ### Investigation of Message Usage diff --git a/docs/references/storage/README.md b/docs/references/storage/README.md new file mode 100644 index 00000000000..ae8fb21eb76 --- /dev/null +++ b/docs/references/storage/README.md @@ -0,0 +1,297 @@ +# Overview + +This report summarizes the changes to CometBFT storage between Q3 2023 and Q1 2024, along with all the experiments and benchmarking performed to understand the impact of those changes. + +As of Q3 2023, the CometBFT team has dedicated significant resources to addressing a number of storage-related concerns: +1. 
Pruning not working: operators noticed that even when nodes prune data, the storage footprint keeps increasing. +2. Enabling pruning slows down nodes. Many chains disable pruning due to the impact on block processing time. +3. CometBFT is addressing application-level concerns such as transaction indexing. Furthermore, operators have very coarse-grained control over what is stored on their node. +4. Comet supports many database backends, whereas ideally we should converge on one. This requires understanding the features of each DB, but also the way CometBFT uses the database. +5. The representation of keys CometBFT uses to store block and state data is suboptimal for the way this data is sorted within most kvstores. This work was [started in Tendermint 0.36](https://github.com/tendermint/tendermint/pull/5771) but not completed. We picked up that work and experimented with the proposed data layout. + +All the experiments were performed on `main` after `v1-alpha.1` was released. The experiments on Injective were done on custom branches that port the changes to `0.37.x`, combined with the changes Injective has on their fork of 0.37: +- [Injective testing "v1"](https://github.com/cometbft/cometbft/tree/storage/tmp/injective/v0.37.x-testing-validator) +- [Injective testing "v2"](https://github.com/cometbft/cometbft/tree/storage/tmp/injective/v0.37.x-testing-newlayout-validator) + +We also have non-Injective-specific backports to 0.37.x-based code in the following branches: + +- [Old keylayout](https://github.com/cometbft/cometbft/tree/storage/tmp/v0.37.x-testing-validator) +- [New key layout](https://github.com/cometbft/cometbft/tree/storage/tmp/v0.37.x-testing-validator) + +These branches are, however, used only for testing and development purposes and are neither meant nor designed to be used in production. + +### Releases containing the changes + +- *v1* : Data companion, background pruning, compaction and support for different key layouts +- *v0.38.x-experimental*: Data companion, background pruning (production ready) +- *Validator testing branches based off 0.37.x* - background pruning, compaction, key layout (not production ready). + +## Pre Q1 2024 results +By the end of Q3 we had addressed and documented the second problem by introducing a data companion API. The API allows node operators to extract data out of full nodes or validators, index it in whichever way they find suitable and instruct CometBFT to prune data at a much finer granularity: +- Blocks +- State +- ABCI Results +- The transaction indexer +- The block indexer + +For comparison, until then, CometBFT would only prune the block and state store (not including ABCI results), based on instructions from the application. + +More details on the API itself and how it can be used can be found in the corresponding [ADR](https://github.com/cometbft/cometbft/blob/main/docs/references/architecture/adr-101-data-companion-pull-api.md) and [documentation](https://github.com/cometbft/cometbft/tree/main/docs/explanation/data-companion). + +The rest of this report covers the changes and their impact related to fixing and improving the pruning-related points (1 and 3), as well as supporting a new data key layout (point 5). The results are obtained using `goleveldb` as the default backend unless stated otherwise. + +## Q1 goals & summary of results +The expected result for Q1 was fixing the problem of storage growth despite pruning and improving database access times with a more optimal key layout.
+ +While we did indeed fix the problem of pruning/compaction not working, the results we obtained could not demonstrate a clear benefit of changing the database key representation. + +When running our test applications (in the absence of a real-world application), our hypothesis on the impact of ordering was confirmed by performance improvements and faster compaction times with the new layout. + +The one experiment with a real application (Injective) did not back up this theory though, even though the overall performance was better than that of the software version the application was running at the time. + +The block processing times reported by our application were in the range of 600-850ms, compared to 100s of ms for the real-world application. Furthermore, the application might have different access patterns. Our tests can be seen as testing only CometBFT's interaction with storage, without much interference from the application. + +That is why, in Q1, we introduced an interface with two implementations: the current key layout (a "v1") and a new representation ("v2") sorting the keys by height using ordercode. The new layout is marked as purely experimental. Our hope is that chains will be incentivized to experiment with it and provide us with more real-world data. This will also facilitate switching the data layout without breaking changes between releases if we decide to officially support a new data layout. + + + +**Summary of Q1 results:** + +- pruning on its own is ineffective at controlling storage growth +- we have complemented pruning with a (forced) compaction feature, and this proved effective at controlling storage growth +- we confirmed that moving pruning + compaction to the background mitigates the potential performance impact of this feature, by running tests with it on Injective mainnet + - this works as expected; pruning + compaction shows no impact on node performance, and we recommend using it +- regarding key layouts: + - our hypothesis was that the new "v2" layout should improve read/write performance on block & state store access + - we obtained contradictory results on this hypothesis in local vs. production settings, concretely: + - in the local setting, enabling pruning with the old "v1" key layout _had_ a performance impact; enabling pruning with the new layout _had no_ impact + - in the mainnet setting the observation was reversed: using the new layout _introduced a ~10ms latency_ + - it is inconclusive whether the new key layout "v2" is beneficial, so we will introduce this as an experimental feature + - we expect to continue working with operator teams to gather data from production, ideally Injective and Osmosis +- pebbleDB: handles compaction without the need for Comet to force it, and generally shows better performance with the new layout + +Application developers who use `cometbft-db` as a database backend for their application store should use the new API to force compaction. This will reduce the storage used by their application when pruning. + +# Testing setup + +The experiments were run in a number of different settings: 1. We call this setup **Local-1node**: Local runs on one node using a light kvstore application with almost no app state. This setup increases the chances that storage is the bottleneck and enables us to evaluate the changes independently + of the demands of specific applications. Furthermore, we were able to create a larger storage footprint more quickly, + thus speeding up the experimentation process.
+ + To evaluate the impact of forced compaction and the different key layouts on both compaction/pruning and performance, we ran the following set of experiments on this setup: + - Current layout - no pruning + - Current layout - pruning, no forced compaction + - Current layout - pruning and forced compaction + - New layout - no pruning + - New layout - pruning, no forced compaction + - New layout - pruning and forced compaction + + We have also experimented with a [third key layout option](https://github.com/cometbft/cometbft/pull/1814), from which we initially expected the most: the new layout combined with insights into CometBFT's access patterns, ordering frequently accessed keys together. In all + our experiments, running CometBFT with this layout was less efficient than the other two, and we therefore dismissed it. + + We reduced the `timeout_commit` in this setup to 300ms to speed up execution. The load was generated using `test/loadtime` with the following parameters: `-c 1 -T 3600 -r 1000 -s 8096`, sending 8KB transactions at a rate of 1000txs/s for 1h. + + Each experiment was repeated 3 times to make sure the results were consistent. + + 2. **e2e-6 node**: CometBFT's e2e application run on a Digital Ocean cluster of 6 nodes. Each node had a different combination of changes: + - Pruning with and without compaction on the current database key layout vs. the same but using the new key layout that uses `ordercode` to sort keys by height. + - No pruning using the current database key layout vs. the new key layout. + + The nodes ran on top of an 11GB database to analyze the effects of pruning but also potentially capture additional impact on performance depending on the key layout. + +3. **production-testing**: The validator team at Informal Staking was kind enough to spend a lot of time with us trying to evaluate our changes on full nodes running on mainnet Injective chains. As their time was limited and they had initially found that pruning, in addition to not working, slows down Injective nodes, we were interested to understand the impact our changes made on their network. Future investigation on mainnet nodes would be required to gather more real-world data on chains with different demands and see if pruning is indeed ineffective and the slowdown is reproducible. + + +## **Metrics collected** + +- **Storage footprint** +- **RAM usage** +- **Block processing time** (*cometbft_state_block_processing_time*): the time to execute `FinalizeBlock` while reconstructing the last commit from the database and sending it to the application for processing. +- **Block time**: Computes the time taken for 1 block based on the number of blocks processed in 1h. Note that for small networks the validators usually keep up and thus their average block times end up being similar. +- **Duration of individual consensus steps** (*cometbft_consensus_step_duration_seconds* aggregated by step) +- **consensus_total_txs** + +During this work we extended CometBFT with two additional storage-related metrics: +- *Block store access time*, which records the time taken for each method to access the block store +- *State store access time*, which records the time taken for each method to access the state store + +## Pruning + +Pruning the blockstore and statestore is a long-supported CometBFT feature. An application can set a `retain_height` - the lowest block height that must be kept - and instruct CometBFT to prune the blocks below it (taking into account some other constraints).
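+
+As an illustration, below is a minimal sketch of how an application could drive
+this mechanism from its ABCI `Commit` handler. It assumes the v0.38-style Go
+ABCI interface; the `App` struct, its `lastHeight` field and the `keepRecent`
+policy are hypothetical and serve only to show where `RetainHeight` is set:
+
+```golang
+package app
+
+import (
+	"context"
+
+	abcitypes "github.com/cometbft/cometbft/abci/types"
+)
+
+// App is a hypothetical ABCI application; only pruning-related state is shown.
+type App struct {
+	abcitypes.BaseApplication
+	lastHeight int64 // height of the last committed block
+}
+
+// Commit is where the application requests pruning: blocks below the returned
+// RetainHeight become candidates for deletion, subject to CometBFT's own
+// constraints (it may decide to keep more).
+func (app *App) Commit(_ context.Context, _ *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) {
+	const keepRecent = 100_000 // example policy: keep roughly the last 100k blocks
+
+	var retain int64 // 0 means "retain all blocks"
+	if app.lastHeight > keepRecent {
+		retain = app.lastHeight - keepRecent
+	}
+	return &abcitypes.ResponseCommit{RetainHeight: retain}, nil
+}
+```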
+ +## The pruning feature on its own is ineffective in reducing storage footprint +Unfortunately, many users have noticed that, despite the pruning feature based on `retain_height` being enabled, the growth of both the state and block store does not stop. To free up storage, operators copy the database, manually force compaction of the deleted items, and copy it back. We have talked to operators and some have to do this weekly or every two weeks. + +After some research, we found that some of the database backends can be forced to compact the data. We experimented with this and confirmed those findings. + +That is why we extended `cometbft-db` [with an API](https://github.com/cometbft/cometbft-db/pull/111) to instruct the database to compact the files. Then we made sure that CometBFT [calls](https://github.com/cometbft/cometbft/pull/1972) this function after blocks are pruned. + +To evaluate whether this was really beneficial, we ran a couple of experiments and recorded the storage used: + +### Local 1 node run of a dummy app that grows the DB to 20GB: + +![local-run-compaction](img/impact_compaction_local.png) + + +### Running CometBFT's e2e application in a mixed network of 6 nodes +![e2e_storage_usage](img/e2e_storage_usage.png "Storage usage e2e") + + The nodes doing pruning and compaction have a constant footprint compared to the other nodes. + *validator03* and *validator05* prune without compaction. They have a smaller footprint than the + nodes without pruning. The fact that *validator05* has a lower footprint than + *validator03* stems from the compaction logic of `goleveldb`. As the keys on *validator03* are sorted + by height, new data is simply appended without the need to reshuffle very old levels with old heights. + On *validator05*, keys are sorted lexicographically, leading to `goleveldb` *touching* more levels on insertions. By default, the conditions for triggering compaction are evaluated only when a file is touched. This is the reason why random key order leads to more frequent compaction. (This was also confirmed by [findings](https://github.com/cometbft/cometbft/files/12914649/DB.experiments.pdf) done by our intern in Q3/Q4 2023 on goleveldb without Comet on top, part of the [issue](https://github.com/cometbft/cometbft/issues/64) to understand the database backends and decide which one to optimize and choose.) + + +### Production - Injective mainnet + +#### Pruning without compaction + +![injective-no-compaction](img/injective_no_compaction.png "Injective - pruning without compaction") +#### Pruning with compaction + +![injective-compaction](img/injective_compaction.png "Injective - pruning with compaction") + +## Pruning is slowing nodes down + +While the previous results confirm that the storage footprint can be reduced, it is important that this does not impact the performance of the entire system. + +The most impactful change we have made in this regard is moving block and state pruning into a background process. Up until v1.x, pruning was done before a node moved on to the next height, blocking +consensus from proceeding. In Q3 2023, we changed this by launching a pruning service that checks at fixed intervals whether there are blocks to be pruned. This interval is configurable and is `10s` by default. + +### Production - Injective mainnet +The impact of these changes is best demonstrated by the runs by Informal Staking comparing 4 Injective nodes with the following setup: + +1. 
*injective-sentry0* comet="v0.37", pruning="default", keylayout=old +2. *injective-sentry1* comet="v0.37", pruning="none", keylayout=old +3. *injective-pruning* comet="modified", pruning="600blocks", keylayout=old +4. *injective-newlayout* comet="modified", pruning="600blocks", keylayout=new + +Comet v0.37 is the current 0.37 release used in production, where pruning does not happen in a background process. + +We report the time to execute Commit: +![injective-commit](img/injective_commit.png "Injective - commit") + + +When pruning is done within the same thread, the Commit step takes 412ms, vs. 286ms when no pruning is activated. Using these numbers as a baseline, the new changes do not degrade performance for either layout: the duration of Commit with pruning is 253ms over the current DB key layout, and 260ms over the new layout. + +The graph below plots the block processing time for the 4 nodes. + +![injective-bpt](img/injective_block_processing_time.png "Injective - average block processing time") + +The new changes lead to a faster block processing time compared even to the node that has no pruning active. However, the new layout seems to be slightly slower. We will discuss this in more detail below. + + +## Database key layout and pruning + +The results above clearly show that pruning no longer impacts the nodes' performance and could be turned on. The next step was determining whether we should remove the current database key representation from CometBFT and use the new ordering by height, which should be more efficient. (Pure goleveldb benchmarks showed orders-of-magnitude improvements when keys were written in order vs. randomly - 16ms vs. 8s; this can be seen in the PDF report on `goleveldb` experiments linked above.) +However, while running the same set of experiments locally vs. in production, we obtained contradictory results on the impact of the key layout on these numbers. + +### **Local-1node** +In this setup, we concluded that, when pruning is turned on, only the version of CometBFT using the new database key layout was not impacted by it. The throughput of CometBFT (measured by the number of txs processed within 1h) decreased with pruning (with and without compaction) using the current layout - ~570 txs/s vs. ~700 txs/s with the new layout (see the table below). The compaction operation itself was also much faster than with the old key layout. The block time difference is between 100 and 200ms, which for some chains can be significant. +The same was true for additional parameters such as RAM usage (200-300MB). + +We show the findings in the table below. `v1` is the current DB key layout and `v2` is the new key representation leveraging `ordercode`. + + +| Metric | No pruning v1 | No pruning v2 | Pruning v1 | Pruning v2 | Pruning + compaction v1 | Pruning + compaction v2 | +| :---------------- | :------: | ----: | ------: | ----: | ------: | ----: | +| Total tx | 2538767 | 2601857 | 2063870 | 2492327 | 2062080 | 2521171 | +| Tx/s | 705.21 | 722.74 | 573.30 | 692.31 | 572.80 | 700.33 | +| Chain height | 4936 | 5095 | 4277 | 4855 | 4398 | 5104 | +| RAM (MB) | 550 | 470 | 650 | 510 | 660 | 510 | +| Block processing time (ms) | 1.9 | 2.1 | 2.2 | 2.1 | 2.0 | 1.9 | +| Block time (s) | 0.73 | 0.71 | 0.84 | 0.74 | 0.82 | 0.71 | + +We periodically collected heap usage samples locally via `pprof` and noticed that compaction with the old layout would take ~80MB of RAM vs. ~30MB with the new layout.
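+
+For anyone reproducing these measurements, the sampling itself needs nothing CometBFT-specific. A minimal sketch using Go's standard `net/http/pprof` handler (the address is arbitrary; CometBFT itself can also expose the same endpoints via its `pprof_laddr` config option):
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+	_ "net/http/pprof" // registers the /debug/pprof/* handlers on the default mux
+)
+
+func main() {
+	// Heap snapshots can then be pulled periodically, e.g. with:
+	//   go tool pprof http://localhost:6060/debug/pprof/heap
+	log.Fatal(http.ListenAndServe("localhost:6060", nil))
+}
+```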
+ + +When backporting these changes to the 0.37.x-based branch we gave to Informal Staking, we obtained similar results when run locally against the kvstore app. However, this is not what they observed on mainnet. In the graphs above, we see that the new layout, while still improving performance compared to CometBFT v0.37.x, introduced ~10ms of latency in this particular case. According to the operators, this is a big difference for chains like Injective. + +### **e2e - 6 nodes** + +In this experiment, we started a network of 6 validator nodes and 1 seed node. Each node had an initial state with 11GB in its blockstore. The configuration of the nodes is the same as in the local runs: + + - **validator00**: Current layout - no pruning + - **validator05**: Current layout - pruning, no forced compaction + - **validator02**: Current layout - pruning and forced compaction + - **validator04**: New layout - no pruning + - **validator03**: New layout - pruning, no forced compaction + - **validator01**: New layout - pruning and forced compaction + + Once the nodes synced up to the height in the blockstore, we ran a load of transactions against the network for ~6h. As the run was long, we alternated the nodes to which the load was sent, to avoid potential pollution of the results by the handling of incoming transactions. + +*Block time* + +As all nodes were validators that were able to keep up most of the time, their block times were very similar (~3ms of difference). We thus looked at whether validators were missing blocks, and at the Commit time, to get an indication of the impact of the layout and pruning. + +*Time to execute Commit* + +![e2e_commitTime](img/e2e_commit_time.png "Commit time e2e") + + +*Missed blocks* +![e2e_val_missed_blocks](img/e2e_val_missed_blocks.png "Blocks missed by a validator") + +The graph above shows the number of missed blocks per validator. *validator02* does pruning and compaction using the old layout and keeps missing blocks. The other two validators use the new layout: *validator03* prunes without compaction, while *validator01*, which prunes with compaction, missed only 1 block. + +This is something we could not verify in production because the nodes run by Informal Staking were not validator nodes. + + *Block Store Access time* + + In general, store access times are very low; without pruning they are up to 40ms. The storage devices on the Digital Ocean nodes are SSDs, but many of our operators use state-of-the-art NVMe devices, so access times could be even lower in production. + Without pruning, the current layout is slightly faster than the new one. However, when pruning is turned on and deletions are performed, the access times grow (to 100s of ms), and using the new layout leads to lower access times. The gap grows slightly as the database size grows (second graph). + + ![e2e_block_store_access_time](img/e2e_block_store_access_time.png "Block store access time e2e") + + + *State Store Access time* + + The conclusions for the state store access times are very similar to those for the blockstore. Note though that in our e2e app the state store does not grow beyond 1GB, as we disabled the saving of ABCI results and the validator set is not updated - both of which typically cause state growth. + + + ![e2e_state_store_access_time](img/e2e_state_store_access_time.png "State store access time e2e") + + *RAM Usage* + The difference in RAM used between the nodes was not very big.
Nodes that prune effectively (with compaction) used 20-40MB more RAM than nodes that did no pruning. + + Specifically, *validator04* and *validator00* (no pruning) use 278MB of RAM, *validator02* (pruning on the old layout) uses 330MB, and *validator01* (pruning on the new layout) uses 298MB. This is in line with the local runs, where pruning on the new layout used less RAM. + + +### Conclusion on key layout +As demonstrated by the above results, and mentioned at the beginning, `v1.x` will be released with support for the new key layout as a purely experimental feature. + +For a bigger database and higher block processing times (as in the runs with our e2e application), the new layout lowered the overall block processing time even without pruning. When the block times are very low (in our local setup or on Injective), we did not observe the same benefits. + +Thus, the final decision should be left to the application after testing and understanding its behaviour. + +As the feature is experimental, we do not provide a way to convert the database back into the current format if it is initialized with the new layout. + +The two layouts are not interchangeable: once one is used, a node cannot switch to the other. The version to be used is set in the `config.toml` and defaults to `v1` - the current layout. Once the layout is set, it is written back to the database with `version` as the key. When a node boots up and loads the database initially, if this flag is set, it takes precedence over any configuration file. + +The support for both layouts will allow users to benchmark their applications. If at any point we get clear indications that one layout is better than the other, we will gladly drop support for one of them and provide users with a way to migrate their databases gracefully. + + +## Pebble + +`PebbleDB` was recently added to `cometbft-db` by Notional Labs, and based on their benchmarks it was superior to `goleveldb`. + +We repeated our tests from **1-node-local** using PebbleDB as the underlying database. While PebbleDB is slightly better in performance (tx/s), the most impressive difference is that PebbleDB seemed to handle compaction very well without the need to force it. + +In the graph below, we see the old layout without any compaction and the new layout with and without compaction, on the same workload that generated 20GB of data when no pruning is active.
+ + +![pebble](img/pebble.png "Pebble") + +The table below shows the performance metrics for Pebble: + +| Metric | No pruning v1 | No pruning v2 | Pruning v1 | Pruning v2 | Pruning + compaction v1 | Pruning + compaction v2 | +| :---------------- | :------: | ----: | ------: | ----: | ------: | ----: | +| Total tx | 2827232 | 2906186 | 2851298 | 2873765 | 2826235 | 2881003 | +| Tx/s | 785.34 | 807.27 | 792.03 | 798.27 | 785.08 | 800.28 | +| Chain height | 5743 | 5666 | 5553 | 5739 | 5551 | 5752 | +| RAM (MB) | 494 | 445 | 456 | 445 | 490 | 461 | +| Block processing time (ms) | 2.1 | 3.9 | 2.1 | 2.1 | 2.1 | 2.1 | +| Block time (s) | 0.63 | 0.64 | 0.65 | 0.63 | 0.65 | 0.63 | \ No newline at end of file diff --git a/docs/references/storage/img/e2e_block_store_access_time.png b/docs/references/storage/img/e2e_block_store_access_time.png new file mode 100644 index 00000000000..8acd1792f9c Binary files /dev/null and b/docs/references/storage/img/e2e_block_store_access_time.png differ diff --git a/docs/references/storage/img/e2e_commit_time.png b/docs/references/storage/img/e2e_commit_time.png new file mode 100644 index 00000000000..d44779a8708 Binary files /dev/null and b/docs/references/storage/img/e2e_commit_time.png differ diff --git a/docs/references/storage/img/e2e_state_store_access_time.png b/docs/references/storage/img/e2e_state_store_access_time.png new file mode 100644 index 00000000000..6990893af89 Binary files /dev/null and b/docs/references/storage/img/e2e_state_store_access_time.png differ diff --git a/docs/references/storage/img/e2e_storage_usage.png b/docs/references/storage/img/e2e_storage_usage.png new file mode 100644 index 00000000000..780004d98ed Binary files /dev/null and b/docs/references/storage/img/e2e_storage_usage.png differ diff --git a/docs/references/storage/img/e2e_val_missed_blocks.png b/docs/references/storage/img/e2e_val_missed_blocks.png new file mode 100644 index 00000000000..e4ee4d3d715 Binary files /dev/null and b/docs/references/storage/img/e2e_val_missed_blocks.png differ diff --git a/docs/references/storage/img/impact_compaction_local.png b/docs/references/storage/img/impact_compaction_local.png new file mode 100644 index 00000000000..42f14b7df89 Binary files /dev/null and b/docs/references/storage/img/impact_compaction_local.png differ diff --git a/docs/references/storage/img/injective_block_processing_time.png b/docs/references/storage/img/injective_block_processing_time.png new file mode 100644 index 00000000000..397de16cae4 Binary files /dev/null and b/docs/references/storage/img/injective_block_processing_time.png differ diff --git a/docs/references/storage/img/injective_commit.png b/docs/references/storage/img/injective_commit.png new file mode 100644 index 00000000000..1adf2cfc02c Binary files /dev/null and b/docs/references/storage/img/injective_commit.png differ diff --git a/docs/references/storage/img/injective_compaction.png b/docs/references/storage/img/injective_compaction.png new file mode 100644 index 00000000000..661d87a1307 Binary files /dev/null and b/docs/references/storage/img/injective_compaction.png differ diff --git a/docs/references/storage/img/injective_no_compaction.png b/docs/references/storage/img/injective_no_compaction.png new file mode 100644 index 00000000000..f85b3dc1d79 Binary files /dev/null and b/docs/references/storage/img/injective_no_compaction.png differ diff --git a/docs/references/storage/img/pebble.png b/docs/references/storage/img/pebble.png new file mode 100644 index 00000000000..86c95e9a648 Binary files /dev/null
and b/docs/references/storage/img/pebble.png differ diff --git a/docs/rfc/tendermint-core/README.md b/docs/rfc/tendermint-core/README.md deleted file mode 100644 index 7258586a031..00000000000 --- a/docs/rfc/tendermint-core/README.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -order: 1 -parent: - order: false ---- - -# Tendermint Core Requests for Comments - -This document serves as a historical reference for all RFCs that were logged -during the development of Tendermint Core. - -This list is frozen as-is, and new RFCs should be added [here](../). - -## Table of Contents - -- [RFC-000: P2P Roadmap](./rfc-000-p2p-roadmap.rst) -- [RFC-001: Storage Engines](./rfc-001-storage-engine.rst) -- [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md) -- [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md) -- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.rst) -- [RFC-005: Event System](./rfc-005-event-system.rst) -- [RFC-006: Event Subscription](./rfc-006-event-subscription.md) -- [RFC-007: Deterministic Proto Byte Serialization](./rfc-007-deterministic-proto-bytes.md) -- [RFC-008: Don't Panic](./rfc-008-do-not-panic.md) -- [RFC-009: Consensus Parameter Upgrades](./rfc-009-consensus-parameter-upgrades.md) -- [RFC-010: P2P Light Client](./rfc-010-p2p-light-client.rst) -- [RFC-011: Delete Gas](./rfc-011-delete-gas.md) -- [RFC-012: Event Indexing Revisited](./rfc-012-custom-indexing.md) -- [RFC-013: ABCI++](./rfc-013-abci++.md) -- [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md) -- [RFC-015: ABCI++ Tx Mutation](./rfc-015-abci++-tx-mutation.md) -- [RFC-016: Node Architecture](./rfc-016-node-architecture.md) -- [RFC-017: ABCI++ Vote Extension Propagation](./rfc-017-abci++-vote-extension-propag.md) -- [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md) -- [RFC-019: Configuration File Versioning](./rfc-019-config-version.md) -- [RFC-020: Onboarding Projects](./rfc-020-onboarding-projects.rst) -- [RFC-021: The Future of the Socket Protocol](./rfc-021-socket-protocol.md) -- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md) -- [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md) -- [RFC-025: Application Defined Transaction Storage](./rfc-025-support-app-side-mempool.md) -- [RFC-026: Banning peers based on ResponseCheckTx](./rfc-026-p2p-bad-peers-checktx.md) -- [RFC-027: P2P Message Bandwidth Report](./rfc-027-p2p-message-bandwidth-report.md) diff --git a/docs/tutorials/README.md b/docs/tutorials/README.md new file mode 100644 index 00000000000..5cb7e9ab735 --- /dev/null +++ b/docs/tutorials/README.md @@ -0,0 +1,30 @@ +--- +order: 1 +title: CometBFT Tutorials +description: Tutorials +parent: + order: 1 +--- + +## Tutorials + +Are you ready to explore the world of CometBFT, the cutting-edge consensus algorithm that's revolutionizing the +field of distributed systems? You've come to the right place! Our CometBFT Tutorials provide the knowledge +and hands-on experience you need to master this groundbreaking technology. + +## Why Choose CometBFT Tutorials? + +- Comprehensive Learning: Our tutorials cover everything from the basics of consensus algorithms to advanced topics in CometBFT, ensuring that both beginners and experts can benefit. +- Hands-On Experience: We believe in learning by doing. Our tutorials include practical examples and exercises that allow you to implement CometBFT in real-world scenarios. 
+- Up-to-date Content: We keep our tutorials up-to-date with the latest developments in CometBFT, ensuring that you have access to the most current information and best practices. + +## Get Started Today! + +Whether you're a chain developer, an integrator, an operator, or simply curious about distributed systems, our CometBFT Tutorials are the perfect resource to enhance your knowledge and skills. + +Ready to begin? Start exploring our tutorials now and embark on a learning experience that will empower you to harness the power of CometBFT for your projects and applications. Let's build a more reliable and resilient future together with CometBFT! +- [Installing CometBFT](./install.md) +- [Quick-start using CometBFT](./quick-start.md) +- [Creating a built-in application in Go](./go-built-in.md) +- [Creating an external application in Go](./go.md) +- [Creating a Forum Application with ABCI 2.0](forum-application/1.abci-intro.md) diff --git a/docs/tutorials/forum-application/1.abci-intro.md b/docs/tutorials/forum-application/1.abci-intro.md new file mode 100644 index 00000000000..48182c09dbc --- /dev/null +++ b/docs/tutorials/forum-application/1.abci-intro.md @@ -0,0 +1,65 @@ +--- +order: 1 +--- + +# Introduction to ABCI 2.0 + +`ABCI 2.0` is an updated version of the ABCI (Application Blockchain Interface) from CometBFT. For more details, check the [specification document](https://docs.cometbft.com/v1.0/spec/abci/). + +![ABCI 2.0](images/ABCI2.jpg) + +## ABCI 2.0 Methods + +### InitChain + +- **Purpose**: Initialize the blockchain state. +- **Process**: Called when a blockchain node starts up for the first time. It sets up the initial state, including the consensus parameters and initial validator set. + +### Info + +- **Purpose**: Provide information about the application. +- **Process**: Called to retrieve information such as the application version, the last block height, and the application state hash. + +### Query + +- **Purpose**: Query the application state. +- **Process**: Allows external clients to query the application for specific data, such as transaction details. + +### CheckTx + +- **Purpose**: Validate an individual transaction before it is added to the mempool. +- **Process**: Every node that receives a transaction for the first time runs this method to ensure a transaction is valid. + +### PrepareProposal + +- **Purpose**: Modify block proposals before finalization. +- **Process**: The block proposer collects pending transactions, creating a "raw proposal". The application can then reorder, add, or remove transactions, creating a "prepared proposal." + +### ProcessProposal + +- **Purpose**: Validate a block proposal based on application-specific rules. +- **Process**: Validators assess the proposal and can reject invalid blocks. If rejected, the consensus algorithm will perform a nil prevote. The logic must be deterministic to avoid liveness problems. + +### ExtendVote + +- **Purpose**: Add application-specific data to pre-commit messages. +- **Process**: Validators can add vote extensions to their pre-commit messages, which the application can use later. If no data is added, a zero-length byte array is returned. This method can use non-deterministic logic. + +### VerifyVoteExtension + +- **Purpose**: Validate vote extension data in pre-commit messages. +- **Process**: Checks the integrity of vote extensions. If invalid, the entire pre-commit message is rejected. This method must follow deterministic logic to avoid liveness problems. 
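+
+To make the last two methods concrete, here is a minimal sketch of a vote-extension pair. The Go types and enum values are assumed by analogy with the v1 ABCI types used in the rest of this tutorial series (`ExtendVoteRequest`, `PROCESS_PROPOSAL_STATUS_*`, and so on); the extension payload is purely illustrative:
+
+```go
+package main
+
+import (
+	"bytes"
+	"context"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+)
+
+type App struct {
+	abci.BaseApplication
+}
+
+// ExtendVote attaches application data to this validator's pre-commit.
+// Returning a zero-length extension is valid and means "nothing to add".
+func (app *App) ExtendVote(_ context.Context, _ *abci.ExtendVoteRequest) (*abci.ExtendVoteResponse, error) {
+	return &abci.ExtendVoteResponse{VoteExtension: []byte("illustrative-payload")}, nil
+}
+
+// VerifyVoteExtension deterministically accepts or rejects an extension
+// produced by another validator's ExtendVote.
+func (app *App) VerifyVoteExtension(_ context.Context, req *abci.VerifyVoteExtensionRequest) (*abci.VerifyVoteExtensionResponse, error) {
+	status := abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT
+	if len(req.VoteExtension) == 0 || bytes.Equal(req.VoteExtension, []byte("illustrative-payload")) {
+		status = abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT
+	}
+	return &abci.VerifyVoteExtensionResponse{Status: status}, nil
+}
+```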
+ +### FinalizeBlock + +- **Purpose**: Process a decided proposal and update the application's state. +- **Process**: Takes a `FinalizeBlockRequest`, which contains information such as the transactions to execute and evidence of misbehavior, and returns a `FinalizeBlockResponse`, which includes information such as transaction results, updates to the validator set, changes to consensus parameters, and the `app_hash`. Changes are not persisted until the `Commit` phase. + +### Commit + +- **Purpose**: Permanently store finalized state changes. +- **Process**: Before `Commit`, the mempool is locked to prevent new transactions from reaching the application. The application must persist state changes to ensure blockchain integrity and reliability. + +--------------- + +*In the next section, you will learn about the [**Forum Application**](2.intro-forumApp.md) and how it functions.* diff --git a/docs/tutorials/forum-application/2.intro-forumApp.md b/docs/tutorials/forum-application/2.intro-forumApp.md new file mode 100644 index 00000000000..09f8ca38a59 --- /dev/null +++ b/docs/tutorials/forum-application/2.intro-forumApp.md @@ -0,0 +1,40 @@ +--- +order: 2 +--- + +## How the Application Works + +For the sake of simplicity, we will not be developing a fully functioning Forum Application. In our tutorial, we will focus +on `sender` and `messages` and the censorship of ill-behaved forum users. + +The **Forum Application** accepts `sender` and `message` as input and checks if the `sender` +has been banned or not. If the `sender` is banned, it cannot send a `message` on the Forum. + +If the `sender` is not banned, the `message` is checked for any curse words. If the `message` contains any curse words, the +`sender` is banned. If not, the `message` is persisted in the application state. + +*Note: Curse words are defined in genesis, a copy of which is available with all nodes. To add a new curse word, the +node must use `VoteExtension`.* + +### Application Structure + +In order to follow this tutorial, you can refer to the folder that hosts the finished [ABCI 2.0 Forum Application](../../../abci/tutorials/abci-v2-forum-app). + +Here is an example of how the tutorial directory is structured: + +- `abci` + - `app.go` + - `config.go` + - `state.go` + - `util.go` +- `model` + - `db.go` + - `messages.go` + - `user.go` +- `forum.go` +- `app.toml` + +--------------- + +*In the next section, you will learn more about how a user can [**send a message**](3.send-message.md) in the ABCI 2.0 +Forum Application.* diff --git a/docs/tutorials/forum-application/3.send-message.md b/docs/tutorials/forum-application/3.send-message.md new file mode 100644 index 00000000000..17ac7463961 --- /dev/null +++ b/docs/tutorials/forum-application/3.send-message.md @@ -0,0 +1,444 @@ +--- +order: 3 +--- + +# Sending Messages + +**In this section you will learn how a user can send a message on the Forum Application.** + +## CheckTx + +In ABCI, the `CheckTx` method is used to ask the application to check the validity of an individual transaction before it is included in the mempool. +Every node runs `CheckTx` before letting a transaction into its local mempool. + +The `CheckTx` method is responsible for performing any necessary validation checks on the transaction, such as verifying +the signature, checking for double spending, or enforcing application-specific rules. It is a lightweight and fast +operation, as it is meant to quickly determine whether a transaction is valid or not.
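+
+Before looking at the method itself, it helps to see what a forum transaction looks like on the wire: a plain `sender:<name>,message:<text>` string, which `model.ParseMessage` (shown in the model section) turns into a `Message`. A quick sketch, assuming the module path implied by this tutorial's repository layout:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model"
+)
+
+func main() {
+	// A valid forum transaction: comma-separated key:value pairs.
+	tx := []byte("sender:alice,message:hello")
+
+	msg, err := model.ParseMessage(tx)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s says %q\n", msg.Sender, msg.Message)
+}
+```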
+ +The method takes in a `CheckTxRequest` object, which contains the transaction to be checked (`req.Tx`) and any other +relevant information that the application needs to validate the transaction. + +The `CheckTx` method should return a `CheckTxResponse` object, which indicates whether the transaction is valid or not. +If the transaction is valid, the response may include additional information about the transaction, +such as the gas that the submitter is willing to spend executing the transaction. + +**Following is the code for the `CheckTx` method:** + +```go +// CheckTx handles validation of inbound transactions. If a transaction is not a valid message, if a user +// does not exist in the database, or if a user is banned, it returns an error. +func (app *ForumApp) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*abci.CheckTxResponse, error) { + app.logger.Info("Executing Application CheckTx") + + // Parse the tx message + msg, err := model.ParseMessage(req.Tx) + if err != nil { + app.logger.Info("CheckTx: failed to parse transaction message", "message", msg, "error", err) + return &abci.CheckTxResponse{Code: CodeTypeInvalidTxFormat, Log: "Invalid transaction", Info: err.Error()}, nil + } + + // Check for invalid sender + if len(msg.Sender) == 0 { + app.logger.Info("CheckTx: failed to parse transaction message", "message", msg, "error", "Sender is missing") + return &abci.CheckTxResponse{Code: CodeTypeInvalidTxFormat, Log: "Invalid transaction", Info: "Sender is missing"}, nil + } + + app.logger.Debug("searching for sender", "sender", msg.Sender) + u, err := app.state.DB.FindUserByName(msg.Sender) + + if err != nil { + if !errors.Is(err, badger.ErrKeyNotFound) { + app.logger.Error("CheckTx: Error in check tx", "tx", string(req.Tx), "error", err) + return &abci.CheckTxResponse{Code: CodeTypeEncodingError, Log: "Invalid transaction", Info: err.Error()}, nil + } + app.logger.Info("CheckTx: Sender not found", "sender", msg.Sender) + } else if u != nil && u.Banned { + return &abci.CheckTxResponse{Code: CodeTypeBanned, Log: "Invalid transaction", Info: "User is banned"}, nil + } + app.logger.Info("CheckTx: success checking tx", "message", msg.Message, "sender", msg.Sender) + return &abci.CheckTxResponse{Code: CodeTypeOK, Log: "Valid transaction", Info: "Transaction validation succeeded"}, nil +} +``` + +**Explanation of code:** + +The `CheckTx` function parses the transaction message contained in `req.Tx` using the `model.ParseMessage` function. If +there is an error parsing the message, it logs the error and returns a response with an error code indicating +an invalid transaction format. + +Then, it searches for a user in the database using the `app.state.DB.FindUserByName` function. If the user is not found, +it logs a message indicating that the user was not found. If there is an error other than a key-not-found error, +it logs the error and returns a response with an error code indicating an encoding error. + +If the user is found and is not banned, it returns a response with a success code (`CodeTypeOK`, which in CometBFT is the `0` +code value indicating success). + +Finally, it logs a message indicating that the transaction check succeeded. + +*Tip: The function `CheckTx` is a stateless function that is primarily used by the application to check if a tx is +valid or not as per the application criteria (well-formed and from a valid user).* + +Note: You will learn about different packages and functions like `app.state.DB.FindUserByName` in the upcoming sections.
+In this section, you will learn about the ABCI methods in the `ForumApp` only. + +## PrepareProposal + +The `PrepareProposal` method is responsible for creating the contents of the proposed block, typically by selecting a +set of transactions that should be included in the next block. It may use various criteria to determine which transactions +to include, such as transaction fees, priority, or application-specific rules (as defined by the application). + +The method takes in a `PrepareProposalRequest` object, which contains information about the current state of the blockchain, +such as the current height and the last committed block hash. It may also include other relevant information that the +application needs to generate the proposal. + +The `PrepareProposal` method should return a `PrepareProposalResponse` object, which includes the proposed block contents. +This typically includes the list of transactions (txs) that should be included in the next block. + +**Following is the code for the `PrepareProposal` method:** + +```go +// PrepareProposal is used to prepare a proposal for the next block in the blockchain. The application can re-order, remove +// or add transactions. +func (app *ForumApp) PrepareProposal(_ context.Context, req *abci.PrepareProposalRequest) (*abci.PrepareProposalResponse, error) { + app.logger.Info("Executing Application PrepareProposal") + + // Get the curse words from all vote extensions received at the end of the last height. + voteExtensionCurseWords := app.getWordsFromVe(req.LocalLastCommit.Votes) + + curseWords := strings.Split(voteExtensionCurseWords, "|") + if hasDuplicateWords(curseWords) { + return nil, errors.New("duplicate words found") + } + + // PrepareProposal puts the BanTxs first, then adds the other transactions + // ProcessProposal should verify this + proposedTxs := make([][]byte, 0) + finalProposal := make([][]byte, 0) + bannedUsersString := make(map[string]struct{}) + for _, tx := range req.Txs { + msg, err := model.ParseMessage(tx) + if err != nil { + // this should never happen since the tx should have been validated by CheckTx + return nil, fmt.Errorf("failed to parse tx in PrepareProposal: %w", err) + } + // Check the message against the curse words, including those from vote extensions + if !hasCurseWord(msg.Message, voteExtensionCurseWords) { + proposedTxs = append(proposedTxs, tx) + continue + } + // If the message contains curse words then ban the user by + // creating a "ban transaction" and adding it to the final proposal + banTx := model.BanTx{UserName: msg.Sender} + bannedUsersString[msg.Sender] = struct{}{} + resultBytes, err := json.Marshal(banTx) + if err != nil { + // this should never happen since the ban tx should have been validated by CheckTx + return nil, fmt.Errorf("failed to marshal ban tx in PrepareProposal: %w", err) + } + finalProposal = append(finalProposal, resultBytes) + } + + // Need to loop again through the proposed Txs to make sure there is none left by a user that was banned + // after the tx was accepted + for _, tx := range proposedTxs { + // there should be no error here as these are just transactions we have checked and added + msg, err := model.ParseMessage(tx) + if err != nil { + // this should never happen since the tx should have been validated by CheckTx + return nil, fmt.Errorf("failed to parse tx in PrepareProposal: %w", err) + } + // Only include this transaction in the final proposal if its sender was not banned + if _, ok := bannedUsersString[msg.Sender]; !ok { + finalProposal = append(finalProposal, tx) + } + } + return 
&abci.PrepareProposalResponse{Txs: finalProposal}, nil +} +``` + +**Explanation of code:** + +The `PrepareProposal` function first retrieves the curse words from all vote extensions received at the end of the last height. +Then, it iterates over the transactions in the proposal and checks if each transaction contains curse words. If a +transaction does not contain curse words, it adds it to the `proposedTxs` slice. If a transaction does contain curse words, it creates a ban transaction and adds +it to the `finalProposal` slice. + +After iterating over all the transactions, it loops through the `proposedTxs` again to make sure there are no transactions +left from users who were banned after their transactions were accepted. The final set of transactions is stored in the +`finalProposal` slice, which is then returned as part of the `PrepareProposalResponse` response. + +*Tip: The function `PrepareProposal` is used by state replication to indicate to the application to begin preparing the next block. +Typically, the application is expected to order the txs and remove txs from the pool as defined by the application logic.* + +Note: You will learn about different packages and functions like `model.ParseMessage` in the upcoming sections. +In this section, you will learn about the `app.go` file only. + +## ProcessProposal + +The `ProcessProposal` method is used to process a proposal for the next block in the blockchain. It is called by +CometBFT to request the application to validate and potentially execute the proposed block. + +The `ProcessProposal` method is responsible for performing any necessary validation checks on the proposed block, such +as verifying the validity of the included transactions, checking for double spending, or enforcing application-specific +rules (as defined by the application). + +The method takes in a `ProcessProposalRequest` object, which contains the proposed block contents, including the list of +transactions (`req.Txs`) that are included in the block. + +The `ProcessProposal` method should return a `ProcessProposalResponse` object, which includes a status indicating whether the proposed block +was accepted (`PROCESS_PROPOSAL_STATUS_ACCEPT`) or rejected (`PROCESS_PROPOSAL_STATUS_REJECT`). + +**Following is the code for the `ProcessProposal` function:** + +```go +// ProcessProposal validates the proposed block and its transactions and returns a status indicating whether the proposal was accepted or rejected. +func (app *ForumApp) ProcessProposal(_ context.Context, req *abci.ProcessProposalRequest) (*abci.ProcessProposalResponse, error) { + app.logger.Info("Executing Application ProcessProposal") + + bannedUsers := make(map[string]struct{}, 0) + + finishedBanTxIdx := len(req.Txs) + for i, tx := range req.Txs { + if !isBanTx(tx) { + finishedBanTxIdx = i + break + } + var parsedBan model.BanTx + err := json.Unmarshal(tx, &parsedBan) + if err != nil { + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, err + } + bannedUsers[parsedBan.UserName] = struct{}{} + } + + for _, tx := range req.Txs[finishedBanTxIdx:] { + // From this point on, there should be no BanTxs anymore + // If there is one, ParseMessage will return an error as the + // format of the two transactions is different.
+ msg, err := model.ParseMessage(tx) + if err != nil { + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, err + } + if _, ok := bannedUsers[msg.Sender]; ok { + // sending us a tx from a banned user + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil + } + } + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil +} +``` + +**Explanation of code:** + +The `ProcessProposal` function initializes an empty map called `bannedUsers` to keep track of banned user names. + +Then, it iterates through the transactions and checks if each transaction is a ban transaction using the `isBanTx` function. +If it is a ban transaction, it parses the transaction into a `BanTx` struct and adds the banned user's name to the `bannedUsers` map. +If it is not a ban transaction, it records the index of the first non-ban transaction and breaks out of the loop. + +After that, it iterates through the remaining transactions (starting from the first non-ban transaction) and +parses each transaction using the `model.ParseMessage` function. If any transaction comes from a banned user, +it rejects the proposal. + +Finally, if there are no banned users found in the transactions, it accepts the proposal. + +*Tip: The function `ProcessProposal` is used by state replication to indicate to the application to process the tx. +The application can process a tx in accordance with the logic defined by the application. Although the application can +perform 'optimistic execution', the application is not mandated to do so.* + +Note: You will learn about different packages and functions like `isBanTx` in the upcoming sections. In this section +you will learn about the `app.go` file only. + +## FinalizeBlock + +The `FinalizeBlock` method is used to finalize a block in the blockchain. It is called by CometBFT after the validators have agreed on the +next block and it is ready to be added to the blockchain. + +The `FinalizeBlock` method takes in a `FinalizeBlockRequest` object, which contains information about the block being finalized. +It performs any necessary processing or validation on the block, such as updating the application state, or performing +additional computations. + +After processing the block, the method returns a `FinalizeBlockResponse` object, which typically includes the results +of the block finalization process, such as the transaction results, validator set updates, consensus parameters updates +or the new hash of the application state. + +**Following is the code for the `FinalizeBlock` function:** + +```go +// FinalizeBlock delivers the decided block to the application.
+func (app *ForumApp) FinalizeBlock(_ context.Context, req *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) { + app.logger.Info("Executing Application FinalizeBlock") + + // Iterate over Tx in current block + app.onGoingBlock = app.state.DB.GetDB().NewTransaction(true) + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) + finishedBanTxIdx := len(req.Txs) + for i, tx := range req.Txs { + var err error + + if !isBanTx(tx) { + finishedBanTxIdx = i + break + } + banTx := new(model.BanTx) + err = json.Unmarshal(tx, &banTx) + if err != nil { + // since we did this in ProcessProposal this should never happen here + return nil, err + } + err = UpdateOrSetUser(app.state.DB, banTx.UserName, true, app.onGoingBlock) + if err != nil { + return nil, err + } + respTxs[i] = &abci.ExecTxResult{Code: CodeTypeOK} + } + + for idx, tx := range req.Txs[finishedBanTxIdx:] { + // From this point on, there should be no BanTxs anymore + // If there is one, ParseMessage will return an error as the + // format of the two transactions is different. + msg, err := model.ParseMessage(tx) + i := idx + finishedBanTxIdx + if err != nil { + // since we did this in ProcessProposal this should never happen here + return nil, err + } + + // Check if this sender already existed; if not, add the user too + err = UpdateOrSetUser(app.state.DB, msg.Sender, false, app.onGoingBlock) + if err != nil { + return nil, err + } + // Add the message for this sender + message, err := model.AppendToExistingMessages(app.state.DB, *msg) + if err != nil { + return nil, err + } + err = app.onGoingBlock.Set([]byte(msg.Sender+"msg"), []byte(message)) + if err != nil { + return nil, err + } + chatHistory, err := model.AppendToChat(app.state.DB, *msg) + if err != nil { + return nil, err + } + // Append messages to chat history + err = app.onGoingBlock.Set([]byte("history"), []byte(chatHistory)) + if err != nil { + return nil, err + } + // This adds the user to the DB, but the data is not committed nor persisted until Commit is called + respTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} + app.state.Size++ + } + app.state.Height = req.Height + + response := &abci.FinalizeBlockResponse{TxResults: respTxs, AppHash: app.state.Hash()} + return response, nil +} +``` + +**Explanation of code:** + +The `FinalizeBlock` function iterates over the transactions (`req.Txs`) in the block. If a transaction is a ban transaction (`isBanTx(tx)`), +it updates or sets a user in the application's database and assigns a response code. If it is not a ban transaction, the loop breaks. + +After the ban transactions, the method parses the remaining transactions using `model.ParseMessage(tx)`. It updates or sets +the sender as a user in the database, appends the message to the existing messages, appends the message to the chat history, +and assigns a response code. Finally, it updates the blockchain state and returns the response object. + +*Tip: The function `FinalizeBlock` finalizes the processing of a tx. All the state changes that happen in this function are finalized. +However, they are not yet persisted to the database. That is done in the next step, i.e., `Commit`.* + +Note: You will learn about different packages and functions like `model.AppendToExistingMessages` in the upcoming sections. +In this section, you will learn about the `app.go` file only. + +## Commit + +The `Commit` method is used to persist the changes made to the application state during the `FinalizeBlock` method.
+ +After calling `FinalizeBlock`, the state changes are finalized but not yet persisted to the database. The `Commit` method +is responsible for persisting these changes to the database, ensuring that they are durable and can be retrieved later. + +Typically, the `Commit` method updates the application's blockchain state by saving the modified data to a persistent +storage system, such as a database or a file. This allows the application to maintain a consistent and reliable state +across different blocks in the blockchain. + +The `Commit` method takes in a `CommitRequest` object and returns a `CommitResponse` object. + +**Following is the code for the `Commit` function:** + +```go +// Commit the application state. +func (app *ForumApp) Commit(_ context.Context, _ *abci.CommitRequest) (*abci.CommitResponse, error) { + app.logger.Info("Executing Application Commit") + + if err := app.onGoingBlock.Commit(); err != nil { + return nil, err + } + err := saveState(&app.state) + if err != nil { + return nil, err + } + return &abci.CommitResponse{}, nil +} +``` + +**Explanation of code:** + +The `Commit` method takes in a `context.Context` object and a `*abci.CommitRequest` object as parameters. It returns +a `*abci.CommitResponse` object and an error. + +Inside the method, it calls the `Commit` method on `app.onGoingBlock`, the database transaction opened in `FinalizeBlock`. This commits +the state changes made during the `FinalizeBlock` method to the underlying storage system. If there is an error during +this commit, the method returns it. + +After the state changes are committed, the `saveState` function is called with a pointer to `app.state`. This function +is responsible for persisting the updated application state to a persistent storage system. + +Finally, the method returns an empty `*abci.CommitResponse` object and a `nil` error. + +## Signature verification + +The tutorial does not include the logic for verifying transaction signatures. However, in a real-world application, it +is important to validate transaction signatures to ensure that the users sending messages are legitimate and not trying +to exploit the system. If implemented, signature verification would typically be carried out using the `CheckTx`, `ProcessProposal`, and `FinalizeBlock` +methods to confirm the validity of transactions from a signature perspective. While our tutorial app is simplified for +educational purposes, a fully functional application should include signature verification as an essential part of its core logic. + +An example of signature verification could look something like the following, where `Transaction`, `crypto`, `code`, and `decodeTransaction` are placeholders for the application's own types and helpers: + +```go +func isValidSignature(tx Transaction) bool { + pubKey := tx.PubKey + signature := tx.Signature + message := tx.Message + + // Use the cryptographic library to verify the signature + return crypto.VerifySignature(pubKey, message, signature) +} +``` + +and then in `CheckTx` the signature could be verified: + +```go +func (app *ForumApp) CheckTx(req types.CheckTxRequest) types.CheckTxResponse { + // Decode the raw bytes into the application's transaction type, + // including the signature and the public key (decoding omitted here) + tx := decodeTransaction(req.Tx) + + // Verify the signature + if !isValidSignature(tx) { + return types.CheckTxResponse{ + Code: code.InvalidSignature, + Log: "Invalid transaction signature", + } + } + + // some other validation...
+ + return types.CheckTxResponse{Code: code.OK} +} +``` + +--------------- + +*In the next section, you will learn how a user can [**query messages**](4.query-message.md) in the Forum Application.* diff --git a/docs/tutorials/forum-application/4.query-message.md b/docs/tutorials/forum-application/4.query-message.md new file mode 100644 index 00000000000..b092b0d779f --- /dev/null +++ b/docs/tutorials/forum-application/4.query-message.md @@ -0,0 +1,78 @@ +--- +order: 4 +--- + +# Querying Messages + +**In this section you will learn how users query messages on the forum application.** + +## Query + +The `Query` method is used to query the application state for specific information. It allows clients to request data +from the application without submitting a transaction. + +The `Query` method is responsible for retrieving data from the application state based on the provided query parameters. +The query parameters can be specified in the `QueryRequest` object, which typically includes information such as the +query path and the query data. + +The application is expected to interpret the query parameters and return the corresponding data in a `QueryResponse` object. +The response may include the requested data, error codes, or any other relevant information based on the application's implementation. + +The `Query` method is particularly useful for retrieving specific information from the application, such as +account balances, transaction history, or any other data stored in the application's state. + +**Following is the code for the `Query` function:** + +```go +// Query the application state for specific information. +func (app *ForumApp) Query(_ context.Context, query *abci.QueryRequest) (*abci.QueryResponse, error) { + app.logger.Info("Executing Application Query") + + resp := abci.QueryResponse{Key: query.Data} + + // Parse sender from query data + sender := string(query.Data) + + if sender == "history" { + messages, err := model.FetchHistory(app.state.DB) + if err != nil { + return nil, err + } + resp.Log = messages + resp.Value = []byte(messages) + + return &resp, nil + } + // Retrieve all messages sent by the sender + messages, err := model.GetMessagesBySender(app.state.DB, sender) + if err != nil { + return nil, err + } + + // Convert the messages to JSON and return as query result + resultBytes, err := json.Marshal(messages) + if err != nil { + return nil, err + } + + resp.Log = string(resultBytes) + resp.Value = resultBytes + + return &resp, nil +} +``` + +**Explanation of code:** + +The `Query` function queries the application state. It takes a context and a query as input and returns a response or an error. + +If the query data is equal to "history", it fetches the history of messages from the application's state database and +returns it as a response. Otherwise, it retrieves all messages sent by the sender specified in the query data, +converts them to JSON, and returns them as the query result. + +*Tip: The function `Query` can still fetch messages a sender sent before being banned.
You can change this to +delete all messages from a banned sender.* + +--------------- + +*In the next section, you will learn about the different [**models**](5.model.md) and functions, and how they are used in the Forum Application.* diff --git a/docs/tutorials/forum-application/5.model.md b/docs/tutorials/forum-application/5.model.md new file mode 100644 index 00000000000..4c50951e5c3 --- /dev/null +++ b/docs/tutorials/forum-application/5.model.md @@ -0,0 +1,469 @@ +--- +order: 5 +--- + +# Defining model types + +**In this section you will learn how users, messages, and the DB are defined in the Forum Application.** + +## User + +**This is how a `User` is defined in the Forum Application.** + +```go +package model + +type User struct { + Name string `json:"name"` + Moderator bool `json:"moderator"` + Banned bool `json:"banned"` + NumMessages int64 `json:"numMessages"` + Version uint64 `json:"version"` + SchemaVersion int `json:"schemaVersion"` +} +``` + +## Messages + +**This is how a `Message` is defined in the Forum Application, along with various operations that can be performed on messages.** + +```go +package model + +import ( + "errors" + "fmt" + "strings" + + "github.com/dgraph-io/badger/v4" +) + +type BanTx struct { + UserName string `json:"username"` +} + +// Message represents a message sent by a user. +type Message struct { + Sender string `json:"sender"` + Message string `json:"message"` +} + +type MsgHistory struct { + Msg string `json:"history"` +} + +func AppendToChat(db *DB, message Message) (string, error) { + historyBytes, err := db.Get([]byte("history")) + if err != nil { + return "", fmt.Errorf("error fetching history: %w", err) + } + msgBytes := string(historyBytes) + msgBytes = msgBytes + "{sender:" + message.Sender + ",message:" + message.Message + "}" + return msgBytes, nil +} + +func FetchHistory(db *DB) (string, error) { + historyBytes, err := db.Get([]byte("history")) + if err != nil { + return "", fmt.Errorf("error fetching history: %w", err) + } + msgHistory := string(historyBytes) + return msgHistory, nil +} + +func AppendToExistingMessages(db *DB, message Message) (string, error) { + existingMessages, err := GetMessagesBySender(db, message.Sender) + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + return "", err + } + if errors.Is(err, badger.ErrKeyNotFound) { + return message.Message, nil + } + return existingMessages + ";" + message.Message, nil +} + +// GetMessagesBySender retrieves all messages sent by a specific sender. +func GetMessagesBySender(db *DB, sender string) (string, error) { + var messages string + err := db.db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(sender + "msg")) + if err != nil { + return err + } + value, err := item.ValueCopy(nil) + if err != nil { + return err + } + messages = string(value) + return nil + }) + if err != nil { + return "", err + } + return messages, nil +} + +// ParseMessage parses a transaction of the form "sender:<name>,message:<text>" into a Message.
+func ParseMessage(tx []byte) (*Message, error) { + msg := &Message{} + + // Parse the message into key-value pairs + pairs := strings.Split(string(tx), ",") + + if len(pairs) != 2 { + return nil, errors.New("invalid number of key-value pairs in message") + } + + for _, pair := range pairs { + kv := strings.Split(pair, ":") + + if len(kv) != 2 { + return nil, fmt.Errorf("invalid key-value pair in message: %s", pair) + } + + key := kv[0] + value := kv[1] + + switch strings.ToLower(key) { + case "sender": + msg.Sender = value + case "message": + msg.Message = value + case "history": + return nil, fmt.Errorf("reserved key name: %s", key) + default: + return nil, fmt.Errorf("unknown key in message: %s", key) + } + } + + // Check if the message contains a sender and message + if msg.Sender == "" { + return nil, errors.New("message is missing sender") + } + if msg.Message == "" { + return nil, errors.New("message is missing message") + } + + return msg, nil +} +``` + +### Explanation of code + +**AppendToChat** + +`AppendToChat` takes a pointer to a `DB` object and a `Message` object as parameters. It appends the message to the +chat history fetched from the `DB` object and returns the updated chat history as a string (the caller is responsible for +writing it back, as `FinalizeBlock` does). If there is an error retrieving the chat history, it returns an empty string and the error. + +**FetchHistory** + +`FetchHistory` takes a pointer to a `DB` struct as an argument. It retrieves the value stored under the `"history"` key, +using the `DB`'s `Get` helper (which in turn uses the `ViewDB` function on the underlying database). + +If an error occurs during the retrieval, it returns an empty string and the error; otherwise, the retrieved value is +converted to a string and returned. + +**AppendToExistingMessages** + +`AppendToExistingMessages` takes a pointer to a `DB` object and a `Message` object as input. It retrieves existing messages +from the database by the sender of the input message and appends the input message to the existing messages. + +If no existing messages are found, it returns the input message as is. The function returns the combined messages or an error. + +**GetMessagesBySender** + +`GetMessagesBySender` retrieves all messages sent by a specific sender from a database. It takes a pointer to a `DB` object +and a string representing the `sender` as input. It returns a string containing the messages and an error if any occurred. + +It uses the badger package to interact with the database and retrieves the messages by concatenating the sender with +the string "msg" and performing a database lookup. + +**ParseMessage** + +`ParseMessage` takes a byte array tx as input and returns a pointer to a `Message` struct and an error. + +The function first initializes an empty `Message` struct. It then splits the input byte array into key-value pairs using +a comma as the separator. If the number of pairs is not equal to 2, it returns an error indicating an invalid number of +key-value pairs. + +Next, it iterates over each pair, splitting it into key and value using a colon as the separator. If the number of elements +in a pair is not equal to 2, it returns an error indicating an invalid key-value pair. + +For each key-value pair, it checks the key and assigns the corresponding value to the appropriate field in the `Message` struct. + +Finally, it checks if the `Sender` and `Message` fields in the `Message` struct are empty.
If either of them is empty, +it returns an error indicating that the message is missing the sender or the message itself. + +If all checks pass, it returns the populated `Message` struct and a `nil` error. + +## DB + +**These are the storage operations in the Forum Application. They allow you to perform various operations related to +storage in the underlying database.** + +```go +package model + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/dgraph-io/badger/v4" + + "github.com/cometbft/cometbft/abci/types" +) + +type DB struct { + db *badger.DB +} + +func (db *DB) Init(database *badger.DB) { + db.db = database +} + +func (db *DB) Commit() error { + return db.db.Update(func(txn *badger.Txn) error { + return txn.Commit() + }) +} + +func NewDB(dbPath string) (*DB, error) { + // Open badger DB + opts := badger.DefaultOptions(dbPath) + db, err := badger.Open(opts) + if err != nil { + return nil, err + } + + // Create a new DB instance and initialize with badger DB + dbInstance := &DB{} + dbInstance.Init(db) + + return dbInstance, nil +} + +func (db *DB) GetDB() *badger.DB { + return db.db +} + +func (db *DB) Size() int64 { + lsm, vlog := db.GetDB().Size() + return lsm + vlog +} + +func (db *DB) CreateUser(user *User) error { + // Check if the user already exists + err := db.db.View(func(txn *badger.Txn) error { + _, err := txn.Get([]byte(user.Name)) + return err + }) + if err == nil { + return errors.New("user already exists") + } + + // Save the user to the database + err = db.db.Update(func(txn *badger.Txn) error { + userBytes, err := json.Marshal(user) + if err != nil { + return fmt.Errorf("failed to marshal user to JSON: %w", err) + } + err = txn.Set([]byte(user.Name), userBytes) + if err != nil { + return err + } + return nil + }) + return err +} + +func (db *DB) FindUserByName(name string) (*User, error) { + // Read the user from the database + var user *User + err := db.db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(name)) + if err != nil { + return err + } + err = item.Value(func(val []byte) error { + return json.Unmarshal(val, &user) + }) + return err + }) + if err != nil { + return nil, fmt.Errorf("error in retrieving user: %w", err) + } + return user, nil +} + +func (db *DB) UpdateOrSetUser(uname string, toBan bool, txn *badger.Txn) error { + user, err := db.FindUserByName(uname) + // If user is not in the db, then add it + if errors.Is(err, badger.ErrKeyNotFound) { + u := new(User) + u.Name = uname + u.Banned = toBan + user = u + } else { + if err != nil { + return errors.New("not able to process user") + } + user.Banned = toBan + } + userBytes, err := json.Marshal(user) + if err != nil { + return fmt.Errorf("error marshaling user: %w", err) + } + return txn.Set([]byte(user.Name), userBytes) +} + +func (db *DB) Set(key, value []byte) error { + return db.db.Update(func(txn *badger.Txn) error { + return txn.Set(key, value) + }) +} + +func ViewDB(db *badger.DB, key []byte) ([]byte, error) { + var value []byte + err := db.View(func(txn *badger.Txn) error { + item, err := txn.Get(key) + if err != nil { + if !errors.Is(err, badger.ErrKeyNotFound) { + return err + } + return nil + } + value, err = item.ValueCopy(nil) + return err + }) + if err != nil { + return nil, err + } + return value, nil +} + +func (db *DB) Close() error { + return db.db.Close() +} + +func (db *DB) Get(key []byte) ([]byte, error) { + return ViewDB(db.db, key) +} + +func (db *DB) GetValidators() (validators []types.ValidatorUpdate, err error) { + err = 
db.db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchSize = 10 + it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + var err error + item := it.Item() + k := item.Key() + if isValidatorTx(k) { + err := item.Value(func(v []byte) error { + validator := new(types.ValidatorUpdate) + err = types.ReadMessage(bytes.NewBuffer(v), validator) + if err == nil { + validators = append(validators, *validator) + } + return err + }) + if err != nil { + return err + } + } + } + return nil + }) + if err != nil { + return nil, err + } + return validators, nil +} + +func isValidatorTx(tx []byte) bool { + return bytes.HasPrefix(tx, []byte("val")) +} +``` + +### Explanation of code + +**Commit** + +`Commit` calls the `Update` method on the underlying badger DB and passes a function as an argument. Inside this function, it calls the `Commit` method on the transaction object and returns its result. + +**NewDB** + +`NewDB` creates a new database instance. It uses the badger package to open a BadgerDB database at the specified `dbPath` +and returns a pointer to the newly created database instance. If there is an error during the database creation, it returns the error. + +**GetDB** + +`GetDB()` returns a pointer to a `badger.DB` object. + +**Size** + +`Size()` returns the sum of the two values reported by the underlying badger DB's `Size()` method: the LSM tree size (`lsm`) and the value log size (`vlog`). + +**CreateUser** + +`CreateUser` creates a new user in the database using Badger as the key-value store. The method checks if the user already +exists by performing a read operation on the database. If the user already exists, it returns an error. If the user does +not exist, it saves the user to the database by performing a write operation. The method returns any errors encountered +during the process. + +**FindUserByName** + +`FindUserByName` takes a name string as input and returns a pointer to a `User` struct and an error. The method reads a +user from the database using the `db.db.View` method provided by the badger package. It retrieves the user by the provided +name, unmarshals the JSON data into the user variable, and returns it along with any error that occurred during the process. + +**Set** + +`Set` takes in a key and a value as byte slices. It uses the badger database library to update the database with the given key and value. + +**ViewDB** + +`ViewDB` takes a pointer to a `badger.DB` object and a byte slice called key as arguments. The function reads a value +from the database using the provided key. If the key is not found in the database, it returns nil. Otherwise, it returns +the value associated with the key. + +The function uses the View method of the badger.DB object to perform a read-only transaction on the database. Inside the +View method, it retrieves the item corresponding to the key using the Get method of the transaction object. If the key +is not found, it handles the badger.ErrKeyNotFound error and returns nil. Otherwise, it copies the value associated with +the item using the ValueCopy method and assigns it to the value variable. Finally, it returns the value variable and any +error that occurred during the transaction. + +Overall, this code snippet provides a concise way to read data from a BadgerDB database using a specified key. + +**Close** + +`Close` takes a pointer receiver db of type `*DB` and returns an error. The method calls the `Close` method of the `db` +field of the `DB` struct. + +**Get** + +`Get` is a method on the `DB` type.
**GetValidators** + +`GetValidators` retrieves a list of validator updates from the database using the Badger library. It iterates over the +key-value pairs in the database, checks if a key corresponds to a validator transaction, and if so, reads the value and +appends it to the validators slice. + +Finally, it returns the validators slice and any potential error. + +**isValidatorTx** + +`isValidatorTx` takes a byte slice as input and returns a boolean value. It checks whether the byte slice starts with the +prefix "val" and returns true if it does; otherwise it returns false. + +--------------- + +*In the next section, you will learn about the [**main**](6.main.md) function responsible for running the Forum Application blockchain.* diff --git a/docs/tutorials/forum-application/6.main.md b/docs/tutorials/forum-application/6.main.md new file mode 100644 index 00000000000..9c4e23512f7 --- /dev/null +++ b/docs/tutorials/forum-application/6.main.md @@ -0,0 +1,168 @@ +--- +order: 6 +--- + +# Running a node + +**The main function in the `forum.go` file is responsible for running the Forum Application blockchain.** + +```go +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "path/filepath" + "syscall" + "time" + + "github.com/spf13/viper" + + "github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/abci" + cfg "github.com/cometbft/cometbft/config" + cmtflags "github.com/cometbft/cometbft/libs/cli/flags" + cmtlog "github.com/cometbft/cometbft/libs/log" + nm "github.com/cometbft/cometbft/node" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/privval" + "github.com/cometbft/cometbft/proxy" +) + +var homeDir string + +func init() { + flag.StringVar(&homeDir, "home", "", "Path to the CometBFT config directory (if empty, uses $HOME/.forumapp)") +} + +func main() { + flag.Parse() + if homeDir == "" { + homeDir = os.ExpandEnv("$HOME/.forumapp") + } + + config := cfg.DefaultConfig() + config.SetRoot(homeDir) + viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml")) + + if err := viper.ReadInConfig(); err != nil { + log.Fatalf("failed to read config: %v", err) + } + + logger := cmtlog.NewLogger(os.Stdout) + logger, err := cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel) + if err != nil { + log.Printf("failed to parse log level: %v", err) + defer os.Exit(1) + } + + dbPath := filepath.Join(homeDir, "forum-db") + appConfigPath := "app.toml" + app, err := abci.NewForumApp(dbPath, appConfigPath, logger) + if err != nil { + log.Printf("failed to create Forum Application: %v\n", err) + os.Exit(1) + } + + nodeKey, err := nodekey.LoadNodeKey(config.NodeKeyFile()) + if err != nil { + log.Printf("failed to load node key: %v", err) + os.Exit(1) + } + + pv := privval.LoadFilePV( + config.PrivValidatorKeyFile(), + config.PrivValidatorStateFile(), + ) + + node, err := nm.NewNode( + context.Background(), + config, + pv, + nodeKey, + proxy.NewLocalClientCreator(app), + nm.DefaultGenesisDocProviderFunc(config), + cfg.DefaultDBProvider, + nm.DefaultMetricsProvider(config.Instrumentation), + logger, + ) + if err != nil { + log.Printf("failed to create CometBFT node") + os.Exit(1) + } + + if err := node.Start(); err != nil { + log.Printf("failed to start CometBFT node") + os.Exit(1) + } + defer func() {
_ = node.Stop() + node.Wait() + }() + + httpAddr := "127.0.0.1:8080" + + server := &http.Server{ + Addr: httpAddr, + ReadHeaderTimeout: 5 * time.Second, + } + + if err := server.ListenAndServe(); err != nil { + log.Printf("failed to start HTTP server: %v", err) + os.Exit(1) + } + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + <-sigCh + + log.Println("Forum application stopped") +} +``` + +**Explanation of code** + +The program begins by parsing command-line flags using `flag.Parse()`. It then checks if the `homeDir` variable is empty +and assigns the default value `$HOME/.forumapp` if it is. + +Next, it creates a configuration object using `cfg.DefaultConfig()` and sets the root directory using `config.SetRoot(homeDir)`. +It also sets the configuration file path using `viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml"))`. +The program attempts to read the configuration file using `viper.ReadInConfig()`, and if there is an error, it logs +the failure and exits. + +The program then sets up logging using `cmtlog.NewLogger(os.Stdout)` and parses the log level +using `cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)`. If there is an error during log level parsing, +it logs the failure and exits. + +It then builds the database path by joining the home directory with `forum-db` and creates an instance of the ForumApp +object using `abci.NewForumApp(dbPath, appConfigPath, logger)`. If there is an error during the creation of the ForumApp +instance, it logs the failure and exits. + +The program loads the node key using `nodekey.LoadNodeKey(config.NodeKeyFile())`. If there is an error during the loading +of the node key, it logs the failure and exits. + +Next, it loads the private validator using `privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())`. + +The program creates a CometBFT node using `nm.NewNode()` and passes various parameters such as configuration, private +validator, node key, client creator, genesis doc provider, database provider, metrics provider, and logger. If there is +an error during the creation of the CometBFT node, it logs the failure and exits. + +The program starts the CometBFT node using `node.Start()`. If there is an error during node startup, it logs the failure +and exits. Stopping the node (`node.Stop()`) and waiting for it to terminate (`node.Wait()`) are deferred, so they run when the program shuts down. + +The program then starts an HTTP server listening on `127.0.0.1:8080`. If there is an error starting the HTTP server, it logs the failure and exits. + +The program sets up a signal channel to handle `SIGINT` and `SIGTERM` signals and waits for a signal to be received on +the channel; when one arrives, the deferred calls stop the CometBFT node and wait for it to terminate. + +Finally, it prints a message indicating that the forum application has stopped.
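As a standalone illustration of the shutdown pattern described above (and independent of CometBFT), here is a minimal, self-contained sketch of how a buffered signal channel is used to block until `SIGINT`/`SIGTERM` arrives, after which the deferred cleanup runs:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Deferred cleanup runs after a signal is received, mirroring how the
	// forum application defers node.Stop() and node.Wait().
	defer log.Println("application stopped")

	// Buffered channel of size 1 so a signal delivered before we block is not lost.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

	log.Println("running; press Ctrl+C to stop")
	<-sigCh // block until SIGINT or SIGTERM is delivered
}
```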
+ +--------------- + +*In the next section, you will learn about [**Vote Extensions**](7.vote-extension.md), which let validators extend their vote in the forum application blockchain.* diff --git a/docs/tutorials/forum-application/7.vote-extension.md b/docs/tutorials/forum-application/7.vote-extension.md new file mode 100644 index 00000000000..4ae23ff90bf --- /dev/null +++ b/docs/tutorials/forum-application/7.vote-extension.md @@ -0,0 +1,130 @@ +--- +order: 7 +--- + +# Vote Extensions + +**In this section you will learn how a node can extend its pre-commit vote with `ExtendVote` and verify vote extensions with `VerifyVoteExtension` in the forum application.** + +## ExtendVote + +The `ExtendVote` method allows applications to extend the pre-commit vote with arbitrary data. This allows applications +to force their validators to do more than just validate blocks within consensus. + +When a validator is preparing to send a `pre-commit` vote, it first calls `ExtendVote`. The application then returns a +blob of data called a `vote extension`. This data is opaque to the consensus algorithm but can contain application-specific +information. + +The validator then sends both the `pre-commit` vote and the `vote extension` together to other validators. Other validators +also call `ExtendVote` to generate their own `vote extensions`. + +When a validator receives a `pre-commit` vote with an attached `vote extension`, it calls `VerifyVoteExtension` to validate +the `vote extension`. If valid, the validator includes the vote in its tally. + +The proposer of the next block will receive all `vote extensions` in `PrepareProposalRequest`. + +This allows validators to have access to all vote extensions at the next height. They can then use the data in the +`vote extensions` to inform the transactions that make it into the next block. + +**Following is the code for the `ExtendVote` function:** + +```go +// ExtendVote returns curse words as vote extensions +func (app *ForumApp) ExtendVote(_ context.Context, _ *abci.ExtendVoteRequest) (*abci.ExtendVoteResponse, error) { + app.logger.Info("Executing Application ExtendVote") + + return &abci.ExtendVoteResponse{VoteExtension: []byte(app.CurseWords)}, nil +} +``` + +**Explanation of code:** + +`ExtendVote` function takes two parameters: a `context.Context` and a pointer to an `abci.ExtendVoteRequest` struct. +It returns a pointer to an `ExtendVoteResponse` struct and an error. + +The method implementation simply returns a new instance of `ExtendVoteResponse` with the `VoteExtension` field set to +the value of `app.CurseWords`. `app.CurseWords` is a string holding the application's `|`-separated curse words; converting it to a byte slice produces the vote extension data. + +The `ExtendVoteResponse` struct is used to encapsulate the response data for the `ExtendVote` method. By setting +the `VoteExtension` field, the method includes the application-specific vote extension data in the response. + +In this implementation, the `ExtendVote` method in the ForumApp application returns the application-specific `vote extension` +data stored in the `app.CurseWords` variable. + +*Tip: The `vote extensions` are opaque to the consensus algorithm but visible to the application, allowing for a variety +of use cases like **price oracles, encrypted mempools, and threshold cryptography.***
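For intuition, here is a small, self-contained sketch (not part of the tutorial code) of how a curse-word vote extension produced by `ExtendVote` is encoded, and how a receiver decodes it again with the `|` separator used throughout this application:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// ExtendVote returns the application's curse words as a single
	// '|'-separated string, converted to bytes.
	curseWords := "muggle|bloodmagic"
	voteExtension := []byte(curseWords)

	// A validator receiving this extension (e.g. in VerifyVoteExtension or
	// while preparing the next proposal) splits it back into single words.
	words := strings.Split(string(voteExtension), "|")
	fmt.Println(words) // [muggle bloodmagic]
}
```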
## VerifyVoteExtension + +The `VerifyVoteExtension` method allows applications to verify the `VoteExtension` data attached to each `pre-commit` message. + +When a validator is preparing to send a `pre-commit` vote, it first calls `ExtendVote` to generate a `VoteExtension`. +This `VoteExtension` is broadcast along with the `pre-commit` vote. + +Other validators also call `ExtendVote` to generate their own `vote extensions`. However, not all validators will generate +the same `vote extension`. + +When a validator receives a `pre-commit` vote with an attached `vote extension`, it calls `VerifyVoteExtension` to +validate the `vote extension`. + +If the vote extension is successfully verified, the `pre-commit` vote is included in the tally. If validation fails, +the entire `pre-commit` message is ignored. + +**Following is the code for the `VerifyVoteExtension` function:** + +```go +// VerifyVoteExtension verifies the vote extensions and ensures they include the curse words +// It will not be called for extensions generated by this validator. +func (app *ForumApp) VerifyVoteExtension(_ context.Context, req *abci.VerifyVoteExtensionRequest) (*abci.VerifyVoteExtensionResponse, error) { + app.logger.Info("Executing Application VerifyVoteExtension") + + if _, ok := app.valAddrToPubKeyMap[string(req.ValidatorAddress)]; !ok { + // we do not have a validator with this address mapped; this should never happen + return nil, errors.New("unknown validator") + } + + curseWords := strings.Split(string(req.VoteExtension), "|") + if hasDuplicateWords(curseWords) { + return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT}, nil + } + + // ensure vote extension curse words limit has not been exceeded + if len(curseWords) > CurseWordsLimitVE { + return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT}, nil + } + return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT}, nil +} +``` + +**Explanation of code:** + +`VerifyVoteExtension` function takes two parameters: a `context.Context` and a pointer to a `VerifyVoteExtensionRequest` object. +It returns a pointer to a `VerifyVoteExtensionResponse` object and an error. + +The implementation checks if the validator address provided in the request (`req.ValidatorAddress`) is mapped to a public +key in `app.valAddrToPubKeyMap`. If the validator address is not found in the map, it returns an error indicating +an "unknown validator". This check ensures that the validator making the request is recognized by the application. + +The method splits the `VoteExtension` field of the request (`req.VoteExtension`) into individual words using the +`strings.Split` function. The separator used is the pipe character (|). The resulting words are stored in the `curseWords` slice. + +The implementation then calls the `hasDuplicateWords` helper, which builds a set of the words in the +`curseWords` slice to check whether the validator is trying to cheat by including the same word multiple times. + +If the number of distinct words in that set is less than the length of the `curseWords` slice, it means that there are +duplicate words in the extension. In this case, the method returns a response with a status of `abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT`, +indicating that the `vote extension` is rejected. + +Similarly, if the length of the `curseWords` slice is greater than the maximum number of curse words allowed in vote extensions (`CurseWordsLimitVE = 10`), +the extension is not permitted.
In this case, the method returns a response with a status of `abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT`, +indicating that the `vote extension` is rejected. + +If neither check fails, the method returns a response with a status of `abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT`, +indicating that the `vote extension` is accepted. + +*Tip: `Verified vote extensions` can be persisted by the application. For example, the application could store data +derived from the vote extensions.* + +--------------- + +*In the next section, you will find the entire implementation of the Forum Application in the [**app.go**](8.app.md) file.* diff --git a/docs/tutorials/forum-application/8.app.md b/docs/tutorials/forum-application/8.app.md new file mode 100644 index 00000000000..02bbb9c1a5e --- /dev/null +++ b/docs/tutorials/forum-application/8.app.md @@ -0,0 +1,449 @@ +--- +order: 8 +--- + +# Application code + +*In this section you will find the code for the entire `app.go` file.* + +```go +package abci + +import ( + "context" + "crypto" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/dgraph-io/badger/v4" + + "github.com/cometbft/cometbft/abci/tutorials/abci-v2-forum-app/model" + abci "github.com/cometbft/cometbft/abci/types" + cryptoencoding "github.com/cometbft/cometbft/crypto/encoding" + "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/version" +) + +const ( + ApplicationVersion = 1 + CurseWordsLimitVE = 10 +) + +type ForumApp struct { + abci.BaseApplication + valAddrToPubKeyMap map[string]crypto.PublicKey + CurseWords string + state AppState + onGoingBlock *badger.Txn + logger log.Logger +} + +func NewForumApp(dbDir string, appConfigPath string, logger log.Logger) (*ForumApp, error) { + db, err := model.NewDB(dbDir) + if err != nil { + return nil, fmt.Errorf("error initializing database: %w", err) + } + cfg, err := LoadConfig(appConfigPath) + if err != nil { + return nil, fmt.Errorf("error loading config file: %w", err) + } + + cfg.CurseWords = DeduplicateCurseWords(cfg.CurseWords) + + state, err := loadState(db) + if err != nil { + return nil, err + } + + // Reading the validators from the DB because CometBFT expects the application to have them in memory + valMap := make(map[string]crypto.PublicKey) + validators, err := state.DB.GetValidators() + if err != nil { + return nil, fmt.Errorf("can't load validators: %w", err) + } + for _, v := range validators { + pubKey, err := cryptoencoding.PubKeyFromTypeAndBytes(v.PubKeyType, v.PubKeyBytes) + if err != nil { + return nil, fmt.Errorf("can't decode public key: %w", err) + } + + valMap[string(pubKey.Address())] = pubKey + } + + return &ForumApp{ + state: state, + valAddrToPubKeyMap: valMap, + CurseWords: cfg.CurseWords, + logger: logger, + }, nil +} + +// Info returns application information. +func (app *ForumApp) Info(_ context.Context, _ *abci.InfoRequest) (*abci.InfoResponse, error) { + return &abci.InfoResponse{ + Version: version.ABCIVersion, + AppVersion: ApplicationVersion, + LastBlockHeight: app.state.Height, + + LastBlockAppHash: app.state.Hash(), + }, nil +} + +// Query the application state for specific information.
+func (app *ForumApp) Query(_ context.Context, query *abci.QueryRequest) (*abci.QueryResponse, error) { + app.logger.Info("Executing Application Query") + + resp := abci.QueryResponse{Key: query.Data} + + // Parse sender from query data + sender := string(query.Data) + + if sender == "history" { + messages, err := model.FetchHistory(app.state.DB) + if err != nil { + return nil, err + } + resp.Log = messages + resp.Value = []byte(messages) + + return &resp, nil + } + // Retrieve all messages sent by the sender + messages, err := model.GetMessagesBySender(app.state.DB, sender) + if err != nil { + return nil, err + } + + // Convert the messages to JSON and return as query result + resultBytes, err := json.Marshal(messages) + if err != nil { + return nil, err + } + + resp.Log = string(resultBytes) + resp.Value = resultBytes + + return &resp, nil +} + +// CheckTx handles validation of inbound transactions. If a transaction is not a valid message, if a user +// does not exist in the database, or if a user is banned, it returns an error. +func (app *ForumApp) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*abci.CheckTxResponse, error) { + app.logger.Info("Executing Application CheckTx") + + // Parse the tx message + msg, err := model.ParseMessage(req.Tx) + if err != nil { + app.logger.Info("CheckTx: failed to parse transaction message", "message", msg, "error", err) + return &abci.CheckTxResponse{Code: CodeTypeInvalidTxFormat, Log: "Invalid transaction", Info: err.Error()}, nil + } + + // Check for invalid sender + if len(msg.Sender) == 0 { + app.logger.Info("CheckTx: failed to parse transaction message", "message", msg, "error", "Sender is missing") + return &abci.CheckTxResponse{Code: CodeTypeInvalidTxFormat, Log: "Invalid transaction", Info: "Sender is missing"}, nil + } + + app.logger.Debug("searching for sender", "sender", msg.Sender) + u, err := app.state.DB.FindUserByName(msg.Sender) + + if err != nil { + if !errors.Is(err, badger.ErrKeyNotFound) { + app.logger.Error("CheckTx: Error in check tx", "tx", string(req.Tx), "error", err) + return &abci.CheckTxResponse{Code: CodeTypeEncodingError, Log: "Invalid transaction", Info: err.Error()}, nil + } + app.logger.Info("CheckTx: Sender not found", "sender", msg.Sender) + } else if u != nil && u.Banned { + return &abci.CheckTxResponse{Code: CodeTypeBanned, Log: "Invalid transaction", Info: "User is banned"}, nil + } + app.logger.Info("CheckTx: success checking tx", "message", msg.Message, "sender", msg.Sender) + return &abci.CheckTxResponse{Code: CodeTypeOK, Log: "Valid transaction", Info: "Transaction validation succeeded"}, nil +} + +// Consensus Connection + +// InitChain initializes the blockchain with information sent from CometBFT such as validators or consensus parameters. +func (app *ForumApp) InitChain(_ context.Context, req *abci.InitChainRequest) (*abci.InitChainResponse, error) { + app.logger.Info("Executing Application InitChain") + + for _, v := range req.Validators { + err := app.updateValidator(v) + if err != nil { + return nil, err + } + } + appHash := app.state.Hash() + + // This parameter can also be set in the genesis file + req.ConsensusParams.Feature.VoteExtensionsEnableHeight.Value = 1 + return &abci.InitChainResponse{ConsensusParams: req.ConsensusParams, AppHash: appHash}, nil +} + +// PrepareProposal is used to prepare a proposal for the next block in the blockchain. The application can re-order, remove +// or add transactions.
+func (app *ForumApp) PrepareProposal(_ context.Context, req *abci.PrepareProposalRequest) (*abci.PrepareProposalResponse, error) { + app.logger.Info("Executing Application PrepareProposal") + + // Get the curse words from all vote extensions received at the end of the last height. + voteExtensionCurseWords := app.getWordsFromVe(req.LocalLastCommit.Votes) + + curseWords := strings.Split(voteExtensionCurseWords, "|") + if hasDuplicateWords(curseWords) { + return nil, errors.New("duplicate words found") + } + + // PrepareProposal puts the BanTxs first, then adds the other transactions + // ProcessProposal should verify this ordering + proposedTxs := make([][]byte, 0) + finalProposal := make([][]byte, 0) + bannedUsersString := make(map[string]struct{}) + for _, tx := range req.Txs { + msg, err := model.ParseMessage(tx) + if err != nil { + // this should never happen since the tx should have been validated by CheckTx + return nil, fmt.Errorf("failed to marshal tx in PrepareProposal: %w", err) + } + // Adding the curse words from vote extensions too + if !hasCurseWord(msg.Message, voteExtensionCurseWords) { + proposedTxs = append(proposedTxs, tx) + continue + } + // If the message contains curse words then ban the user by + // creating a "ban transaction" and adding it to the final proposal + banTx := model.BanTx{UserName: msg.Sender} + bannedUsersString[msg.Sender] = struct{}{} + resultBytes, err := json.Marshal(banTx) + if err != nil { + // this should never happen since the ban tx should have been validated by CheckTx + return nil, fmt.Errorf("failed to marshal ban tx in PrepareProposal: %w", err) + } + finalProposal = append(finalProposal, resultBytes) + } + + // Need to loop again through the proposed Txs to make sure there is none left by a user that was banned + // after the tx was accepted + for _, tx := range proposedTxs { + // there should be no error here as these are just transactions we have checked and added + msg, err := model.ParseMessage(tx) + if err != nil { + // this should never happen since the tx should have been validated by CheckTx + return nil, fmt.Errorf("failed to marshal tx in PrepareProposal: %w", err) + } + // If the user is not banned, include this transaction in the final proposal + if _, ok := bannedUsersString[msg.Sender]; !ok { + finalProposal = append(finalProposal, tx) + } + } + return &abci.PrepareProposalResponse{Txs: finalProposal}, nil +} + +// ProcessProposal validates the proposed block and the transactions and returns a status indicating whether it was accepted or rejected. +func (app *ForumApp) ProcessProposal(_ context.Context, req *abci.ProcessProposalRequest) (*abci.ProcessProposalResponse, error) { + app.logger.Info("Executing Application ProcessProposal") + + bannedUsers := make(map[string]struct{}, 0) + + finishedBanTxIdx := len(req.Txs) + for i, tx := range req.Txs { + if !isBanTx(tx) { + finishedBanTxIdx = i + break + } + var parsedBan model.BanTx + err := json.Unmarshal(tx, &parsedBan) + if err != nil { + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, err + } + bannedUsers[parsedBan.UserName] = struct{}{} + } + + for _, tx := range req.Txs[finishedBanTxIdx:] { + // From this point on, there should be no BanTxs anymore + // If there is one, ParseMessage will return an error as the + // format of the two transactions is different.
+ msg, err := model.ParseMessage(tx) + if err != nil { + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, err + } + if _, ok := bannedUsers[msg.Sender]; ok { + // sending us a tx from a banned user + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil + } + } + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil +} + +// FinalizeBlock delivers the decided block to the Application. +func (app *ForumApp) FinalizeBlock(_ context.Context, req *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) { + app.logger.Info("Executing Application FinalizeBlock") + + // Iterate over the txs in the current block + app.onGoingBlock = app.state.DB.GetDB().NewTransaction(true) + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) + finishedBanTxIdx := len(req.Txs) + for i, tx := range req.Txs { + var err error + + if !isBanTx(tx) { + finishedBanTxIdx = i + break + } + banTx := new(model.BanTx) + err = json.Unmarshal(tx, &banTx) + if err != nil { + // since we did this check in ProcessProposal, this should never happen here + return nil, err + } + err = UpdateOrSetUser(app.state.DB, banTx.UserName, true, app.onGoingBlock) + if err != nil { + return nil, err + } + respTxs[i] = &abci.ExecTxResult{Code: CodeTypeOK} + } + + for idx, tx := range req.Txs[finishedBanTxIdx:] { + // From this point on, there should be no BanTxs anymore + // If there is one, ParseMessage will return an error as the + // format of the two transactions is different. + msg, err := model.ParseMessage(tx) + i := idx + finishedBanTxIdx + if err != nil { + // since we did this check in ProcessProposal, this should never happen here + return nil, err + } + + // Check if this sender already existed; if not, add the user too + err = UpdateOrSetUser(app.state.DB, msg.Sender, false, app.onGoingBlock) + if err != nil { + return nil, err + } + // Add the message for this sender + message, err := model.AppendToExistingMessages(app.state.DB, *msg) + if err != nil { + return nil, err + } + err = app.onGoingBlock.Set([]byte(msg.Sender+"msg"), []byte(message)) + if err != nil { + return nil, err + } + chatHistory, err := model.AppendToChat(app.state.DB, *msg) + if err != nil { + return nil, err + } + // Append messages to chat history + err = app.onGoingBlock.Set([]byte("history"), []byte(chatHistory)) + if err != nil { + return nil, err + } + // This adds the user to the DB, but the data is not committed nor persisted until Commit is called + respTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} + app.state.Size++ + } + app.state.Height = req.Height + + response := &abci.FinalizeBlockResponse{TxResults: respTxs, AppHash: app.state.Hash()} + return response, nil +} + +// Commit the application state. +func (app *ForumApp) Commit(_ context.Context, _ *abci.CommitRequest) (*abci.CommitResponse, error) { + app.logger.Info("Executing Application Commit") + + if err := app.onGoingBlock.Commit(); err != nil { + return nil, err + } + err := saveState(&app.state) + if err != nil { + return nil, err + } + return &abci.CommitResponse{}, nil +} + +// ExtendVote returns curse words as vote extensions.
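// The returned vote extension is the application's '|'-separated CurseWords
// string encoded as bytes. CometBFT treats this blob as opaque data; the
// application parses it again (in getWordsFromVe) when preparing the next proposal.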
+func (app *ForumApp) ExtendVote(_ context.Context, _ *abci.ExtendVoteRequest) (*abci.ExtendVoteResponse, error) { + app.logger.Info("Executing Application ExtendVote") + + return &abci.ExtendVoteResponse{VoteExtension: []byte(app.CurseWords)}, nil +} + +// VerifyVoteExtension verifies the vote extensions and ensures they include the curse words +// It will not be called for extensions generated by this validator. +func (app *ForumApp) VerifyVoteExtension(_ context.Context, req *abci.VerifyVoteExtensionRequest) (*abci.VerifyVoteExtensionResponse, error) { + app.logger.Info("Executing Application VerifyVoteExtension") + + if _, ok := app.valAddrToPubKeyMap[string(req.ValidatorAddress)]; !ok { + // we do not have a validator with this address mapped; this should never happen + return nil, errors.New("unknown validator") + } + + curseWords := strings.Split(string(req.VoteExtension), "|") + if hasDuplicateWords(curseWords) { + return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT}, nil + } + + // ensure vote extension curse words limit has not been exceeded + if len(curseWords) > CurseWordsLimitVE { + return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT}, nil + } + return &abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT}, nil +} + +// getWordsFromVe gets the curse words from the vote extensions as one string; the words are concatenated using a '|'. +// This method also ensures there are no duplicate curse words in the final set returned. +func (app *ForumApp) getWordsFromVe(voteExtensions []abci.ExtendedVoteInfo) string { + curseWordMap := make(map[string]int) + for _, vote := range voteExtensions { + // This code gets the curse words and makes sure that we do not add them more than once + // Thus ensuring each validator only adds one word once + curseWords := strings.Split(string(vote.GetVoteExtension()), "|") + + for _, word := range curseWords { + if count, ok := curseWordMap[word]; !ok { + curseWordMap[word] = 1 + } else { + curseWordMap[word] = count + 1 + } + } + } + app.logger.Info("Processed vote extensions", "curse_words", curseWordMap) + majority := len(app.valAddrToPubKeyMap) / 3 // A word must be reported by more than 1/3 of the validators to be included + + voteExtensionCurseWords := "" + for word, count := range curseWordMap { + if count > majority { + if voteExtensionCurseWords == "" { + voteExtensionCurseWords = word + } else { + voteExtensionCurseWords += "|" + word + } + } + } + return voteExtensionCurseWords +} + +// hasDuplicateWords detects if there are duplicate words in the slice. +func hasDuplicateWords(words []string) bool { + wordMap := make(map[string]struct{}) + + for _, word := range words { + wordMap[word] = struct{}{} + } + + return len(words) != len(wordMap) +} +``` + +**Explanation of code:** + +The state of the app is stored in an `AppState` struct which contains the current height, hash and a `BadgerDB` instance. + +The `InitChain` function initializes the validators from the CometBFT response and loads the initial state. + +For an explanation of the other functions like `PrepareProposal`, `ProcessProposal`, `FinalizeBlock`, `ExtendVote` and +`VerifyVoteExtension`, please refer to the previous sections.
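To make the threshold in `getWordsFromVe` concrete: with four validators, integer division gives `majority = 4/3 = 1`, so a word is kept only when `count > 1`, i.e. when at least two validators reported it. The following self-contained sketch (with hypothetical inputs, not the tutorial's types) reproduces that counting-and-filtering step:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical vote extensions from four validators.
	voteExtensions := []string{
		"muggle|bloodmagic",
		"muggle",
		"rain",
		"muggle|rain",
	}

	// Count how many validators reported each word.
	counts := make(map[string]int)
	for _, ve := range voteExtensions {
		for _, word := range strings.Split(ve, "|") {
			counts[word]++
		}
	}

	// Same threshold rule as getWordsFromVe: keep words reported by
	// more than 1/3 of the validators.
	majority := len(voteExtensions) / 3 // 4 / 3 == 1
	for word, count := range counts {
		if count > majority {
			// muggle (3) and rain (2) qualify; bloodmagic (1) does not.
			fmt.Println(word, count)
		}
	}
}
```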
+ +--------------- + +*In the next section, you will learn how to [**run the application**](9.run-app.md).* diff --git a/docs/tutorials/forum-application/9.run-app.md b/docs/tutorials/forum-application/9.run-app.md new file mode 100644 index 00000000000..923309d63cc --- /dev/null +++ b/docs/tutorials/forum-application/9.run-app.md @@ -0,0 +1,274 @@ +--- +order: 9 +--- + +# Running the Application + +In previous sections you learned about different ABCI 2.0 methods and how they are used. + +In this section you will learn how to run the `Forum Application`. + +## Initializing and Running + +The application is almost ready to run, but first we'll need to populate the CometBFT configuration file. + +The `cometbft init` command below will create a CometBFT home directory and add a basic set of configuration +files in its `config/` subdirectory. + +For more information on what these files contain see the [configuration documentation](https://docs.cometbft.com/v1.0/references/config/). + +### Install CometBFT + +Clone the `cometbft` repository: + +```bash +git clone https://github.com/cometbft/cometbft +``` + +Check out the latest `v1` release and install it: + +```bash +cd cometbft +git checkout v1.0.0-rc2 +make install +``` + +Initialize CometBFT: + +```bash +cometbft init --home /tmp/forum-app +``` + +You should see an output similar to the following: + +```bash +I[2024-04-23|20:16:43.493] Found private validator module=main keyFile=/tmp/forum-app/config/priv_validator_key.json stateFile=/tmp/forum-app/data/priv_validator_state.json +I[2024-04-23|20:16:43.493] Found node key module=main path=/tmp/forum-app/config/node_key.json +I[2024-04-23|20:16:43.493] Found genesis file module=main path=/tmp/forum-app/config/genesis.json +``` + +Now build the app: + +```bash +cd abci/tutorials/abci-v2-forum-app +go build +``` + +If there are no errors when running the build command above, then everything is now in place to run your application. + +> Note: If this is not the first time you're running this application, you need to remove the previously created database. +> Run `rm -Rf forum-db` to remove the previous database folder and start fresh. + +Let's run our Forum Application, specifying a home directory (using `--home`) hosted in the `/tmp/forum-app` folder +(if you don't specify the home folder, it defaults to `$HOME/.forumapp`): + +```bash +./abci-v2-forum-app --home /tmp/forum-app +``` + +This should start the full node and connect to our ABCI application, which will be reflected in the application output.
+ +```bash +(abci-v2-forum-app) > ./abci-v2-forum-app --home /tmp/forum-app +badger 2024/07/11 11:00:19 INFO: All 0 tables opened in 0s +badger 2024/07/11 11:00:19 INFO: Discard stats nextEmptySlot: 0 +badger 2024/07/11 11:00:19 INFO: Set nextTxnTs to 0 +I[2024-07-11|15:00:19.091] State store key layout version version=vv1 +I[2024-07-11|15:00:19.100] Blockstore version version=v1 +I[2024-07-11|15:00:19.100] WARNING: deleting genesis file from database if present, the database stores a hash of the original genesis file now +I[2024-07-11|15:00:19.100] service start module=proxy msg="Starting multiAppConn service" impl=multiAppConn +I[2024-07-11|15:00:19.100] service start module=abci-client connection=query msg="Starting localClient service" impl=localClient +I[2024-07-11|15:00:19.100] service start module=abci-client connection=snapshot msg="Starting localClient service" impl=localClient +I[2024-07-11|15:00:19.100] service start module=abci-client connection=mempool msg="Starting localClient service" impl=localClient +I[2024-07-11|15:00:19.100] service start module=abci-client connection=consensus msg="Starting localClient service" impl=localClient +I[2024-07-11|15:00:19.100] service start module=events msg="Starting EventBus service" impl=EventBus +I[2024-07-11|15:00:19.100] service start module=pubsub msg="Starting PubSub service" impl=PubSub +I[2024-07-11|15:00:19.120] service start module=txindex msg="Starting IndexerService service" impl=IndexerService +I[2024-07-11|15:00:19.120] ABCI Handshake App Info module=consensus height=0 hash=0000000000000000 software-version=2.1.0 protocol-version=1 +I[2024-07-11|15:00:19.120] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 +I[2024-07-11|15:00:19.120] Executing Application InitChain +I[2024-07-11|15:00:19.124] Completed ABCI Handshake - CometBFT and App are synced module=consensus appHeight=0 appHash=0000000000000000 +I[2024-07-11|15:00:19.124] Version info tendermint_version=1.0.0-dev abci=2.1.0 block=11 p2p=9 commit_hash= +I[2024-07-11|15:00:19.124] This node is a validator module=consensus addr=488A741EC0B687FE06C045EFAB12DDBFF82A5993 pubKey=PubKeyEd25519{41AA07512ED9B4607C17AF49B7D30737FC19D77EAA2FE69F66F5F46AB619D221} +I[2024-07-11|15:00:19.157] P2P Node ID module=p2p ID=f32302c56b40b7bc249613185913b422be494ee0 file=/tmp/forum-app/config/node_key.json +I[2024-07-11|15:00:19.157] Adding persistent peers module=p2p addrs=[] +I[2024-07-11|15:00:19.157] Adding unconditional peer ids module=p2p ids=[] +I[2024-07-11|15:00:19.157] Add our address to book module=p2p book=/tmp/forum-app/config/addrbook.json addr=f32302c56b40b7bc249613185913b422be494ee0@0.0.0.0:26656 +I[2024-07-11|15:00:19.157] service start msg="Starting Node service" impl=Node +I[2024-07-11|15:00:19.158] service start module=p2p msg="Starting P2P Switch service" impl="P2P Switch" +I[2024-07-11|15:00:19.158] service start module=mempool msg="Starting Mempool service" impl=Mempool +I[2024-07-11|15:00:19.158] service start module=blocksync msg="Starting Reactor service" impl=Reactor +I[2024-07-11|15:00:19.158] serve module=rpc-server msg="Starting RPC HTTP server on 127.0.0.1:26657" +I[2024-07-11|15:00:19.158] service start module=consensus msg="Starting Consensus service" impl=ConsensusReactor +I[2024-07-11|15:00:19.158] service start module=consensus msg="Starting State service" impl=ConsensusState +I[2024-07-11|15:00:19.158] service start module=consensus wal=/tmp/forum-app/data/cs.wal/wal msg="Starting baseWAL service" impl=baseWAL 
+I[2024-07-11|15:00:19.162] service start module=consensus wal=/tmp/forum-app/data/cs.wal/wal msg="Starting Group service" impl=Group +I[2024-07-11|15:00:19.162] service start module=consensus msg="Starting TimeoutTicker service" impl=TimeoutTicker +I[2024-07-11|15:00:19.162] Searching for height module=consensus wal=/tmp/forum-app/data/cs.wal/wal height=1 min=0 max=0 +I[2024-07-11|15:00:19.162] Searching for height module=consensus wal=/tmp/forum-app/data/cs.wal/wal height=0 min=0 max=0 +I[2024-07-11|15:00:19.162] Found module=consensus wal=/tmp/forum-app/data/cs.wal/wal height=0 index=0 +I[2024-07-11|15:00:19.162] Catchup by replaying consensus messages module=consensus height=1 +I[2024-07-11|15:00:19.162] Replay: Done module=consensus +I[2024-07-11|15:00:19.162] service start module=evidence msg="Starting Evidence service" impl=Evidence +I[2024-07-11|15:00:19.162] service start module=statesync msg="Starting StateSync service" impl=StateSync +I[2024-07-11|15:00:19.162] service start module=pex msg="Starting PEX service" impl=PEX +I[2024-07-11|15:00:19.162] service start module=p2p book=/tmp/forum-app/config/addrbook.json msg="Starting AddrBook service" impl=AddrBook +I[2024-07-11|15:00:19.163] Saving AddrBook to file module=p2p book=/tmp/forum-app/config/addrbook.json size=0 +I[2024-07-11|15:00:19.163] Ensure peers module=pex numOutPeers=0 numInPeers=0 numDialing=0 numToDial=10 +I[2024-07-11|15:00:19.163] service start module=state msg="Starting Pruner service" impl=Pruner +I[2024-07-11|15:00:19.163] No addresses to dial. Falling back to seeds module=pex +I[2024-07-11|15:00:19.163] Started pruning blocks module=state interval=10s +I[2024-07-11|15:00:20.154] Timed out module=consensus dur=991.108ms height=1 round=0 step=RoundStepNewHeight +I[2024-07-11|15:00:20.154] Executing Application PrepareProposal +I[2024-07-11|15:00:20.154] Processed vote extensions curse_words=map[] +I[2024-07-11|15:00:20.166] Received proposal module=consensus proposal="Proposal{1/0 (3F859221DD1F92490BCB4D93E27BCF61716285DB1412091EDAEC41E31752A161:1:3E782D5C89A4, -1) C9C6EEDA8B33 @ 2024-07-11T15:00:11.194908Z}" proposer=488A741EC0B687FE06C045EFAB12DDBFF82A5993 +I[2024-07-11|15:00:20.171] Received complete proposal block module=consensus height=1 hash=3F859221DD1F92490BCB4D93E27BCF61716285DB1412091EDAEC41E31752A161 +I[2024-07-11|15:00:20.171] Executing Application ProcessProposal +I[2024-07-11|15:00:20.176] Executing Application ExtendVote +I[2024-07-11|15:00:20.181] Finalizing commit of block module=consensus height=1 hash=3F859221DD1F92490BCB4D93E27BCF61716285DB1412091EDAEC41E31752A161 root=0000000000000000 num_txs=0 +I[2024-07-11|15:00:20.190] Executing Application FinalizeBlock +I[2024-07-11|15:00:20.190] Finalized block module=state height=1 num_txs_res=0 num_val_updates=0 block_app_hash=0000000000000000 syncing_to_height=1 +I[2024-07-11|15:00:20.194] Executing Application Commit +I[2024-07-11|15:00:20.194] Committed state module=state height=1 block_app_hash=0000000000000000 +I[2024-07-11|15:00:20.203] indexed block events module=txindex height=1 +``` + +Also, the application using CometBFT Core is producing blocks 🎉🎉 and you can see this reflected in the log output of the service in lines like this: + +```bash +I[2024-07-11|15:00:21.181] Executing Application PrepareProposal +I[2024-07-11|15:00:21.181] Processed vote extensions curse_words="map[bad:1 bloodmagic:1 cry:1 muggle:1 rain:1]" +I[2024-07-11|15:00:21.193] Received proposal module=consensus proposal="Proposal{2/0 
(2F2187B9E889B4C9DD33B5D510039F12CE70DC2854D8EDF706B6C46178909BA6:1:F60DF39D940E, -1) 9E244663B13B @ 2024-07-11T15:00:20.176517Z}" proposer=488A741EC0B687FE06C045EFAB12DDBFF82A5993 +I[2024-07-11|15:00:21.198] Received complete proposal block module=consensus height=2 hash=2F2187B9E889B4C9DD33B5D510039F12CE70DC2854D8EDF706B6C46178909BA6 +I[2024-07-11|15:00:21.198] Executing Application ProcessProposal +I[2024-07-11|15:00:21.202] Executing Application ExtendVote +I[2024-07-11|15:00:21.207] Finalizing commit of block module=consensus height=2 hash=2F2187B9E889B4C9DD33B5D510039F12CE70DC2854D8EDF706B6C46178909BA6 root=0000000000000000 num_txs=0 +I[2024-07-11|15:00:21.216] Executing Application FinalizeBlock +I[2024-07-11|15:00:21.216] Finalized block module=state height=2 num_txs_res=0 num_val_updates=0 block_app_hash=0000000000000000 syncing_to_height=2 +I[2024-07-11|15:00:21.220] Executing Application Commit +I[2024-07-11|15:00:21.220] Committed state module=state height=2 block_app_hash=0000000000000000 +I[2024-07-11|15:00:21.228] indexed block events module=txindex height=2 +``` + +## Using the application + +Let's try submitting a transaction to our new application. Open another terminal window and run the following `curl` command: + +### Submit a tx with a non-curse word + +```bash +curl -s 'localhost:26657/broadcast_tx_commit?tx="sender:Ron,message:Music"' +``` + +If everything went well, you should see a response indicating at which height the transaction was included in the blockchain. + +```bash +{"jsonrpc":"2.0","id":-1,"result":{"check_tx":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[],"codespace":""},"tx_result":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[],"codespace":""},"hash":"DD0DD9613C83E5CCDE31342DC49CEF1DD6423271B090BC6A73E800FD163E7ADD","height":"25"}}% +``` + +Let's make sure that transaction really was persisted by the application. Run the following command: + +```bash +curl -s 'localhost:26657/abci_query?data="Ron"' +``` + +Let's examine the response object that this request returns. The request returns a `json` object with a `key` and `value` field set. + +```bash +{ + "jsonrpc": "2.0", + "id": -1, + "result": { + "response": { + "code": 0, + "log": "\"Music\"", + "info": "", + "index": "0", + "key": "Um9u", + "value": "Ik11c2ljIg==", + "proofOps": null, + "height": "0", + "codespace": "" + } + } +} +``` + +Those values don't look like the key and value we sent to CometBFT. What's going on here? + +The response contains a `base64` encoded representation of the data we submitted. To get the original value out of +this data, we can use the `base64` command line utility to view the key (sender) and value (message): + +To view the value of `key` (which is the sender) run: + +```bash +echo Um9u | base64 -d +``` + +It will output `Ron`. + +Then run the command below to view the `value` (which is the message): + +```bash +echo Ik11c2ljIg== | base64 -d +``` + +It will output `Music`. + +As you can observe, the `Music` message was added by `Ron`. + +### Submit a tx with a curse word + +```bash +curl -s 'localhost:26657/broadcast_tx_commit?tx="sender:Malfoy,message:muggle"' +``` + +After a certain amount of time, you will see a message that the tx could not be included in a block.
This happens because the tx is rejected in `PrepareProposal`: + +```bash +{"jsonrpc":"2.0","id":-1,"error":{"code":-32603,"message":"Internal error","data":"timed out waiting for tx to be included in a block"}} +``` + +Let's make sure that transaction really was not persisted by the application. Run the following command: + +```bash +curl -s 'localhost:26657/abci_query?data="Malfoy"' +``` + +This should return an error with `Key not found` since the sender and message were not persisted by the application: + +```bash +{"jsonrpc":"2.0","id":-1,"error":{"code":-32603,"message":"Internal error","data":"Key not found"}} +``` + +### Submit a tx with a non-curse word from a banned user + +```bash +curl -s 'localhost:26657/broadcast_tx_commit?tx="sender:Malfoy,message:heroic"' +``` + +After a certain amount of time, you will observe that the tx was rejected in `CheckTx` since the user has already been banned: + +```bash +{"jsonrpc":"2.0","id":-1,"result":{"check_tx":{"code":3,"data":null,"log":"User is banned","info":"","gas_wanted":"0","gas_used":"0","events":[],"codespace":""},"tx_result":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[],"codespace":""},"hash":"9B110D7648000037B8385562B15810C70E1957AF2F65790B3617CB84C9398536","height":"0"}}% +``` + +### Checking the chat history + +The application can also return the history of messages that were submitted and added to the application. To view the chat +history, run the following query: + +```bash +curl -s 'localhost:26657/abci_query?data="history"' +``` + +You can see the chat history in the log field of the response: + +```bash +{"jsonrpc":"2.0","id":-1,"result":{"response":{"code":0,"log":"{sender:Ron,message:Music}","info":"","index":"0","key":"aGlzdG9yeQ==","value":"e3NlbmRlcjpSb24sbWVzc2FnZTpNdXNpY30=","proofOps":null,"height":"0","codespace":""}}}% +``` + +## Congratulations + +Our tutorial on building an ABCI 2.0 application using CometBFT has come to a close. We hope that the comprehensive tutorial +we've provided has equipped you with the knowledge and skills necessary to develop your own ABCI 2.0 applications with ease and confidence. diff --git a/docs/tutorials/forum-application/README.md b/docs/tutorials/forum-application/README.md new file mode 100644 index 00000000000..4dc7362edbc --- /dev/null +++ b/docs/tutorials/forum-application/README.md @@ -0,0 +1,16 @@ +--- +order: 1 +parent: + title: Forum Application Tutorial + order: 8 +--- + +# ABCI 2.0 Forum Application Tutorial + +In this tutorial, we will build a Forum application using [ABCI 2.0](https://docs.cometbft.com/v1.0/spec/abci/). +A Forum is an application where people can discuss topics by posting messages and responding to each other. + +We will demonstrate the use of `ABCI 2.0` methods like `CheckTx`, `PrepareProposal`, `ProcessProposal`, `FinalizeBlock`, +`ExtendVote`, and `VerifyVoteExtension`. + +Follow the [tutorial](1.abci-intro.md) in order to create a forum application using ABCI 2.0 methods.
diff --git a/docs/tutorials/forum-application/images/ABCI2.jpg b/docs/tutorials/forum-application/images/ABCI2.jpg new file mode 100644 index 00000000000..2fa8612436d Binary files /dev/null and b/docs/tutorials/forum-application/images/ABCI2.jpg differ diff --git a/docs/tutorials/forum-application/images/Forum.jpg b/docs/tutorials/forum-application/images/Forum.jpg new file mode 100644 index 00000000000..c6d4026cc2b Binary files /dev/null and b/docs/tutorials/forum-application/images/Forum.jpg differ diff --git a/docs/tutorials/forum-application/images/VE.jpg b/docs/tutorials/forum-application/images/VE.jpg new file mode 100644 index 00000000000..09eb87d802e Binary files /dev/null and b/docs/tutorials/forum-application/images/VE.jpg differ diff --git a/docs/guides/go-built-in.md b/docs/tutorials/go-built-in.md similarity index 70% rename from docs/guides/go-built-in.md rename to docs/tutorials/go-built-in.md index aed2faa3a08..6a85e2205fc 100644 --- a/docs/guides/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -1,5 +1,5 @@ --- -order: 2 +order: 4 --- # Creating a built-in application in Go @@ -38,26 +38,45 @@ This is the approach followed in this tutorial. On the other hand, having a separate application might give you better security guarantees as two processes would be communicating via established binary protocol. CometBFT will not have access to application's state. -If that is the way you wish to proceed, use the [Creating an application in Go](./go.md) guide instead of this one. +If that is the way you wish to proceed, use the [Creating an application in Go](go.md) guide instead of this one. -## 1.1 Installing Go +## 1.0 Installing Go -Verify that you have the latest version of Go installed (refer to the [official guide for installing Go](https://golang.org/doc/install)): +Verify that you have the latest version of Go installed (refer to the [official guide for installing Go](https://golang.org/doc/install)). +You should see an output similar to this one (the `go version` might be slightly different depending on the Go version you +have installed and the computer platform): ```bash $ go version -go version go1.21.1 darwin/amd64 +go version go1.23.1 darwin/amd64 + +``` + +## 1.1 Installing CometBFT + +Let's install `CometBFT` locally by running the following command: + +```bash +go install github.com/cometbft/cometbft/cmd/cometbft@v1.0 +``` + +Test the installation: + +```bash +$ cometbft version +v1.0.0 ``` ## 1.2 Creating a new Go project -We'll start by creating a new Go project. +We'll start by creating a new Go project and moving into the new directory: ```bash mkdir kvstore +cd kvstore ``` -Inside the example directory, create a `main.go` file with the following content: +Inside the `kvstore` directory, create a `main.go` file with the following content: ```go package main @@ -71,54 +90,97 @@ func main() { } ``` -When run, this should print "Hello, CometBFT" to the standard output. +Run the following command: + +```bash +go run main.go +``` + +This should print "Hello, CometBFT" to the standard output. ```bash -cd kvstore -$ go run main.go Hello, CometBFT ``` We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management, so let's start by including a dependency on the latest version of -CometBFT, `v0.38.0` in this example. +CometBFT, `v1.0` in this example. + +Run the commands below to create the go module file (`go.mod`): ```bash go mod init kvstore -go get github.com/cometbft/cometbft@v0.38.0 ``` +This should produce an output similar to the following.
+ +**NOTE**: There is no need to run `go mod tidy` at this time; ignore that suggestion for now. + +```bash +go: creating new go.mod: module kvstore +go: to add module requirements and sums: + go mod tidy +``` + +Now, let's add `cometbft` as a dependency to our project. Run the `go get` command below: + +```bash +go get github.com/cometbft/cometbft@v1.0 +``` + +**NOTE**: This will add the latest release in the `v1.0` line, so you might get a different patch release, e.g. `v1.0.0` +or `v1.0.1` + +```bash +go: added github.com/cometbft/cometbft v1.0.0 +``` + +After running the above commands you will see two generated files, `go.mod` and `go.sum`. The `go.mod` file should look similar to: ```go module kvstore -go 1.21.1 +go 1.23.1 -require ( -github.com/cometbft/cometbft v0.38.0 -) +require github.com/cometbft/cometbft v1.0.0 // indirect ``` As you write the kvstore application, you can rebuild the binary by pulling any new dependencies and recompiling it. -```sh +```bash go get go build ``` +At this point, if you ran the `go build` command above, you should see four files in the directory: + +```bash +$ ls +go.mod go.sum kvstore main.go +``` + +The `kvstore` file is the generated executable. You can run it again to ensure everything still works: + +```bash +$ ./kvstore +Hello, CometBFT +``` + +## 1.3 Writing a CometBFT application + +CometBFT communicates with the application through the Application -BlockChain Interface (ABCI). The messages exchanged through the interface are +BlockChain Interface (`ABCI`). The messages exchanged through the interface are defined in the ABCI [protobuf -file](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto). +file](https://github.com/cometbft/cometbft/blob/main/proto/cometbft/abci/v1/types.proto). We begin by creating the basic scaffolding for an ABCI application by creating a new type, `KVStoreApplication`, which implements the -methods defined by the `abcitypes.Application` interface. +methods defined by the [abcitypes.Application](https://github.com/cometbft/cometbft/blob/main/abci/types/application.go) +interface.
Create a file called `app.go` with the following contents: @@ -127,6 +189,7 @@ package main import ( abcitypes "github.com/cometbft/cometbft/abci/types" + "context" ) type KVStoreApplication struct{} @@ -137,60 +200,60 @@ func NewKVStoreApplication() *KVStoreApplication { return &KVStoreApplication{} } -func (app *KVStoreApplication) Info(_ context.Context, info *abcitypes.RequestInfo) (*abcitypes.ResponseInfo, error) { - return &abcitypes.ResponseInfo{}, nil +func (app *KVStoreApplication) Info(_ context.Context, info *abcitypes.InfoRequest) (*abcitypes.InfoResponse, error) { + return &abcitypes.InfoResponse{}, nil } -func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) { - return &abcitypes.ResponseQuery{} +func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.QueryRequest) (*abcitypes.QueryResponse, error) { + return &abcitypes.QueryResponse{}, nil } -func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) { - return &abcitypes.ResponseCheckTx{}, nil +func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) { + return &abcitypes.CheckTxResponse{Code: 0}, nil } -func (app *KVStoreApplication) InitChain(_ context.Context, chain *abcitypes.RequestInitChain) (*abcitypes.ResponseInitChain, error) { - return &abcitypes.ResponseInitChain{}, nil +func (app *KVStoreApplication) InitChain(_ context.Context, chain *abcitypes.InitChainRequest) (*abcitypes.InitChainResponse, error) { + return &abcitypes.InitChainResponse{}, nil } -func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) { - return &abcitypes.ResponsePrepareProposal{}, nil +func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.PrepareProposalRequest) (*abcitypes.PrepareProposalResponse, error) { + return &abcitypes.PrepareProposalResponse{}, nil } -func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) { - return &abcitypes.ResponseProcessProposal{}, nil +func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.ProcessProposalRequest) (*abcitypes.ProcessProposalResponse, error) { + return &abcitypes.ProcessProposalResponse{}, nil } -func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) { - return &abcitypes.ResponseFinalizeBlock{}, nil +func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.FinalizeBlockRequest) (*abcitypes.FinalizeBlockResponse, error) { + return &abcitypes.FinalizeBlockResponse{}, nil } -func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) { - return &abcitypes.ResponseCommit{}, nil +func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.CommitRequest) (*abcitypes.CommitResponse, error) { + return &abcitypes.CommitResponse{}, nil } -func (app *KVStoreApplication) ListSnapshots(_ context.Context, snapshots *abcitypes.RequestListSnapshots) (*abcitypes.ResponseListSnapshots, error) { - return &abcitypes.ResponseListSnapshots{}, nil +func (app *KVStoreApplication) ListSnapshots(_ context.Context, snapshots *abcitypes.ListSnapshotsRequest) 
(*abcitypes.ListSnapshotsResponse, error) { + return &abcitypes.ListSnapshotsResponse{}, nil } -func (app *KVStoreApplication) OfferSnapshot(_ context.Context, snapshot *abcitypes.RequestOfferSnapshot) (*abcitypes.ResponseOfferSnapshot, error) { - return &abcitypes.ResponseOfferSnapshot{}, nil +func (app *KVStoreApplication) OfferSnapshot(_ context.Context, snapshot *abcitypes.OfferSnapshotRequest) (*abcitypes.OfferSnapshotResponse, error) { + return &abcitypes.OfferSnapshotResponse{}, nil } -func (app *KVStoreApplication) LoadSnapshotChunk(_ context.Context, chunk *abcitypes.RequestLoadSnapshotChunk) (*abcitypes.ResponseLoadSnapshotChunk, error) { - return &abcitypes.ResponseLoadSnapshotChunk{}, nil +func (app *KVStoreApplication) LoadSnapshotChunk(_ context.Context, chunk *abcitypes.LoadSnapshotChunkRequest) (*abcitypes.LoadSnapshotChunkResponse, error) { + return &abcitypes.LoadSnapshotChunkResponse{}, nil } -func (app *KVStoreApplication) ApplySnapshotChunk(_ context.Context, chunk *abcitypes.RequestApplySnapshotChunk) (*abcitypes.ResponseApplySnapshotChunk, error) { - return &abcitypes.ResponseApplySnapshotChunk{Result: abcitypes.ResponseApplySnapshotChunk_ACCEPT}, nil +func (app *KVStoreApplication) ApplySnapshotChunk(_ context.Context, chunk *abcitypes.ApplySnapshotChunkRequest) (*abcitypes.ApplySnapshotChunkResponse, error) { + return &abcitypes.ApplySnapshotChunkResponse{Result: abcitypes.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil } -func (app KVStoreApplication) ExtendVote(_ context.Context, extend *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) { - return &abcitypes.ResponseExtendVote{}, nil +func (app KVStoreApplication) ExtendVote(_ context.Context, extend *abcitypes.ExtendVoteRequest) (*abcitypes.ExtendVoteResponse, error) { + return &abcitypes.ExtendVoteResponse{}, nil } -func (app *KVStoreApplication) VerifyVoteExtension(_ context.Context, verify *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) { - return &abcitypes.ResponseVerifyVoteExtension{}, nil +func (app *KVStoreApplication) VerifyVoteExtension(_ context.Context, verify *abcitypes.VerifyVoteExtensionRequest) (*abcitypes.VerifyVoteExtensionResponse, error) { + return &abcitypes.VerifyVoteExtensionResponse{}, nil } ``` @@ -198,7 +261,7 @@ The types used here are defined in the CometBFT library and were added as a dependency to the project when you ran `go get`. If your IDE is not recognizing the types, go ahead and run the command again. ```bash -go get github.com/cometbft/cometbft@v0.38.0 +go get github.com/cometbft/cometbft@v1.0 ``` Now go back to the `main.go` and modify the `main` function so it matches the following, @@ -251,8 +314,9 @@ Next, update the `import` stanza at the top to include the Badger library: ```go import( - "github.com/dgraph-io/badger/v3" + "context" abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v4" ) ``` @@ -288,9 +352,9 @@ func (app *KVStoreApplication) isValid(tx []byte) uint32 { Now you can rewrite the `CheckTx` method to use the helper function: ```go -func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) { +func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) { code := app.isValid(check.Tx) - return &abcitypes.ResponseCheckTx{Code: code}, nil + return &abcitypes.CheckTxResponse{Code: code}, nil } ``` @@ -307,16 +371,17 @@ its validation checks.
The specific value of the code is meaningless to CometBFT. Non-zero codes are logged by CometBFT so applications can provide more specific information on why the transaction was rejected. -Note that `CheckTx` does not execute the transaction, it only verifies that the transaction could be executed. We do not know yet if the rest of the network has agreed to accept this transaction into a block. +Note that `CheckTx` does not execute the transaction, it only verifies that the transaction could be executed. We do +not know yet if the rest of the network has agreed to accept this transaction into a block. Finally, make sure to add the `bytes` package to the `import` stanza at the top of `app.go`: ```go import( "bytes" - - "github.com/dgraph-io/badger/v3" + "context" abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v4" ) ``` @@ -329,19 +394,19 @@ application via `FinalizeBlock`. This method is responsible for executing the block and returning a response to the consensus engine. Providing a single `FinalizeBlock` method to signal the finalization of a block simplifies the ABCI interface and increases flexibility in the execution pipeline. -The `FinalizeBlock` method executes the block, including any necessary transaction processing and state updates, and returns a `ResponseFinalizeBlock` object which contains any necessary information about the executed block. +The `FinalizeBlock` method executes the block, including any necessary transaction processing and state updates, and returns a `FinalizeBlockResponse` object which contains any necessary information about the executed block. -**Note:** `FinalizeBlock` only prepares the update to be made and does not change the state of the application. The state change is actually committed in a later stage i.e. in `commit` phase. +**Note:** `FinalizeBlock` only prepares the update to be made and does not change the state of the application. The state change is actually committed in a later stage, in the `Commit` phase. Note that, to implement these calls in our application, we're going to make use of Badger's transaction mechanism. We will always refer to these as Badger transactions, so as not to confuse them with the transactions included in the blocks delivered by CometBFT, the _application transactions_. First, let's create a new Badger transaction during `FinalizeBlock`. All application transactions in the current block will be executed within this Badger transaction. -Next, let's modify `FinalizeBlock` to add the `key` and `value` to the Badger transaction every time our application processes a new application transaction from the list received through `RequestFinalizeBlock`. +Next, let's modify `FinalizeBlock` to add the `key` and `value` to the Badger transaction every time our application processes a new application transaction from the list received through `FinalizeBlockRequest`. Note that we check the validity of the transaction _again_ during `FinalizeBlock`.
```go -func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) { +func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.FinalizeBlockRequest) (*abcitypes.FinalizeBlockResponse, error) { var txs = make([]*abcitypes.ExecTxResult, len(req.Txs)) app.onGoingBlock = app.db.NewTransaction(true) @@ -360,16 +425,32 @@ func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.R log.Printf("Successfully added key %s with value %s", key, value) - txs[i] = &abcitypes.ExecTxResult{} + // Add an event for the transaction execution. + // Multiple events can be emitted for a transaction, but we are adding only one event. + txs[i] = &abcitypes.ExecTxResult{ + Code: 0, + Events: []abcitypes.Event{ + { + Type: "app", + Attributes: []abcitypes.EventAttribute{ + {Key: "key", Value: string(key), Index: true}, + {Key: "value", Value: string(value), Index: true}, + }, + }, + }, + } } } - return &abcitypes.ResponseFinalizeBlock{ + return &abcitypes.FinalizeBlockResponse{ TxResults: txs, + NextBlockDelay: 1 * time.Second, }, nil } ``` +`NextBlockDelay` is the delay between the moment the current block is committed and the moment the next height starts. Normally you don't need to change the default value (1s). Please refer to the [spec](../../spec/abci/abci++_methods.md#finalizeblock) for more information. + Transactions are not guaranteed to be valid when they are delivered to an application, even if they were valid when they were proposed. This can happen if the application state is used to determine transaction validity. @@ -385,20 +466,20 @@ Let's update the method to terminate the pending Badger transaction and persist the resulting state: ```go -func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) { - return &abcitypes.ResponseCommit{}, app.onGoingBlock.Commit() +func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.CommitRequest) (*abcitypes.CommitResponse, error) { + return &abcitypes.CommitResponse{}, app.onGoingBlock.Commit() } ``` -Finally, make sure to add the log library to the `import` stanza as well: +Finally, make sure to add the `log`, `errors`, and `time` libraries to the `import` stanza as well (`time` is needed for the `NextBlockDelay` value above): ```go import ( "bytes" - "log" - - "github.com/dgraph-io/badger/v3" + "context" + "errors" abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v4" + "log" + "time" ) ``` @@ -415,13 +496,13 @@ When a client tries to read some information from the `kvstore`, the request wil handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`: ```go -func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) { - resp := abcitypes.ResponseQuery{Key: req.Data} +func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.QueryRequest) (*abcitypes.QueryResponse, error) { + resp := abcitypes.QueryResponse{Key: req.Data} dbErr := app.db.View(func(txn *badger.Txn) error { item, err := txn.Get(req.Data) if err != nil { - if err != badger.ErrKeyNotFound { + if !errors.Is(err, badger.ErrKeyNotFound) { return err } resp.Log = "key does not exist" @@ -454,50 +535,49 @@ included in blocks, it groups some of these transactions and then gives the appl to modify the group by invoking `PrepareProposal`.
The application is free to modify the group before returning from the call, as long as the resulting set -does not use more bytes than `RequestPrepareProposal.max_tx_bytes` +does not use more bytes than `PrepareProposalRequest.max_tx_bytes`. For example, the application may reorder, add, or even remove transactions from the group to improve the execution of the block once accepted. + In the following code, the application simply returns the unmodified group of transactions: ```go - func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) { - totalBytes := int64(0) - txs := make([]byte, 0) - - for _, tx := range proposal.Txs { - totalBytes += int64(len(tx)) - txs = append(txs, tx...) - - if totalBytes > int64(proposal.MaxTxBytes) { - break - } - } - - return &abcitypes.ResponsePrepareProposal{Txs: proposal.Txs}, nil - } - ``` - - This code snippet iterates through the proposed transactions and calculates the `total bytes`. If the `total bytes` exceeds the `MaxTxBytes` specified in the `RequestPrepareProposal` struct, the loop breaks and the transactions processed so far are returned. - - Note: It is the responsibility of the application to ensure that the `total bytes` of transactions returned does not exceed the `RequestPrepareProposal.max_tx_bytes` limit. +func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.PrepareProposalRequest) (*abcitypes.PrepareProposalResponse, error) { + return &abcitypes.PrepareProposalResponse{Txs: proposal.Txs}, nil +} +``` - Once a proposed block is received by a node, the proposal is passed to the application to give - its blessing before voting to accept the proposal. +Once a proposed block is received by a node, the proposal is passed to the application to give +its blessing before voting to accept the proposal. - This mechanism may be used for different reasons, for example to deal with blocks manipulated - by malicious nodes, in which case the block should not be considered valid. +This mechanism may be used for different reasons, for example to deal with blocks manipulated +by malicious nodes, in which case the block should not be considered valid. The following code simply accepts all proposals: ```go -func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) { - return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}, nil +func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.ProcessProposalRequest) (*abcitypes.ProcessProposalResponse, error) { + return &abcitypes.ProcessProposalResponse{Status: abcitypes.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil } ``` +### 1.3.6 Handling errors + +Please note that every ABCI method signature includes both a response and an error return, as in +`(*abcitypes.[Method]Response, error)`. Some of the ABCI methods' responses also include a field that can signal +an error to CometBFT, such as the `Code` field in the `CheckTxResponse`. The application can use the `Code` +field to signal CometBFT that the transaction was rejected. Other examples are the `TxResults` in the +`FinalizeBlockResponse`, where each `ExecTxResult` also has a `Code` field that the application can use +to signal that a transaction didn't execute properly, or the `QueryResponse`.
The `QueryResponse` also includes +a `Code` field to signal that a query to the application was unsuccessful or that the requested information could not be found. + +The `error` return, as in `(*abcitypes.[Method]Response, error)`, can be used if there are unrecoverable errors. +In these cases, the application should abort to prevent further unintended consequences. + ## 1.4 Starting an application and a CometBFT instance in the same process -Now that we have the basic functionality of our application in place, let's put it all together inside of our main.go file. +Now that we have the basic functionality of our application in place, let's put +it all together inside our `main.go` file. Change the contents of your `main.go` file to the following. @@ -505,6 +585,7 @@ Change the contents of your `main.go` file to the following. ```go package main import ( + "context" "flag" "fmt" "github.com/cometbft/cometbft/p2p/nodekey" @@ -516,7 +597,7 @@ import ( "path/filepath" "syscall" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/spf13/viper" cfg "github.com/cometbft/cometbft/config" cmtflags "github.com/cometbft/cometbft/libs/cli/flags" @@ -568,12 +649,12 @@ func main() { config.PrivValidatorStateFile(), ) - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) + nodeKey, err := nodekey.LoadNodeKey(config.NodeKeyFile()) if err != nil { log.Fatalf("failed to load node's key: %v", err) } - logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)) + logger := cmtlog.NewLogger(os.Stdout) logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel) if err != nil { @@ -581,14 +662,15 @@ } node, err := nm.NewNode( + context.Background(), config, pv, nodeKey, proxy.NewLocalClientCreator(app), nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, + cfg.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), - logger + logger, ) if err != nil { @@ -659,7 +741,7 @@ pv := privval.LoadFilePV( `nodeKey` is needed to identify the node in a p2p network. ```go -nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) +nodeKey, err := nodekey.LoadNodeKey(config.NodeKeyFile()) if err != nil { return nil, fmt.Errorf("failed to load node's key: %w", err) } @@ -671,14 +753,15 @@ the genesis information: ```go node, err := nm.NewNode( + context.Background(), config, pv, nodeKey, proxy.NewLocalClientCreator(app), nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, + cfg.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), -logger) + logger) if err != nil { log.Fatalf("Creating node: %v", err) @@ -703,16 +786,30 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c ``` +Build the code again to ensure everything is still in order: + +```bash +go build +``` + +If there are no errors, everything is working so far. But if you get errors about `missing go.sum entry for module ...`, +you can fix them by running the commands below: + +```bash +go mod tidy +go build +``` + ## 1.5 Initializing and Running Our application is almost ready to run, but first we'll need to populate the CometBFT configuration files. The following command will create a `cometbft-home` directory in your project and add a basic set of configuration files in `cometbft-home/config/`. -For more information on what these files contain see [the configuration documentation](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/core/configuration.md).
+For more information on what these files contain, see [the configuration documentation](https://docs.cometbft.com/v1.0/explanation/core/configuration). From the root of your project, run: ```bash -go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.0 init --home /tmp/cometbft-home +cometbft init --home /tmp/cometbft-home ``` You should see an output similar to the following: @@ -747,7 +844,7 @@ I[2023-04-25|09:08:50.085] service start module=a ... ``` -More importantly, the application using CometBFT is producing blocks 🎉🎉 and you can see this reflected in the log output in lines like this: +More importantly, the application using CometBFT is producing blocks 🎉🎉 and you can see this reflected in the log output in lines like this: ```bash I[2023-04-25|09:08:52.147] received proposal module=consensus proposal="Proposal{2/0 (F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C:1:C73D3D1273F2, -1) AD19AE292A45 @ 2023-04-25T12:08:52.143393Z}" @@ -771,6 +868,10 @@ curl -s 'localhost:26657/broadcast_tx_commit?tx="cometbft=rocks"' If everything went well, you should see a response indicating which height the transaction was included in the blockchain. +```bash +{"jsonrpc":"2.0","id":-1,"result":{"check_tx":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[],"codespace":""},"tx_result":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[{"type":"app","attributes":[{"key":"key","value":"cometbft","index":true},{"key":"value","value":"rocks","index":true}]}],"codespace":""},"hash":"71276C4844CE72F6C6C868541D10923259F5F8DA5716B230555B36AD309D6FD1","height":"64"}} +``` + Finally, let's make sure that transaction really was persisted by the application. Run the following command: @@ -795,9 +896,21 @@ The response contains a `base64` encoded representation of the data we submitted To get the original value out of this data, we can use the `base64` command line utility: ```bash -echo cm9ja3M=" | base64 -d +$ echo "Y29tZXRiZnQ=" | base64 -d +cometbft +$ echo "cm9ja3M=" | base64 -d +rocks ``` +If you want to search for transactions, you can leverage CometBFT's `kv` indexer by using the `/tx_search` RPC endpoint: + +```bash +curl "localhost:26657/tx_search?query=\"app.key='cometbft'\"" +``` + +The events (`abcitypes.Event`) added in `FinalizeBlock` are indexed by CometBFT (assuming the `kv` indexer is enabled in CometBFT's configuration). + + ## Outro -Hope you could run everything smoothly. If you have any difficulties running through this tutorial, reach out to us via [discord](https://discord.com/invite/cosmosnetwork) or open a new [issue](https://github.com/cometbft/cometbft/issues/new/choose) on Github. +We hope everything ran smoothly. If you have any difficulties working through this tutorial, reach out to us via [discord](https://discord.com/invite/interchain) or open a new [issue](https://github.com/cometbft/cometbft/issues/new/choose) on GitHub. diff --git a/docs/guides/go.md b/docs/tutorials/go.md similarity index 69% rename from docs/guides/go.md rename to docs/tutorials/go.md index 15fcc972f92..badd2d2b06f 100644 --- a/docs/guides/go.md +++ b/docs/tutorials/go.md @@ -1,5 +1,5 @@ --- -order: 1 +order: 6 --- # Creating an application in Go @@ -33,31 +33,47 @@ On the one hand, to get maximum performance you can run your application in the same process as CometBFT, as long as your application is written in Go. [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way.
-If that is the way you wish to proceed, use the [Creating a built-in application in Go](./go-built-in.md) guide instead of this one. +If that is the way you wish to proceed, use the [Creating a built-in application in Go](go-built-in.md) guide instead of this one. On the other hand, having a separate application might give you better security guarantees as the two processes would be communicating via an established binary protocol. CometBFT will not have access to the application's state. This is the approach followed in this tutorial. -## 1.1 Installing Go +## 1.0 Installing Go -Verify that you have the latest version of Go installed (refer to the [official guide for installing Go](https://golang.org/doc/install)): +Verify that you have the latest version of Go installed (refer to the [official guide for installing Go](https://golang.org/doc/install)). You should see an output similar to the one below (the `go version` output might be slightly different depending on the Go release you have installed and your computer platform): ```bash $ go version -go version go1.21.1 darwin/amd64 +go version go1.23.1 darwin/amd64 +``` + +## 1.1 Installing CometBFT + +Let's install `CometBFT` locally by running the following command: + +```bash +go install github.com/cometbft/cometbft/cmd/cometbft@v1.0 +``` + +Test the installation: + +```bash +$ cometbft version +v1.0.0 ``` ## 1.2 Creating a new Go project -We'll start by creating a new Go project. +We'll start by creating a new Go project and moving into the new directory: ```bash mkdir kvstore +cd kvstore ``` -Inside the example directory, create a `main.go` file with the following content: +Inside the `kvstore` directory, create a `main.go` file with the following content: ```go package main @@ -71,21 +87,48 @@ func main() { } ``` -When run, this should print "Hello, CometBFT" to the standard output. +Run the following command: + +```bash +go run main.go +``` + +This should print "Hello, CometBFT" to the standard output. ```bash -cd kvstore -$ go run main.go Hello, CometBFT ``` We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management, so let's start by including a dependency on the latest version of -CometBFT, `v0.38.0` in this example. +CometBFT, `v1.0` in this example. + +Run the command below to create the Go module file (`go.mod`): ```bash go mod init kvstore -go get github.com/cometbft/cometbft@v0.38.0 +``` + +This should produce an output similar to the one below. + +**NOTE**: There is no need to run `go mod tidy` at this time; just ignore that suggestion for now. +```bash +go: creating new go.mod: module kvstore +go: to add module requirements and sums: + go mod tidy +``` + +Now, let's add `cometbft` as a dependency to our project. Run the `go get` command below: + +```bash +go get github.com/cometbft/cometbft@v1.0 +``` + +**NOTE**: This will add the latest release in the `v1.0` line, so you might get a different patch release, e.g. `v1.0.0` +or `v1.0.1`. + +```bash +go: added github.com/cometbft/cometbft v1.0.0 ``` After running the above commands, you will see two generated files, `go.mod` and `go.sum`. @@ -94,31 +137,46 @@ The go.mod file should look similar to: ```go module kvstore -go 1.21.1 +go 1.23.1 -require ( -github.com/cometbft/cometbft v0.38.0 -) + +require github.com/cometbft/cometbft v1.0.0 // indirect ``` As you write the kvstore application, you can rebuild the binary by pulling any new dependencies and recompiling it.
-```sh +```bash go get go build ``` +At this point, if you ran the `go build` command above, you should see four files in the directory: + +```bash +$ ls +go.mod go.sum kvstore main.go +``` + +The `kvstore` file is the executable generated. You can run it again to ensure everything still works: + +```bash +$ ./kvstore +Hello, CometBFT +``` + ## 1.3 Writing a CometBFT application +Now, let's start adding some logic to our application. + CometBFT communicates with the application through the Application -BlockChain Interface (ABCI). The messages exchanged through the interface are +BlockChain Interface (`ABCI`). The messages exchanged through the interface are defined in the ABCI [protobuf -file](https://github.com/cometbft/cometbft/blob/v0.38.x/proto/tendermint/abci/types.proto). +file](https://github.com/cometbft/cometbft/blob/main/proto/cometbft/abci/v1/types.proto). We begin by creating the basic scaffolding for an ABCI application by creating a new type, `KVStoreApplication`, which implements the -methods defined by the `abcitypes.Application` interface. +methods defined by the [abcitypes.Application](https://github.com/cometbft/cometbft/blob/main/abci/types/application.go) interface. Create a file called `app.go` with the following contents: @@ -127,6 +185,7 @@ package main import ( abcitypes "github.com/cometbft/cometbft/abci/types" + "context" ) type KVStoreApplication struct{} @@ -137,61 +196,60 @@ func NewKVStoreApplication() *KVStoreApplication { return &KVStoreApplication{} } -func (app *KVStoreApplication) Info(_ context.Context, info *abcitypes.RequestInfo) (*abcitypes.ResponseInfo, error) { - return &abcitypes.ResponseInfo{}, nil +func (app *KVStoreApplication) Info(_ context.Context, info *abcitypes.InfoRequest) (*abcitypes.InfoResponse, error) { + return &abcitypes.InfoResponse{}, nil } -func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) { - return &abcitypes.ResponseQuery{}, nil +func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.QueryRequest) (*abcitypes.QueryResponse, error) { + return &abcitypes.QueryResponse{}, nil } -func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) { - return &abcitypes.ResponseCheckTx{Code: code}, nil +func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) { + return &abcitypes.CheckTxResponse{Code: 0}, nil } -func (app *KVStoreApplication) InitChain(_ context.Context, chain *abcitypes.RequestInitChain) (*abcitypes.ResponseInitChain, error) { - return &abcitypes.ResponseInitChain{}, nil +func (app *KVStoreApplication) InitChain(_ context.Context, chain *abcitypes.InitChainRequest) (*abcitypes.InitChainResponse, error) { + return &abcitypes.InitChainResponse{}, nil } -func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) { - return &abcitypes.ResponsePrepareProposal{}, nil +func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.PrepareProposalRequest) (*abcitypes.PrepareProposalResponse, error) { + return &abcitypes.PrepareProposalResponse{}, nil } -func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) { - return &abcitypes.ResponseProcessProposal{}, nil +func (app *KVStoreApplication) 
ProcessProposal(_ context.Context, proposal *abcitypes.ProcessProposalRequest) (*abcitypes.ProcessProposalResponse, error) { + return &abcitypes.ProcessProposalResponse{}, nil } -func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) { - return &abcitypes.ResponseFinalizeBlock{}, nil +func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.FinalizeBlockRequest) (*abcitypes.FinalizeBlockResponse, error) { + return &abcitypes.FinalizeBlockResponse{}, nil } -func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) { - return &abcitypes.ResponseCommit{}, nil +func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.CommitRequest) (*abcitypes.CommitResponse, error) { + return &abcitypes.CommitResponse{}, nil } -func (app *KVStoreApplication) ListSnapshots(_ context.Context, snapshots *abcitypes.RequestListSnapshots) (*abcitypes.ResponseListSnapshots, error) { - return &abcitypes.ResponseListSnapshots{}, nil +func (app *KVStoreApplication) ListSnapshots(_ context.Context, snapshots *abcitypes.ListSnapshotsRequest) (*abcitypes.ListSnapshotsResponse, error) { + return &abcitypes.ListSnapshotsResponse{}, nil } -func (app *KVStoreApplication) OfferSnapshot(_ context.Context, snapshot *abcitypes.RequestOfferSnapshot) (*abcitypes.ResponseOfferSnapshot, error) { - return &abcitypes.ResponseOfferSnapshot{}, nil +func (app *KVStoreApplication) OfferSnapshot(_ context.Context, snapshot *abcitypes.OfferSnapshotRequest) (*abcitypes.OfferSnapshotResponse, error) { + return &abcitypes.OfferSnapshotResponse{}, nil } -func (app *KVStoreApplication) LoadSnapshotChunk(_ context.Context, chunk *abcitypes.RequestLoadSnapshotChunk) (*abcitypes.ResponseLoadSnapshotChunk, error) { - return &abcitypes.ResponseLoadSnapshotChunk{}, nil +func (app *KVStoreApplication) LoadSnapshotChunk(_ context.Context, chunk *abcitypes.LoadSnapshotChunkRequest) (*abcitypes.LoadSnapshotChunkResponse, error) { + return &abcitypes.LoadSnapshotChunkResponse{}, nil } -func (app *KVStoreApplication) ApplySnapshotChunk(_ context.Context, chunk *abcitypes.RequestApplySnapshotChunk) (*abcitypes.ResponseApplySnapshotChunk, error) { - - return &abcitypes.ResponseApplySnapshotChunk{Result: abcitypes.ResponseApplySnapshotChunk_ACCEPT}, nil +func (app *KVStoreApplication) ApplySnapshotChunk(_ context.Context, chunk *abcitypes.ApplySnapshotChunkRequest) (*abcitypes.ApplySnapshotChunkResponse, error) { + return &abcitypes.ApplySnapshotChunkResponse{Result: abcitypes.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil } -func (app KVStoreApplication) ExtendVote(_ context.Context, extend *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) { - return &abcitypes.ResponseExtendVote{}, nil +func (app KVStoreApplication) ExtendVote(_ context.Context, extend *abcitypes.ExtendVoteRequest) (*abcitypes.ExtendVoteResponse, error) { + return &abcitypes.ExtendVoteResponse{}, nil } -func (app *KVStoreApplication) VerifyVoteExtension(_ context.Context, verify *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) { - return &abcitypes.ResponseVerifyVoteExtension{}, nil +func (app *KVStoreApplication) VerifyVoteExtension(_ context.Context, verify *abcitypes.VerifyVoteExtensionRequest) (*abcitypes.VerifyVoteExtensionResponse, error) { + return &abcitypes.VerifyVoteExtensionResponse{}, nil } ``` @@ -199,7 +257,7 @@ The types used here are 
defined in the CometBFT library and were added as a depe to the project when you ran `go get`. If your IDE is not recognizing the types, go ahead and run the command again. ```bash -go get github.com/cometbft/cometbft@v0.38.0 +go get github.com/cometbft/cometbft@v1.0 ``` Now go back to the `main.go` and modify the `main` function so it matches the following, @@ -228,7 +286,7 @@ a fast embedded key-value store. First, add Badger as a dependency of your go module using the `go get` command: -`go get github.com/dgraph-io/badger/v3` +`go get github.com/dgraph-io/badger/v4` Next, let's update the application and its constructor to receive a handle to the database, as follows: @@ -251,9 +309,10 @@ is completed. Don't worry about it for now, we'll get to that later. Next, update the `import` stanza at the top to include the Badger library: ```go -import( - "github.com/dgraph-io/badger/v3" +import ( + "context" abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v4" ) ``` @@ -288,9 +347,9 @@ func (app *KVStoreApplication) isValid(tx []byte) uint32 { Now you can rewrite the `CheckTx` method to use the helper function: ```go -func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) { +func (app *KVStoreApplication) CheckTx(_ context.Context, check *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) { code := app.isValid(check.Tx) - return &abcitypes.ResponseCheckTx{Code: code}, nil + return &abcitypes.CheckTxResponse{Code: code}, nil } ``` @@ -307,16 +366,17 @@ its validation checks. The specific value of the code is meaningless to CometBFT. Non-zero codes are logged by CometBFT so applications can provide more specific information on why the transaction was rejected. -Note that `CheckTx` does not execute the transaction, it only verifies that that the transaction could be executed. We do not know yet if the rest of the network has agreed to accept this transaction into a block. +Note that `CheckTx` does not execute the transaction, it only verifies that the transaction could be executed. We do +not know yet if the rest of the network has agreed to accept this transaction into a block. Finally, make sure to add the `bytes` package to the `import` stanza at the top of `app.go`: ```go -import( +import ( "bytes" - - "github.com/dgraph-io/badger/v3" + "context" abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v4" ) ``` @@ -330,19 +390,19 @@ application via the `FinalizeBlock` method. This method is responsible for executing the block and returning a response to the consensus engine. Providing a single `FinalizeBlock` method to signal the finalization of a block simplifies the ABCI interface and increases flexibility in the execution pipeline. -The `FinalizeBlock` method executes the block, including any necessary transaction processing and state updates, and returns a `ResponseFinalizeBlock` object which contains any necessary information about the executed block. +The `FinalizeBlock` method executes the block, including any necessary transaction processing and state updates, and returns a `FinalizeBlockResponse` object which contains any necessary information about the executed block. -**Note:** `FinalizeBlock` only prepares the update to be made and does not change the state of the application. The state change is actually committed in a later stage i.e. in `commit` phase.
+**Note:** `FinalizeBlock` only prepares the update to be made and does not change the state of the application. The state change is actually committed in a later stage, in the `Commit` phase. Note that, to implement these calls in our application, we're going to make use of Badger's transaction mechanism. We will always refer to these as Badger transactions, so as not to confuse them with the transactions included in the blocks delivered by CometBFT, the _application transactions_. First, let's create a new Badger transaction during `FinalizeBlock`. All application transactions in the current block will be executed within this Badger transaction. -Next, let's modify `FinalizeBlock` to add the `key` and `value` to the database transaction every time our application processes a new application transaction from the list received through `RequestFinalizeBlock`. +Next, let's modify `FinalizeBlock` to add the `key` and `value` to the database transaction every time our application processes a new application transaction from the list received through `FinalizeBlockRequest`. Note that we check the validity of the transaction _again_ during `FinalizeBlock`. ```go -func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) { +func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.FinalizeBlockRequest) (*abcitypes.FinalizeBlockResponse, error) { var txs = make([]*abcitypes.ExecTxResult, len(req.Txs)) app.onGoingBlock = app.db.NewTransaction(true) @@ -360,16 +420,32 @@ func (app *KVStoreApplication) FinalizeBlock(_ context.Context, req *abcitypes.R } log.Printf("Successfully added key %s with value %s", key, value) - txs[i] = &abcitypes.ExecTxResult{} + // Add an event for the transaction execution. + // Multiple events can be emitted for a transaction, but we are adding only one event. + txs[i] = &abcitypes.ExecTxResult{ + Code: 0, + Events: []abcitypes.Event{ + { + Type: "app", + Attributes: []abcitypes.EventAttribute{ + {Key: "key", Value: string(key), Index: true}, + {Key: "value", Value: string(value), Index: true}, + }, + }, + }, + } } } - return &abcitypes.ResponseFinalizeBlock{ - TxResults: txs, + return &abcitypes.FinalizeBlockResponse{ + TxResults: txs, + NextBlockDelay: 1 * time.Second, }, nil } ``` +`NextBlockDelay` is the delay between the moment the current block is committed and the moment the next height starts. Normally you don't need to change the default value (1s). Please refer to the [spec](../../spec/abci/abci++_methods.md#finalizeblock) for more information. + Transactions are not guaranteed to be valid when they are delivered to an application, even if they were valid when they were proposed. This can happen if the application state is used to determine transaction validity. The application state may have changed between the initial execution of `CheckTx` and the transaction delivery in `FinalizeBlock` in a way that rendered the transaction no longer valid.
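+To make this concrete, here is a minimal, hypothetical sketch (it is not part of the tutorial's final code) of how the `FinalizeBlock` loop shown above can record such a no-longer-valid transaction: give it a non-zero `Code` in its `ExecTxResult` and skip its state updates, rather than aborting the whole block. The names `req.Txs`, `txs`, and `app.isValid` are the ones used in this tutorial; the rejection code value is application-defined.
+
+```go
+for i, tx := range req.Txs {
+	if code := app.isValid(tx); code != 0 {
+		// The transaction passed CheckTx earlier, but the application state
+		// may have changed since then; record the failure and move on
+		// instead of halting the node.
+		txs[i] = &abcitypes.ExecTxResult{Code: code}
+		continue
+	}
+	// ... add the key/value to the on-going Badger transaction, as above ...
+}
+```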
@@ -382,20 +458,21 @@ The `Commit` method tells the application to make permanent the effects of the a Let's update the method to terminate the pending Badger transaction and persist the resulting state: ```go -func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) { - return &abcitypes.ResponseCommit{}, app.onGoingBlock.Commit() +func (app KVStoreApplication) Commit(_ context.Context, commit *abcitypes.CommitRequest) (*abcitypes.CommitResponse, error) { + return &abcitypes.CommitResponse{}, app.onGoingBlock.Commit() } ``` -Finally, make sure to add the log library to the `import` stanza as well: +Finally, make sure to add the `log`, `errors`, and `time` libraries to the `import` stanza as well (`time` is needed for the `NextBlockDelay` value above): ```go import ( "bytes" - "log" - - "github.com/dgraph-io/badger/v3" + "context" + "errors" abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/dgraph-io/badger/v4" + "log" + "time" ) ``` @@ -410,13 +487,13 @@ When a client tries to read some information from the `kvstore`, the request wil handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`: ```go -func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) { - resp := abcitypes.ResponseQuery{Key: req.Data} +func (app *KVStoreApplication) Query(_ context.Context, req *abcitypes.QueryRequest) (*abcitypes.QueryResponse, error) { + resp := abcitypes.QueryResponse{Key: req.Data} dbErr := app.db.View(func(txn *badger.Txn) error { item, err := txn.Get(req.Data) if err != nil { - if err != badger.ErrKeyNotFound { + if !errors.Is(err, badger.ErrKeyNotFound) { return err } resp.Log = "key does not exist" @@ -449,34 +526,20 @@ included in blocks, it groups some of these transactions and then gives the appl to modify the group by invoking `PrepareProposal`. The application is free to modify the group before returning from the call, as long as the resulting set -does not use more bytes than `RequestPrepareProposal.max_tx_bytes' +does not use more bytes than `PrepareProposalRequest.max_tx_bytes`. For example, the application may reorder, add, or even remove transactions from the group to improve the execution of the block once accepted. + In the following code, the application simply returns the unmodified group of transactions: ```go -func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) { - totalBytes := int64(0) - txs := make([]byte, 0) - - for _, tx := range proposal.Txs { - totalBytes += int64(len(tx)) - txs = append(txs, tx...) - - if totalBytes > int64(proposal.MaxTxBytes) { - break - } - } - - return &abcitypes.ResponsePrepareProposal{Txs: proposal.Txs}, nil +func (app *KVStoreApplication) PrepareProposal(_ context.Context, proposal *abcitypes.PrepareProposalRequest) (*abcitypes.PrepareProposalResponse, error) { + return &abcitypes.PrepareProposalResponse{Txs: proposal.Txs}, nil } ``` -This code snippet iterates through the proposed transactions and calculates the `total bytes`. If the `total bytes` exceeds the `MaxTxBytes` specified in the `RequestPrepareProposal` struct, the loop breaks and the transactions processed so far are returned. - -Note: It is the responsibility of the application to ensure that the `total bytes` of transactions returned does not exceed the `RequestPrepareProposal.max_tx_bytes` limit.
- -Once a proposed block is received by a node, the proposal is passed to the +application to determine its validity before voting to accept the proposal. This mechanism may be used for different reasons, for example to deal with blocks manipulated by malicious nodes, in which case the block should not be considered valid. @@ -484,14 +547,27 @@ by malicious nodes, in which case the block should not be considered valid. The following code simply accepts all proposals: ```go -func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) { - return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}, nil +func (app *KVStoreApplication) ProcessProposal(_ context.Context, proposal *abcitypes.ProcessProposalRequest) (*abcitypes.ProcessProposalResponse, error) { + return &abcitypes.ProcessProposalResponse{Status: abcitypes.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil } ``` +### 1.3.6 Handling errors + +Please note that every ABCI method signature includes both a response and an error return, as in +`(*abcitypes.[Method]Response, error)`. Some of the ABCI methods' responses also include a field that can signal +an error to CometBFT, such as the `Code` field in the `CheckTxResponse`. The application can use the `Code` +field to signal CometBFT that the transaction was rejected. Another example is `FinalizeBlockResponse`, which has a `TxResults` array field with each result containing a `Code` field that can be used by the application +to signal that a transaction didn't execute properly. The `QueryResponse` likewise includes +a `Code` field to signal that a query to the application was unsuccessful or that the requested information could not be found. + +The `error` return, as in `(*abcitypes.[Method]Response, error)`, can be used if there are unrecoverable errors. +In these cases, the application should abort to prevent further unintended consequences. + ## 1.4 Starting an application and a CometBFT instance -Now that we have the basic functionality of our application in place, let's put it all together inside of our `main.go` file. +Now that we have the basic functionality of our application in place, let's put +it all together inside our `main.go` file. Change the contents of your `main.go` file to the following. @@ -508,7 +584,7 @@ import ( "path/filepath" "syscall" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" cmtlog "github.com/cometbft/cometbft/libs/log" ) @@ -539,7 +615,7 @@ func main() { }() app := NewKVStoreApplication(db) - logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout)) + logger := cmtlog.NewLogger(os.Stdout) server := abciserver.NewSocketServer(socketAddr, app) server.SetLogger(logger) @@ -595,16 +671,30 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c ``` +Build the code again to ensure everything is still in order: + +```bash +go build +``` + +If there are no errors, everything is working so far. But if you get errors about `missing go.sum entry for module ...`, +you can fix them by running the commands below: + +```bash +go mod tidy +go build +``` + ## 1.5 Initializing and Running Our application is almost ready to run, but first we'll need to populate the CometBFT configuration files.
The following command will create a `cometbft-home` directory in your project and add a basic set of configuration files in `cometbft-home/config/`. -For more information on what these files contain see [the configuration documentation](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/core/configuration.md). +For more information on what these files contain, see [the configuration documentation](https://docs.cometbft.com/v1.0/explanation/core/configuration). From the root of your project, run: ```bash -go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.0 init --home /tmp/cometbft-home +cometbft init --home /tmp/cometbft-home ``` You should see an output similar to the following: @@ -638,11 +728,14 @@ I[2023-04-25|17:01:28.726] Waiting for new connection... ``` Then we need to start the CometBFT service and point it to our application. -Open a new terminal window and cd to the same folder where the app is running. + +Open a new terminal window and cd into the same folder where the app is running (this is important: when you ran the `kvstore` command above, +a file called `example.sock` was created, and `cometbft` must run in the same folder so that the two processes can communicate via the socket). + Then execute the following command: ```bash -go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.0 node --home /tmp/cometbft-home --proxy_app=unix://example.sock +cometbft node --home /tmp/cometbft-home --proxy_app=unix://example.sock ``` This should start the full node and connect to our ABCI application, which will be @@ -682,6 +775,10 @@ curl -s 'localhost:26657/broadcast_tx_commit?tx="cometbft=rocks"' If everything went well, you should see a response indicating which height the transaction was included in the blockchain. +```bash +{"jsonrpc":"2.0","id":-1,"result":{"check_tx":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[],"codespace":""},"tx_result":{"code":0,"data":null,"log":"","info":"","gas_wanted":"0","gas_used":"0","events":[{"type":"app","attributes":[{"key":"key","value":"cometbft","index":true},{"key":"value","value":"rocks","index":true}]}],"codespace":""},"hash":"71276C4844CE72F6C6C868541D10923259F5F8DA5716B230555B36AD309D6FD1","height":"64"}} +``` + Finally, let's make sure that transaction really was persisted by the application. Run the following command: @@ -706,9 +803,20 @@ The response contains a `base64` encoded representation of the data we submitted To get the original value out of this data, we can use the `base64` command line utility: ```bash -echo cm9ja3M=" | base64 -d +$ echo "Y29tZXRiZnQ=" | base64 -d +cometbft +$ echo "cm9ja3M=" | base64 -d +rocks ``` +If you want to search for transactions, you can leverage CometBFT's `kv` indexer by using the `/tx_search` RPC endpoint: + +```bash +curl "localhost:26657/tx_search?query=\"app.key='cometbft'\"" +``` + +The events (`abcitypes.Event`) added in `FinalizeBlock` are indexed by CometBFT (assuming the `kv` indexer is enabled in CometBFT's configuration). + ## Outro -Hope you could run everything smoothly. If you have any difficulties running through this tutorial, reach out to us via [discord](https://discord.com/invite/cosmosnetwork) or open a new [issue](https://github.com/cometbft/cometbft/issues/new/choose) on Github. +We hope everything ran smoothly. If you have any difficulties working through this tutorial, reach out to us via [discord](https://discord.com/invite/interchain) or open a new [issue](https://github.com/cometbft/cometbft/issues/new/choose) on GitHub.
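+One final tip: the RPC calls issued above with `curl` can just as easily be made from a small Go program. The sketch below is a hypothetical, standard-library-only example (it assumes a node listening on `localhost:26657`, as configured in this tutorial) that runs the same `abci_query` request and prints the raw JSON-RPC response:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Same query issued with curl above: look up the key "cometbft".
+	endpoint := "http://localhost:26657/abci_query?data=" + url.QueryEscape(`"cometbft"`)
+
+	resp, err := http.Get(endpoint)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+
+	// The value in the result is base64 encoded, as shown earlier.
+	fmt.Println(string(body))
+}
+```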
diff --git a/docs/guides/install.md b/docs/tutorials/install.md similarity index 53% rename from docs/guides/install.md rename to docs/tutorials/install.md index 366c0c90a27..bba3d1a32ed 100644 --- a/docs/guides/install.md +++ b/docs/tutorials/install.md @@ -1,9 +1,23 @@ --- -order: 3 +order: 1 --- # Install CometBFT +## From Go Package + +Install the latest version of CometBFT's Go package: + +```sh +go install github.com/cometbft/cometbft/cmd/cometbft@latest +``` + +Install a specific version of CometBFT's Go package: + +```sh +go install github.com/cometbft/cometbft/cmd/cometbft@v0.38 +``` + ## From Binary To download pre-built binaries, see the [releases page](https://github.com/cometbft/cometbft/releases). @@ -51,15 +65,6 @@ running: cometbft version ``` -## Run - -To start a one-node blockchain with a simple in-process application: - -```sh -cometbft init -cometbft node --proxy_app=kvstore -``` - ## Reinstall If you already have CometBFT installed, and you make updates, simply @@ -74,47 +79,3 @@ To upgrade, run git pull origin main make install ``` - -## Compile with CLevelDB support - -Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7). - -Install LevelDB with snappy (optionally). Below are commands for Ubuntu: - -```sh -sudo apt-get update -sudo apt install build-essential - -sudo apt-get install libsnappy-dev - -wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \ - tar -zxvf v1.20.tar.gz && \ - cd leveldb-1.20/ && \ - make && \ - sudo cp -r out-static/lib* out-shared/lib* /usr/local/lib/ && \ - cd include/ && \ - sudo cp -r leveldb /usr/local/include/ && \ - sudo ldconfig && \ - rm -f v1.20.tar.gz -``` - -Set a database backend to `cleveldb`: - -```toml -# config/config.toml -db_backend = "cleveldb" -``` - -To install CometBFT, run: - -```sh -CGO_LDFLAGS="-lsnappy" make install COMETBFT_BUILD_OPTIONS=cleveldb -``` - -or run: - -```sh -CGO_LDFLAGS="-lsnappy" make build COMETBFT_BUILD_OPTIONS=cleveldb -``` - -which puts the binary in `./build`. diff --git a/docs/guides/quick-start.md b/docs/tutorials/quick-start.md similarity index 76% rename from docs/guides/quick-start.md rename to docs/tutorials/quick-start.md index b0eecf25187..f2f0131f3fe 100644 --- a/docs/guides/quick-start.md +++ b/docs/tutorials/quick-start.md @@ -11,7 +11,7 @@ works and want to get started right away, continue. ## Install -See the [install guide](./install.md). +See the [install guide](install.md). ## Initialization @@ -95,7 +95,7 @@ First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4. -Then, `ssh` into each machine and install CometBFT following the [instructions](./install.md). +Then, `ssh` into each machine and install CometBFT following the [instructions](install.md). Next, use the `cometbft testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has `$HOME/mytestnet/node[0-3]` directory. @@ -108,6 +108,46 @@ cometbft show_node_id --home ./mytestnet/node2 cometbft show_node_id --home ./mytestnet/node3 ``` +Here's a handy Bash script to compile the persistent peers string, which will +be needed for our next step: + +```bash +#!/bin/bash + +# Check if the required argument is provided +if [ $# -eq 0 ]; then + echo "Usage: $0 ..." 
+ exit 1 +fi + +# Command to run on each IP +BASE_COMMAND="cometbft show_node_id --home ./mytestnet/node" + +# Initialize an array to store results +PERSISTENT_PEERS="" + +# Iterate through provided IPs +for i in "${!@}"; do + IP="${!i}" + NODE_IDX=$((i - 1)) # Adjust for zero-based indexing + + echo "Getting ID of $IP (node $NODE_IDX)..." + + # Run the command on the current IP and capture the result + ID=$($BASE_COMMAND$NODE_IDX) + + # Store the result in the array + PERSISTENT_PEERS+="$ID@$IP:26656" + + # Add a comma if not the last IP + if [ $i -lt $# ]; then + PERSISTENT_PEERS+="," + fi +done + +echo "$PERSISTENT_PEERS" +``` + Finally, from each machine, run: ```sh @@ -119,6 +159,6 @@ cometbft node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_peer Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. -Persistent peers can also be specified in the `config.toml`. See [here](../core/configuration.md) for more information about configuration options. +Persistent peers can also be specified in the `config.toml`. See [here](../explanation/core/configuration.md) for more information about configuration options. Transactions can then be sent as covered in the single, local node example above. diff --git a/go.mod b/go.mod index 8900213531e..b89e1eced0b 100644 --- a/go.mod +++ b/go.mod @@ -1,320 +1,151 @@ module github.com/cometbft/cometbft -go 1.21 +go 1.23.1 require ( - github.com/BurntSushi/toml v1.3.2 - github.com/adlio/schema v1.3.4 + github.com/BurntSushi/toml v1.4.0 + github.com/Masterminds/semver/v3 v3.3.1 + github.com/adlio/schema v1.3.6 + github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cometbft/cometbft-db v1.0.1 + github.com/cometbft/cometbft-load-test v0.3.0 + github.com/cometbft/cometbft/api v1.0.0-rc.1 + github.com/cosmos/gogoproto v1.7.0 + github.com/creachadair/atomicfile v0.3.6 + github.com/creachadair/tomledit v0.0.26 + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 + github.com/dgraph-io/badger/v4 v4.4.0 github.com/fortytw2/leaktest v1.3.0 - github.com/go-kit/kit v0.13.0 - github.com/go-kit/log v0.2.1 - github.com/go-logfmt/logfmt v0.6.0 - github.com/golang/protobuf v1.5.3 - github.com/golangci/golangci-lint v1.55.2 + github.com/goccmack/goutil v1.2.3 + github.com/golang/protobuf v1.5.4 // indirect github.com/google/orderedcode v0.0.1 - github.com/gorilla/websocket v1.5.1 - github.com/informalsystems/tm-load-test v1.3.0 + github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.3 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/lib/pq v1.10.9 - github.com/libp2p/go-buffer-pool v0.1.0 - github.com/minio/highwayhash v1.0.2 + github.com/lmittmann/tint v1.0.5 + github.com/minio/highwayhash v1.0.3 + github.com/mitchellh/mapstructure v1.5.0 + github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae github.com/ory/dockertest v3.3.5+incompatible + github.com/pelletier/go-toml/v2 v2.2.3 github.com/pkg/errors v0.9.1 - github.com/pointlander/peg v1.0.1 - github.com/prometheus/client_golang v1.17.0 - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.45.0 + github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.60.1 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 - github.com/rs/cors v1.10.1 - github.com/sasha-s/go-deadlock v0.3.1 + github.com/rs/cors v1.11.1 + 
github.com/sasha-s/go-deadlock v0.3.5 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.17.0 - github.com/stretchr/testify v1.8.4 - golang.org/x/crypto v0.14.0 - golang.org/x/net v0.17.0 - google.golang.org/grpc v1.59.0 -) - -require ( - github.com/bufbuild/buf v1.27.2 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.10.0 + github.com/supranational/blst v0.3.13 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + golang.org/x/crypto v0.29.0 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/net v0.31.0 + golang.org/x/sync v0.9.0 + golang.org/x/text v0.20.0 + gonum.org/v1/gonum v0.15.1 + google.golang.org/grpc v1.68.0 ) require ( - github.com/Masterminds/semver/v3 v3.2.1 - github.com/btcsuite/btcd/btcec/v2 v2.3.2 - github.com/btcsuite/btcd/btcutil v1.1.3 - github.com/cometbft/cometbft-db v0.7.0 - github.com/cosmos/gogoproto v1.4.11 - github.com/go-git/go-git/v5 v5.10.0 - github.com/goccmack/goutil v1.2.3 - github.com/gofrs/uuid v4.4.0+incompatible - github.com/google/uuid v1.4.0 - github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae - github.com/vektra/mockery/v2 v2.36.1 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/sync v0.5.0 - gonum.org/v1/gonum v0.14.0 - google.golang.org/protobuf v1.31.0 + github.com/go-git/go-git/v5 v5.12.0 + google.golang.org/protobuf v1.35.1 ) require ( - 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect - 4d63.com/gochecknoglobals v0.2.1 // indirect - connectrpc.com/connect v1.11.1 // indirect - connectrpc.com/otelconnect v0.6.0 // indirect dario.cat/mergo v1.0.0 // indirect - github.com/4meepo/tagalign v1.3.3 // indirect - github.com/Abirdcfly/dupword v0.0.13 // indirect - github.com/Antonboom/errname v0.1.12 // indirect - github.com/Antonboom/nilnil v0.1.7 // indirect - github.com/Antonboom/testifylint v0.2.3 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect - github.com/Masterminds/semver v1.5.0 // indirect + github.com/DataDog/zstd v1.4.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect - github.com/acomagu/bufpipe v1.0.4 // indirect - github.com/alecthomas/go-check-sumtype v0.1.3 // indirect - github.com/alexkohler/nakedret/v2 v2.0.2 // indirect - github.com/alexkohler/prealloc v1.0.0 // indirect - github.com/alingse/asasalint v0.0.11 // indirect - github.com/ashanbrown/forbidigo v1.6.0 // indirect - github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.1 // indirect - github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v3 v3.4.0 // indirect - github.com/breml/bidichk v0.2.7 // indirect - github.com/breml/errchkjson v0.3.6 // indirect - github.com/bufbuild/protocompile v0.6.0 // indirect - github.com/butuzov/ireturn v0.2.2 // indirect - github.com/butuzov/mirror v1.1.0 // indirect - github.com/catenacyber/perfsprint v0.2.0 // indirect - github.com/ccojocar/zxcvbn-go v1.0.1 // indirect - github.com/cespare/xxhash 
v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/charithe/durationcheck v0.0.10 // indirect - github.com/chavacava/garif v0.1.0 // indirect - github.com/chigopher/pathlib v1.0.0 // indirect - github.com/cloudflare/circl v1.3.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v1.1.2 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/containerd/continuity v0.3.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect - github.com/daixiang0/gci v0.11.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/denis-tingaikin/go-header v0.4.3 // indirect - github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/distribution/reference v0.5.0 // indirect - github.com/docker/cli v24.0.6+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v24.0.7+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.0 // indirect + github.com/dgraph-io/ristretto/v2 v2.0.0 // indirect + github.com/docker/cli v24.0.7+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/esimonov/ifshort v1.0.4 // indirect - github.com/ettle/strcase v0.1.1 // indirect - github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/fatih/structtag v1.2.0 // indirect - github.com/felixge/fgprof v0.9.3 // indirect - github.com/firefart/nonamedreturns v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.2.3 // indirect - github.com/go-chi/chi/v5 v5.0.10 // indirect - github.com/go-critic/go-critic v0.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-toolsmith/astcast v1.1.0 // indirect - github.com/go-toolsmith/astcopy v1.1.0 // indirect - github.com/go-toolsmith/astequal v1.1.0 // indirect - github.com/go-toolsmith/astfmt v1.1.0 // indirect - github.com/go-toolsmith/astp v1.1.0 // indirect - github.com/go-toolsmith/strparse v1.1.0 // indirect - github.com/go-toolsmith/typep v1.1.0 // indirect - 
github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect - github.com/gofrs/uuid/v5 v5.0.0 // indirect + github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect - github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect - github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect - github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect - github.com/golangci/misspell v0.4.1 // indirect - github.com/golangci/revgrep v0.5.2 // indirect - github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.16.1 // indirect - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect - github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect - github.com/gostaticanalysis/analysisutil v0.7.1 // indirect - github.com/gostaticanalysis/comment v1.4.2 // indirect - github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect - github.com/gostaticanalysis/nilerr v0.1.1 // indirect github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hexops/gotextdiff v1.0.3 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/iancoleman/strcase v0.2.0 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jdx/go-netrc v1.0.0 // indirect - github.com/jgautheron/goconst v1.6.0 // indirect - github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jinzhu/copier v0.3.5 // indirect - github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/julz/importas v0.1.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/kisielk/errcheck v1.6.3 // indirect - github.com/kisielk/gotool v1.0.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.4 // indirect - github.com/klauspost/compress v1.17.2 // indirect - github.com/klauspost/pgzip v1.2.6 // indirect - github.com/kulti/thelper v0.6.3 // indirect - github.com/kunwardeep/paralleltest v1.0.8 // indirect - github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/ldez/gomoddirectives v0.2.3 // indirect - github.com/ldez/tagliatelle v0.5.0 // indirect - github.com/leonklingele/grouper v1.1.1 // indirect - github.com/lufeee/execinquery v1.2.1 // indirect - github.com/macabu/inamedparam v0.1.2 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // 
indirect + github.com/linxGnu/grocksdb v1.9.3 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/maratori/testableexamples v1.0.0 // indirect - github.com/maratori/testpackage v1.1.1 // indirect - github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.3.4 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/term v0.5.0 // indirect - github.com/moricho/tparallel v0.3.1 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nishanths/exhaustive v0.11.0 // indirect - github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.14.1 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/gomega v1.28.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/opencontainers/runc v1.1.5 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/opencontainers/runc v1.1.12 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/profile v1.7.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect - github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect - github.com/polyfloyd/go-errorlint v1.4.5 // indirect - github.com/prometheus/procfs v0.11.1 // indirect - github.com/quasilyte/go-ruleguard v0.4.0 // indirect - github.com/quasilyte/gogrep v0.5.0 // indirect - github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect - github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/rs/zerolog v1.29.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.3.0 // indirect - github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect - github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect - github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect - github.com/satori/go.uuid v1.2.0 // indirect - github.com/securego/gosec/v2 v2.18.2 // indirect - github.com/sergi/go-diff v1.2.0 // indirect - github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sivchari/nosnakecase v1.7.0 
// indirect - github.com/sivchari/tenv v1.7.1 // indirect - github.com/skeema/knownhosts v1.2.0 // indirect - github.com/sonatard/noctx v0.0.2 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/sourcegraph/go-diff v0.7.0 // indirect - github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect - github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect - github.com/tetafro/godot v1.4.15 // indirect - github.com/tetratelabs/wazero v1.5.0 // indirect - github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect - github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect - github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.1.0 // indirect - github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.1.2 // indirect - github.com/vbatts/tar-split v0.11.5 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/xen0n/gosmopolitan v1.2.2 // indirect - github.com/yagipy/maintidx v1.0.0 // indirect - github.com/yeya24/promlinter v0.2.0 // indirect - github.com/ykadowak/zerologlint v0.1.3 // indirect - gitlab.com/bosi/decorder v0.4.1 // indirect - go-simpler.org/sloglint v0.1.2 // indirect - go.etcd.io/bbolt v1.3.6 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/sdk v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect - go.tmz.dev/musttag v0.7.2 // indirect - go.uber.org/atomic v1.11.0 // indirect + go.etcd.io/bbolt v1.3.11 // indirect + go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.14.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools v2.2.0+incompatible // indirect - honnef.co/go/tools v0.4.6 // indirect - mvdan.cc/gofumpt v0.5.0 // indirect - mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect - mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect +) + +retract ( + // a regression was introduced + v0.38.4 + // a breaking change was introduced + v0.38.3 + // superseded by
v0.38.3 because of ASA-2024-001 + [v0.38.0, v0.38.2] ) diff --git a/go.sum b/go.sum index 46de1359a27..5ac97b1274a 100644 --- a/go.sum +++ b/go.sum @@ -1,151 +1,48 @@ -4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= -4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= -4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= -4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -connectrpc.com/connect v1.11.1 h1:dqRwblixqkVh+OFBOOL1yIf1jS/yP0MSJLijRj29bFg= -connectrpc.com/connect v1.11.1/go.mod h1:3AGaO6RRGMx5IKFfqbe3hvK1NqLosFNP2BxDYTPmNPo= -connectrpc.com/otelconnect v0.6.0 h1:VJAdQL9+sgdUw9+7+J+jq8pQo/h1S7tSFv2+vDcR7bU= -connectrpc.com/otelconnect v0.6.0/go.mod h1:jdcs0uiwXQVmSMgTJ2dAaWR5VbpNd7QKNkuoH7n86RA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= -github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= -github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= -github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= -github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= -github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= -github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= -github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= -github.com/Antonboom/testifylint v0.2.3 h1:MFq9zyL+rIVpsvLX4vDPLojgN7qODzWsrnftNX2Qh60= -github.com/Antonboom/testifylint v0.2.3/go.mod h1:IYaXaOX9NbfAyO+Y04nfjGI8wDemC1rUyM/cYolz018= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/DataDog/zstd v1.4.5 
h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= -github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= -github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/adlio/schema v1.3.4 h1:8K+41sfQkxfT6a79aLBxx+dBKcid6Raw2JPk5COqeqE= -github.com/adlio/schema v1.3.4/go.mod h1:gFMaHYzLkZRfaIqZ5u96LLXPt+DdXSFWUwtr6YBz0kk= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I= +github.com/adlio/schema v1.3.6/go.mod h1:qkxwLgPBd1FgLRHYVCmQT/rrBr3JH38J9LjmVzWNudg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= -github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8= -github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units 
v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= -github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= -github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= -github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= -github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= -github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= -github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= -github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= -github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= -github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= -github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= -github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= -github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= -github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= -github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 
v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= -github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= -github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= @@ -155,112 +52,81 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.27.2 h1:uX2kvZfPfRoOsrxUW4LwpykSyH+wI5dUnIG0QWHDCCU= -github.com/bufbuild/buf v1.27.2/go.mod h1:7RImDhFDqhEsdK5wbuMhoVSlnrMggGGcd3s9WozvHtM= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= -github.com/butuzov/ireturn v0.2.2 h1:jWI36dxXwVrI+RnXDwux2IZOewpmfv930OuIRfaBUJ0= -github.com/butuzov/ireturn v0.2.2/go.mod h1:RfGHUvvAuFFxoHKf4Z8Yxuh6OjlCw1KvR2zM1NFHeBk= -github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= -github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/catenacyber/perfsprint v0.2.0 h1:azOocHLscPjqXVJ7Mf14Zjlkn4uNua0+Hcg1wTR6vUo= -github.com/catenacyber/perfsprint v0.2.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= -github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4= -github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= 
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= -github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= -github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= -github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/chigopher/pathlib v1.0.0 h1:SbsCrFX4vDf4M2d8mT/RTzuVlKOjTKoPHK0HidsQFak= -github.com/chigopher/pathlib v1.0.0/go.mod h1:3+YPPV21mU9vyw8Mjp+F33CyCfE6iOzinpiqBcccv7I= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= -github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce 
h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= +github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/cometbft/cometbft-db v1.0.1 h1:SylKuLseMLQKw3+i8y8KozZyJcQSL98qEe2CGMCGTYE= +github.com/cometbft/cometbft-db v1.0.1/go.mod h1:EBrFs1GDRiTqrWXYi4v90Awf/gcdD5ExzdPbg4X8+mk= +github.com/cometbft/cometbft-load-test v0.3.0 h1:z6iZZvFwhci29ca/EZQaWh/d92NLe8bK4eBvFyv2EKY= +github.com/cometbft/cometbft-load-test v0.3.0/go.mod h1:zKrQpRm3Ay5+RfeRTNWoLniFJNIPnw9JPEM1wuWS3TA= +github.com/cometbft/cometbft/api v1.0.0-rc.1 h1:GtdXwDGlqwHYs16A4egjwylfYOMYyEacLBrs3Zvpt7g= +github.com/cometbft/cometbft/api v1.0.0-rc.1/go.mod h1:NDFKiBBD8HJC6QQLAoUI99YhsiRZtg2+FJWfk6A6m6o= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= -github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= -github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= -github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= 
+github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= +github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/atomicfile v0.3.6 h1:BcXSDRq7waImZhKflqnTJjf+02CLi0W2Exlb2lyZ2yE= +github.com/creachadair/atomicfile v0.3.6/go.mod h1:iaBMVDkRBQTIGzbYGCTS+gXeZPidWAeVbthIxSbEphE= +github.com/creachadair/mds v0.21.4 h1:osKuLbjkV7YswBnhuTJh1lCDkqZMQnNfFVn0j8wLpz8= +github.com/creachadair/mds v0.21.4/go.mod h1:1ltMWZd9yXhaHEoZwBialMaviWVUpRPvMwVP7saFAzM= +github.com/creachadair/tomledit v0.0.26 h1:MoDdgHIHZ5PctBVsAZDjxdxreWUEa9ObPKTRkk5PPwA= +github.com/creachadair/tomledit v0.0.26/go.mod h1:SJi1OxKpMyR141tq1lzsbPtIg3j8TeVPM/ZftfieD7o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y= -github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= -github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= 
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgraph-io/badger/v4 v4.4.0 h1:rA48XiDynZLyMdlaJl67p9+lqfqwxlgKtCpYLAio7Zk= +github.com/dgraph-io/badger/v4 v4.4.0/go.mod h1:sONMmPPfbnj9FPwS/etCqky/ULth6CQJuAZSuWCmixE= +github.com/dgraph-io/ristretto/v2 v2.0.0 h1:l0yiSOtlJvc0otkqyMaDNysg8E9/F/TYZwMbxscNOAQ= +github.com/dgraph-io/ristretto/v2 v2.0.0/go.mod h1:FVFokF2dRqXyPyeMnK1YDy8Fc6aTe0IKgbcd03CYeEk= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= -github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= -github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= @@ -270,142 +136,49 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= -github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= -github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= -github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= -github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= -github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= -github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/ghostiam/protogetter v0.2.3 h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbBVwMjw= -github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-chi/chi/v5 v5.0.10 
h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= -github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U= -github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.10.0 h1:F0x3xXrAWmhwtzoCokU4IMPcBdncG+HAAqi9FcOOjbQ= -github.com/go-git/go-git/v5 v5.10.0/go.mod h1:1FOZ/pQnqw24ghP2n7cunVl0ON55BsjPYvhWHvZGhoo= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= -github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= -github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= -github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= -github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= -github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= -github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= -github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= -github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= -github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= -github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= -github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= -github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= -github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= -github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= -github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= -github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccmack/goutil v1.2.3 h1:acIQAjDl8RLs64e11yFHoPgE3wmvTDbniDZrXq3/GxA= github.com/goccmack/goutil v1.2.3/go.mod h1:dPBoKv07AeI2DGYE3ECrSLOLpGaBIBGCUCGKHclOPyU= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= -github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid/v5 v5.0.0 
h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
-github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4=
+github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
-github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
-github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -414,279 +187,94 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
-github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
-github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
-github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8=
-github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
-github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
-github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU=
-github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
-github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
 github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
-github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ=
-github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8=
-github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
-github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
-github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
-github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
-github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
-github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
-github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=
-github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
-github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
-github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
-github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
-github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
-github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
-github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
-github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
 github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/informalsystems/tm-load-test v1.3.0 h1:FGjKy7vBw6mXNakt+wmNWKggQZRsKkEYpaFk/zR64VA=
-github.com/informalsystems/tm-load-test v1.3.0/go.mod h1:OQ5AQ9TbT5hKWBNIwsMjn6Bf4O0U4b1kRc+0qZlQJKw=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jdx/go-netrc v1.0.0 h1:QbLMLyCZGj0NA8glAhxUpf1zDg6cxnWgMBbjq40W0gQ=
-github.com/jdx/go-netrc v1.0.0/go.mod h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8=
 github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA=
-github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
-github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls=
-github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k=
-github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
-github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
-github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
-github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
-github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
-github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
 github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
 github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
-github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
-github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8=
-github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
-github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
-github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
-github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
-github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558=
-github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
-github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
-github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
-github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
-github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
-github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
-github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
-github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
-github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
-github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
-github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
-github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
-github.com/macabu/inamedparam v0.1.2 h1:RR5cnayM6Q7cDhQol32DE2BGAPGMnffJ31LFE+UklaU=
-github.com/macabu/inamedparam v0.1.2/go.mod h1:Xg25QvY7IBRl1KLPV9Rbml8JOMZtF/iAkNkmV7eQgjw=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/linxGnu/grocksdb v1.9.3 h1:s1cbPcOd0cU2SKXRG1nEqCOWYAELQjdqg3RVI2MH9ik=
+github.com/linxGnu/grocksdb v1.9.3/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
+github.com/lmittmann/tint v1.0.5 h1:NQclAutOfYsqs2F1Lenue6OoWCajs5wJcP3DfWVpePw=
+github.com/lmittmann/tint v1.0.5/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE=
 github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
-github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
-github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04=
-github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
-github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE=
-github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
-github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
-github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
 github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
-github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
-github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
-github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc=
-github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw=
-github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
-github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
+github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA=
-github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
-github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
-github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0=
-github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
-github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
-github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.14.1 h1:khx0CqR5U4ghsscjJ+lZVthp3zjIFytRXPTaQ/TMiyA=
-github.com/nunnatsa/ginkgolinter v0.14.1/go.mod h1:nY0pafUSst7v7F637e7fymaMlQqI9c0Wka2fGsDkzWg=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I=
 github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s=
-github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
 github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -697,225 +285,90 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
 github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
-github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs=
-github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
+github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
 github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
 github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
 github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
 github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
-github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
-github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc=
-github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
-github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
-github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
-github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
-github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
-github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
-github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw=
+github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
+github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
 github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
 github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
-github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 h1:hUmXhbljNFtrH5hzV9kiRoddZ5nfPTq3K0Sb2hYYiqE=
-github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3/go.mod h1:q5NXNGzqj5uPnVuhGkZfmgHqNUhf15VLi6L9kW0VEc0=
-github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 h1:RHHRCZeaNyBXdYPMjZNH8/XHDBH38TZzw8izrW7dmBE=
-github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4/go.mod h1:RdR1j20Aj5pB6+fw6Y9Ur7lMHpegTEjY1vc19hEZL40=
-github.com/pointlander/peg v1.0.1 h1:mgA/GQE8TeS9MdkU6Xn6iEzBmQUQCNuWD7rHCK6Mjs0=
-github.com/pointlander/peg v1.0.1/go.mod h1:5hsGDQR2oZI4QoWz0/Kdg3VSVEC31iJw/b7WjqCBGRI=
-github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI=
-github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
-github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
-github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
-github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo=
-github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10=
-github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
-github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
-github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
-github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
-github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
-github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
-github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=
-github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
-github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
-github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
+github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
-github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
-github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
-github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
-github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
-github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
-github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
-github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
-github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
-github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
-github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
-github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc=
-github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I=
-github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
-github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
+github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE=
-github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
-github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8=
-github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
-github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
-github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
-github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
-github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4=
-github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00=
-github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
-github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
-github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
-github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI=
-github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI=
-github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
-github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
-github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
-github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
+github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
-github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8=
-github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
-github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
-github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
-github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
-github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
-github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
-github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
-github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
-github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM=
-github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
-github.com/tetratelabs/wazero v1.5.0 h1:Yz3fZHivfDiZFUXnWMPUoiW7s8tC1sjdBtlJn08qYa0=
-github.com/tetratelabs/wazero v1.5.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A=
-github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
-github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
-github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4=
-github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
-github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ=
-github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
-github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
-github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
-github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
-github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
-github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI=
-github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k=
-github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
-github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/vektra/mockery/v2 v2.36.1 h1:F/2tEFFRWdHe36smr+e6YIiKzXTZVd0cCAUqG0GTw1s=
-github.com/vektra/mockery/v2 v2.36.1/go.mod h1:diB13hxXG6QrTR0ol2Rk8s2dRMftzvExSvPDKr+IYKk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@@ -924,477 +377,154 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
-github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
-github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
-github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
-github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
-github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE=
-github.com/ykadowak/zerologlint v0.1.3/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4=
-gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA=
-go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E=
-go-simpler.org/assert v0.6.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
-go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM=
-go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
-go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
-go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
-go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
-go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
-go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
-go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k=
-go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY=
-go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
-go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
-go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s=
-go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28=
-go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
-go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
-go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
+golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
-golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ=
-golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
+golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
-golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod 
h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= -gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto 
v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1403,20 +533,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1427,7 +550,6 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -1436,26 +558,6 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.4.6 
h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= -honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= -mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= -mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w= -mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/go.work b/go.work new file mode 100644 index 00000000000..719c3c9b7eb --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.23.1 + +use ( + . + ./api +) diff --git a/libs/async/async.go b/internal/async/async.go similarity index 86% rename from libs/async/async.go rename to internal/async/async.go index e716821b633..8729a8ad7b2 100644 --- a/libs/async/async.go +++ b/internal/async/async.go @@ -6,16 +6,16 @@ import ( "sync/atomic" ) -//---------------------------------------- +// ---------------------------------------- // Task // val: the value returned after task execution. // err: the error returned during task completion. // abort: tells Parallel to return, whether or not all tasks have completed. -type Task func(i int) (val interface{}, abort bool, err error) +type Task func(i int) (val any, abort bool, err error) type TaskResult struct { - Value interface{} + Value any Error error } @@ -54,7 +54,7 @@ func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { // Writes results to trs.results without waiting for all tasks to complete. func (trs *TaskResultSet) Reap() *TaskResultSet { for i := 0; i < len(trs.results); i++ { - var trch = trs.chz[i] + trch := trs.chz[i] select { case result, ok := <-trch: if ok { @@ -78,7 +78,7 @@ func (trs *TaskResultSet) Reap() *TaskResultSet { // Like Reap() but waits until all tasks have returned or panic'd. func (trs *TaskResultSet) Wait() *TaskResultSet { for i := 0; i < len(trs.results); i++ { - var trch = trs.chz[i] + trch := trs.chz[i] result, ok := <-trch if ok { // Write result. @@ -96,7 +96,7 @@ func (trs *TaskResultSet) Wait() *TaskResultSet { // Returns the firstmost (by task index) error as // discovered by all previous Reap() calls. -func (trs *TaskResultSet) FirstValue() interface{} { +func (trs *TaskResultSet) FirstValue() any { for _, result := range trs.results { if result.Value != nil { return result.Value @@ -116,7 +116,7 @@ func (trs *TaskResultSet) FirstError() error { return nil } -//---------------------------------------- +// ---------------------------------------- // Parallel // Run tasks in parallel, with ability to abort early. @@ -125,9 +125,9 @@ func (trs *TaskResultSet) FirstError() error { // concurrent quit-like primitives, passed implicitly via Task closures. (e.g. 
// it's not Parallel's concern how you quit/abort your tasks). func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { - var taskResultChz = make([]TaskResultCh, len(tasks)) // To return. - var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. - var numPanics = new(int32) // Keep track of panics to set ok=false later. + taskResultChz := make([]TaskResultCh, len(tasks)) // To return. + taskDoneCh := make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. + numPanics := new(int32) // Keep track of panics to set ok=false later. // We will set it to false iff any tasks panic'd or returned abort. ok = true @@ -136,7 +136,7 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { // When the task is complete, it will appear in the // respective taskResultCh (associated by task index). for i, task := range tasks { - var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result. + taskResultCh := make(chan TaskResult, 1) // Capacity for 1 result. taskResultChz[i] = taskResultCh go func(i int, task Task, taskResultCh chan TaskResult) { // Recovery @@ -155,7 +155,7 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { } }() // Run the task. - var val, abort, err = task(i) + val, abort, err := task(i) // Send val/err to taskResultCh. // NOTE: Below this line, nothing must panic/ taskResultCh <- TaskResult{val, err} diff --git a/libs/async/async_test.go b/internal/async/async_test.go similarity index 70% rename from libs/async/async_test.go rename to internal/async/async_test.go index 4faead4443e..5609a9100fb 100644 --- a/libs/async/async_test.go +++ b/internal/async/async_test.go @@ -8,26 +8,26 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestParallel(t *testing.T) { - // Create tasks. - var counter = new(int32) - var tasks = make([]Task, 100*1000) + counter := new(int32) + tasks := make([]Task, 100*1000) for i := 0; i < len(tasks); i++ { - tasks[i] = func(i int) (res interface{}, abort bool, err error) { + tasks[i] = func(i int) (res any, abort bool, err error) { atomic.AddInt32(counter, 1) return -1 * i, false, nil } } // Run in parallel. - var trs, ok = Parallel(tasks...) + trs, ok := Parallel(tasks...) assert.True(t, ok) // Verify. - assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") + assert.Len(t, tasks, int(*counter), "Each task should have incremented the counter already") var failedTasks int for i := 0; i < len(tasks); i++ { taskResult, ok := trs.LatestResult(i) @@ -46,44 +46,43 @@ func TestParallel(t *testing.T) { // Good! // } } - assert.Equal(t, failedTasks, 0, "No task should have failed") - assert.Nil(t, trs.FirstError(), "There should be no errors") + assert.Equal(t, 0, failedTasks, "No task should have failed") + require.NoError(t, trs.FirstError(), "There should be no errors") assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") } func TestParallelAbort(t *testing.T) { - - var flow1 = make(chan struct{}, 1) - var flow2 = make(chan struct{}, 1) - var flow3 = make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. - var flow4 = make(chan struct{}, 1) + flow1 := make(chan struct{}, 1) + flow2 := make(chan struct{}, 1) + flow3 := make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. + flow4 := make(chan struct{}, 1) // Create tasks. 
- var tasks = []Task{ - func(i int) (res interface{}, abort bool, err error) { - assert.Equal(t, i, 0) + tasks := []Task{ + func(i int) (res any, abort bool, err error) { + assert.Equal(t, 0, i) flow1 <- struct{}{} return 0, false, nil }, - func(i int) (res interface{}, abort bool, err error) { - assert.Equal(t, i, 1) + func(i int) (res any, abort bool, err error) { + assert.Equal(t, 1, i) flow2 <- <-flow1 return 1, false, errors.New("some error") }, - func(i int) (res interface{}, abort bool, err error) { - assert.Equal(t, i, 2) + func(i int) (res any, abort bool, err error) { + assert.Equal(t, 2, i) flow3 <- <-flow2 return 2, true, nil }, - func(i int) (res interface{}, abort bool, err error) { - assert.Equal(t, i, 3) + func(i int) (res any, abort bool, err error) { + assert.Equal(t, 3, i) <-flow4 return 3, false, nil }, } // Run in parallel. - var taskResultSet, ok = Parallel(tasks...) + taskResultSet, ok := Parallel(tasks...) assert.False(t, ok, "ok should be false since we aborted task #2.") // Verify task #3. @@ -104,22 +103,21 @@ func TestParallelAbort(t *testing.T) { } func TestParallelRecover(t *testing.T) { - // Create tasks. - var tasks = []Task{ - func(i int) (res interface{}, abort bool, err error) { + tasks := []Task{ + func(_ int) (res any, abort bool, err error) { return 0, false, nil }, - func(i int) (res interface{}, abort bool, err error) { + func(_ int) (res any, abort bool, err error) { return 1, false, errors.New("some error") }, - func(i int) (res interface{}, abort bool, err error) { + func(_ int) (res any, abort bool, err error) { panic(2) }, } // Run in parallel. - var taskResultSet, ok = Parallel(tasks...) + taskResultSet, ok := Parallel(tasks...) assert.False(t, ok, "ok should be false since we panic'd in task #2.") // Verify task #0, #1, #2. @@ -128,9 +126,11 @@ func TestParallelRecover(t *testing.T) { checkResult(t, taskResultSet, 2, nil, nil, fmt.Errorf("panic in task %v", 2).Error()) } -// Wait for result +// Wait for result. func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, - val interface{}, err error, pnk interface{}) { + val any, err error, pnk any, +) { + t.Helper() taskResult, ok := taskResultSet.LatestResult(index) taskName := fmt.Sprintf("Task #%v", index) assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) @@ -141,12 +141,13 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, case pnk != nil: assert.Contains(t, taskResult.Error.Error(), pnk, taskName) default: - assert.Nil(t, taskResult.Error, taskName) + require.NoError(t, taskResult.Error, taskName) } } -// Wait for timeout (no result) +// Wait for timeout (no result). 
func waitTimeout(t *testing.T, taskResultCh TaskResultCh, taskName string) { + t.Helper() select { case _, ok := <-taskResultCh: if !ok { diff --git a/libs/autofile/README.md b/internal/autofile/README.md similarity index 100% rename from libs/autofile/README.md rename to internal/autofile/README.md diff --git a/libs/autofile/autofile.go b/internal/autofile/autofile.go similarity index 96% rename from libs/autofile/autofile.go rename to internal/autofile/autofile.go index b67af20e150..a5656f1d67c 100644 --- a/libs/autofile/autofile.go +++ b/internal/autofile/autofile.go @@ -8,7 +8,7 @@ import ( "syscall" "time" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) /* AutoFile usage @@ -35,7 +35,7 @@ if err != nil { const ( autoFileClosePeriod = 1000 * time.Millisecond - autoFilePerms = os.FileMode(0600) + autoFilePerms = os.FileMode(0o600) ) // AutoFile automatically closes and re-opens file for writing. The file is @@ -133,12 +133,12 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { if af.file == nil { if err = af.openFile(); err != nil { - return + return 0, err } } n, err = af.file.Write(b) - return + return n, err } // Sync commits the current contents of the file to stable storage. Typically, diff --git a/libs/autofile/autofile_test.go b/internal/autofile/autofile_test.go similarity index 98% rename from libs/autofile/autofile_test.go rename to internal/autofile/autofile_test.go index 2713e5482a0..d651f225ab5 100644 --- a/libs/autofile/autofile_test.go +++ b/internal/autofile/autofile_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmtos "github.com/cometbft/cometbft/libs/os" + cmtos "github.com/cometbft/cometbft/internal/os" ) func TestSIGHUP(t *testing.T) { diff --git a/libs/autofile/cmd/logjack.go b/internal/autofile/cmd/logjack.go similarity index 91% rename from libs/autofile/cmd/logjack.go rename to internal/autofile/cmd/logjack.go index 92386e50023..c63aa40549b 100644 --- a/libs/autofile/cmd/logjack.go +++ b/internal/autofile/cmd/logjack.go @@ -1,6 +1,7 @@ package main import ( + "errors" "flag" "fmt" "io" @@ -8,8 +9,8 @@ import ( "strconv" "strings" - auto "github.com/cometbft/cometbft/libs/autofile" - cmtos "github.com/cometbft/cometbft/libs/os" + auto "github.com/cometbft/cometbft/internal/autofile" + cmtos "github.com/cometbft/cometbft/internal/os" ) const ( @@ -17,7 +18,7 @@ const ( readBufferSize = 1024 // 1KB at a time ) -// Parse command-line options +// Parse command-line options. 
func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { flagSet := flag.NewFlagSet(os.Args[0], flag.ExitOnError) var chopSizeStr, limitSizeStr string @@ -31,12 +32,12 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo } chopSize = parseBytesize(chopSizeStr) limitSize = parseBytesize(limitSizeStr) - return + return headPath, chopSize, limitSize, version } type fmtLogger struct{} -func (fmtLogger) Info(msg string, keyvals ...interface{}) { +func (fmtLogger) Info(msg string, keyvals ...any) { strs := make([]string, len(keyvals)) for i, kv := range keyvals { strs[i] = fmt.Sprintf("%v", kv) @@ -78,7 +79,7 @@ func main() { fmt.Fprintf(os.Stderr, "logjack stopped with error %v\n", headPath) os.Exit(1) } - if err == io.EOF { + if errors.Is(err, io.EOF) { os.Exit(0) } fmt.Println("logjack errored") diff --git a/libs/autofile/group.go b/internal/autofile/group.go similarity index 89% rename from libs/autofile/group.go rename to internal/autofile/group.go index 057957915af..f05e90f6231 100644 --- a/libs/autofile/group.go +++ b/internal/autofile/group.go @@ -16,6 +16,8 @@ import ( "github.com/cometbft/cometbft/libs/service" ) +var indexedFilePattern = regexp.MustCompile(`^.+\.([0-9]{3,})$`) + const ( defaultGroupCheckDuration = 5000 * time.Millisecond defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB @@ -107,8 +109,7 @@ func OpenGroup(headPath string, groupOptions ...func(*Group)) (*Group, error) { g.BaseService = *service.NewBaseService(nil, "Group", g) gInfo := g.readGroupInfo() - g.minIndex = gInfo.MinIndex - g.maxIndex = gInfo.MaxIndex + g.minIndex, g.maxIndex = gInfo.MinIndex, gInfo.MaxIndex return g, nil } @@ -146,7 +147,7 @@ func (g *Group) OnStart() error { func (g *Group) OnStop() { g.ticker.Stop() if err := g.FlushAndSync(); err != nil { - g.Logger.Error("Error flushin to disk", "err", err) + g.Logger.Error("Error flushing to disk", "err", err) } } @@ -160,7 +161,7 @@ func (g *Group) Wait() { // Close closes the head file. The group must be stopped by this moment. func (g *Group) Close() { if err := g.FlushAndSync(); err != nil { - g.Logger.Error("Error flushin to disk", "err", err) + g.Logger.Error("Error flushing to disk", "err", err) } g.mtx.Lock() @@ -170,29 +171,21 @@ func (g *Group) Close() { // HeadSizeLimit returns the current head size limit. func (g *Group) HeadSizeLimit() int64 { - g.mtx.Lock() - defer g.mtx.Unlock() return g.headSizeLimit } // TotalSizeLimit returns total size limit of the group. func (g *Group) TotalSizeLimit() int64 { - g.mtx.Lock() - defer g.mtx.Unlock() return g.totalSizeLimit } // MaxIndex returns index of the last file in the group. func (g *Group) MaxIndex() int { - g.mtx.Lock() - defer g.mtx.Unlock() return g.maxIndex } // MinIndex returns index of the first file in the group. func (g *Group) MinIndex() int { - g.mtx.Lock() - defer g.mtx.Unlock() return g.minIndex } @@ -200,7 +193,7 @@ func (g *Group) MinIndex() int { // returns the number of bytes written. If nn < len(p), it also returns an // error explaining why the write is short. // NOTE: Writes are buffered so they don't write synchronously -// TODO: Make it halt if space is unavailable +// TODO: Make it halt if space is unavailable. func (g *Group) Write(p []byte) (nn int, err error) { g.mtx.Lock() defer g.mtx.Unlock() @@ -209,7 +202,7 @@ func (g *Group) Write(p []byte) (nn int, err error) { // WriteLine writes line into the current head of the group. It also appends "\n". 
// NOTE: Writes are buffered so they don't write synchronously -// TODO: Make it halt if space is unavailable +// TODO: Make it halt if space is unavailable. func (g *Group) WriteLine(line string) error { g.mtx.Lock() defer g.mtx.Unlock() @@ -219,8 +212,6 @@ func (g *Group) WriteLine(line string) error { // Buffered returns the size of the currently buffered data. func (g *Group) Buffered() int { - g.mtx.Lock() - defer g.mtx.Unlock() return g.headBuf.Buffered() } @@ -330,8 +321,7 @@ func (g *Group) RotateFile() { // CONTRACT: Caller must close the returned GroupReader. func (g *Group) NewReader(index int) (*GroupReader, error) { r := newGroupReader(g) - err := r.SetIndex(index) - if err != nil { + if err := r.SetIndex(index); err != nil { return nil, err } return r, nil @@ -347,13 +337,11 @@ type GroupInfo struct { // Returns info after scanning all files in g.Head's dir. func (g *Group) ReadGroupInfo() GroupInfo { - g.mtx.Lock() - defer g.mtx.Unlock() return g.readGroupInfo() } // Index includes the head. -// CONTRACT: caller should have called g.mtx.Lock +// CONTRACT: caller should have called g.mtx.Lock. func (g *Group) readGroupInfo() GroupInfo { groupDir := filepath.Dir(g.Head.Path) headBase := filepath.Base(g.Head.Path) @@ -372,32 +360,33 @@ func (g *Group) readGroupInfo() GroupInfo { // For each file in the directory, filter by pattern for _, fileInfo := range fiz { - if fileInfo.Name() == headBase { - fileSize := fileInfo.Size() - totalSize += fileSize + fileName := fileInfo.Name() + fileSize := fileInfo.Size() + totalSize += fileSize + + if fileName == headBase { headSize = fileSize continue - } else if strings.HasPrefix(fileInfo.Name(), headBase) { - fileSize := fileInfo.Size() - totalSize += fileSize - indexedFilePattern := regexp.MustCompile(`^.+\.([0-9]{3,})$`) - submatch := indexedFilePattern.FindSubmatch([]byte(fileInfo.Name())) - if len(submatch) != 0 { - // Matches - fileIndex, err := strconv.Atoi(string(submatch[1])) - if err != nil { - panic(err) - } - if maxIndex < fileIndex { - maxIndex = fileIndex - } - if minIndex == -1 || fileIndex < minIndex { - minIndex = fileIndex - } + } + + if !strings.HasPrefix(fileName, headBase) { + continue + } + + submatch := indexedFilePattern.FindStringSubmatch(fileName) + if len(submatch) == 2 { + fileIndex, err := strconv.Atoi(submatch[1]) + if err != nil { + panic(err) + } + if fileIndex > maxIndex { + maxIndex = fileIndex + } + if minIndex == -1 || fileIndex < minIndex { + minIndex = fileIndex } } } - // Now account for the head. if minIndex == -1 { // If there were no numbered files, @@ -417,7 +406,7 @@ func filePathForIndex(headPath string, index int, maxIndex int) string { return fmt.Sprintf("%v.%03d", headPath, index) } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // GroupReader provides an interface for reading from a Group. type GroupReader struct { @@ -479,7 +468,7 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { nn, err = gr.curReader.Read(p[n:]) n += nn switch { - case err == io.EOF: + case errors.Is(err, io.EOF): if n >= lenP { return n, nil } @@ -496,7 +485,7 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } // IF index > gr.Group.maxIndex, returns io.EOF -// CONTRACT: caller should hold gr.mtx +// CONTRACT: caller should hold gr.mtx. func (gr *GroupReader) openFile(index int) error { // Lock on Group to ensure that head doesn't move in the meanwhile. 
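+	// gr.mtx itself is assumed to be held by the caller, per the CONTRACT above.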
gr.Group.mtx.Lock() diff --git a/libs/autofile/group_test.go b/internal/autofile/group_test.go similarity index 86% rename from libs/autofile/group_test.go rename to internal/autofile/group_test.go index e2813b6c972..dbc53e3f06d 100644 --- a/libs/autofile/group_test.go +++ b/internal/autofile/group_test.go @@ -9,14 +9,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmtos "github.com/cometbft/cometbft/libs/os" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtos "github.com/cometbft/cometbft/internal/os" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group { + t.Helper() testID := cmtrand.Str(12) testDir := "_test_" + testID - err := cmtos.EnsureDir(testDir, 0700) + err := cmtos.EnsureDir(testDir, 0o700) require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" @@ -28,14 +29,16 @@ func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group } func destroyTestGroup(t *testing.T, g *Group) { + t.Helper() g.Close() err := os.RemoveAll(g.Dir) require.NoError(t, err, "Error removing test Group directory") } -func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { - assert.Equal(t, minIndex, gInfo.MinIndex) +func assertGroupInfo(t *testing.T, gInfo GroupInfo, maxIndex int, totalSize, headSize int64) { + t.Helper() + assert.Equal(t, 0, gInfo.MinIndex) assert.Equal(t, maxIndex, gInfo.MaxIndex) assert.Equal(t, totalSize, gInfo.TotalSize) assert.Equal(t, headSize, gInfo.HeadSize) @@ -45,7 +48,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 1000*1000) // At first, there are no files. - assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0) // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { @@ -54,11 +57,11 @@ func TestCheckHeadSizeLimit(t *testing.T) { } err := g.FlushAndSync() require.NoError(t, err) - assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 999000, 999000) // Even calling checkHeadSizeLimit manually won't rotate it. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + assertGroupInfo(t, g.ReadGroupInfo(), 0, 999000, 999000) // Write 1000 more bytes. err = g.WriteLine(cmtrand.Str(999)) @@ -68,7 +71,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Calling checkHeadSizeLimit this time rolls it. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) + assertGroupInfo(t, g.ReadGroupInfo(), 1, 1000000, 0) // Write 1000 more bytes. err = g.WriteLine(cmtrand.Str(999)) @@ -78,7 +81,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Calling checkHeadSizeLimit does nothing. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) + assertGroupInfo(t, g.ReadGroupInfo(), 1, 1001000, 1000) // Write 1000 bytes 999 times. for i := 0; i < 999; i++ { @@ -87,22 +90,22 @@ func TestCheckHeadSizeLimit(t *testing.T) { } err = g.FlushAndSync() require.NoError(t, err) - assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) + assertGroupInfo(t, g.ReadGroupInfo(), 1, 2000000, 1000000) // Calling checkHeadSizeLimit rolls it again. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) + assertGroupInfo(t, g.ReadGroupInfo(), 2, 2000000, 0) // Write 1000 more bytes. 
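+	// (Writing to g.Head directly bypasses the group's buffered writer.)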
_, err = g.Head.Write([]byte(cmtrand.Str(999) + "\n")) require.NoError(t, err, "Error appending to head") err = g.FlushAndSync() require.NoError(t, err) - assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) + assertGroupInfo(t, g.ReadGroupInfo(), 2, 2001000, 1000) // Calling checkHeadSizeLimit does nothing. g.checkHeadSizeLimit() - assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) + assertGroupInfo(t, g.ReadGroupInfo(), 2, 2001000, 1000) // Cleanup destroyTestGroup(t, g) @@ -151,14 +154,14 @@ func TestRotateFile(t *testing.T) { // Read g.Head.Path+"000" body1, err := os.ReadFile(g.Head.Path + ".000") - assert.NoError(t, err, "Failed to read first rolled file") + require.NoError(t, err, "Failed to read first rolled file: %v", err) if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path body2, err := os.ReadFile(g.Head.Path) - assert.NoError(t, err, "Failed to read first rolled file") + require.NoError(t, err, "Failed to read first rolled file: %v", err) if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("got unexpected contents: [%v]", string(body2)) } @@ -186,7 +189,7 @@ func TestWrite(t *testing.T) { require.NoError(t, err, "failed to create reader") _, err = gr.Read(read) - assert.NoError(t, err, "failed to read data") + require.NoError(t, err, "failed to read data: %v", err) assert.Equal(t, written, read) // Cleanup @@ -216,7 +219,7 @@ func TestGroupReaderRead(t *testing.T) { require.NoError(t, err, "failed to create reader") n, err := gr.Read(read) - assert.NoError(t, err, "failed to read data") + require.NoError(t, err, "failed to read data: %v", err) assert.Equal(t, totalWrittenLength, n, "not enough bytes read") professorPlusFrankenstein := professor professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...) diff --git a/libs/bits/bit_array.go b/internal/bits/bit_array.go similarity index 77% rename from libs/bits/bit_array.go rename to internal/bits/bit_array.go index 358e37be8df..4dad7a94ebe 100644 --- a/libs/bits/bit_array.go +++ b/internal/bits/bit_array.go @@ -3,13 +3,14 @@ package bits import ( "encoding/binary" "fmt" + "math/bits" + "math/rand" "regexp" "strings" "sync" + cmtprotobits "github.com/cometbft/cometbft/api/cometbft/libs/bits/v1" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtprotobits "github.com/cometbft/cometbft/proto/tendermint/libs/bits" ) // BitArray is a thread-safe implementation of a bit array. @@ -31,7 +32,27 @@ func NewBitArray(bits int) *BitArray { } } -// Size returns the number of bits in the bitarray +// NewBitArrayFromFn returns a new bit array. +// It returns nil if the number of bits is zero. +// It initializes the `i`th bit to the value of `fn(i)`. +func NewBitArrayFromFn(bits int, fn func(int) bool) *BitArray { + if bits <= 0 { + return nil + } + bA := &BitArray{ + Bits: bits, + Elems: make([]uint64, (bits+63)/64), + } + for i := 0; i < bits; i++ { + v := fn(i) + if v { + bA.Elems[i/64] |= (uint64(1) << uint(i%64)) + } + } + return bA +} + +// Size returns the number of bits in the bitarray. func (bA *BitArray) Size() int { if bA == nil { return 0 @@ -40,7 +61,7 @@ func (bA *BitArray) Size() int { } // GetIndex returns the bit at index i within the bit array. -// The behavior is undefined if i >= bA.Bits +// The behavior is undefined if i >= bA.Bits. 
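+// Returns false if bA is nil.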
func (bA *BitArray) GetIndex(i int) bool { if bA == nil { return false @@ -58,7 +79,7 @@ func (bA *BitArray) getIndex(i int) bool { } // SetIndex sets the bit at index i within the bit array. -// The behavior is undefined if i >= bA.Bits +// The behavior is undefined if i >= bA.Bits. func (bA *BitArray) SetIndex(i int, v bool) bool { if bA == nil { return false @@ -178,7 +199,7 @@ func (bA *BitArray) not() *BitArray { // Sub subtracts the two bit-arrays bitwise, without carrying the bits. // Note that carryless subtraction of a - b is (a and not b). // The output is the same as bA, regardless of o's size. -// If bA is longer than o, o is right padded with zeroes +// If bA is longer than o, o is right padded with zeroes. func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA == nil || o == nil { // TODO: Decide if we should do 1's complement here? @@ -202,7 +223,7 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { return c } -// IsEmpty returns true iff all bits in the bit array are 0 +// IsEmpty returns true iff all bits in the bit array are 0. func (bA *BitArray) IsEmpty() bool { if bA == nil { return true // should this be opposite? @@ -240,51 +261,76 @@ func (bA *BitArray) IsFull() bool { // PickRandom returns a random index for a set bit in the bit array. // If there is no such value, it returns 0, false. -// It uses the global randomness in `random.go` to get this index. -func (bA *BitArray) PickRandom() (int, bool) { +// It uses the provided randomness to get this index. +func (bA *BitArray) PickRandom(r *rand.Rand) (int, bool) { if bA == nil { return 0, false } bA.mtx.Lock() - trueIndices := bA.getTrueIndices() + numTrueIndices := bA.getNumTrueIndices() + if numTrueIndices == 0 { // no bits set to true + bA.mtx.Unlock() + return 0, false + } + index := bA.getNthTrueIndex(r.Intn(numTrueIndices)) bA.mtx.Unlock() - - if len(trueIndices) == 0 { // no bits set to true + if index == -1 { return 0, false } - - return trueIndices[cmtrand.Intn(len(trueIndices))], true + return index, true } -func (bA *BitArray) getTrueIndices() []int { - trueIndices := make([]int, 0, bA.Bits) - curBit := 0 +func (bA *BitArray) getNumTrueIndices() int { + count := 0 numElems := len(bA.Elems) - // set all true indices + // handle all elements except the last one for i := 0; i < numElems-1; i++ { - elem := bA.Elems[i] - if elem == 0 { - curBit += 64 - continue - } - for j := 0; j < 64; j++ { - if (elem & (uint64(1) << uint64(j))) > 0 { - trueIndices = append(trueIndices, curBit) - } - curBit++ - } + count += bits.OnesCount64(bA.Elems[i]) } // handle last element - lastElem := bA.Elems[numElems-1] - numFinalBits := bA.Bits - curBit + numFinalBits := bA.Bits - (numElems-1)*64 for i := 0; i < numFinalBits; i++ { - if (lastElem & (uint64(1) << uint64(i))) > 0 { - trueIndices = append(trueIndices, curBit) + if (bA.Elems[numElems-1] & (uint64(1) << uint64(i))) > 0 { + count++ + } + } + return count +} + +// getNthTrueIndex returns the index of the nth true bit in the bit array. +// n is 0 indexed. (e.g. for bitarray x__x, getNthTrueIndex(0) returns 0). +// If there is no such value, it returns -1. 
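+// Whole 64-bit words are skipped using popcount, so sparse prefixes are cheap to scan.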
+func (bA *BitArray) getNthTrueIndex(n int) int {
+	numElems := len(bA.Elems)
+	count := 0
+
+	// Iterate over each element
+	for i := 0; i < numElems; i++ {
+		// Count set bits in the current element
+		setBits := bits.OnesCount64(bA.Elems[i])
+
+		// If the count of set bits in this element plus the count so far
+		// is greater than or equal to n, then the nth bit must be in this element
+		if count+setBits >= n {
+			// Find the index of the nth set bit within this element
+			for j := 0; j < 64; j++ {
+				if bA.Elems[i]&(1<<uint64(j)) > 0 {
+					if count == n {
+						return i*64 + j
+					}
+					count++
+				}
+			}
+		} else {
+			count += setBits
+		}
+	}
+
+	// No nth true bit exists in the array.
+	return -1
+}
@@ -409,16 +455,27 @@
 	// Construct new BitArray and copy over.
 	numBits := len(bits)
 	bA2 := NewBitArray(numBits)
+	if bA2 == nil {
+		// Treat it as if we encountered the case: b == "null"
+		bA.Bits = 0
+		bA.Elems = nil
+		return nil
+	}
+
 	for i := 0; i < numBits; i++ {
 		if bits[i] == 'x' {
 			bA2.SetIndex(i, true)
 		}
 	}
-	*bA = *bA2 //nolint:govet
+
+	// Instead of *bA = *bA2
+	bA.Bits = bA2.Bits
+	bA.Elems = make([]uint64, len(bA2.Elems))
+	copy(bA.Elems, bA2.Elems)
 	return nil
 }
 
-// ToProto converts BitArray to protobuf
+// ToProto converts BitArray to protobuf.
 func (bA *BitArray) ToProto() *cmtprotobits.BitArray {
 	if bA == nil || len(bA.Elems) == 0 {
 		return nil
@@ -433,6 +490,7 @@ func (bA *BitArray) ToProto() *cmtprotobits.BitArray {
 // FromProto sets a protobuf BitArray to the given pointer.
 func (bA *BitArray) FromProto(protoBitArray *cmtprotobits.BitArray) {
 	if protoBitArray == nil {
+		//nolint:wastedassign
 		bA = nil
 		return
 	}
diff --git a/libs/bits/bit_array_test.go b/internal/bits/bit_array_test.go
similarity index 53%
rename from libs/bits/bit_array_test.go
rename to internal/bits/bit_array_test.go
index c9bfbb3c21d..5b0a86c52cd 100644
--- a/libs/bits/bit_array_test.go
+++ b/internal/bits/bit_array_test.go
@@ -4,32 +4,35 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"math/rand"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	cmtrand "github.com/cometbft/cometbft/libs/rand"
+	cmtrand "github.com/cometbft/cometbft/internal/rand"
 )
 
-func randBitArray(bits int) (*BitArray, []byte) {
+var (
+	empty16Bits = "________________"
+	empty64Bits = empty16Bits + empty16Bits + empty16Bits + empty16Bits
+	full16bits  = "xxxxxxxxxxxxxxxx"
+	full64bits  = full16bits + full16bits + full16bits + full16bits
+	grand       = rand.New(rand.NewSource(time.Now().UnixNano()))
+)
+
+func randBitArray(bits int) *BitArray {
 	src := cmtrand.Bytes((bits + 7) / 8)
-	bA := NewBitArray(bits)
-	for i := 0; i < len(src); i++ {
-		for j := 0; j < 8; j++ {
-			if i*8+j >= bits {
-				return bA, src
-			}
-			setBit := src[i]&(1<<uint(j)) > 0
-			bA.SetIndex(i*8+j, setBit)
-		}
+	srcIndexToBit := func(i int) bool {
+		return src[i/8]&(1<<uint(i%8)) > 0
 	}
-	return bA, src
+	return NewBitArrayFromFn(bits, srcIndexToBit)
 }
 
 func TestAnd(t *testing.T) {
-	bA1, _ := randBitArray(51)
-	bA2, _ := randBitArray(31)
+	bA1 := randBitArray(51)
+	bA2 := randBitArray(31)
 	bA3 := bA1.And(bA2)
 
 	var bNil *BitArray
@@ -52,8 +55,8 @@
 }
 
 func TestOr(t *testing.T) {
-	bA1, _ := randBitArray(51)
-	bA2, _ := randBitArray(31)
+	bA1 := randBitArray(57)
+	bA2 := randBitArray(31)
 	bA3 := bA1.Or(bA2)
 
 	bNil := (*BitArray)(nil)
@@ -61,7 +64,7 @@
 	require.Equal(t, bA1.Or(nil), bA1)
 	require.Equal(t, bNil.Or(nil), (*BitArray)(nil))
 
-	if bA3.Bits != 51 {
+	if bA3.Bits != 57 {
 		t.Error("Expected max bits")
 	}
 	if len(bA3.Elems) != len(bA1.Elems) {
@@ -73,6 +76,10 @@
 		t.Error("Wrong bit from bA3", i, bA1.GetIndex(i),
bA2.GetIndex(i), bA3.GetIndex(i)) } } + if bA3.getNumTrueIndices() == 0 { + t.Error("Expected at least one true bit. " + + "This has a false positive rate that is less than 1 in 2^80 (cryptographically improbable).") + } } func TestSub(t *testing.T) { @@ -95,13 +102,14 @@ func TestSub(t *testing.T) { for _, tc := range testCases { var bA *BitArray err := json.Unmarshal([]byte(tc.initBA), &bA) - require.Nil(t, err) + require.NoError(t, err) var o *BitArray err = json.Unmarshal([]byte(tc.subtractingBA), &o) - require.Nil(t, err) + require.NoError(t, err) - got, _ := json.Marshal(bA.Sub(o)) + got, err := json.Marshal(bA.Sub(o)) + require.NoError(t, err) require.Equal( t, tc.expectedBA, @@ -115,8 +123,6 @@ func TestSub(t *testing.T) { } func TestPickRandom(t *testing.T) { - empty16Bits := "________________" - empty64Bits := empty16Bits + empty16Bits + empty16Bits + empty16Bits testCases := []struct { bA string ok bool @@ -131,16 +137,99 @@ func TestPickRandom(t *testing.T) { {`"x` + empty64Bits + `"`, true}, {`"` + empty64Bits + `x"`, true}, {`"x` + empty64Bits + `x"`, true}, + {`"` + empty64Bits + `___x"`, true}, } for _, tc := range testCases { var bitArr *BitArray err := json.Unmarshal([]byte(tc.bA), &bitArr) require.NoError(t, err) - _, ok := bitArr.PickRandom() + _, ok := bitArr.PickRandom(grand) require.Equal(t, tc.ok, ok, "PickRandom got an unexpected result on input %s", tc.bA) } } +func TestGetNumTrueIndices(t *testing.T) { + type testcase struct { + Input string + ExpectedResult int + } + testCases := []testcase{ + {"x_x_x_", 3}, + {"______", 0}, + {"xxxxxx", 6}, + {"x_x_x_x_x_x_x_x_x_", 9}, + } + numOriginalTestCases := len(testCases) + for i := 0; i < numOriginalTestCases; i++ { + testCases = append(testCases, testcase{testCases[i].Input + "x", testCases[i].ExpectedResult + 1}) + testCases = append(testCases, testcase{full64bits + testCases[i].Input, testCases[i].ExpectedResult + 64}) + testCases = append(testCases, testcase{empty64Bits + testCases[i].Input, testCases[i].ExpectedResult}) + } + + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(`"`+tc.Input+`"`), &bitArr) + require.NoError(t, err) + result := bitArr.getNumTrueIndices() + require.Equal(t, tc.ExpectedResult, result, "for input %s, expected %d, got %d", tc.Input, tc.ExpectedResult, result) + result = bitArr.Not().getNumTrueIndices() + require.Equal(t, bitArr.Bits-result, bitArr.getNumTrueIndices()) + } +} + +func TestGetNthTrueIndex(t *testing.T) { + type testcase struct { + Input string + N int + ExpectedResult int + } + testCases := []testcase{ + // Basic cases + {"x_x_x_", 0, 0}, + {"x_x_x_", 1, 2}, + {"x_x_x_", 2, 4}, + {"______", 1, -1}, // No true indices + {"xxxxxx", 5, 5}, // Last true index + {"x_x_x_x_x_x_x_", 9, -1}, // Out-of-range + + // Edge cases + {"xxxxxx", 7, -1}, // Out-of-range + {"______", 0, -1}, // No true indices + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 49, 49}, // Last true index + {"____________________________________________", 1, -1}, // No true indices + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 63, 63}, // last index of first word + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 64, 64}, // first index of second word + {"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 100, -1}, // Out-of-range + + // Input beyond 64 bits + 
{"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", 99, 99}, // Last true index + + // Input less than 64 bits + {"x_x_x_", 3, -1}, // Out-of-range + } + + numOriginalTestCases := len(testCases) + // Add 64 underscores to each test case + for i := 0; i < numOriginalTestCases; i++ { + expectedResult := testCases[i].ExpectedResult + if expectedResult != -1 { + expectedResult += 64 + } + testCases = append(testCases, testcase{empty64Bits + testCases[i].Input, testCases[i].N, expectedResult}) + } + + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(`"`+tc.Input+`"`), &bitArr) + require.NoError(t, err) + + // Get the nth true index + result := bitArr.getNthTrueIndex(tc.N) + + require.Equal(t, tc.ExpectedResult, result, "for bit array %s, input %d, expected %d, got %d", tc.Input, tc.N, tc.ExpectedResult, result) + } +} + func TestBytes(_ *testing.T) { bA := NewBitArray(4) bA.SetIndex(0, true) @@ -188,7 +277,7 @@ func TestEmptyFull(t *testing.T) { func TestUpdateNeverPanics(_ *testing.T) { newRandBitArray := func(n int) *BitArray { - ba, _ := randBitArray(n) + ba := randBitArray(n) return ba } pairs := []struct { @@ -239,7 +328,6 @@ func TestJSONMarshalUnmarshal(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.bA.String(), func(t *testing.T) { bz, err := json.Marshal(tc.bA) require.NoError(t, err) @@ -285,3 +373,30 @@ func TestBitArrayProtoBuf(t *testing.T) { } } } + +// Tests that UnmarshalJSON doesn't crash when no bits are passed into the JSON. +// See issue https://github.com/cometbft/cometbft/issues/2658 +func TestUnmarshalJSONDoesntCrashOnZeroBits(t *testing.T) { + type indexCorpus struct { + BitArray *BitArray `json:"ba"` + Index int `json:"i"` + } + + ic := new(indexCorpus) + blob := []byte(`{"BA":""}`) + err := json.Unmarshal(blob, ic) + require.NoError(t, err) + require.Equal(t, ic.BitArray, &BitArray{Bits: 0, Elems: nil}) +} + +func BenchmarkPickRandomBitArray(b *testing.B) { + // A random 150 bit string to use as the benchmark bit array + benchmarkBitArrayStr := "_______xx__xxx_xx__x_xx_x_x_x__x_x_x_xx__xx__xxx__xx_x_xxx_x__xx____x____xx__xx____x_x__x_____xx_xx_xxxxxxx__xx_x_xxxx_x___x_xxxxx_xx__xxxx_xx_x___x_x" + var bitArr *BitArray + err := json.Unmarshal([]byte(`"`+benchmarkBitArrayStr+`"`), &bitArr) + require.NoError(b, err) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = bitArr.PickRandom(grand) + } +} diff --git a/blocksync/errors.go b/internal/blocksync/errors.go similarity index 86% rename from blocksync/errors.go rename to internal/blocksync/errors.go index 04926db9945..bafe2d1ac41 100644 --- a/blocksync/errors.go +++ b/internal/blocksync/errors.go @@ -7,12 +7,10 @@ import ( "github.com/cosmos/gogoproto/proto" ) -var ( - // ErrNilMessage is returned when provided message is empty - ErrNilMessage = errors.New("message cannot be nil") -) +// ErrNilMessage is returned when provided message is empty. +var ErrNilMessage = errors.New("message cannot be nil") -// ErrInvalidBase is returned when peer informs of a status with invalid height +// ErrInvalidBase is returned when peer informs of a status with invalid height. 
type ErrInvalidHeight struct { Height int64 Reason string @@ -22,7 +20,7 @@ func (e ErrInvalidHeight) Error() string { return fmt.Sprintf("invalid height %v: %s", e.Height, e.Reason) } -// ErrInvalidBase is returned when peer informs of a status with invalid base +// ErrInvalidBase is returned when peer informs of a status with invalid base. type ErrInvalidBase struct { Base int64 Reason string diff --git a/blocksync/metrics.gen.go b/internal/blocksync/metrics.gen.go similarity index 93% rename from blocksync/metrics.gen.go rename to internal/blocksync/metrics.gen.go index 46c24df208d..1739453f356 100644 --- a/blocksync/metrics.gen.go +++ b/internal/blocksync/metrics.gen.go @@ -3,8 +3,8 @@ package blocksync import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) diff --git a/blocksync/metrics.go b/internal/blocksync/metrics.go similarity index 88% rename from blocksync/metrics.go rename to internal/blocksync/metrics.go index 204b127ba4a..b6666b222ce 100644 --- a/blocksync/metrics.go +++ b/internal/blocksync/metrics.go @@ -1,8 +1,7 @@ package blocksync import ( - "github.com/go-kit/kit/metrics" - + "github.com/cometbft/cometbft/libs/metrics" "github.com/cometbft/cometbft/types" ) @@ -12,7 +11,7 @@ const ( MetricsSubsystem = "blocksync" ) -//go:generate go run ../scripts/metricsgen -struct=Metrics +//go:generate go run ../../scripts/metricsgen -struct=Metrics // Metrics contains metrics exposed by this package. type Metrics struct { diff --git a/blocksync/msgs.go b/internal/blocksync/msgs.go similarity index 82% rename from blocksync/msgs.go rename to internal/blocksync/msgs.go index 39a45d6c5dd..97d5bdd1bf9 100644 --- a/blocksync/msgs.go +++ b/internal/blocksync/msgs.go @@ -5,12 +5,12 @@ import ( "github.com/cosmos/gogoproto/proto" - bcproto "github.com/cometbft/cometbft/proto/tendermint/blocksync" + bcproto "github.com/cometbft/cometbft/api/cometbft/blocksync/v1" "github.com/cometbft/cometbft/types" ) const ( - // NOTE: keep up to date with bcproto.BlockResponse + // NOTE: keep up to date with bcproto.BlockResponse. BlockResponseMessagePrefixSize = 4 BlockResponseMessageFieldKeySize = 1 MaxMsgSize = types.MaxBlockSizeBytes + @@ -30,10 +30,9 @@ func ValidateMsg(pb proto.Message) error { return ErrInvalidHeight{Height: msg.Height, Reason: "negative height"} } case *bcproto.BlockResponse: - _, err := types.BlockFromProto(msg.Block) - if err != nil { - return err - } + // Avoid double-calling `types.BlockFromProto` for performance reasons. 
+ // See https://github.com/cometbft/cometbft/issues/1964 + return nil case *bcproto.NoBlockResponse: if msg.Height < 0 { return ErrInvalidHeight{Height: msg.Height, Reason: "negative height"} diff --git a/blocksync/msgs_test.go b/internal/blocksync/msgs_test.go similarity index 66% rename from blocksync/msgs_test.go rename to internal/blocksync/msgs_test.go index 1100771e8a4..81441d81fc0 100644 --- a/blocksync/msgs_test.go +++ b/internal/blocksync/msgs_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/blocksync" - bcproto "github.com/cometbft/cometbft/proto/tendermint/blocksync" + bcproto "github.com/cometbft/cometbft/api/cometbft/blocksync/v1" + "github.com/cometbft/cometbft/internal/blocksync" "github.com/cometbft/cometbft/types" ) @@ -26,7 +26,6 @@ func TestBcBlockRequestMessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { request := bcproto.BlockRequest{Height: tc.requestHeight} assert.Equal(t, tc.expectErr, blocksync.ValidateMsg(&request) != nil, "Validate Basic had an unexpected result") @@ -46,7 +45,6 @@ func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { nonResponse := bcproto.NoBlockResponse{Height: tc.nonResponseHeight} assert.Equal(t, tc.expectErr, blocksync.ValidateMsg(&nonResponse) != nil, "Validate Basic had an unexpected result") @@ -56,7 +54,7 @@ func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { func TestBcStatusRequestMessageValidateBasic(t *testing.T) { request := bcproto.StatusRequest{} - assert.NoError(t, blocksync.ValidateMsg(&request)) + require.NoError(t, blocksync.ValidateMsg(&request)) } func TestBcStatusResponseMessageValidateBasic(t *testing.T) { @@ -71,7 +69,6 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { response := bcproto.StatusResponse{Height: tc.responseHeight} assert.Equal(t, tc.expectErr, blocksync.ValidateMsg(&response) != nil, "Validate Basic had an unexpected result") @@ -93,30 +90,47 @@ func TestBlocksyncMessageVectors(t *testing.T) { expBytes string }{ {"BlockRequestMessage", &bcproto.Message{Sum: &bcproto.Message_BlockRequest{ - BlockRequest: &bcproto.BlockRequest{Height: 1}}}, "0a020801"}, - {"BlockRequestMessage", &bcproto.Message{Sum: &bcproto.Message_BlockRequest{ - BlockRequest: &bcproto.BlockRequest{Height: math.MaxInt64}}}, - "0a0a08ffffffffffffffff7f"}, + BlockRequest: &bcproto.BlockRequest{Height: 1}, + }}, "0a020801"}, + { + "BlockRequestMessage", &bcproto.Message{Sum: &bcproto.Message_BlockRequest{ + BlockRequest: &bcproto.BlockRequest{Height: math.MaxInt64}, + }}, + "0a0a08ffffffffffffffff7f", + }, {"BlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_BlockResponse{ - BlockResponse: &bcproto.BlockResponse{Block: bpb}}}, "1a700a6e0a5b0a02080b1803220b088092b8c398feffffff012a0212003a20c4da88e876062aa1543400d50d0eaa0dac88096057949cfb7bca7f3a48c04bf96a20e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855120d0a0b48656c6c6f20576f726c641a00"}, - {"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ - NoBlockResponse: &bcproto.NoBlockResponse{Height: 1}}}, "12020801"}, + BlockResponse: &bcproto.BlockResponse{Block: bpb}, + }}, 
"1a700a6e0a5b0a02080b1803220b088092b8c398feffffff012a0212003a20c4da88e876062aa1543400d50d0eaa0dac88096057949cfb7bca7f3a48c04bf96a20e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855120d0a0b48656c6c6f20576f726c641a00"}, {"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ - NoBlockResponse: &bcproto.NoBlockResponse{Height: math.MaxInt64}}}, - "120a08ffffffffffffffff7f"}, - {"StatusRequestMessage", &bcproto.Message{Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}}}, - "2200"}, - {"StatusResponseMessage", &bcproto.Message{Sum: &bcproto.Message_StatusResponse{ - StatusResponse: &bcproto.StatusResponse{Height: 1, Base: 2}}}, - "2a0408011002"}, - {"StatusResponseMessage", &bcproto.Message{Sum: &bcproto.Message_StatusResponse{ - StatusResponse: &bcproto.StatusResponse{Height: math.MaxInt64, Base: math.MaxInt64}}}, - "2a1408ffffffffffffffff7f10ffffffffffffffff7f"}, + NoBlockResponse: &bcproto.NoBlockResponse{Height: 1}, + }}, "12020801"}, + { + "NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ + NoBlockResponse: &bcproto.NoBlockResponse{Height: math.MaxInt64}, + }}, + "120a08ffffffffffffffff7f", + }, + { + "StatusRequestMessage", &bcproto.Message{Sum: &bcproto.Message_StatusRequest{ + StatusRequest: &bcproto.StatusRequest{}, + }}, + "2200", + }, + { + "StatusResponseMessage", &bcproto.Message{Sum: &bcproto.Message_StatusResponse{ + StatusResponse: &bcproto.StatusResponse{Height: 1, Base: 2}, + }}, + "2a0408011002", + }, + { + "StatusResponseMessage", &bcproto.Message{Sum: &bcproto.Message_StatusResponse{ + StatusResponse: &bcproto.StatusResponse{Height: math.MaxInt64, Base: math.MaxInt64}, + }}, + "2a1408ffffffffffffffff7f10ffffffffffffffff7f", + }, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { bz, _ := proto.Marshal(tc.bmsg) diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go new file mode 100644 index 00000000000..00c2357de10 --- /dev/null +++ b/internal/blocksync/pool.go @@ -0,0 +1,868 @@ +package blocksync + +import ( + "errors" + "fmt" + "math" + "sort" + "time" + + flow "github.com/cometbft/cometbft/internal/flowrate" + "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/libs/service" + cmtsync "github.com/cometbft/cometbft/libs/sync" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" +) + +/* +eg, L = latency = 0.1s + P = num peers = 10 + FN = num full nodes + BS = 1kB block size + CB = 1 Mbit/s = 128 kB/s + CB/P = 12.8 kB + B/S = CB/P/BS = 12.8 blocks/s + + 12.8 * 0.1 = 1.28 blocks on conn +*/ + +const ( + maxPendingRequestsPerPeer = 20 + requestRetrySeconds = 30 + + // Minimum recv rate to ensure we're receiving blocks from a peer fast + // enough. If a peer is not sending us data at least that rate, we consider + // them to have timed out, and we disconnect. + // + // Based on the experiments with [Osmosis](https://osmosis.zone/), the + // minimum rate could be as high as 500 KB/s. However, we're setting it to + // 128 KB/s for now to be conservative. + minRecvRate = 128 * 1024 // 128 KB/s + + // peerConnWait is the time that must have elapsed since the pool routine + // was created before we start making requests. This is to give the peer + // routine time to connect to peers. 
+ peerConnWait = 3 * time.Second + + // If we're within minBlocksForSingleRequest blocks of the pool's height, we + // send 2 parallel requests to 2 peers for the same block. If we're further + // away, we send a single request. + minBlocksForSingleRequest = 50 +) + +var ( + requestInterval = 10 * time.Millisecond // timeout between requests + peerTimeout = 15 * time.Second // not const so we can override with tests +) + +/* + Peers self report their heights when we join the block pool. + Starting from our latest pool.height, we request blocks + in sequence from peers that reported higher heights than ours. + Every so often we ask peers what height they're on so we can keep going. + + Requests are continuously made for blocks of higher heights until + the limit is reached. If most of the requests have no available peers, and we + are not at peer limits, we can probably switch to consensus reactor. +*/ + +// BlockPool keeps track of the block sync peers, block requests and block responses. +type BlockPool struct { + service.BaseService + startTime time.Time + startHeight int64 + + mtx cmtsync.Mutex + // block requests + requesters map[int64]*bpRequester + height int64 // the lowest key in requesters. + // peers + peers map[nodekey.ID]*bpPeer + bannedPeers map[nodekey.ID]time.Time + sortedPeers []*bpPeer // sorted by curRate, highest first + maxPeerHeight int64 // the biggest reported height + + requestsCh chan<- BlockRequest + errorsCh chan<- peerError +} + +// NewBlockPool returns a new BlockPool with the height equal to start. Block +// requests and errors will be sent to requestsCh and errorsCh accordingly. +func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { + bp := &BlockPool{ + peers: make(map[nodekey.ID]*bpPeer), + bannedPeers: make(map[nodekey.ID]time.Time), + requesters: make(map[int64]*bpRequester), + height: start, + startHeight: start, + + requestsCh: requestsCh, + errorsCh: errorsCh, + } + bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp) + return bp +} + +// OnStart implements service.Service by spawning requesters routine and recording +// pool's start time. +func (pool *BlockPool) OnStart() error { + pool.startTime = time.Now() + go pool.makeRequestersRoutine() + return nil +} + +func (pool *BlockPool) makeRequestersRoutine() { + for { + if !pool.IsRunning() { + return + } + + // Check if we are within peerConnWait seconds of start time + // This gives us some time to connect to peers before starting a wave of requests + if time.Since(pool.startTime) < peerConnWait { + // Calculate the duration to sleep until peerConnWait seconds have passed since pool.startTime + sleepDuration := peerConnWait - time.Since(pool.startTime) + time.Sleep(sleepDuration) + } + + pool.mtx.Lock() + var ( + maxRequestersCreated = len(pool.requesters) >= len(pool.peers)*maxPendingRequestsPerPeer + + nextHeight = pool.height + int64(len(pool.requesters)) + maxPeerHeightReached = nextHeight > pool.maxPeerHeight + ) + pool.mtx.Unlock() + + switch { + case maxRequestersCreated: // If we have enough requesters, wait for them to finish. + time.Sleep(requestInterval) + pool.removeTimedoutPeers() + case maxPeerHeightReached: // If we're caught up, wait for a bit so reactor could finish or a higher height is reported. + time.Sleep(requestInterval) + default: + pool.makeNextRequester(nextHeight) + // Sleep for a bit to make the requests more ordered. 
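+			// requestInterval is 10ms, so this also caps requester creation at roughly 100 per second.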
+ time.Sleep(requestInterval) + } + } +} + +func (pool *BlockPool) removeTimedoutPeers() { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + for _, peer := range pool.peers { + if !peer.didTimeout && peer.numPending > 0 { + curRate := peer.recvMonitor.Status().CurRate + // curRate can be 0 on start + if curRate != 0 && curRate < minRecvRate { + err := errors.New("peer is not sending us data fast enough") + pool.sendError(err, peer.id) + pool.Logger.Error("SendTimeout", "peer", peer.id, + "reason", err, + "curRate", fmt.Sprintf("%d KB/s", curRate/1024), + "minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024)) + peer.didTimeout = true + } + + peer.curRate = curRate + } + + if peer.didTimeout { + pool.removePeer(peer.id) + } + } + + for peerID := range pool.bannedPeers { + if !pool.isPeerBanned(peerID) { + delete(pool.bannedPeers, peerID) + } + } + + pool.sortPeers() +} + +// IsCaughtUp returns true if this node is caught up, false - otherwise. +// TODO: relax conditions, prevent abuse. +func (pool *BlockPool) IsCaughtUp() (isCaughtUp bool, height, maxPeerHeight int64) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + // Need at least 1 peer to be considered caught up. + if len(pool.peers) == 0 { + pool.Logger.Debug("Blockpool has no peers") + return false, pool.height, pool.maxPeerHeight + } + + // Some conditions to determine if we're caught up. + // Ensures we've either received a block or waited some amount of time, + // and that we're synced to the highest known height. + // Note we use maxPeerHeight - 1 because to sync block H requires block H+1 + // to verify the LastCommit. + receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second + ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1) + isCaughtUp = receivedBlockOrTimedOut && ourChainIsLongestAmongPeers + return isCaughtUp, pool.height, pool.maxPeerHeight +} + +// PeekTwoBlocks returns blocks at pool.height and pool.height+1. We need to +// see the second block's Commit to validate the first block. So we peek two +// blocks at a time. We return an extended commit, containing vote extensions +// and their associated signatures, as this is critical to consensus in ABCI++ +// as we switch from block sync to consensus mode. +// +// The caller will verify the commit. +func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.ExtendedCommit) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + if r := pool.requesters[pool.height]; r != nil { + first = r.getBlock() + firstExtCommit = r.getExtendedCommit() + } + if r := pool.requesters[pool.height+1]; r != nil { + second = r.getBlock() + } + return first, second, firstExtCommit +} + +// PopRequest removes the requester at pool.height and increments pool.height. +func (pool *BlockPool) PopRequest() { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + r := pool.requesters[pool.height] + if r == nil { + panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height)) + } + + if err := r.Stop(); err != nil { + pool.Logger.Error("Error stopping requester", "err", err) + } + delete(pool.requesters, pool.height) + pool.height++ + + // Notify the next minBlocksForSingleRequest requesters about new height, so + // they can potentially request a block from the second peer. 
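+	// Requesters further ahead keep a single in-flight request until the pool
+	// height catches up to within minBlocksForSingleRequest of their height.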
+ for i := int64(0); i < minBlocksForSingleRequest && i < int64(len(pool.requesters)); i++ { + pool.requesters[pool.height+i].newHeight(pool.height) + } +} + +// RemovePeerAndRedoAllPeerRequests retries the request at the given height and +// all the requests made to the same peer. The peer is removed from the pool. +// Returns the ID of the removed peer. +func (pool *BlockPool) RemovePeerAndRedoAllPeerRequests(height int64) nodekey.ID { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + request := pool.requesters[height] + peerID := request.gotBlockFromPeerID() + // RemovePeer will redo all requesters associated with this peer. + pool.removePeer(peerID) + pool.banPeer(peerID) + return peerID +} + +// RedoRequestFrom retries the request at the given height. It does not remove the +// peer. +func (pool *BlockPool) RedoRequestFrom(height int64, peerID nodekey.ID) { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + if requester, ok := pool.requesters[height]; ok { // If we requested this block + if requester.didRequestFrom(peerID) { // From this specific peer + requester.redo(peerID) + } + } +} + +// Deprecated: use RemovePeerAndRedoAllPeerRequests instead. +func (pool *BlockPool) RedoRequest(height int64) nodekey.ID { + return pool.RemovePeerAndRedoAllPeerRequests(height) +} + +// AddBlock validates that the block comes from the peer it was expected from +// and calls the requester to store it. +// +// This requires an extended commit at the same height as the supplied block - +// the block contains the last commit, but we need the latest commit in case we +// need to switch over from block sync to consensus at this height. If the +// height of the extended commit and the height of the block do not match, we +// do not add the block and return an error. +func (pool *BlockPool) AddBlock(peerID nodekey.ID, block *types.Block, extCommit *types.ExtendedCommit, blockSize int) error { + if extCommit != nil && block.Height != extCommit.Height { + err := fmt.Errorf("block height %d != extCommit height %d", block.Height, extCommit.Height) + // Peer sent us an invalid block => remove it. + pool.sendError(err, peerID) + return err + } + + pool.mtx.Lock() + defer pool.mtx.Unlock() + + requester := pool.requesters[block.Height] + if requester == nil { + // Because we're issuing 2nd requests for closer blocks, it's possible to + // receive a block we've already processed from a second peer. Hence, we + // can't punish it. But if the peer sent us a block we clearly didn't + // request, we disconnect. + if block.Height > pool.height || block.Height < pool.startHeight { + err := fmt.Errorf("peer sent us block #%d we didn't expect (current height: %d, start height: %d)", + block.Height, pool.height, pool.startHeight) + pool.sendError(err, peerID) + return err + } + + return fmt.Errorf("got an already committed block #%d (possibly from the slow peer %s)", block.Height, peerID) + } + + if !requester.setBlock(block, extCommit, peerID) { + err := fmt.Errorf("requested block #%d from %v, not %s", block.Height, requester.requestedFrom(), peerID) + pool.sendError(err, peerID) + return err + } + + peer := pool.peers[peerID] + if peer != nil { + peer.decrPending(blockSize) + } + + return nil +} + +// Height returns the pool's height. +func (pool *BlockPool) Height() int64 { + pool.mtx.Lock() + defer pool.mtx.Unlock() + return pool.height +} + +// MaxPeerHeight returns the highest reported height. 
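+// It is 0 when no peer has reported a height yet.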
+func (pool *BlockPool) MaxPeerHeight() int64 {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+	return pool.maxPeerHeight
+}
+
+// SetPeerRange sets the peer's alleged blockchain base and height.
+func (pool *BlockPool) SetPeerRange(peerID nodekey.ID, base int64, height int64) {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	peer := pool.peers[peerID]
+	if peer != nil {
+		peer.base = base
+		peer.height = height
+	} else {
+		if pool.isPeerBanned(peerID) {
+			pool.Logger.Debug("Ignoring banned peer", "peer", peerID)
+			return
+		}
+		peer = newBPPeer(pool, peerID, base, height)
+		peer.setLogger(pool.Logger.With("peer", peerID))
+		pool.peers[peerID] = peer
+		// no need to sort because curRate is 0 at start.
+		// just add to the beginning so it's picked first by pickIncrAvailablePeer.
+		pool.sortedPeers = append([]*bpPeer{peer}, pool.sortedPeers...)
+	}
+
+	if height > pool.maxPeerHeight {
+		pool.maxPeerHeight = height
+	}
+}
+
+// RemovePeer removes the peer with peerID from the pool. If there's no peer
+// with peerID, function is a no-op.
+func (pool *BlockPool) RemovePeer(peerID nodekey.ID) {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	pool.removePeer(peerID)
+}
+
+func (pool *BlockPool) removePeer(peerID nodekey.ID) {
+	for _, requester := range pool.requesters {
+		if requester.didRequestFrom(peerID) {
+			requester.redo(peerID)
+		}
+	}
+
+	peer, ok := pool.peers[peerID]
+	if ok {
+		if peer.timeout != nil {
+			peer.timeout.Stop()
+		}
+
+		delete(pool.peers, peerID)
+		for i, p := range pool.sortedPeers {
+			if p.id == peerID {
+				pool.sortedPeers = append(pool.sortedPeers[:i], pool.sortedPeers[i+1:]...)
+				break
+			}
+		}
+
+		// Find a new peer with the biggest height and update maxPeerHeight if the
+		// peer's height was the biggest.
+		if peer.height == pool.maxPeerHeight {
+			pool.updateMaxPeerHeight()
+		}
+	}
+}
+
+// If no peers are left, maxPeerHeight is set to 0.
+func (pool *BlockPool) updateMaxPeerHeight() {
+	var max int64
+	for _, peer := range pool.peers {
+		if peer.height > max {
+			max = peer.height
+		}
+	}
+	pool.maxPeerHeight = max
+}
+
+func (pool *BlockPool) isPeerBanned(peerID nodekey.ID) bool {
+	return cmttime.Since(pool.bannedPeers[peerID]) < time.Second*60
+}
+
+func (pool *BlockPool) banPeer(peerID nodekey.ID) {
+	pool.Logger.Debug("Banning peer", "peer", peerID)
+	pool.bannedPeers[peerID] = cmttime.Now()
+}
+
+// Picks an available peer that can serve the given height.
+// If no peers are available, returns nil.
+func (pool *BlockPool) pickIncrAvailablePeer(height int64, excludePeerID nodekey.ID) *bpPeer {
+	pool.mtx.Lock()
+	defer pool.mtx.Unlock()
+
+	for _, peer := range pool.sortedPeers {
+		if peer.id == excludePeerID {
+			continue
+		}
+		if peer.didTimeout {
+			pool.removePeer(peer.id)
+			continue
+		}
+		if peer.numPending >= maxPendingRequestsPerPeer {
+			continue
+		}
+		if height < peer.base || height > peer.height {
+			continue
+		}
+		peer.incrPending()
+		return peer
+	}
+
+	return nil
+}
+
+// Sort peers by curRate, highest first.
+//
+// CONTRACT: pool.mtx must be locked.
+func (pool *BlockPool) sortPeers() {
+	sort.Slice(pool.sortedPeers, func(i, j int) bool {
+		return pool.sortedPeers[i].curRate > pool.sortedPeers[j].curRate
+	})
+}
+
+func (pool *BlockPool) makeNextRequester(nextHeight int64) {
+	pool.mtx.Lock()
+	request := newBPRequester(pool, nextHeight)
+	pool.requesters[nextHeight] = request
+	pool.mtx.Unlock()
+
+	if err := request.Start(); err != nil {
+		request.Logger.Error("Error starting request", "err", err)
+	}
+}
+
+// thread-safe.
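+// Note that the send on requestsCh can block until the reactor reads the request.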
+func (pool *BlockPool) sendRequest(height int64, peerID nodekey.ID) { + if !pool.IsRunning() { + return + } + pool.requestsCh <- BlockRequest{height, peerID} +} + +// thread-safe. +func (pool *BlockPool) sendError(err error, peerID nodekey.ID) { + if !pool.IsRunning() { + return + } + pool.errorsCh <- peerError{err, peerID} +} + +// for debugging purposes +// +//nolint:unused +func (pool *BlockPool) debug() string { + pool.mtx.Lock() + defer pool.mtx.Unlock() + + str := "" + nextHeight := pool.height + int64(len(pool.requesters)) + for h := pool.height; h < nextHeight; h++ { + if pool.requesters[h] == nil { + str += fmt.Sprintf("H(%v):X ", h) + } else { + str += fmt.Sprintf("H(%v):", h) + str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil) + str += fmt.Sprintf("C?(%v) ", pool.requesters[h].extCommit != nil) + } + } + return str +} + +// ------------------------------------- + +type bpPeer struct { + didTimeout bool + curRate int64 + numPending int32 + height int64 + base int64 + pool *BlockPool + id nodekey.ID + recvMonitor *flow.Monitor + + timeout *time.Timer + + logger log.Logger +} + +func newBPPeer(pool *BlockPool, peerID nodekey.ID, base int64, height int64) *bpPeer { + peer := &bpPeer{ + pool: pool, + id: peerID, + base: base, + height: height, + numPending: 0, + logger: log.NewNopLogger(), + } + return peer +} + +func (peer *bpPeer) setLogger(l log.Logger) { + peer.logger = l +} + +func (peer *bpPeer) resetMonitor() { + peer.recvMonitor = flow.New(time.Second, time.Second*40) + initialValue := float64(minRecvRate) * math.E + peer.recvMonitor.SetREMA(initialValue) +} + +func (peer *bpPeer) resetTimeout() { + if peer.timeout == nil { + peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout) + } else { + peer.timeout.Reset(peerTimeout) + } +} + +func (peer *bpPeer) incrPending() { + if peer.numPending == 0 { + peer.resetMonitor() + peer.resetTimeout() + } + peer.numPending++ +} + +func (peer *bpPeer) decrPending(recvSize int) { + peer.numPending-- + if peer.numPending == 0 { + peer.timeout.Stop() + } else { + peer.recvMonitor.Update(recvSize) + peer.resetTimeout() + } +} + +func (peer *bpPeer) onTimeout() { + peer.pool.mtx.Lock() + defer peer.pool.mtx.Unlock() + + err := errors.New("peer did not send us anything") + peer.pool.sendError(err, peer.id) + peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout) + peer.didTimeout = true +} + +// ------------------------------------- + +// bpRequester requests a block from a peer. +// +// If the height is within minBlocksForSingleRequest blocks of the pool's +// height, it will send an additional request to another peer. This is to avoid +// a situation where blocksync is stuck because of a single slow peer. Note +// that it's okay to send a single request when the requested height is far +// from the pool's height. If the peer is slow, it will timeout and be replaced +// with another peer. 
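+//
+// Its lifecycle is driven by requestRoutine, which reacts to redo signals,
+// retry timeouts, new pool heights, and received blocks.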
+type bpRequester struct {
+	service.BaseService
+
+	pool        *BlockPool
+	height      int64
+	gotBlockCh  chan struct{}
+	redoCh      chan nodekey.ID // a redo may arrive more than once; the peer ID identifies which request to repeat
+	newHeightCh chan int64
+
+	mtx          cmtsync.Mutex
+	peerID       nodekey.ID
+	secondPeerID nodekey.ID // alternative peer to request from (if close to pool's height)
+	gotBlockFrom nodekey.ID
+	block        *types.Block
+	extCommit    *types.ExtendedCommit
+}
+
+func newBPRequester(pool *BlockPool, height int64) *bpRequester {
+	bpr := &bpRequester{
+		pool:        pool,
+		height:      height,
+		gotBlockCh:  make(chan struct{}, 1),
+		redoCh:      make(chan nodekey.ID, 1),
+		newHeightCh: make(chan int64, 1),
+
+		peerID:       "",
+		secondPeerID: "",
+		block:        nil,
+	}
+	bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr)
+	return bpr
+}
+
+func (bpr *bpRequester) OnStart() error {
+	go bpr.requestRoutine()
+	return nil
+}
+
+// Returns true if the peer(s) match and block doesn't already exist.
+func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID nodekey.ID) bool {
+	bpr.mtx.Lock()
+	if bpr.peerID != peerID && bpr.secondPeerID != peerID {
+		bpr.mtx.Unlock()
+		return false
+	}
+	if bpr.block != nil {
+		bpr.mtx.Unlock()
+		return true // getting a block from both peers is not an error
+	}
+
+	bpr.block = block
+	bpr.extCommit = extCommit
+	bpr.gotBlockFrom = peerID
+	bpr.mtx.Unlock()
+
+	select {
+	case bpr.gotBlockCh <- struct{}{}:
+	default:
+	}
+	return true
+}
+
+func (bpr *bpRequester) getBlock() *types.Block {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+	return bpr.block
+}
+
+func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+	return bpr.extCommit
+}
+
+// Returns the IDs of peers we've requested a block from.
+func (bpr *bpRequester) requestedFrom() []nodekey.ID {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+	peerIDs := make([]nodekey.ID, 0, 2)
+	if bpr.peerID != "" {
+		peerIDs = append(peerIDs, bpr.peerID)
+	}
+	if bpr.secondPeerID != "" {
+		peerIDs = append(peerIDs, bpr.secondPeerID)
+	}
+	return peerIDs
+}
+
+// Returns true if we've requested a block from the given peer.
+func (bpr *bpRequester) didRequestFrom(peerID nodekey.ID) bool {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+	return bpr.peerID == peerID || bpr.secondPeerID == peerID
+}
+
+// Returns the ID of the peer who sent us the block.
+func (bpr *bpRequester) gotBlockFromPeerID() nodekey.ID {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+	return bpr.gotBlockFrom
+}
+
+// Removes the block (IF we got it from the given peer) and resets the peer.
+func (bpr *bpRequester) reset(peerID nodekey.ID) (removedBlock bool) {
+	bpr.mtx.Lock()
+	defer bpr.mtx.Unlock()
+
+	// Only remove the block if we got it from that peer.
+	if bpr.gotBlockFrom == peerID {
+		bpr.block = nil
+		bpr.extCommit = nil
+		bpr.gotBlockFrom = ""
+		removedBlock = true
+	}
+
+	if bpr.peerID == peerID {
+		bpr.peerID = ""
+	} else {
+		bpr.secondPeerID = ""
+	}
+
+	return removedBlock
+}
+
+// Tells bpRequester to pick another peer and try again.
+// NOTE: Nonblocking, and does nothing if another redo
+// was already requested.
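+// (redoCh has capacity 1, so an extra redo is simply dropped.)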
+func (bpr *bpRequester) redo(peerID nodekey.ID) {
+	select {
+	case bpr.redoCh <- peerID:
+	default:
+	}
+}
+
+func (bpr *bpRequester) pickPeerAndSendRequest() {
+	bpr.mtx.Lock()
+	secondPeerID := bpr.secondPeerID
+	bpr.mtx.Unlock()
+
+	var peer *bpPeer
+PICK_PEER_LOOP:
+	for {
+		if !bpr.IsRunning() || !bpr.pool.IsRunning() {
+			return
+		}
+		peer = bpr.pool.pickIncrAvailablePeer(bpr.height, secondPeerID)
+		if peer == nil {
+			bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
+			time.Sleep(requestInterval)
+			continue PICK_PEER_LOOP
+		}
+		break PICK_PEER_LOOP
+	}
+	bpr.mtx.Lock()
+	bpr.peerID = peer.id
+	bpr.mtx.Unlock()
+
+	bpr.pool.sendRequest(bpr.height, peer.id)
+}
+
+// Picks a second peer and sends a request to it. If the second peer is already
+// set, does nothing.
+func (bpr *bpRequester) pickSecondPeerAndSendRequest() (picked bool) {
+	bpr.mtx.Lock()
+	if bpr.secondPeerID != "" {
+		bpr.mtx.Unlock()
+		return false
+	}
+	peerID := bpr.peerID
+	bpr.mtx.Unlock()
+
+	secondPeer := bpr.pool.pickIncrAvailablePeer(bpr.height, peerID)
+	if secondPeer != nil {
+		bpr.mtx.Lock()
+		bpr.secondPeerID = secondPeer.id
+		bpr.mtx.Unlock()
+
+		bpr.pool.sendRequest(bpr.height, secondPeer.id)
+		return true
+	}
+
+	return false
+}
+
+// Informs the requester of a new pool's height.
+func (bpr *bpRequester) newHeight(height int64) {
+	select {
+	case bpr.newHeightCh <- height:
+	default:
+	}
+}
+
+// Responsible for making requests (and re-requests) as necessary.
+// Runs until the requester or the underlying pool is stopped.
+func (bpr *bpRequester) requestRoutine() {
+	gotBlock := false
+
+OUTER_LOOP:
+	for {
+		bpr.pickPeerAndSendRequest()
+
+		poolHeight := bpr.pool.Height()
+		if bpr.height-poolHeight < minBlocksForSingleRequest {
+			bpr.pickSecondPeerAndSendRequest()
+		}
+
+		retryTimer := time.NewTimer(requestRetrySeconds * time.Second)
+		defer retryTimer.Stop()
+
+		for {
+			select {
+			case <-bpr.pool.Quit():
+				if err := bpr.Stop(); err != nil {
+					bpr.Logger.Error("Error stopping requester", "err", err)
+				}
+				return
+			case <-bpr.Quit():
+				return
+			case <-retryTimer.C:
+				if !gotBlock {
+					bpr.Logger.Debug("Retrying block request(s) after timeout", "height", bpr.height, "peer", bpr.peerID, "secondPeerID", bpr.secondPeerID)
+					bpr.reset(bpr.peerID)
+					bpr.reset(bpr.secondPeerID)
+					continue OUTER_LOOP
+				}
+			case peerID := <-bpr.redoCh:
+				if bpr.didRequestFrom(peerID) {
+					removedBlock := bpr.reset(peerID)
+					if removedBlock {
+						gotBlock = false
+					}
+				}
+				// If both peers returned NoBlockResponse or bad block, reschedule both
+				// requests. If not, wait for the other peer.
+				if len(bpr.requestedFrom()) == 0 {
+					retryTimer.Stop()
+					continue OUTER_LOOP
+				}
+			case newHeight := <-bpr.newHeightCh:
+				if !gotBlock && bpr.height-newHeight < minBlocksForSingleRequest {
+					// The operation is a noop if the second peer is already set. The cost is checking a mutex.
+					//
+					// If the second peer was just set, reset the retryTimer to give the
+					// second peer a chance to respond.
+					if picked := bpr.pickSecondPeerAndSendRequest(); picked {
+						_ = retryTimer.Stop()
+						retryTimer.Reset(requestRetrySeconds * time.Second)
+					}
+				}
+			case <-bpr.gotBlockCh:
+				gotBlock = true
+				// We got a block!
+				// Continue the for-loop and wait until Quit.
+			}
+		}
+	}
+}
+
+// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
+// delivering the block.
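+// Requests are emitted on the pool's requestsCh and served by the reactor.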
+// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
+// delivering the block.
+type BlockRequest struct {
+	Height int64
+	PeerID nodekey.ID
+}
diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go
new file mode 100644
index 00000000000..42b54a027ff
--- /dev/null
+++ b/internal/blocksync/pool_test.go
@@ -0,0 +1,385 @@
+package blocksync
+
+import (
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	cmtrand "github.com/cometbft/cometbft/internal/rand"
+	"github.com/cometbft/cometbft/libs/log"
+	"github.com/cometbft/cometbft/p2p/nodekey"
+	"github.com/cometbft/cometbft/types"
+)
+
+func init() {
+	peerTimeout = 2 * time.Second
+}
+
+type testPeer struct {
+	id        nodekey.ID
+	base      int64
+	height    int64
+	inputChan chan inputData // make sure each peer's data is sequential
+	malicious bool
+}
+
+type inputData struct {
+	t       *testing.T
+	pool    *BlockPool
+	request BlockRequest
+}
+
+// Malicious node parameters.
+const (
+	MaliciousLie               = 5 // how many blocks above its real height the malicious node claims to have
+	BlackholeSize              = 3 // how many blocks above its real height the malicious node will not return (missing)
+	MaliciousTestMaximumLength = 5 * time.Minute
+)
+
+func (p testPeer) runInputRoutine() {
+	go func() {
+		for input := range p.inputChan {
+			p.simulateInput(input)
+		}
+	}()
+}
+
+// When a block request arrives, pretend we got the block immediately.
+func (p testPeer) simulateInput(input inputData) {
+	block := &types.Block{Header: types.Header{Height: input.request.Height}, LastCommit: &types.Commit{}} // real blocks have LastCommit
+	extCommit := &types.ExtendedCommit{
+		Height: input.request.Height,
+	}
+	// If this peer is malicious
+	if p.malicious {
+		realHeight := p.height - MaliciousLie
+		// And the requested height is above the real height
+		if input.request.Height > realHeight {
+			// Then provide a fake block
+			block.LastCommit = nil // Fake block, no LastCommit
+			// or provide no block at all, if we are close to the real height
+			if input.request.Height <= realHeight+BlackholeSize {
+				input.pool.RedoRequestFrom(input.request.Height, p.id)
+				return
+			}
+		}
+	}
+	err := input.pool.AddBlock(input.request.PeerID, block, extCommit, 123)
+	if !p.malicious {
+		require.NoError(input.t, err)
+	}
+	// TODO: uncommenting this creates a race which is detected by:
+	// https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
+	// see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
+	// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
+}
+
+type testPeers map[nodekey.ID]*testPeer
+
+func (ps testPeers) start() {
+	for _, v := range ps {
+		v.runInputRoutine()
+	}
+}
+
+func (ps testPeers) stop() {
+	for _, v := range ps {
+		close(v.inputChan)
+	}
+}
+
+func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
+	peers := make(testPeers, numPeers)
+	for i := 0; i < numPeers; i++ {
+		peerID := nodekey.ID(cmtrand.Str(12))
+		height := minHeight + cmtrand.Int63n(maxHeight-minHeight)
+		base := minHeight + int64(i)
+		if base > height {
+			base = height
+		}
+		peers[peerID] = &testPeer{peerID, base, height, make(chan inputData, 10), false}
+	}
+	return peers
+}
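The tests below all drive the pool with the same consumer shape: peek two consecutive blocks, judge the first by the second's `LastCommit`, then pop. A reduced sketch of that loop, assuming the surrounding package's imports and a started pool (not a complete test):

```go
// startVerifier launches the consumer loop used throughout these tests:
// a sketch under the assumption that pool has already been started.
func startVerifier(pool *BlockPool) {
	go func() {
		for pool.IsRunning() {
			first, second, _ := pool.PeekTwoBlocks()
			if first == nil || second == nil {
				time.Sleep(10 * time.Millisecond) // nothing to verify yet
				continue
			}
			// In these tests a nil LastCommit marks a fake block: ban the
			// sender and re-request instead of popping.
			if second.LastCommit == nil {
				pool.RemovePeerAndRedoAllPeerRequests(second.Height)
				continue
			}
			pool.PopRequest() // first is now considered verified
		}
	}()
}
```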
+func TestBlockPoolBasic(t *testing.T) {
+	var (
+		start      = int64(42)
+		peers      = makePeers(10, start, 1000)
+		errorsCh   = make(chan peerError, 10)
+		requestsCh = make(chan BlockRequest)
+	)
+
+	pool := NewBlockPool(start, requestsCh, errorsCh)
+	pool.SetLogger(log.TestingLogger())
+
+	err := pool.Start()
+	if err != nil {
+		t.Error(err)
+	}
+
+	t.Cleanup(func() {
+		if err := pool.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	for _, peer := range peers {
+		pool.SetPeerRange(peer.id, peer.base, peer.height)
+	}
+
+	peers.start()
+	defer peers.stop()
+
+	// Start a goroutine to pull blocks
+	go func() {
+		for {
+			if !pool.IsRunning() {
+				return
+			}
+			first, second, _ := pool.PeekTwoBlocks()
+			if first != nil && second != nil {
+				pool.PopRequest()
+			} else {
+				time.Sleep(10 * time.Millisecond)
+			}
+		}
+	}()
+
+	// Pull from channels
+	for {
+		select {
+		case err := <-errorsCh:
+			t.Error(err)
+		case request := <-requestsCh:
+			t.Logf("Pulled new BlockRequest %v", request)
+			if request.Height == 300 {
+				return // Done!
+			}
+			peers[request.PeerID].inputChan <- inputData{t, pool, request}
+		case <-time.After(10 * time.Second):
+			t.Error("Timed out waiting for block requests")
+			return
+		}
+	}
+}
+
+func TestBlockPoolTimeout(t *testing.T) {
+	var (
+		start      = int64(42)
+		peers      = makePeers(10, start, 1000)
+		errorsCh   = make(chan peerError, 10)
+		requestsCh = make(chan BlockRequest)
+	)
+
+	pool := NewBlockPool(start, requestsCh, errorsCh)
+	pool.SetLogger(log.TestingLogger())
+
+	err := pool.Start()
+	if err != nil {
+		t.Error(err)
+	}
+
+	t.Cleanup(func() {
+		if err := pool.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	for _, peer := range peers {
+		pool.SetPeerRange(peer.id, peer.base, peer.height)
+	}
+
+	// Start a goroutine to pull blocks
+	go func() {
+		for {
+			if !pool.IsRunning() {
+				return
+			}
+			first, second, _ := pool.PeekTwoBlocks()
+			if first != nil && second != nil {
+				pool.PopRequest()
+			} else {
+				time.Sleep(10 * time.Millisecond)
+			}
+		}
+	}()
+
+	// Pull from channels
+	counter := 0
+	timedOut := map[nodekey.ID]struct{}{}
+	for {
+		select {
+		case err := <-errorsCh:
+			t.Log(err)
+			// treat every error as a timeout here; count each peer only once
+			if _, ok := timedOut[err.peerID]; !ok {
+				timedOut[err.peerID] = struct{}{}
+				counter++
+				if counter == len(peers) {
+					return // Done!
+				}
+			}
+		case request := <-requestsCh:
+			t.Logf("Pulled new BlockRequest %+v", request)
+		case <-time.After(10 * time.Second):
+			t.Error("Timed out waiting for block requests")
+			return
+		}
+	}
+}
+
+func TestBlockPoolRemovePeer(t *testing.T) {
+	peers := make(testPeers, 10)
+	for i := 0; i < 10; i++ {
+		peerID := nodekey.ID(strconv.Itoa(i + 1))
+		height := int64(i + 1)
+		peers[peerID] = &testPeer{peerID, 0, height, make(chan inputData), false}
+	}
+	requestsCh := make(chan BlockRequest)
+	errorsCh := make(chan peerError, 10)
+
+	pool := NewBlockPool(1, requestsCh, errorsCh)
+	pool.SetLogger(log.TestingLogger())
+	err := pool.Start()
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		if err := pool.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	// add peers
+	for peerID, peer := range peers {
+		pool.SetPeerRange(peerID, peer.base, peer.height)
+	}
+	assert.EqualValues(t, 10, pool.MaxPeerHeight())
+
+	// remove a non-existent peer
+	assert.NotPanics(t, func() { pool.RemovePeer(nodekey.ID("Superman")) })
+
+	// remove the peer with the highest height
+	pool.RemovePeer(nodekey.ID("10"))
+	assert.EqualValues(t, 9, pool.MaxPeerHeight())
+
+	// remove all peers
+	for peerID := range peers {
+		pool.RemovePeer(peerID)
+	}
+
+	assert.EqualValues(t, 0, pool.MaxPeerHeight())
+}
+
+func TestBlockPoolMaliciousNode(t *testing.T) {
+	// Setup:
+	// * Each peer has blocks 1..N, but the malicious peer reports 1..N+5 (blocks N+1,N+2,N+3 missing; N+4,N+5 fake).
+	// * The malicious peer is ahead of the network, but not by much, so it does not get dropped from the pool
+	//   with a timeout error. (If a peer does not send blocks for 2 seconds, it is disconnected.)
+	// * The network creates new blocks every second. The malicious peer also gets ahead with another fake block.
+	// * The pool verifies blocks every half second. This ensures that the pool catches up with the network.
+	// * When the pool encounters a fake block sent by the malicious peer and has the previous block from a good peer,
+	//   it can prove that the block is fake. The malicious peer gets banned, together with the sender of the previous (valid) block.
+	// Additional notes:
+	// * After a minute of ban, the malicious peer is unbanned. If the pool IsCaughtUp() by then and consensus started,
+	//   there is no impact. If blocksync did not catch up yet, the malicious peer can continue its lie until the next ban.
+	// * The pool has an initial 3-second spin-up time before it starts verifying peers, so peers have a chance to
+	//   connect. If the initial height is 7 and blocks are created at 1/second, verification will start at height 10.
+	// * Testing with height 7 exercises the main functionality of banning a malicious peer.
+	//   Testing with height 127, a malicious peer can reconnect, and the subsequent banning is also tested.
+	//   That takes a couple of minutes to complete, so we don't run it.
+	const initialHeight = 7
+	peers := testPeers{
+		nodekey.ID("good"):  &testPeer{nodekey.ID("good"), 1, initialHeight, make(chan inputData), false},
+		nodekey.ID("bad"):   &testPeer{nodekey.ID("bad"), 1, initialHeight + MaliciousLie, make(chan inputData), true},
+		nodekey.ID("good1"): &testPeer{nodekey.ID("good1"), 1, initialHeight, make(chan inputData), false},
+	}
+	errorsCh := make(chan peerError, 3)
+	requestsCh := make(chan BlockRequest)
+
+	pool := NewBlockPool(1, requestsCh, errorsCh)
+	pool.SetLogger(log.TestingLogger())
+
+	err := pool.Start()
+	if err != nil {
+		t.Error(err)
+	}
+
+	t.Cleanup(func() {
+		if err := pool.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	peers.start()
+	t.Cleanup(func() { peers.stop() })
+
+	// Simulate blocks being created on each peer regularly and update the pool's max height.
+	go func() {
+		// Introduce each peer
+		for _, peer := range peers {
+			pool.SetPeerRange(peer.id, peer.base, peer.height)
+		}
+		for {
+			time.Sleep(1 * time.Second) // Speed of new block creation
+			for _, peer := range peers {
+				peer.height++                                      // Network height increases on all peers
+				pool.SetPeerRange(peer.id, peer.base, peer.height) // Tell the pool that a new height is available
+			}
+		}
+	}()
+
+	// Start a goroutine to verify blocks
+	go func() {
+		for {
+			time.Sleep(500 * time.Millisecond) // Speed of block verification
+			if !pool.IsRunning() {
+				return
+			}
+			first, second, _ := pool.PeekTwoBlocks()
+			if first != nil && second != nil {
+				if second.LastCommit == nil {
+					// Second block is fake
+					pool.RemovePeerAndRedoAllPeerRequests(second.Height)
+				} else {
+					pool.PopRequest()
+				}
+			}
+		}
+	}()
+
+	testTicker := time.NewTicker(200 * time.Millisecond) // speed of test execution
+	t.Cleanup(func() { testTicker.Stop() })
+
+	bannedOnce := false // true when the malicious peer was banned at least once
+	startTime := time.Now()
+
+	// Pull from channels
+	for {
+		select {
+		case err := <-errorsCh:
+			if err.peerID == "bad" { // ignore errors from the malicious peer
+				t.Log(err)
+			} else {
+				t.Error(err)
+			}
+		case request := <-requestsCh:
+			// Process request
+			peers[request.PeerID].inputChan <- inputData{t, pool, request}
+		case <-testTicker.C:
+			banned := pool.isPeerBanned("bad")
+			bannedOnce = bannedOnce || banned // Keep bannedOnce true, even if the malicious peer gets unbanned
+			caughtUp, _, _ := pool.IsCaughtUp()
+			// Success: the pool caught up and the malicious peer was banned at least once
+			if caughtUp && bannedOnce {
+				t.Logf("Pool caught up, malicious peer was banned at least once, start consensus.")
+				return
+			}
+			// Failure: the pool caught up without banning the bad peer at least once
+			require.False(t, caughtUp, "Network caught up without banning the malicious peer at least once.")
+			// Failure: the network could not catch up in the allotted time
+			require.True(t, time.Since(startTime) < MaliciousTestMaximumLength, "Network ran too long, stopping test.")
+		}
+	}
+}
diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go
new file mode 100644
index 00000000000..be5f426a922
--- /dev/null
+++ b/internal/blocksync/reactor.go
@@ -0,0 +1,608 @@
+package blocksync
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	bcproto "github.com/cometbft/cometbft/api/cometbft/blocksync/v1"
+	"github.com/cometbft/cometbft/crypto"
+	"github.com/cometbft/cometbft/libs/log"
+	"github.com/cometbft/cometbft/p2p"
+	"github.com/cometbft/cometbft/p2p/nodekey"
+	tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn"
+	sm "github.com/cometbft/cometbft/state"
+	"github.com/cometbft/cometbft/store"
+	"github.com/cometbft/cometbft/types"
+)
+
+const (
+	// BlocksyncChannel is a channel for blocks and status updates (`BlockStore` height).
+	BlocksyncChannel = byte(0x40)
+
+	trySyncIntervalMS = 10
+
+	// stop syncing when the last block's time is
+	// within this much of the system time.
+	// stopSyncingDurationMinutes = 10.
+
+	// ask for the best height every 10s.
+	statusUpdateIntervalSeconds = 10
+	// check if we should switch to the consensus reactor.
+	switchToConsensusIntervalSeconds = 1
+)
+
+type consensusReactor interface {
+	// for when we switch from the blocksync reactor and block sync to
+	// the consensus machine
+	SwitchToConsensus(state sm.State, skipWAL bool)
+}
+
+type mempoolReactor interface {
+	// for when we finish doing block sync or state sync
+	EnableInOutTxs()
+}
+
+type peerError struct {
+	err    error
+	peerID nodekey.ID
+}
+
+func (e peerError) Error() string {
+	return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
+}
+
+// Reactor handles long-term catchup syncing.
+type Reactor struct {
+	p2p.BaseReactor
+
+	// immutable
+	initialState sm.State
+
+	blockExec     *sm.BlockExecutor
+	store         sm.BlockStore
+	pool          *BlockPool
+	blockSync     bool
+	localAddr     crypto.Address
+	poolRoutineWg sync.WaitGroup
+
+	requestsCh <-chan BlockRequest
+	errorsCh   <-chan peerError
+
+	switchToConsensusMs int
+
+	metrics *Metrics
+}
+
+// NewReactor returns a new reactor instance.
+func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
+	blockSync bool, localAddr crypto.Address, metrics *Metrics, offlineStateSyncHeight int64,
+) *Reactor {
+	storeHeight := store.Height()
+	if storeHeight == 0 {
+		// If state sync was performed offline and the stores were bootstrapped to height H,
+		// the state store's lastHeight will be H while the blockstore's Height and Base are still 0.
+		// 1. This scenario should not lead to a panic; it is indicated by
+		//    offlineStateSyncHeight > 0.
+		// 2. We need to instruct the blocksync reactor to start fetching blocks from H+1
+		//    instead of 0.
+		storeHeight = offlineStateSyncHeight
+	}
+	if state.LastBlockHeight != storeHeight {
+		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch, stores were left in an inconsistent state", state.LastBlockHeight,
+			storeHeight))
+	}
+
+	// It's okay to block since sendRequest is called from a separate goroutine
+	// (bpRequester#requestRoutine; one per requested height).
+	requestsCh := make(chan BlockRequest)
+
+	const capacity = 1000                      // must be bigger than the peer count
+	errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
+
+	startHeight := storeHeight + 1
+	if startHeight == 1 {
+		startHeight = state.InitialHeight
+	}
+	pool := NewBlockPool(startHeight, requestsCh, errorsCh)
+
+	bcR := &Reactor{
+		initialState: state,
+		blockExec:    blockExec,
+		store:        store,
+		pool:         pool,
+		blockSync:    blockSync,
+		localAddr:    localAddr,
+		requestsCh:   requestsCh,
+		errorsCh:     errorsCh,
+		metrics:      metrics,
+	}
+	bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR)
+	return bcR
+}
+
+// SetLogger implements service.Service by setting the logger on the reactor and pool.
+func (bcR *Reactor) SetLogger(l log.Logger) {
+	bcR.BaseService.Logger = l
+	bcR.pool.Logger = l
+}
+
+// OnStart implements service.Service.
+func (bcR *Reactor) OnStart() error {
+	if bcR.blockSync {
+		return bcR.startPool(false)
+	}
+	return nil
+}
+
+// SwitchToBlockSync is called by the statesync reactor when switching to blocksync.
+func (bcR *Reactor) SwitchToBlockSync(state sm.State) error { + bcR.blockSync = true + + if !state.IsEmpty() { // if we have a state, start from there + bcR.initialState = state + bcR.pool.height = state.LastBlockHeight + 1 + return bcR.startPool(true) + } + + // if we don't have a state due to an error or a timeout, start from genesis. + return bcR.startPool(false) +} + +func (bcR *Reactor) startPool(stateSynced bool) error { + err := bcR.pool.Start() + if err != nil { + return err + } + bcR.poolRoutineWg.Add(1) + go func() { + defer bcR.poolRoutineWg.Done() + bcR.poolRoutine(stateSynced) + }() + return nil +} + +// OnStop implements service.Service. +func (bcR *Reactor) OnStop() { + if bcR.blockSync { + if err := bcR.pool.Stop(); err != nil { + bcR.Logger.Error("Error stopping pool", "err", err) + } + bcR.poolRoutineWg.Wait() + } +} + +// StreamDescriptors implements Reactor. +func (*Reactor) StreamDescriptors() []p2p.StreamDescriptor { + return []p2p.StreamDescriptor{ + &tcpconn.ChannelDescriptor{ + ID: BlocksyncChannel, + Priority: 5, + SendQueueCapacity: 1000, + RecvBufferCapacity: 50 * 4096, + RecvMessageCapacity: MaxMsgSize, + MessageTypeI: &bcproto.Message{}, + }, + } +} + +// AddPeer implements Reactor by sending our state to peer. +func (bcR *Reactor) AddPeer(peer p2p.Peer) { + peer.Send(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.StatusResponse{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }, + }) + // it's OK if send fails. will try later in poolRoutine + + // peer is added to the pool once we receive the first + // bcStatusResponseMessage from the peer and call pool.SetPeerRange +} + +// RemovePeer implements Reactor by removing peer from the pool. +func (bcR *Reactor) RemovePeer(peer p2p.Peer, _ any) { + bcR.pool.RemovePeer(peer.ID()) +} + +// respondToPeer loads a block and sends it to the requesting peer, +// if we have it. Otherwise, we'll respond saying we don't have it. 
+func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) {
+	block, _ := bcR.store.LoadBlock(msg.Height)
+	if block == nil {
+		bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
+		return src.TrySend(p2p.Envelope{
+			ChannelID: BlocksyncChannel,
+			Message:   &bcproto.NoBlockResponse{Height: msg.Height},
+		})
+	}
+
+	state, err := bcR.blockExec.Store().Load()
+	if err != nil {
+		bcR.Logger.Error("loading state", "err", err)
+		return false
+	}
+	var extCommit *types.ExtendedCommit
+	if state.ConsensusParams.Feature.VoteExtensionsEnabled(msg.Height) {
+		extCommit = bcR.store.LoadBlockExtendedCommit(msg.Height)
+		if extCommit == nil {
+			bcR.Logger.Error("found block in store with no extended commit", "block", block)
+			return false
+		}
+	}
+
+	bl, err := block.ToProto()
+	if err != nil {
+		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
+		return false
+	}
+
+	return src.TrySend(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message: &bcproto.BlockResponse{
+			Block:     bl,
+			ExtCommit: extCommit.ToProto(),
+		},
+	})
+}
+
+func (bcR *Reactor) handlePeerResponse(msg *bcproto.BlockResponse, src p2p.Peer) {
+	bi, err := types.BlockFromProto(msg.Block)
+	if err != nil {
+		bcR.Logger.Error("Peer sent us invalid block", "peer", src, "msg", msg, "err", err)
+		bcR.Switch.StopPeerForError(src, err)
+		return
+	}
+	var extCommit *types.ExtendedCommit
+	if msg.ExtCommit != nil {
+		var err error
+		extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit)
+		if err != nil {
+			bcR.Logger.Error("failed to convert extended commit from proto",
+				"peer", src,
+				"err", err)
+			bcR.Switch.StopPeerForError(src, err)
+			return
+		}
+	}
+
+	if err := bcR.pool.AddBlock(src.ID(), bi, extCommit, msg.Block.Size()); err != nil {
+		bcR.Logger.Error("failed to add block", "peer", src, "err", err)
+	}
+}
+
+// Receive implements Reactor by handling five types of messages (see below).
+func (bcR *Reactor) Receive(e p2p.Envelope) {
+	if err := ValidateMsg(e.Message); err != nil {
+		bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
+		bcR.Switch.StopPeerForError(e.Src, err)
+		return
+	}
+
+	bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
+
+	switch msg := e.Message.(type) {
+	case *bcproto.BlockRequest:
+		bcR.respondToPeer(msg, e.Src)
+	case *bcproto.BlockResponse:
+		go bcR.handlePeerResponse(msg, e.Src)
+	case *bcproto.StatusRequest:
+		// Send peer our state.
+		e.Src.TrySend(p2p.Envelope{
+			ChannelID: BlocksyncChannel,
+			Message: &bcproto.StatusResponse{
+				Height: bcR.store.Height(),
+				Base:   bcR.store.Base(),
+			},
+		})
+	case *bcproto.StatusResponse:
+		// Got a peer status. Unverified.
+		bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
+	case *bcproto.NoBlockResponse:
+		bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
+		bcR.pool.RedoRequestFrom(msg.Height, e.Src.ID())
+	default:
+		bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
+	}
+}
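`poolRoutine` below re-triggers itself through the size-1 `didProcessCh`: after handling one block it primes the channel again, so the select loop drains work quickly without a busy loop, while `trySyncTicker` tops the channel up if it ever runs dry. A reduced standalone sketch of the idiom (illustrative names only):

```go
package main

import "fmt"

func main() {
	work := []string{"block 1", "block 2", "block 3"}
	didProcess := make(chan struct{}, 1)
	didProcess <- struct{}{} // prime once so the loop starts immediately

	for len(work) > 0 {
		<-didProcess
		fmt.Println("processing", work[0])
		work = work[1:]
		// Re-prime non-blockingly; with a buffer of 1 this never queues
		// duplicate wake-ups.
		select {
		case didProcess <- struct{}{}:
		default:
		}
	}
}
```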
+// Handle messages from the poolReactor telling the reactor what to do.
+// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
+func (bcR *Reactor) poolRoutine(stateSynced bool) {
+	bcR.metrics.Syncing.Set(1)
+	defer bcR.metrics.Syncing.Set(0)
+
+	var (
+		blocksSynced = uint64(0)
+		state        = bcR.initialState
+		lastHundred  = time.Now()
+		lastRate     = 0.0
+		didProcessCh = make(chan struct{}, 1)
+	)
+
+	go bcR.handleBlockRequestsRoutine()
+
+	if bcR.switchToConsensusMs == 0 {
+		bcR.switchToConsensusMs = switchToConsensusIntervalSeconds * 1000
+	}
+	switchToConsensusTicker := time.NewTicker(time.Duration(bcR.switchToConsensusMs) * time.Millisecond)
+	defer switchToConsensusTicker.Stop()
+
+	trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
+	defer trySyncTicker.Stop()
+
+	initialCommitHasExtensions := (bcR.initialState.LastBlockHeight > 0 && bcR.store.LoadBlockExtendedCommit(bcR.initialState.LastBlockHeight) != nil)
+
+FOR_LOOP:
+	for {
+		select {
+		case <-switchToConsensusTicker.C:
+			outbound, inbound, _ := bcR.Switch.NumPeers()
+			bcR.Logger.Debug("Consensus ticker", "outbound", outbound, "inbound", inbound, "lastHeight", state.LastBlockHeight)
+
+			if !initialCommitHasExtensions {
+				// Extensions may be required but missing; in that case, we cannot switch to consensus yet.
+				if bcR.isMissingExtension(state, blocksSynced) {
+					continue FOR_LOOP
+				}
+			}
+
+			if bcR.isCaughtUp(state, blocksSynced, stateSynced) {
+				break FOR_LOOP
+			}
+
+		case <-trySyncTicker.C: // chan time
+			select {
+			case didProcessCh <- struct{}{}:
+			default:
+			}
+
+		case <-didProcessCh:
+			// NOTE: It is a subtle mistake to process more than a single block
+			// at a time (e.g. 10) here, because we only TrySend 1 request per
+			// loop. The ratio mismatch can result in starving of blocks, a
+			// sudden burst of requests and responses, and repeat.
+			// Consequently, it is better to split these routines rather than
+			// coupling them as it's written here. TODO: uncouple from request
+			// routine.
+
+			// See if there are any blocks to sync.
+			first, second, extCommit := bcR.pool.PeekTwoBlocks()
+			if first == nil || second == nil {
+				// we need to have fetched two consecutive blocks in order to
+				// perform blocksync verification
+				continue FOR_LOOP
+			}
+			// Some sanity checks on heights
+			if state.LastBlockHeight > 0 && state.LastBlockHeight+1 != first.Height {
+				// Panicking because the block pool's height MUST keep consistent with the state; the block pool is totally under our control
+				panic(fmt.Errorf("peeked first block has unexpected height; expected %d, got %d", state.LastBlockHeight+1, first.Height))
+			}
+			if first.Height+1 != second.Height {
+				// Panicking because this is an obvious bug in the block pool, which is totally under our control
+				panic(fmt.Errorf("heights of first and second block are not consecutive; expected %d, got %d", first.Height+1, second.Height))
+			}
+
+			// Before priming didProcessCh for another check on the next
+			// iteration, break the loop if the BlockPool or the Reactor itself
+			// has quit. This avoids case ambiguity of the outer select when two
+			// channels are ready.
+			if !bcR.IsRunning() || !bcR.pool.IsRunning() {
+				break FOR_LOOP
+			}
+			// Try again quickly next loop.
+			didProcessCh <- struct{}{}
+
+			firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
+			if err != nil {
+				bcR.Logger.Error("failed to make part set",
+					"height", first.Height,
+					"err", err.Error())
+				break FOR_LOOP
+			}
+
+			if state, err = bcR.processBlock(first, second, firstParts, state, extCommit); err != nil {
+				bcR.Logger.Error("Invalid block", "height", first.Height, "err", err)
+				continue FOR_LOOP
+			}
+
+			blocksSynced++
+
+			if blocksSynced%100 == 0 {
+				_, height, maxPeerHeight := bcR.pool.IsCaughtUp()
+				lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
+				bcR.Logger.Info("Block Sync Rate", "height", height, "max_peer_height", maxPeerHeight, "blocks/s", lastRate)
+				lastHundred = time.Now()
+			}
+
+			continue FOR_LOOP
+
+		case <-bcR.Quit():
+			break FOR_LOOP
+		case <-bcR.pool.Quit():
+			break FOR_LOOP
+		}
+	}
+}
+
+// BroadcastStatusRequest broadcasts `BlockStore` base and height.
+func (bcR *Reactor) BroadcastStatusRequest() {
+	bcR.Switch.Broadcast(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message:   &bcproto.StatusRequest{},
+	})
+}
+
+func (bcR *Reactor) handleBlockRequest(request BlockRequest) {
+	peer := bcR.Switch.Peers().Get(request.PeerID)
+	if peer == nil {
+		return
+	}
+	queued := peer.TrySend(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message:   &bcproto.BlockRequest{Height: request.Height},
+	})
+	if !queued {
+		bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
+	}
+}
+
+func (bcR *Reactor) handleBlockRequestsRoutine() {
+	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
+	defer statusUpdateTicker.Stop()
+
+	for {
+		select {
+		case <-bcR.Quit():
+			return
+		case <-bcR.pool.Quit():
+			return
+		case request := <-bcR.requestsCh:
+			bcR.handleBlockRequest(request)
+		case err := <-bcR.errorsCh:
+			peer := bcR.Switch.Peers().Get(err.peerID)
+			if peer != nil {
+				bcR.Switch.StopPeerForError(peer, err)
+			}
+		case <-statusUpdateTicker.C:
+			// ask for status updates
+			go bcR.BroadcastStatusRequest()
+		}
+	}
+}
+
+func (bcR *Reactor) isMissingExtension(state sm.State, blocksSynced uint64) bool {
+	// The "if" statement below is a bit confusing, so here is a breakdown
+	// of its logic and purpose:
+	//
+	// If we are at genesis (no block in the chain), we don't need VoteExtensions
+	// because the first block's LastCommit is empty anyway.
+	//
+	// If VoteExtensions were disabled for the previous height, then we don't need
+	// VoteExtensions.
+	//
+	// If we have sync'd at least one block, then we are guaranteed to have extensions
+	// if we need them by the logic inside loop FOR_LOOP: it requires that the blocks
+	// it fetches have extensions if extensions were enabled during the height.
+	//
+	// If we already had extensions for the initial height (e.g. we are recovering),
+	// then we are guaranteed to have extensions for the last block (if required) even
+	// if we did not blocksync any block.
+ // + missingExtension := true + voteExtensionsDisabled := state.LastBlockHeight > 0 && !state.ConsensusParams.Feature.VoteExtensionsEnabled(state.LastBlockHeight) + if state.LastBlockHeight == 0 || voteExtensionsDisabled || blocksSynced > 0 { + missingExtension = false + } + + if missingExtension { + bcR.Logger.Info( + "no extended commit yet", + "last_block_height", state.LastBlockHeight, + "vote_extensions_disabled", voteExtensionsDisabled, + "blocks_synced", blocksSynced, + ) + } + + return missingExtension +} + +func (bcR *Reactor) isCaughtUp(state sm.State, blocksSynced uint64, stateSynced bool) bool { + if isCaughtUp, height, _ := bcR.pool.IsCaughtUp(); isCaughtUp || state.Validators.ValidatorBlocksTheChain(bcR.localAddr) { + bcR.Logger.Info("Time to switch to consensus mode!", "height", height) + if err := bcR.pool.Stop(); err != nil { + bcR.Logger.Error("Error stopping pool", "err", err) + } + if memR, ok := bcR.Switch.Reactor("MEMPOOL").(mempoolReactor); ok { + memR.EnableInOutTxs() + } + if conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor); ok { + conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) + } + // else { + // should only happen during testing + // } + return true + } + return false +} + +func (bcR *Reactor) processBlock(first, second *types.Block, firstParts *types.PartSet, state sm.State, extCommit *types.ExtendedCommit) (sm.State, error) { + var ( + chainID = bcR.initialState.ChainID + firstPartSetHeader = firstParts.Header() + firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} + ) + + // Finally, verify the first block using the second's commit + // NOTE: we can probably make this more efficient, but note that calling + // first.Hash() doesn't verify the tx contents, so MakePartSet() is + // currently necessary. + // TODO(sergio): Should we also validate against the extended commit? + err := state.Validators.VerifyCommitLight( + chainID, firstID, first.Height, second.LastCommit) + + if err == nil { + // validate the block before we persist it + err = bcR.blockExec.ValidateBlock(state, first) + } + + presentExtCommit := extCommit != nil + extensionsEnabled := state.ConsensusParams.Feature.VoteExtensionsEnabled(first.Height) + if presentExtCommit != extensionsEnabled { + err = fmt.Errorf("non-nil extended commit must be received iff vote extensions are enabled for its height "+ + "(height %d, non-nil extended commit %t, extensions enabled %t)", + first.Height, presentExtCommit, extensionsEnabled, + ) + } + if err == nil && extensionsEnabled { + // if vote extensions were required at this height, ensure they exist. + err = extCommit.EnsureExtensions(true) + } + + if err != nil { + peerID := bcR.pool.RemovePeerAndRedoAllPeerRequests(first.Height) + peer := bcR.Switch.Peers().Get(peerID) + if peer != nil { + // NOTE: we've already removed the peer's request, but we + // still need to clean up the rest. + bcR.Switch.StopPeerForError(peer, ErrReactorValidation{Err: err}) + } + peerID2 := bcR.pool.RemovePeerAndRedoAllPeerRequests(second.Height) + peer2 := bcR.Switch.Peers().Get(peerID2) + if peer2 != nil && peer2 != peer { + // NOTE: we've already removed the peer's request, but we + // still need to clean up the rest. + bcR.Switch.StopPeerForError(peer2, ErrReactorValidation{Err: err}) + } + return state, err + } + + // SUCCESS. Pop the block from the pool. 
+	bcR.pool.PopRequest()
+
+	// TODO: batch saves so we don't persist to disk every block
+	if extensionsEnabled {
+		bcR.store.SaveBlockWithExtendedCommit(first, firstParts, extCommit)
+	} else {
+		// We use LastCommit here instead of extCommit. extCommit is not
+		// guaranteed to be populated by the peer if extensions are not enabled.
+		// Currently, the peer should provide an extCommit even if the vote extension data are absent,
+		// but this may change, so using second.LastCommit is safer.
+		bcR.store.SaveBlock(first, firstParts, second.LastCommit)
+	}
+
+	// TODO: same thing for app - but we would need a way to
+	// get the hash without persisting the state
+	state, err = bcR.blockExec.ApplyVerifiedBlock(state, firstID, first, bcR.pool.MaxPeerHeight())
+	if err != nil {
+		// TODO This is bad, are we zombie?
+		panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
+	}
+
+	bcR.metrics.recordBlockMetrics(first)
+
+	return state, nil
+}
diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go
new file mode 100644
index 00000000000..cf728dd8dd9
--- /dev/null
+++ b/internal/blocksync/reactor_test.go
@@ -0,0 +1,582 @@
+package blocksync
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	dbm "github.com/cometbft/cometbft-db"
+	abci "github.com/cometbft/cometbft/abci/types"
+	bcproto "github.com/cometbft/cometbft/api/cometbft/blocksync/v1"
+	cfg "github.com/cometbft/cometbft/config"
+	"github.com/cometbft/cometbft/internal/test"
+	"github.com/cometbft/cometbft/libs/log"
+	mpmocks "github.com/cometbft/cometbft/mempool/mocks"
+	"github.com/cometbft/cometbft/p2p"
+	"github.com/cometbft/cometbft/proxy"
+	sm "github.com/cometbft/cometbft/state"
+	"github.com/cometbft/cometbft/store"
+	"github.com/cometbft/cometbft/types"
+	cmttime "github.com/cometbft/cometbft/types/time"
+)
+
+var config *cfg.Config
+
+func randGenesisDoc() (*types.GenesisDoc, []types.PrivValidator) {
+	minPower := int64(30)
+	numValidators := 1
+	validators := make([]types.GenesisValidator, numValidators)
+	privValidators := make([]types.PrivValidator, numValidators)
+	for i := 0; i < numValidators; i++ {
+		val, privVal := types.RandValidator(false, minPower)
+		validators[i] = types.GenesisValidator{
+			PubKey: val.PubKey,
+			Power:  val.VotingPower,
+		}
+		privValidators[i] = privVal
+	}
+	sort.Sort(types.PrivValidatorsByAddress(privValidators))
+
+	consPar := types.DefaultConsensusParams()
+	consPar.Feature.VoteExtensionsEnableHeight = 1
+	return &types.GenesisDoc{
+		GenesisTime:     cmttime.Now(),
+		ChainID:         test.DefaultTestChainID,
+		Validators:      validators,
+		ConsensusParams: consPar,
+	}, privValidators
+}
+
+type ReactorPair struct {
+	reactor *ByzantineReactor
+	app     proxy.AppConns
+}
+
+func newReactor(
+	t *testing.T,
+	logger log.Logger,
+	genDoc *types.GenesisDoc,
+	privVals []types.PrivValidator,
+	maxBlockHeight int64,
+	incorrectData ...int64,
+) ReactorPair {
+	t.Helper()
+	if len(privVals) != 1 {
+		panic("only support one validator")
+	}
+	var incorrectBlock int64
+	if len(incorrectData) > 0 {
+		incorrectBlock = incorrectData[0]
+	}
+
+	app := abci.NewBaseApplication()
+	cc := proxy.NewLocalClientCreator(app)
+	proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
+	err := proxyApp.Start()
+	if err != nil {
+		panic(fmt.Errorf("error starting app: %w", err))
+	}
+
+	blockDB := dbm.NewMemDB()
+	stateDB := dbm.NewMemDB()
+	
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
+	blockStore := store.NewBlockStore(blockDB)
+
+	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+	if err != nil {
+		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
+	}
+
+	mp := &mpmocks.Mempool{}
+	mp.On("Lock").Return()
+	mp.On("Unlock").Return()
+	mp.On("PreUpdate").Return()
+	mp.On("FlushAppConn", mock.Anything).Return(nil)
+	mp.On("Update",
+		mock.Anything,
+		mock.Anything,
+		mock.Anything,
+		mock.Anything,
+		mock.Anything,
+		mock.Anything).Return(nil)
+
+	// Make the Reactor itself.
+	// NOTE we have to create and commit the blocks first because
+	// pool.height is determined from the store.
+	blockSync := true
+	db := dbm.NewMemDB()
+	stateStore = sm.NewStore(db, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
+	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
+		mp, sm.EmptyEvidencePool{}, blockStore)
+	if err = stateStore.Save(state); err != nil {
+		panic(err)
+	}
+
+	// The commit we are building for the current height.
+	seenExtCommit := &types.ExtendedCommit{}
+
+	pubKey, err := privVals[0].GetPubKey()
+	if err != nil {
+		panic(err)
+	}
+	addr := pubKey.Address()
+	idx, _ := state.Validators.GetByAddress(addr)
+
+	// let's add some blocks in
+	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
+		voteExtensionIsEnabled := genDoc.ConsensusParams.Feature.VoteExtensionsEnabled(blockHeight)
+
+		lastExtCommit := seenExtCommit.Clone()
+
+		thisBlock := state.MakeBlock(blockHeight, nil, lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address)
+
+		thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
+		require.NoError(t, err)
+		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
+
+		// Simulate a commit for the current height
+		vote, err := types.MakeVote(
+			privVals[0],
+			thisBlock.Header.ChainID,
+			idx,
+			thisBlock.Header.Height,
+			0,
+			types.PrecommitType,
+			blockID,
+			cmttime.Now(),
+		)
+		if err != nil {
+			panic(err)
+		}
+		seenExtCommit = &types.ExtendedCommit{
+			Height:             vote.Height,
+			Round:              vote.Round,
+			BlockID:            blockID,
+			ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()},
+		}
+
+		state, err = blockExec.ApplyBlock(state, blockID, thisBlock, maxBlockHeight)
+		if err != nil {
+			panic(fmt.Errorf("error applying block: %w", err))
+		}
+
+		saveCorrectVoteExtensions := blockHeight != incorrectBlock
+		if saveCorrectVoteExtensions == voteExtensionIsEnabled {
+			blockStore.SaveBlockWithExtendedCommit(thisBlock, thisParts, seenExtCommit)
+		} else {
+			blockStore.SaveBlock(thisBlock, thisParts, seenExtCommit.ToCommit())
+		}
+	}
+
+	// As the tests only support one validator in the valSet, we pass a different address to bypass
+	// the `localNodeBlocksTheChain` check; namely, the tested node is not an active validator.
+ bcReactor := NewByzantineReactor(incorrectBlock, NewReactor(state.Copy(), blockExec, blockStore, blockSync, []byte("anotherAddress"), NopMetrics(), 0)) + bcReactor.SetLogger(logger.With("module", "blocksync")) + + return ReactorPair{bcReactor, proxyApp} +} + +func TestNoBlockResponse(t *testing.T) { + config = test.ResetTestRoot("blocksync_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc() + + maxBlockHeight := int64(65) + + reactorPairs := make([]ReactorPair, 2) + + reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight) + reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + + p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor) + return s + }, p2p.Connect2Switches) + + defer func() { + for _, r := range reactorPairs { + err := r.reactor.Stop() + require.NoError(t, err) + err = r.app.Stop() + require.NoError(t, err) + } + }() + + tests := []struct { + height int64 + existent bool + }{ + {maxBlockHeight + 2, false}, + {10, true}, + {1, true}, + {100, false}, + } + + for { + if isCaughtUp, _, _ := reactorPairs[1].reactor.pool.IsCaughtUp(); isCaughtUp { + break + } + + time.Sleep(10 * time.Millisecond) + } + + assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height()) + + for _, tt := range tests { + block, _ := reactorPairs[1].reactor.store.LoadBlock(tt.height) + if tt.existent { + assert.NotNil(t, block) + } else { + assert.Nil(t, block) + } + } +} + +// NOTE: This is too hard to test without +// an easy way to add test peer to switch +// or without significant refactoring of the module. +// Alternatively we could actually dial a TCP conn but +// that seems extreme. +func TestBadBlockStopsPeer(t *testing.T) { + config = test.ResetTestRoot("blocksync_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc() + + maxBlockHeight := int64(148) + + // Other chain needs a different validator set + otherGenDoc, otherPrivVals := randGenesisDoc() + otherChain := newReactor(t, log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight) + + defer func() { + err := otherChain.reactor.Stop() + require.Error(t, err) + err = otherChain.app.Stop() + require.NoError(t, err) + }() + + reactorPairs := make([]ReactorPair, 4) + + reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight) + reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + reactorPairs[2] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + + switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor) + return s + }, p2p.Connect2Switches) + + defer func() { + for _, r := range reactorPairs { + err := r.reactor.Stop() + require.NoError(t, err) + + err = r.app.Stop() + require.NoError(t, err) + } + }() + + for { + time.Sleep(1 * time.Second) + caughtUp := true + for _, r := range reactorPairs { + if isCaughtUp, _, _ := r.reactor.pool.IsCaughtUp(); !isCaughtUp { + caughtUp = false + } + } + if caughtUp { + break + } + } + + // at this time, reactors[0-3] is the newest + assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size()) + + // Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a mutex is a data + // race, but can't be easily avoided. 
+ reactorPairs[3].reactor.store = otherChain.reactor.store + + lastReactorPair := newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + reactorPairs = append(reactorPairs, lastReactorPair) //nolint:makezero // when initializing with 0, the test breaks. + + switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(_ int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor) + return s + }, p2p.Connect2Switches)...) + + for i := 0; i < len(reactorPairs)-1; i++ { + p2p.Connect2Switches(switches, i, len(reactorPairs)-1) + } + + for { + isCaughtUp, _, _ := lastReactorPair.reactor.pool.IsCaughtUp() + if isCaughtUp || lastReactorPair.reactor.Switch.Peers().Size() == 0 { + break + } + + time.Sleep(1 * time.Second) + } + + assert.Less(t, lastReactorPair.reactor.Switch.Peers().Size(), len(reactorPairs)-1) +} + +func TestCheckSwitchToConsensusLastHeightZero(t *testing.T) { + const maxBlockHeight = int64(45) + + config = test.ResetTestRoot("blocksync_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc() + + reactorPairs := make([]ReactorPair, 1, 2) + reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0) + reactorPairs[0].reactor.switchToConsensusMs = 50 + defer func() { + for _, r := range reactorPairs { + err := r.reactor.Stop() + require.NoError(t, err) + err = r.app.Stop() + require.NoError(t, err) + } + }() + + reactorPairs = append(reactorPairs, newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)) + + var switches []*p2p.Switch + for _, r := range reactorPairs { + switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(_ int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("BLOCKSYNC", r.reactor) + return s + }, p2p.Connect2Switches)...) 
+	}
+
+	time.Sleep(60 * time.Millisecond)
+
+	// Connect both switches
+	p2p.Connect2Switches(switches, 0, 1)
+
+	startTime := time.Now()
+	for {
+		time.Sleep(20 * time.Millisecond)
+		caughtUp := true
+		for _, r := range reactorPairs {
+			if isCaughtUp, _, _ := r.reactor.pool.IsCaughtUp(); !isCaughtUp {
+				caughtUp = false
+				break
+			}
+		}
+		if caughtUp {
+			break
+		}
+		if time.Since(startTime) > 90*time.Second {
+			msg := "timeout: reactors didn't catch up;"
+			for i, r := range reactorPairs {
+				c, h, maxH := r.reactor.pool.IsCaughtUp()
+				msg += fmt.Sprintf(" reactor#%d (h %d, maxH %d, c %t);", i, h, maxH, c)
+			}
+			require.Fail(t, msg)
+		}
+	}
+
+	// maxDiff = 3 accumulates three off-by-ones:
+	// -1 for the "-1" in IsCaughtUp,
+	// -1 because pool.height points to the _next_ height,
+	// -1 because we measure the height of the block store.
+	const maxDiff = 3
+	for _, r := range reactorPairs {
+		assert.GreaterOrEqual(t, r.reactor.store.Height(), maxBlockHeight-maxDiff)
+	}
+}
+
+func ExtendedCommitNetworkHelper(t *testing.T, maxBlockHeight int64, enableVoteExtensionAt int64, invalidBlockHeightAt int64) {
+	t.Helper()
+	config = test.ResetTestRoot("blocksync_reactor_test")
+	defer os.RemoveAll(config.RootDir)
+	genDoc, privVals := randGenesisDoc()
+	genDoc.ConsensusParams.Feature.VoteExtensionsEnableHeight = enableVoteExtensionAt
+
+	reactorPairs := make([]ReactorPair, 1, 2)
+	reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
+	reactorPairs[0].reactor.switchToConsensusMs = 50
+	defer func() {
+		for _, r := range reactorPairs {
+			err := r.reactor.Stop()
+			require.NoError(t, err)
+			err = r.app.Stop()
+			require.NoError(t, err)
+		}
+	}()
+
+	reactorPairs = append(reactorPairs, newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight, invalidBlockHeightAt))
+
+	var switches []*p2p.Switch
+	for _, r := range reactorPairs {
+		switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(_ int, s *p2p.Switch) *p2p.Switch {
+			s.AddReactor("BLOCKSYNC", r.reactor)
+			return s
+		}, p2p.Connect2Switches)...)
+	}
+
+	time.Sleep(60 * time.Millisecond)
+
+	// Connect both switches
+	p2p.Connect2Switches(switches, 0, 1)
+
+	startTime := time.Now()
+	for {
+		time.Sleep(20 * time.Millisecond)
+		// The reactor can never catch up, because at one point it disconnects.
+		c, _, _ := reactorPairs[0].reactor.pool.IsCaughtUp()
+		require.False(t, c, "node caught up when it should not have")
+		// After 5 seconds, the test should have executed.
+		if time.Since(startTime) > 5*time.Second {
+			assert.Equal(t, 0, reactorPairs[0].reactor.Switch.Peers().Size(), "node should have disconnected but didn't")
+			assert.Equal(t, 0, reactorPairs[1].reactor.Switch.Peers().Size(), "node should have disconnected but didn't")
+			break
+		}
+	}
+}
+
+// TestCheckExtendedCommitExtra tests the case where VoteExtensions are disabled but an ExtendedVote is present in the block.
+func TestCheckExtendedCommitExtra(t *testing.T) {
+	const maxBlockHeight = 10
+	const enableVoteExtension = 5
+	const invalidBlockHeight = 3
+
+	ExtendedCommitNetworkHelper(t, maxBlockHeight, enableVoteExtension, invalidBlockHeight)
+}
+
+// TestCheckExtendedCommitMissing tests the case where VoteExtensions are enabled but the ExtendedVote is missing from the block.
+func TestCheckExtendedCommitMissing(t *testing.T) {
+	const maxBlockHeight = 10
+	const enableVoteExtension = 5
+	const invalidBlockHeight = 8
+
+	ExtendedCommitNetworkHelper(t, maxBlockHeight, enableVoteExtension, invalidBlockHeight)
+}
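`ByzantineReactor` below embeds `*Reactor` yet copies `Receive` verbatim. That is deliberate: Go embedding is composition, not inheritance, so a promoted method keeps calling the embedded type's own helpers and never sees a shadowing override. A standalone illustration of that semantics (hypothetical types, not from the patch):

```go
package main

import "fmt"

type Base struct{}

func (Base) respond() string { return "honest block" }

// handle is promoted into any embedding struct, but its receiver is Base,
// so the respond call is resolved statically to Base.respond.
func (b Base) handle() string { return b.respond() }

type Byzantine struct{ Base }

func (Byzantine) respond() string { return "corrupted block" }

func main() {
	b := Byzantine{}
	fmt.Println(b.respond()) // "corrupted block": the shadowing method
	fmt.Println(b.handle())  // "honest block": promoted handle never sees the override
}
```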
+// ByzantineReactor is a blocksync reactor implementation that can send a corrupted block to a peer.
+// The corruption is that the block contains extended commit signatures when vote extensions are disabled, or
+// it has no extended commit signatures while vote extensions are enabled.
+// If the corrupted block height is set to 0, the reactor behaves normally.
+type ByzantineReactor struct {
+	*Reactor
+	corruptedBlock int64
+}
+
+func NewByzantineReactor(invalidBlock int64, conR *Reactor) *ByzantineReactor {
+	return &ByzantineReactor{
+		Reactor:        conR,
+		corruptedBlock: invalidBlock,
+	}
+}
+
+// respondToPeer (shadowing the embedded Reactor's method) loads a block and sends it to the requesting peer,
+// if we have it. Otherwise, we'll respond saying we don't have it.
+// Byzantine modification: if corruptedBlock is set, send the wrong Block.
+func (bcR *ByzantineReactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) {
+	block, _ := bcR.store.LoadBlock(msg.Height)
+	if block == nil {
+		bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
+		return src.TrySend(p2p.Envelope{
+			ChannelID: BlocksyncChannel,
+			Message:   &bcproto.NoBlockResponse{Height: msg.Height},
+		})
+	}
+
+	state, err := bcR.blockExec.Store().Load()
+	if err != nil {
+		bcR.Logger.Error("loading state", "err", err)
+		return false
+	}
+	var extCommit *types.ExtendedCommit
+	voteExtensionEnabled := state.ConsensusParams.Feature.VoteExtensionsEnabled(msg.Height)
+	incorrectBlock := bcR.corruptedBlock == msg.Height
+	if voteExtensionEnabled && !incorrectBlock || !voteExtensionEnabled && incorrectBlock {
+		extCommit = bcR.store.LoadBlockExtendedCommit(msg.Height)
+		if extCommit == nil {
+			bcR.Logger.Error("found block in store with no extended commit", "block", block)
+			return false
+		}
+	}
+
+	bl, err := block.ToProto()
+	if err != nil {
+		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
+		return false
+	}
+
+	return src.TrySend(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message: &bcproto.BlockResponse{
+			Block:     bl,
+			ExtCommit: extCommit.ToProto(),
+		},
+	})
+}
+
+// Receive implements Reactor by handling five types of messages (see below).
+// Copied unchanged from reactor.go so that the correct respondToPeer is called.
+func (bcR *ByzantineReactor) Receive(e p2p.Envelope) {
+	if err := ValidateMsg(e.Message); err != nil {
+		bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
+		bcR.Switch.StopPeerForError(e.Src, err)
+		return
+	}
+
+	bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
+
+	switch msg := e.Message.(type) {
+	case *bcproto.BlockRequest:
+		bcR.respondToPeer(msg, e.Src)
+	case *bcproto.BlockResponse:
+		bi, err := types.BlockFromProto(msg.Block)
+		if err != nil {
+			bcR.Logger.Error("Peer sent us invalid block", "peer", e.Src, "msg", e.Message, "err", err)
+			bcR.Switch.StopPeerForError(e.Src, err)
+			return
+		}
+		var extCommit *types.ExtendedCommit
+		if msg.ExtCommit != nil {
+			var err error
+			extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit)
+			if err != nil {
+				bcR.Logger.Error("failed to convert extended commit from proto",
+					"peer", e.Src,
+					"err", err)
+				bcR.Switch.StopPeerForError(e.Src, err)
+				return
+			}
+		}
+
+		if err := bcR.pool.AddBlock(e.Src.ID(), bi, extCommit, msg.Block.Size()); err != nil {
+			bcR.Logger.Error("failed to add block", "peer", e.Src, "err", err)
+		}
+	case *bcproto.StatusRequest:
+		// Send peer our state.
+ e.Src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.StatusResponse{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + }, + }) + case *bcproto.StatusResponse: + // Got a peer status. Unverified. + bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height) + case *bcproto.NoBlockResponse: + bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) + bcR.pool.RedoRequestFrom(msg.Height, e.Src.ID()) + default: + bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } +} diff --git a/internal/blocksync/types.go b/internal/blocksync/types.go new file mode 100644 index 00000000000..b24a5c8945a --- /dev/null +++ b/internal/blocksync/types.go @@ -0,0 +1,14 @@ +package blocksync + +import ( + cmtbs "github.com/cometbft/cometbft/api/cometbft/blocksync/v1" + "github.com/cometbft/cometbft/types" +) + +var ( + _ types.Wrapper = &cmtbs.StatusRequest{} + _ types.Wrapper = &cmtbs.StatusResponse{} + _ types.Wrapper = &cmtbs.NoBlockResponse{} + _ types.Wrapper = &cmtbs.BlockResponse{} + _ types.Wrapper = &cmtbs.BlockRequest{} +) diff --git a/libs/clist/bench_test.go b/internal/clist/bench_test.go similarity index 100% rename from libs/clist/bench_test.go rename to internal/clist/bench_test.go diff --git a/libs/clist/clist.go b/internal/clist/clist.go similarity index 94% rename from libs/clist/clist.go rename to internal/clist/clist.go index 5eb48f00a12..15886d272ff 100644 --- a/libs/clist/clist.go +++ b/internal/clist/clist.go @@ -51,7 +51,7 @@ type CElement struct { nextWaitCh chan struct{} removed bool - Value interface{} // immutable + Value any // immutable } // Blocking implementation of Next(). @@ -166,7 +166,7 @@ func (e *CElement) SetNext(newNext *CElement) { // If a WaitGroup is reused to wait for several independent sets of // events, new Add calls must happen after all previous Wait calls have // returned. - e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + e.nextWg = waitGroup1() // WaitGroups are difficult to reuse. e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { @@ -177,14 +177,14 @@ func (e *CElement) SetNext(newNext *CElement) { } // NOTE: This function needs to be safe for -// concurrent goroutines waiting on prevWg +// concurrent goroutines waiting on prevWg. func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() oldPrev := e.prev e.prev = newPrev if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWg = waitGroup1() // WaitGroups are difficult to reuse. e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { @@ -211,7 +211,7 @@ func (e *CElement) SetRemoved() { e.mtx.Unlock() } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // CList represents a linked list. // The zero value for CList is an empty list ready to use. @@ -313,7 +313,7 @@ func (l *CList) WaitChan() <-chan struct{} { } // Panics if list grows beyond its max length. -func (l *CList) PushBack(v interface{}) *CElement { +func (l *CList) PushBack(v any) *CElement { l.mtx.Lock() // Construct a new element @@ -353,7 +353,7 @@ func (l *CList) PushBack(v interface{}) *CElement { // CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. // NOTE: As per the contract of CList, removed elements cannot be added back. 
-func (l *CList) Remove(e *CElement) interface{} { +func (l *CList) Remove(e *CElement) any { l.mtx.Lock() prev := e.Prev() @@ -374,7 +374,7 @@ func (l *CList) Remove(e *CElement) interface{} { // If we're removing the only item, make CList FrontWait/BackWait wait. if l.curLen == 1 { - l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.wg = waitGroup1() // WaitGroups are difficult to reuse. l.waitCh = make(chan struct{}) } @@ -403,5 +403,5 @@ func (l *CList) Remove(e *CElement) interface{} { func waitGroup1() (wg *sync.WaitGroup) { wg = &sync.WaitGroup{} wg.Add(1) - return + return wg } diff --git a/libs/clist/clist_test.go b/internal/clist/clist_test.go similarity index 80% rename from libs/clist/clist_test.go rename to internal/clist/clist_test.go index 9cff7e34f2c..64a267ff09d 100644 --- a/libs/clist/clist_test.go +++ b/internal/clist/clist_test.go @@ -3,13 +3,13 @@ package clist import ( "fmt" "runtime" - "sync/atomic" + "sync" "testing" "time" "github.com/stretchr/testify/assert" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func TestPanicOnMaxLength(t *testing.T) { @@ -63,75 +63,65 @@ func TestSmall(t *testing.T) { if l.Len() != 0 { t.Error("Expected len 0, got ", l.Len()) } - } -// This test is quite hacky because it relies on SetFinalizer -// which isn't guaranteed to run at all. -// -//nolint:unused,deadcode -func _TestGCFifo(t *testing.T) { +// This test was quite hacky because it relies on SetFinalizer +// it has been made less hacky (I think) by using a WaitGroup. +func TestGCFifo(t *testing.T) { + t.Helper() if runtime.GOARCH != "amd64" { t.Skipf("Skipping on non-amd64 machine") } const numElements = 1000000 l := New() - gcCount := new(uint64) - // SetFinalizer doesn't work well with circular structures, - // so we construct a trivial non-circular structure to - // track. + // Use a WaitGroup to wait for all finalizers to run. + var wg sync.WaitGroup + wg.Add(numElements) + type value struct { Int int } - done := make(chan struct{}) for i := 0; i < numElements; i++ { v := new(value) v.Int = i l.PushBack(v) - runtime.SetFinalizer(v, func(v *value) { - atomic.AddUint64(gcCount, 1) + runtime.SetFinalizer(v, func(_ *value) { + wg.Done() }) } for el := l.Front(); el != nil; { + next := el.Next() l.Remove(el) - // oldEl := el - el = el.Next() - // oldEl.DetachPrev() - // oldEl.DetachNext() + el = next } - runtime.GC() - time.Sleep(time.Second * 3) - runtime.GC() - time.Sleep(time.Second * 3) - _ = done + // Wait for all finalizers to run. + wg.Wait() - if *gcCount != numElements { - t.Errorf("expected gcCount to be %v, got %v", numElements, - *gcCount) + if l.Len() != 0 { + t.Errorf("expected list to be empty, got %v elements", l.Len()) } } // This test is quite hacky because it relies on SetFinalizer // which isn't guaranteed to run at all. -// -//nolint:unused,deadcode -func _TestGCRandom(t *testing.T) { +func TestGCRandom(t *testing.T) { + t.Helper() if runtime.GOARCH != "amd64" { t.Skipf("Skipping on non-amd64 machine") } const numElements = 1000000 l := New() - gcCount := 0 - // SetFinalizer doesn't work well with circular structures, - // so we construct a trivial non-circular structure to - // track. + // Use a WaitGroup to wait for all finalizers to run. 
+ var wg sync.WaitGroup + wg.Add(numElements) + type value struct { Int int } @@ -140,8 +130,8 @@ func _TestGCRandom(t *testing.T) { v := new(value) v.Int = i l.PushBack(v) - runtime.SetFinalizer(v, func(v *value) { - gcCount++ + runtime.SetFinalizer(v, func(_ *value) { + wg.Done() }) } @@ -156,17 +146,15 @@ func _TestGCRandom(t *testing.T) { _ = el.Next() } - runtime.GC() - time.Sleep(time.Second * 3) + // Wait for all finalizers to run. + wg.Wait() - if gcCount != numElements { - t.Errorf("expected gcCount to be %v, got %v", numElements, - gcCount) + if l.Len() != 0 { + t.Errorf("expected list to be empty, got %v elements", l.Len()) } } func TestScanRightDeleteRandom(t *testing.T) { - const numElements = 1000 const numTimes = 100 const numScanners = 10 @@ -222,7 +210,6 @@ func TestScanRightDeleteRandom(t *testing.T) { if i%100000 == 0 { fmt.Printf("Pushed %vK elements so far...\n", i/1000) } - } // Stop scanners diff --git a/libs/cmap/cmap.go b/internal/cmap/cmap.go similarity index 60% rename from libs/cmap/cmap.go rename to internal/cmap/cmap.go index 2169c02fe96..9ec75ed4729 100644 --- a/libs/cmap/cmap.go +++ b/internal/cmap/cmap.go @@ -4,35 +4,35 @@ import ( cmtsync "github.com/cometbft/cometbft/libs/sync" ) -// CMap is a goroutine-safe map +// CMap is a goroutine-safe map. type CMap struct { - m map[string]interface{} - l cmtsync.Mutex + m map[string]any + l cmtsync.RWMutex } func NewCMap() *CMap { return &CMap{ - m: make(map[string]interface{}), + m: make(map[string]any), } } -func (cm *CMap) Set(key string, value interface{}) { +func (cm *CMap) Set(key string, value any) { cm.l.Lock() cm.m[key] = value cm.l.Unlock() } -func (cm *CMap) Get(key string) interface{} { - cm.l.Lock() +func (cm *CMap) Get(key string) any { + cm.l.RLock() val := cm.m[key] - cm.l.Unlock() + cm.l.RUnlock() return val } func (cm *CMap) Has(key string) bool { - cm.l.Lock() + cm.l.RLock() _, ok := cm.m[key] - cm.l.Unlock() + cm.l.RUnlock() return ok } @@ -43,35 +43,34 @@ func (cm *CMap) Delete(key string) { } func (cm *CMap) Size() int { - cm.l.Lock() + cm.l.RLock() size := len(cm.m) - cm.l.Unlock() + cm.l.RUnlock() return size } func (cm *CMap) Clear() { cm.l.Lock() - cm.m = make(map[string]interface{}) + cm.m = make(map[string]any) cm.l.Unlock() } func (cm *CMap) Keys() []string { - cm.l.Lock() - + cm.l.RLock() keys := make([]string, 0, len(cm.m)) for k := range cm.m { keys = append(keys, k) } - cm.l.Unlock() + cm.l.RUnlock() return keys } -func (cm *CMap) Values() []interface{} { - cm.l.Lock() - items := make([]interface{}, 0, len(cm.m)) +func (cm *CMap) Values() []any { + cm.l.RLock() + items := make([]any, 0, len(cm.m)) for _, v := range cm.m { items = append(items, v) } - cm.l.Unlock() + cm.l.RUnlock() return items } diff --git a/libs/cmap/cmap_test.go b/internal/cmap/cmap_test.go similarity index 94% rename from libs/cmap/cmap_test.go rename to internal/cmap/cmap_test.go index bab78da965e..bc507bce5a4 100644 --- a/libs/cmap/cmap_test.go +++ b/internal/cmap/cmap_test.go @@ -17,8 +17,8 @@ func TestIterateKeysWithValues(t *testing.T) { // Testing size assert.Equal(t, 10, cmap.Size()) - assert.Equal(t, 10, len(cmap.Keys())) - assert.Equal(t, 10, len(cmap.Values())) + assert.Len(t, cmap.Keys(), 10) + assert.Len(t, cmap.Values(), 10) // Iterating Keys, checking for matching Value for _, key := range cmap.Keys() { diff --git a/internal/confix/README.md b/internal/confix/README.md new file mode 100644 index 00000000000..131bea414f0 --- /dev/null +++ b/internal/confix/README.md @@ -0,0 +1,54 @@ +# Confix + 
diff --git a/internal/confix/README.md b/internal/confix/README.md
new file mode 100644
index 00000000000..131bea414f0
--- /dev/null
+++ b/internal/confix/README.md
@@ -0,0 +1,55 @@
+# Confix
+
+`Confix` is a configuration management tool that lets you manage your CometBFT configuration from the command line.
+
+It is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md).
+
+## Usage
+
+### Get
+
+Get a configuration value, e.g.:
+
+```shell
+cometbft config get mempool.size # gets the value of mempool.size
+cometbft config get moniker # gets the value of moniker
+```
+
+### Set
+
+Set a configuration value, e.g.:
+
+```shell
+cometbft config set mempool.size 1000 # sets the value of mempool.size
+cometbft config set moniker "foo-1" # sets the value of moniker
+```
+
+### Migrate
+
+Migrate a configuration file to a new version:
+
+```shell
+cometbft config migrate v0.38 # migrates defaultHome/config/config.toml to the latest v0.38 config
+```
+
+### Diff
+
+Get the diff between a given configuration file and the default configuration
+file, e.g.:
+
+```shell
+cometbft config diff v0.38 # gets the diff between defaultHome/config/config.toml and the latest v0.38 config
+```
+
+### View
+
+View a configuration file, e.g.:
+
+```shell
+cometbft config view # views the current config
+```
+
+## Credits
+
+This project is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md) and CometBFT's own earlier implementation of [confix](https://github.com/cometbft/cometbft/blob/v0.36.x/scripts/confix/confix.go).
+Most of the code is adapted from the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/tree/main/tools/confix) version.
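The `get` and `set` subcommands address values by dotted keys that mirror the TOML table structure of the config files below (`mempool.size` is the `size` key inside the `[mempool]` table). As a rough illustration of what a `config get` lookup amounts to, here is a hedged sketch that decodes a config into a generic map and walks the dotted key. It is not confix's actual implementation; it assumes the `github.com/BurntSushi/toml` decoder and uses a hypothetical `lookup` helper:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/BurntSushi/toml"
)

// lookup walks a dotted key such as "mempool.size" through nested TOML tables.
func lookup(cfg map[string]any, dottedKey string) (any, bool) {
	var cur any = cfg
	for _, part := range strings.Split(dottedKey, ".") {
		table, ok := cur.(map[string]any)
		if !ok {
			return nil, false // tried to descend into a non-table value
		}
		if cur, ok = table[part]; !ok {
			return nil, false // key not present at this level
		}
	}
	return cur, true
}

func main() {
	// A tiny stand-in for config.toml.
	const blob = `
moniker = "test"

[mempool]
size = 5000
`
	var cfg map[string]any
	if _, err := toml.Decode(blob, &cfg); err != nil {
		log.Fatal(err)
	}
	for _, key := range []string{"moniker", "mempool.size"} {
		if v, ok := lookup(cfg, key); ok {
			fmt.Printf("%s = %v\n", key, v)
		}
	}
}
```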
diff --git a/internal/confix/data/v0.34.toml b/internal/confix/data/v0.34.toml new file mode 100644 index 00000000000..35df417ff4e --- /dev/null +++ b/internal/confix/data/v0.34.toml @@ -0,0 +1,487 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag.
+ +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server
to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behaviour. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. 
+# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool. +version = "v0" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. 
+# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# This feature is only available for the default mempool (version config set to "v0"). +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Fast Sync Configuration Connections ### +####################################################### +[fastsync] + +# Fast Sync version to use: +# 1) "v0" (default) - the legacy fast sync implementation +# 2) "v1" - refactor of v0 version for better testability +# 2) "v2" - complete redesign of v0, optimized for testability & readability +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
+double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/internal/confix/data/v0.37.toml b/internal/confix/data/v0.37.toml new file mode 100644 index 00000000000..1591199ee66 --- /dev/null +++ b/internal/confix/data/v0.37.toml @@ -0,0 +1,492 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. 
+ +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# If this node is many blocks behind the tip of the chain, BlockSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +# +# Deprecated: this key will be removed and BlockSync will be enabled +# unconditionally in the next major release. +block_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = 
["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. 
+# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool (deprecated; will be removed in the next release). +version = "v0" + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# This feature is only available for the default mempool (version config set to "v0"). +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. 
+experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. 
+# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/internal/confix/data/v0.38.toml b/internal/confix/data/v0.38.toml new file mode 100644 index 00000000000..ad2f94846eb --- /dev/null +++ b/internal/confix/data/v0.38.toml @@ -0,0 +1,482 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. Do not modify this. 
+version = "0.38.7" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. 
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. 
+# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. 
+type = "flood" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true + +# Broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = true + +# WalPath (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# WalPath to where you want the WAL to be written (e.g. +# "data/mempool.wal"). +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = false
+
+# EmptyBlocks mode and possible interval between empty blocks
+create_empty_blocks = true
+create_empty_blocks_interval = "0s"
+
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "100ms"
+peer_query_maj23_sleep_duration = "2s"
+
+#######################################################
+### Storage Configuration Options ###
+#######################################################
+[storage]
+
+# Set to true to discard ABCI responses from the state store, which can save a
+# considerable amount of disk space. Set to false to ensure ABCI responses are
+# persisted. ABCI responses are required for /block_results RPC queries, and to
+# reindex events in the command-line tool.
+discard_abci_responses = false
+
+#######################################################
+### Transaction Indexer Configuration Options ###
+#######################################################
+[tx_index]
+
+# What indexer to use for transactions
+#
+# The application will set which txs to index. In some cases a node operator will be able
+# to decide which txs to index based on configuration set in the application.
+#
+# Options:
+# 1) "null"
+# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
+# 3) "psql" - the indexer services backed by PostgreSQL.
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
+indexer = "kv"
+
+# The PostgreSQL connection configuration, the connection format:
+# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
+psql-conn = ""
+
+#######################################################
+### Instrumentation Configuration Options ###
+#######################################################
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "cometbft"
diff --git a/docs/core/configuration.md b/internal/confix/data/v1.0.toml
similarity index 77%
rename from docs/core/configuration.md
rename to internal/confix/data/v1.0.toml
index 733488b670e..51c53ee2265 100644
--- a/docs/core/configuration.md
+++ b/internal/confix/data/v1.0.toml
@@ -1,22 +1,3 @@
----
-order: 3
----
-
-# Configuration
-
-CometBFT can be configured via a TOML file in
-`$CMTHOME/config/config.toml`. Some of these parameters can be overridden by
-command-line flags. For most users, the options in the `##### main base configuration options #####` are intended to be modified while config options
-further below are intended for advance power users.
-
-## Options
-
-The default configuration file create by `cometbft init` has all
-the parameters set with their default values. It will look something
-like the file below, however, double check by inspecting the
-`config.toml` created with your version of `cometbft` installed:
-
-```toml
 # This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml @@ -27,7 +8,7 @@ like the file below, however, double check by inspecting the # The version of the CometBFT binary that created or # last modified the config file. Do not modify this. -version = "0.39.0" +version = "1.0.0-alpha.2" ####################################################################### ### Main Base Config Options ### @@ -38,27 +19,35 @@ version = "0.39.0" proxy_app = "tcp://127.0.0.1:26658" # A custom human readable name for this node -moniker = "thinkpad" +moniker = "test" -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb | pebbledb +# * goleveldb (github.com/syndtr/goleveldb) +# - UNMAINTAINED # - stable +# - pure go # * cleveldb (uses levigo wrapper) -# - fast +# - DEPRECATED # - requires gcc # - use cleveldb build tag (go build -tags cleveldb) # * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - DEPRECATED # - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) +# - stable # - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) +# * rocksdb (uses github.com/linxGnu/grocksdb) # - EXPERIMENTAL # - requires gcc # - use rocksdb build tag (go build -tags rocksdb) # * badgerdb (uses github.com/dgraph-io/badger) # - EXPERIMENTAL +# - stable # - use badgerdb build tag (go build -tags badgerdb) +# * pebbledb (uses github.com/cockroachdb/pebble) +# - EXPERIMENTAL +# - stable +# - pure go +# - use pebbledb build tag (go build -tags pebbledb) db_backend = "goleveldb" # Database directory @@ -173,6 +162,11 @@ experimental_close_on_slow_client = false # See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "10s" +# Maximum number of requests that can be sent in a batch +# If the value is set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. +max_request_batch_size = 10 + # Maximum size of request body, in bytes max_body_bytes = 1000000 @@ -231,6 +225,32 @@ enabled = true [grpc.block_service] enabled = true +# The gRPC block results service returns block results for a given height. If no height +# is given, it will return the block results from the latest height. +[grpc.block_results_service] +enabled = true + +# +# Configuration for privileged gRPC endpoints, which should **never** be exposed +# to the public internet. +# +[grpc.privileged] +# The host/port on which to expose privileged gRPC endpoints. +laddr = "" + +# +# Configuration specifically for the gRPC pruning service, which is considered a +# privileged service. +# +[grpc.privileged.pruning_service] + +# Only controls whether the pruning service is accessible via the gRPC API - not +# whether a previously set pruning service retain height is honored by the +# node. See the [storage.pruning] section for control over pruning. +# +# Disabled by default. 
+enabled = false + ####################################################### ### P2P Configuration Options ### ####################################################### @@ -270,7 +290,7 @@ unconditional_peer_ids = "" persistent_peers_max_dial_period = "0s" # Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" +flush_throttle_timeout = "10ms" # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = 1024 @@ -305,6 +325,16 @@ dial_timeout = "3s" ####################################################### [mempool] +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + # recheck (default: true) defines whether CometBFT should recheck the # validity for all remaining transaction in the mempool after a block. # Since a block affects the application state, some transactions in the @@ -312,6 +342,12 @@ dial_timeout = "3s" # you can disable rechecking. recheck = true +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +recheck_timeout = "1s" + # broadcast (default: true) defines whether the mempool should relay # transactions to other peers. Setting this to false will stop the mempool # from relaying transactions to other peers until they are included in a @@ -328,10 +364,14 @@ wal_dir = "" # Maximum number of transactions in the mempool size = 5000 -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 +# Maximum size in bytes of a single transaction accepted into the mempool. +max_tx_bytes = 1048576 + +# The maximum size in bytes of all transactions stored in the mempool. +# This is the raw, total transaction size. For example, given 1MB +# transactions and a 5MB maximum mempool byte size, the mempool will +# only accept five transactions. +max_txs_bytes = 67108864 # Size of the cache (used to filter transactions we saw earlier) in transactions cache_size = 10000 @@ -341,14 +381,20 @@ cache_size = 10000 # again in the future. keep-invalid-txs-in-cache = false -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. 
+# Similarly for non-persistent peers, with an upper limit of
+# ExperimentalMaxGossipConnectionsToNonPersistentPeers.
+# If set to 0, the feature is disabled for the corresponding group of peers, that is, the
+# number of active connections to that group of peers is not bounded.
+# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental
+# performance results using the default P2P configuration.
+experimental_max_gossip_connections_to_persistent_peers = 0
+experimental_max_gossip_connections_to_non_persistent_peers = 0
 
 #######################################################
 ### State Sync Configuration Options ###
 #######################################################
@@ -410,18 +456,18 @@ wal_file = "data/cs.wal/wal"
 timeout_propose = "3s"
 # How much timeout_propose increases with each round
 timeout_propose_delta = "500ms"
-# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
-timeout_prevote = "1s"
-# How much the timeout_prevote increases with each round
-timeout_prevote_delta = "500ms"
-# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
-timeout_precommit = "1s"
-# How much the timeout_precommit increases with each round
-timeout_precommit_delta = "500ms"
+# How long we wait after receiving +2/3 prevotes/precommits for “anything” (ie. not a single block or nil)
+timeout_vote = "1s"
+# How much the timeout_vote increases with each round
+timeout_vote_delta = "500ms"
 # How long we wait after committing a block, before starting on the new
 # height (this gives us a chance to receive some more precommits, even
 # though we already have +2/3).
-timeout_commit = "1s"
+# Deprecated: use `next_block_delay` in the ABCI application's `FinalizeBlockResponse`.
+timeout_commit = "0s"
+
+# Deprecated: set `timeout_commit` to 0 instead.
+skip_timeout_commit = false
 
 # How many blocks to look back to check existence of the node's consensus votes before joining consensus
 # When non-zero, the node will panic upon restart
@@ -429,9 +475,6 @@ timeout_commit = "1s"
 # So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
 double_sign_check_height = 0
 
-# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
-skip_timeout_commit = false
-
 # EmptyBlocks mode and possible interval between empty blocks
 create_empty_blocks = true
 create_empty_blocks_interval = "0s"
@@ -452,6 +495,35 @@ peer_query_maj23_sleep_duration = "2s"
 # reindex events in the command-line tool.
 discard_abci_responses = false
 
+# The representation of keys in the database.
+# The current representation of keys in Comet's stores is considered to be v1.
+# Users can experiment with a different layout by setting this field to v2.
+# Note that this is an experimental feature and switching back from v2 to v1
+# is not supported by CometBFT.
+# If the database was initially created with v1, it is necessary to migrate the DB
+# before switching to v2. The migration is not done automatically.
+# v1 - the legacy layout existing in Comet prior to v1.
+# v2 - order-preserving representation, ordering entries by height.
+experimental_db_key_layout = "v1"
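Returning to the consensus hunk above: `timeout_commit` is deprecated in favor of a delay that the ABCI application returns from `FinalizeBlock`. A hedged sketch of what that looks like on the application side (the `NextBlockDelay` response field is inferred from the `next_block_delay` mentioned in the comment; the `App` type here is hypothetical):

```go
package app

import (
	"context"
	"time"

	abci "github.com/cometbft/cometbft/abci/types"
)

// App is a hypothetical ABCI application; only FinalizeBlock is sketched.
type App struct {
	abci.BaseApplication
}

// FinalizeBlock returns a non-zero NextBlockDelay, which replaces the old
// timeout_commit = "1s" behavior of waiting before starting the next height.
func (app *App) FinalizeBlock(_ context.Context, _ *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) {
	return &abci.FinalizeBlockResponse{
		NextBlockDelay: 1 * time.Second,
	}, nil
}
```

+
+# If set to true, CometBFT will force compaction to happen for databases that support this feature,
+# and save on storage space. Setting this to true brings the most benefit when used in combination
+# with pruning, as it will physically delete the entries marked for deletion.
+# false by default (forcing compaction is disabled).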
+compact = false
+
+# To avoid forcing compaction every time, this parameter instructs CometBFT to wait
+# the given number of blocks to be pruned before triggering compaction.
+# It should be tuned depending on the number of items. If your retain height is 1 block,
+# it is too much of an overhead to try compaction every block. But it should also not be a very
+# large multiple of your retain height as it might incur bigger overheads.
+compaction_interval = "1000"
+
+# Hash of the Genesis file (as hex string), passed to CometBFT via the command line.
+# If this hash mismatches the hash that CometBFT computes on the genesis file,
+# the node is not able to boot.
+genesis_hash = ""
+
 [storage.pruning]
 
 # The time period between automated background pruning operations.
@@ -525,72 +597,3 @@ max_open_connections = 3
 
 # Instrumentation namespace
 namespace = "cometbft"
-```
-
-## Empty blocks VS no empty blocks
-
-### create_empty_blocks = true
-
-If `create_empty_blocks` is set to `true` in your config, blocks will be
-created ~ every second (with default consensus parameters). You can regulate
-the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks.
-
-### create_empty_blocks = false
-
-In this setting, blocks are created when transactions received.
-
-Note after the block H, CometBFT creates something we call a "proof block"
-(only if the application hash changed) H+1. The reason for this is to support
-proofs. If you have a transaction in block H that changes the state to X, the
-new application hash will only be included in block H+1. If after your
-transaction is committed, you want to get a light-client proof for the new state
-(X), you need the new block to be committed in order to do that because the new
-block has the new application hash for the state X. That's why we make a new
-(empty) block if the application hash changes. Otherwise, you won't be able to
-make a proof for the new state.
-
-Plus, if you set `create_empty_blocks_interval` to something other than the
-default (`0`), CometBFT will be creating empty blocks even in the absence of
-transactions every `create_empty_blocks_interval`. For instance, with
-`create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`,
-CometBFT will only create blocks if there are transactions, or after waiting
-30 seconds without receiving any transactions.
-
-## Consensus timeouts explained
-
-There's a variety of information about timeouts in [Running in
-production](./running-in-production.md#configuration-parameters).
-
-You can also find more detailed explanation in the paper describing
-the Tendermint consensus algorithm, adopted by CometBFT: [The latest
-gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
-
-```toml
-[consensus]
-...
-
-timeout_propose = "3s"
-timeout_propose_delta = "500ms"
-timeout_prevote = "1s"
-timeout_prevote_delta = "500ms"
-timeout_precommit = "1s"
-timeout_precommit_delta = "500ms"
-timeout_commit = "1s"
-```
-
-Note that in a successful round, the only timeout that we absolutely wait no
-matter what is `timeout_commit`.
-
-Here's a brief summary of the timeouts:
-
-- `timeout_propose` = how long a validator should wait for a proposal block before prevoting nil
-- `timeout_propose_delta` = how much `timeout_propose` increases with each round
-- `timeout_prevote` = how long a validator should wait after receiving +2/3 prevotes for
-  anything (ie.
not a single block or nil) -- `timeout_prevote_delta` = how much the `timeout_prevote` increases with each round -- `timeout_precommit` = how long a validator should wait after receiving +2/3 precommits for - anything (ie. not a single block or nil) -- `timeout_precommit_delta` = how much the `timeout_precommit` increases with each round -- `timeout_commit` = how long a validator should wait after committing a block, before starting - on the new height (this gives us a chance to receive some more precommits, - even though we already have +2/3) diff --git a/internal/confix/diff.go b/internal/confix/diff.go new file mode 100644 index 00000000000..6d25c8412ba --- /dev/null +++ b/internal/confix/diff.go @@ -0,0 +1,180 @@ +package confix + +import ( + "fmt" + "io" + "sort" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" +) + +type DiffType string + +const ( + Section DiffType = "S" + Mapping DiffType = "M" +) + +type KV struct { + Key string + Value string + Block []string // comment block +} + +type Diff struct { + Type DiffType + Deleted bool + + KV KV +} + +// DiffKeys diffs the keyspaces of the TOML documents in files lhs and rhs. +// Comments, order, and values are ignored for comparison purposes. +func DiffKeys(lhs, rhs *tomledit.Document) []Diff { + // diff sections + diff := diffDocs(allKVs(lhs.Global), allKVs(rhs.Global), false) + + lsec, rsec := lhs.Sections, rhs.Sections + transform.SortSectionsByName(lsec) + transform.SortSectionsByName(rsec) + + i, j := 0, 0 + for i < len(lsec) && j < len(rsec) { + switch { + case lsec[i].Name.Before(rsec[j].Name): + diff = append(diff, Diff{Type: Section, Deleted: true, KV: KV{Key: lsec[i].Name.String()}}) + for _, kv := range allKVs(lsec[i]) { + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: kv}) + } + i++ + case rsec[j].Name.Before(lsec[i].Name): + diff = append(diff, Diff{Type: Section, KV: KV{Key: rsec[j].Name.String()}}) + for _, kv := range allKVs(rsec[j]) { + diff = append(diff, Diff{Type: Mapping, KV: kv}) + } + j++ + default: + diff = append(diff, diffDocs(allKVs(lsec[i]), allKVs(rsec[j]), false)...) + i++ + j++ + } + } + for ; i < len(lsec); i++ { + diff = append(diff, Diff{Type: Section, Deleted: true, KV: KV{Key: lsec[i].Name.String()}}) + for _, kv := range allKVs(lsec[i]) { + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: kv}) + } + } + for ; j < len(rsec); j++ { + diff = append(diff, Diff{Type: Section, KV: KV{Key: rsec[j].Name.String()}}) + for _, kv := range allKVs(rsec[j]) { + diff = append(diff, Diff{Type: Mapping, KV: kv}) + } + } + + return diff +} + +// DiffValues diffs the keyspaces with different values of the TOML documents in files lhs and rhs. 
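Before `DiffValues` below, a hedged usage sketch for `DiffKeys` and the `Diff`/`KV` types above: compare two of the embedded config templates and print one line per differing key (`LoadLocalConfig` and `PrintDiff` appear later in this diff; the template names match the files embedded under `data/`):

```go
package confix_test

import (
	"os"
	"testing"

	"github.com/cometbft/cometbft/internal/confix"
)

func TestPrintKeyDiff(t *testing.T) {
	oldDoc, err := confix.LoadLocalConfig("v0.38.toml")
	if err != nil {
		t.Fatal(err)
	}
	newDoc, err := confix.LoadLocalConfig("v1.0.toml")
	if err != nil {
		t.Fatal(err)
	}
	// Emits lines like "+M <key>=<value>" for keys that exist only in v1.0
	// and "-M ..." for keys dropped since v0.38.
	confix.PrintDiff(os.Stdout, confix.DiffKeys(oldDoc, newDoc))
}
```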
+func DiffValues(lhs, rhs *tomledit.Document) []Diff { + diff := diffDocs(allKVs(lhs.Global), allKVs(rhs.Global), true) + + lsec, rsec := lhs.Sections, rhs.Sections + transform.SortSectionsByName(lsec) + transform.SortSectionsByName(rsec) + + i, j := 0, 0 + for i < len(lsec) && j < len(rsec) { + switch { + case lsec[i].Name.Before(rsec[j].Name): + // skip keys present in lhs but not in rhs + i++ + case rsec[j].Name.Before(lsec[i].Name): + // skip keys present in rhs but not in lhs + j++ + default: + for _, d := range diffDocs(allKVs(lsec[i]), allKVs(rsec[j]), true) { + if !d.Deleted { + diff = append(diff, d) + } + } + i++ + j++ + } + } + + return diff +} + +func allKVs(s *tomledit.Section) []KV { + keys := []KV{} + s.Scan(func(key parser.Key, entry *tomledit.Entry) bool { + keys = append(keys, KV{ + Key: key.String(), + // we get the value of the current configuration (i.e the one we want to compare/migrate) + Value: entry.Value.String(), + Block: entry.Block, + }) + + return true + }) + return keys +} + +// diffDocs get the diff between all keys in lhs and rhs. +// when a key is in both lhs and rhs, it is ignored, unless value is true in which case the value is as well compared. +func diffDocs(lhs, rhs []KV, value bool) []Diff { + diff := []Diff{} + + sort.Slice(lhs, func(i, j int) bool { + return lhs[i].Key < lhs[j].Key + }) + sort.Slice(rhs, func(i, j int) bool { + return rhs[i].Key < rhs[j].Key + }) + + i, j := 0, 0 + for i < len(lhs) && j < len(rhs) { + switch { + case lhs[i].Key < rhs[j].Key: + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: lhs[i]}) + i++ + case lhs[i].Key > rhs[j].Key: + diff = append(diff, Diff{Type: Mapping, KV: rhs[j]}) + j++ + default: + // key exists in both lhs and rhs + // if value is true, compare the values + if value && lhs[i].Value != rhs[j].Value { + diff = append(diff, Diff{Type: Mapping, KV: lhs[i]}) + } + i++ + j++ + } + } + for ; i < len(lhs); i++ { + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: lhs[i]}) + } + for ; j < len(rhs); j++ { + diff = append(diff, Diff{Type: Mapping, KV: rhs[j]}) + } + + return diff +} + +// PrintDiff output prints one line per key that differs: +// -S name -- section exists in f1 but not f2 +// +S name -- section exists in f2 but not f1 +// -M name -- mapping exists in f1 but not f2 +// +M name -- mapping exists in f2 but not f1. +func PrintDiff(w io.Writer, diffs []Diff) { + for _, diff := range diffs { + if diff.Deleted { + fmt.Fprintln(w, fmt.Sprintf("-%s", diff.Type), fmt.Sprintf("%s=%s", diff.KV.Key, diff.KV.Value)) + } else { + fmt.Fprintln(w, fmt.Sprintf("+%s", diff.Type), fmt.Sprintf("%s=%s", diff.KV.Key, diff.KV.Value)) + } + } +} diff --git a/internal/confix/doc.go b/internal/confix/doc.go new file mode 100644 index 00000000000..83110931f09 --- /dev/null +++ b/internal/confix/doc.go @@ -0,0 +1,4 @@ +// Package confix applies changes to a CometBFT TOML configuration file, to +// update configurations created with an older version of CometBFT to a +// compatible format for a newer version. +package confix diff --git a/internal/confix/file.go b/internal/confix/file.go new file mode 100644 index 00000000000..0d16f0bed2c --- /dev/null +++ b/internal/confix/file.go @@ -0,0 +1,35 @@ +package confix + +import ( + "embed" + "fmt" + "os" + "path/filepath" + + "github.com/creachadair/tomledit" +) + +//go:embed data +var data embed.FS + +// LoadLocalConfig loads and parses the TOML document from confix data. 
+func LoadLocalConfig(fileName string) (*tomledit.Document, error) { + f, err := data.Open(filepath.Join("data", fileName)) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w. This file should have been included in confix", err) + } + defer f.Close() + + return tomledit.Parse(f) +} + +// LoadConfig loads and parses the TOML document from path. +func LoadConfig(path string) (*tomledit.Document, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open %q: %w", path, err) + } + defer f.Close() + + return tomledit.Parse(f) +} diff --git a/internal/confix/log.go b/internal/confix/log.go new file mode 100644 index 00000000000..67d9bc47d74 --- /dev/null +++ b/internal/confix/log.go @@ -0,0 +1,14 @@ +package confix + +import ( + "context" + "io" + + "github.com/creachadair/tomledit/transform" +) + +// WithLogWriter returns a child of ctx with a logger attached that sends +// output to w. This is a convenience wrapper for transform.WithLogWriter. +func WithLogWriter(ctx context.Context, w io.Writer) context.Context { + return transform.WithLogWriter(ctx, w) +} diff --git a/internal/confix/migrations.go b/internal/confix/migrations.go new file mode 100644 index 00000000000..f9fbec4b597 --- /dev/null +++ b/internal/confix/migrations.go @@ -0,0 +1,121 @@ +package confix + +import ( + "context" + "fmt" + "strings" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +const ( + Config = "config.toml" +) + +// MigrationMap defines a mapping from a version to a transformation plan. +type MigrationMap map[string]func(from *tomledit.Document, to string) transform.Plan + +var Migrations = MigrationMap{ + "v0.34": NoPlan, + "v0.37": PlanBuilder, + "v0.38": PlanBuilder, + "v1.0": PlanBuilder, + // "v0.xx.x": PlanBuilder, // add specific migration in case of configuration changes in minor versions +} + +// PlanBuilder is a function that returns a transformation plan for a given diff between two files. +func PlanBuilder(from *tomledit.Document, to string) transform.Plan { + plan := transform.Plan{} + deletedSections := map[string]bool{} + + target, err := LoadLocalConfig(to + ".toml") + if err != nil { + panic(fmt.Errorf("failed to parse file: %w. 
This file should have been valid", err)) + } + + diffs := DiffKeys(from, target) + for _, diff := range diffs { + kv := diff.KV + + var step transform.Step + keys := strings.Split(kv.Key, ".") + + if !diff.Deleted { + switch diff.Type { + case Section: + step = transform.Step{ + Desc: fmt.Sprintf("add %s section", kv.Key), + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + caser := cases.Title(language.English) + title := fmt.Sprintf("### %s Configuration ###", caser.String(kv.Key)) + doc.Sections = append(doc.Sections, &tomledit.Section{ + Heading: &parser.Heading{ + Block: parser.Comments{ + strings.Repeat("#", len(title)), + title, + strings.Repeat("#", len(title)), + }, + Name: keys, + }, + }) + return nil + }), + } + case Mapping: + if len(keys) == 1 { // top-level key + step = transform.Step{ + Desc: fmt.Sprintf("add %s key", kv.Key), + T: transform.EnsureKey(nil, &parser.KeyValue{ + Block: kv.Block, + Name: parser.Key{keys[0]}, + Value: parser.MustValue(kv.Value), + }), + } + } else if len(keys) > 1 { + step = transform.Step{ + Desc: fmt.Sprintf("add %s key", kv.Key), + T: transform.EnsureKey(keys[0:len(keys)-1], &parser.KeyValue{ + Block: kv.Block, + Name: parser.Key{keys[len(keys)-1]}, + Value: parser.MustValue(kv.Value), + }), + } + } + default: + panic(fmt.Errorf("unknown diff type: %s", diff.Type)) + } + } else { + if diff.Type == Section { + deletedSections[kv.Key] = true + step = transform.Step{ + Desc: fmt.Sprintf("remove %s section", kv.Key), + T: transform.Remove(keys), + } + } else { + // when the whole section is deleted we don't need to remove the keys + if len(keys) > 1 && deletedSections[keys[0]] { + continue + } + + step = transform.Step{ + Desc: fmt.Sprintf("remove %s key", kv.Key), + T: transform.Remove(keys), + } + } + } + + plan = append(plan, step) + } + + return plan +} + +// NoPlan returns a no-op plan. +func NoPlan(_ *tomledit.Document, to string) transform.Plan { + fmt.Printf("no migration needed to %s\n", to) + return transform.Plan{} +} diff --git a/internal/confix/upgrade.go b/internal/confix/upgrade.go new file mode 100644 index 00000000000..aca921d84de --- /dev/null +++ b/internal/confix/upgrade.go @@ -0,0 +1,82 @@ +package confix + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + + "github.com/creachadair/atomicfile" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/transform" + "github.com/spf13/viper" + + "github.com/cometbft/cometbft/config" +) + +// Upgrade reads the configuration file at configPath and applies any +// transformations necessary to Upgrade it to the current version. If this +// succeeds, the transformed output is written to outputPath. As a special +// case, if outputPath == "" the output is written to stdout. +// +// It is safe if outputPath == inputPath. If a regular file outputPath already +// exists, it is overwritten. In case of error, the output is not written. +// +// Upgrade is a convenience wrapper for calls to LoadConfig, ApplyFixes, and +// CheckValid. If the caller requires more control over the behavior of the +// Upgrade, call those functions directly. +func Upgrade(ctx context.Context, plan transform.Plan, configPath, outputPath string, skipValidate bool) error { + if configPath == "" { + return errors.New("empty input configuration path") + } + + doc, err := LoadConfig(configPath) + if err != nil { + return fmt.Errorf("loading config: %w", err) + } + + // transforms doc and reports whether it succeeded. 
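+	// Each transform.Step in the plan carries a human-readable Desc; with a
+	// log writer attached to ctx via WithLogWriter (log.go in this package),
+	// tomledit can report the applied steps there.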
+ if err := plan.Apply(ctx, doc); err != nil { + return fmt.Errorf("updating %q: %w", configPath, err) + } + + var buf bytes.Buffer + if err := tomledit.Format(&buf, doc); err != nil { + return fmt.Errorf("formatting config: %w", err) + } + + // allow to skip validation + if !skipValidate { + // verify that file is valid after applying fixes + if err := CheckValid(buf.Bytes()); err != nil { + return fmt.Errorf("updated config is invalid: %w", err) + } + } + + if outputPath == "" { + _, err = os.Stdout.Write(buf.Bytes()) + } else { + err = atomicfile.WriteData(outputPath, buf.Bytes(), 0o600) + } + + return err +} + +// CheckValid checks whether the specified config appears to be a valid CometBFT config file. +// It tries to unmarshal the config into both the server and client config structs. +func CheckValid(data []byte) error { + v := viper.New() + v.SetConfigType("toml") + + if err := v.ReadConfig(bytes.NewReader(data)); err != nil { + return fmt.Errorf("reading config: %w", err) + } + + var cfg config.Config + if err := v.Unmarshal(&cfg); err != nil { + return fmt.Errorf("failed to unmarshal as config: %w", err) + } + + return nil +} diff --git a/internal/confix/upgrade_test.go b/internal/confix/upgrade_test.go new file mode 100644 index 00000000000..a805f18171a --- /dev/null +++ b/internal/confix/upgrade_test.go @@ -0,0 +1,34 @@ +package confix_test + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/cometbft/cometbft/internal/confix" +) + +func mustReadConfig(t *testing.T, path string) []byte { + t.Helper() + f, err := os.ReadFile(path) + if err != nil { + t.Fatalf("failed to open file: %v", err) + } + + return f +} + +func TestCheckValid(t *testing.T) { + err := confix.CheckValid(mustReadConfig(t, "data/v0.34.toml")) + assert.NoError(t, err) + + err = confix.CheckValid(mustReadConfig(t, "data/v0.37.toml")) + assert.NoError(t, err) + + err = confix.CheckValid(mustReadConfig(t, "data/v0.38.toml")) + assert.NoError(t, err) + + err = confix.CheckValid(mustReadConfig(t, "data/v1.0.toml")) + assert.NoError(t, err) +} diff --git a/consensus/README.md b/internal/consensus/README.md similarity index 100% rename from consensus/README.md rename to internal/consensus/README.md diff --git a/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go similarity index 85% rename from consensus/byzantine_test.go rename to internal/consensus/byzantine_test.go index 07cb47791d7..eabb5413032 100644 --- a/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -13,28 +13,26 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abcicli "github.com/cometbft/cometbft/abci/client" abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/evidence" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" + "github.com/cometbft/cometbft/internal/evidence" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" mempl "github.com/cometbft/cometbft/mempool" - "github.com/cometbft/cometbft/proxy" - "github.com/cometbft/cometbft/p2p" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" ) 
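Stepping back to the confix package completed above, a hedged end-to-end sketch of how `LoadConfig`, `PlanBuilder`, `WithLogWriter`, and `Upgrade` compose; note that `internal/confix` is only importable from within the CometBFT module, and the config path is illustrative:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/cometbft/cometbft/internal/confix"
)

func main() {
	const path = "config/config.toml" // illustrative path

	// Parse the operator's existing config file.
	doc, err := confix.LoadConfig(path)
	if err != nil {
		log.Fatal(err)
	}

	// Plan the key additions/removals relative to the embedded v1.0 template.
	plan := confix.PlanBuilder(doc, "v1.0")

	// Report each applied step on stderr and rewrite the file in place
	// (Upgrade re-reads, transforms, validates, and atomically writes).
	ctx := confix.WithLogWriter(context.Background(), os.Stderr)
	if err := confix.Upgrade(ctx, plan, path, path, false); err != nil {
		log.Fatal(err)
	}
}
```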
-//---------------------------------------------- +// ---------------------------------------------- // byzantine failures -// Byzantine node sends two different prevotes (nil and blockID) to the same validator +// Byzantine node sends two different prevotes (nil and blockID) to the same validator. func TestByzantinePrevoteEquivocation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -46,7 +44,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { tickerFunc := newMockTickerFunc(true) appFunc := newKVStore - genDoc, privVals := randGenesisDoc(nValidators, false, 30, nil) + genDoc, privVals := randGenesisDoc(nValidators, 30, nil, cmttime.Now()) css := make([]*State, nValidators) for i := 0; i < nValidators; i++ { @@ -58,10 +56,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal + ensureDir(path.Dir(thisConfig.Consensus.WalFile())) // dir for wal app := appFunc() vals := types.TM2PB.ValidatorUpdates(state.Validators) - _, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals}) + _, err := app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) blockDB := dbm.NewMemDB() @@ -73,8 +71,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { proxyAppConnMem := proxy.NewAppConnMempool(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics()) // Make Mempool + _, lanesInfo := fetchAppInfo(app) mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, + lanesInfo, state.LastBlockHeight, mempl.WithPreCheck(sm.TxPreCheck(state)), mempl.WithPostCheck(sm.TxPostCheck(state))) @@ -145,11 +145,11 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // allow first height to happen normally so that byzantine validator is no longer proposer if height == prevoteHeight { bcs.Logger.Info("Sending two votes") - prevote1, err := bcs.signVote(cmtproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header(), nil) + prevote1, err := bcs.signVote(types.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header(), bcs.ProposalBlock) require.NoError(t, err) - prevote2, err := bcs.signVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) + prevote2, err := bcs.signVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) require.NoError(t, err) - peerList := reactors[byzantineNode].Switch.Peers().List() + peerList := reactors[byzantineNode].Switch.Peers().Copy() bcs.Logger.Info("Getting peer list", "peers", peerList) // send two votes to all peers (1st to one half, 2nd to another half) for i, peer := range peerList { @@ -192,7 +192,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { extCommit = &types.ExtendedCommit{} case lazyProposer.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit - veHeightParam := types.ABCIParams{VoteExtensionsEnableHeight: height} + // Vote extensions are enabled by default for test units + veHeightParam := lazyProposer.state.ConsensusParams.Feature extCommit = lazyProposer.LastCommit.MakeExtendedCommit(veHeightParam) default: // This shouldn't happen. 
lazyProposer.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") @@ -224,16 +225,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // Make proposal propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID) + proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID, block.Header.Time) p := proposal.ToProto() if err := lazyProposer.privValidator.SignProposal(lazyProposer.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue - lazyProposer.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + lazyProposer.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, "", cmttime.Now()}) for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - lazyProposer.sendInternalMessage(msgInfo{&BlockPartMessage{lazyProposer.Height, lazyProposer.Round, part}, ""}) + lazyProposer.sendInternalMessage(msgInfo{&BlockPartMessage{lazyProposer.Height, lazyProposer.Round, part}, "", time.Time{}}) } lazyProposer.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) lazyProposer.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) @@ -296,16 +297,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // byzantine validator sends conflicting proposals into A and B, // and prevotes/precommits on both of them. // B sees a commit, A doesn't. -// Heal partition and ensure A sees the commit +// Heal partition and ensure A sees the commit. func TestByzantineConflictingProposalsWithPartition(t *testing.T) { - N := 4 + n := 4 logger := consensusLogger().With("test", "byzantine") ctx, cancel := context.WithCancel(context.Background()) defer cancel() app := newKVStore - css, cleanup := randConsensusNet(t, N, "consensus_byzantine_test", newMockTickerFunc(false), app) + css, cleanup := randConsensusNet(t, n, "consensus_byzantine_test", newMockTickerFunc(false), app) defer cleanup() // give the byzantine validator a normal ticker @@ -313,22 +314,21 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { ticker.SetLogger(css[0].Logger) css[0].SetTimeoutTicker(ticker) - switches := make([]*p2p.Switch, N) + switches := make([]*p2p.Switch, n) p2pLogger := logger.With("module", "p2p") - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { switches[i] = p2p.MakeSwitch( config.P2P, i, - func(i int, sw *p2p.Switch) *p2p.Switch { + func(_ int, sw *p2p.Switch) *p2p.Switch { return sw }) switches[i].SetLogger(p2pLogger.With("validator", i)) } - blocksSubs := make([]types.Subscription, N) - reactors := make([]p2p.Reactor, N) - for i := 0; i < N; i++ { - + blocksSubs := make([]types.Subscription, n) + reactors := make([]p2p.Reactor, n) + for i := 0; i < n; i++ { // enable txs so we can create different proposals assertMempool(css[i].txNotifier).EnableTxsAvailable() // make first val byzantine @@ -342,7 +342,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { } // We are setting the prevote function to do nothing because the prevoting // and precommitting are done alongside the proposal. 
- css[i].doPrevote = func(height int64, round int32) {} + css[i].doPrevote = func(_ int64, _ int32) {} } eventBus := css[i].eventBus @@ -380,7 +380,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { } }() - p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { + p2p.MakeConnectedSwitches(config.P2P, n, func(i int, _ *p2p.Switch) *p2p.Switch { // ignore new switch s, we already made ours switches[i].AddReactor("CONSENSUS", reactors[i]) return switches[i] @@ -394,7 +394,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // start the non-byz state machines. // note these must be started before the byz - for i := 1; i < N; i++ { + for i := 1; i < n; i++ { cr := reactors[i].(*Reactor) cr.SwitchToConsensus(cr.conS.GetState(), false) } @@ -407,7 +407,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // byz proposer sends one block to peers[0] // and the other block to peers[1] and peers[2]. // note peers and switches order don't match. - peers := switches[0].Peers().List() + peers := switches[0].Peers().Copy() // partition A ind0 := getSwitchIndex(switches, peers[0]) @@ -427,7 +427,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // wait till everyone makes the first new block // (one of them already has) wg := new(sync.WaitGroup) - for i := 1; i < N-1; i++ { + for i := 1; i < n-1; i++ { wg.Add(1) go func(j int) { <-blocksSubs[j].Out() @@ -453,20 +453,18 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { } } -//------------------------------- +// ------------------------------- // byzantine consensus functions -func byzantineDecideProposalFunc(ctx context.Context, t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) { +func byzantineDecideProposalFunc(_ context.Context, t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) { + t.Helper() // byzantine user should create two proposals and try to split the vote. // Avoid sending on internalMsgQueue and running consensus state. // Create a new proposal block from state/txs from the mempool. - block1, err := cs.createProposalBlock(ctx) - require.NoError(t, err) - blockParts1, err := block1.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) - polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} - proposal1 := types.NewProposal(height, round, polRound, propBlockID) + block1, blockParts1, propBlockID := createProposalBlock(t, cs) + polRound := cs.ValidRound + proposal1 := types.NewProposal(height, round, polRound, propBlockID, block1.Time) p1 := proposal1.ToProto() if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil { t.Error(err) @@ -475,15 +473,12 @@ func byzantineDecideProposalFunc(ctx context.Context, t *testing.T, height int64 proposal1.Signature = p1.Signature // some new transactions come in (this ensures that the proposals are different) - deliverTxsRange(t, cs, 0, 1) + deliverTxsRange(t, cs, 1) // Create a new proposal block from state/txs from the mempool. 
- block2, err := cs.createProposalBlock(ctx) - require.NoError(t, err) - blockParts2, err := block2.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) - polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} - proposal2 := types.NewProposal(height, round, polRound, propBlockID) + block2, blockParts2, propBlockID := createProposalBlock(t, cs) + polRound = cs.ValidRound + proposal2 := types.NewProposal(height, round, polRound, propBlockID, block2.Time) p2 := proposal2.ToProto() if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil { t.Error(err) @@ -493,15 +488,16 @@ func byzantineDecideProposalFunc(ctx context.Context, t *testing.T, height int64 block1Hash := block1.Hash() block2Hash := block2.Hash() + require.NotEqual(t, block1Hash, block2Hash) // broadcast conflicting proposals/block parts to peers - peers := sw.Peers().List() + peers := sw.Peers().Copy() t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers)) for i, peer := range peers { if i < len(peers)/2 { - go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) + go sendProposalAndParts(height, round, cs, peer, proposal1, block1, block1Hash, blockParts1) } else { - go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) + go sendProposalAndParts(height, round, cs, peer, proposal2, block2, block2Hash, blockParts2) } } } @@ -512,6 +508,7 @@ func sendProposalAndParts( cs *State, peer p2p.Peer, proposal *types.Proposal, + block *types.Block, blockHash []byte, parts *types.PartSet, ) { @@ -540,8 +537,8 @@ func sendProposalAndParts( // votes cs.mtx.Lock() - prevote, _ := cs.signVote(cmtproto.PrevoteType, blockHash, parts.Header(), nil) - precommit, _ := cs.signVote(cmtproto.PrecommitType, blockHash, parts.Header(), nil) + prevote, _ := cs.signVote(types.PrevoteType, blockHash, parts.Header(), nil) + precommit, _ := cs.signVote(types.PrecommitType, blockHash, parts.Header(), block) cs.mtx.Unlock() peer.Send(p2p.Envelope{ ChannelID: VoteChannel, @@ -553,7 +550,7 @@ func sendProposalAndParts( }) } -//---------------------------------------- +// ---------------------------------------- // byzantine consensus reactor type ByzantineReactor struct { @@ -568,8 +565,11 @@ func NewByzantineReactor(conR *Reactor) *ByzantineReactor { } } -func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } -func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } +func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } +func (br *ByzantineReactor) StreamDescriptors() []p2p.StreamDescriptor { + return br.reactor.StreamDescriptors() +} + func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { if !br.reactor.IsRunning() { return @@ -586,11 +586,13 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { } } -func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason any) { br.reactor.RemovePeer(peer, reason) } +// Receive forwards all messages to the underlying reactor. 
func (br *ByzantineReactor) Receive(e p2p.Envelope) { br.reactor.Receive(e) } -func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } + +func (*ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } diff --git a/consensus/common_test.go b/internal/consensus/common_test.go similarity index 74% rename from consensus/common_test.go rename to internal/consensus/common_test.go index 7e103bea900..725f503b8d4 100644 --- a/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "os" "path" @@ -12,27 +13,26 @@ import ( "testing" "time" - "github.com/go-kit/log/term" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abcicli "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cfg "github.com/cometbft/cometbft/config" - cstypes "github.com/cometbft/cometbft/consensus/types" + "github.com/cometbft/cometbft/crypto" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" + cmtos "github.com/cometbft/cometbft/internal/os" "github.com/cometbft/cometbft/internal/test" cmtbytes "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" cmtsync "github.com/cometbft/cometbft/libs/sync" mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/store" @@ -48,15 +48,15 @@ const ( // test. type cleanupFunc func() -// genesis, chain_id, priv_val +// genesis, chain_id, priv_val. var ( config *cfg.Config // NOTE: must be reset for each _test.go file consensusReplayConfig *cfg.Config ensureTimeout = time.Millisecond * 200 ) -func ensureDir(dir string, mode os.FileMode) { - if err := cmtos.EnsureDir(dir, mode); err != nil { +func ensureDir(dir string) { + if err := cmtos.EnsureDir(dir, 0o700); err != nil { panic(err) } } @@ -65,13 +65,14 @@ func ResetConfig(name string) *cfg.Config { return test.ResetTestRoot(name) } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // validator stub (a kvstore consensus peer we control) type validatorStub struct { Index int32 // Validator index. NOTE: we don't assume validator set changes. 
Height int64 Round int32 + clock cmttime.Source types.PrivValidator VotingPower int64 lastVote *types.Vote @@ -84,15 +85,25 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida Index: valIndex, PrivValidator: privValidator, VotingPower: testMinPower, + clock: cmttime.DefaultSource{}, } } +func signProposal(t *testing.T, proposal *types.Proposal, chainID string, vss *validatorStub) { + t.Helper() + p := proposal.ToProto() + err := vss.SignProposal(chainID, p) + require.NoError(t, err) + proposal.Signature = p.Signature +} + func (vs *validatorStub) signVote( - voteType cmtproto.SignedMsgType, - hash []byte, - header types.PartSetHeader, + voteType types.SignedMsgType, + chainID string, + blockID types.BlockID, voteExtension []byte, extEnabled bool, + timestamp time.Time, ) (*types.Vote, error) { pubKey, err := vs.PrivValidator.GetPubKey() if err != nil { @@ -102,14 +113,14 @@ func (vs *validatorStub) signVote( Type: voteType, Height: vs.Height, Round: vs.Round, - BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, - Timestamp: cmttime.Now(), + BlockID: blockID, + Timestamp: timestamp, ValidatorAddress: pubKey.Address(), ValidatorIndex: vs.Index, Extension: voteExtension, } v := vote.ToProto() - if err = vs.PrivValidator.SignVote(test.DefaultTestChainID, v); err != nil { + if err = vs.PrivValidator.SignVote(chainID, v, true); err != nil { return nil, fmt.Errorf("sign vote failed: %w", err) } @@ -131,20 +142,21 @@ func (vs *validatorStub) signVote( return vote, err } -// Sign vote for type/hash/header -func signVote(vs *validatorStub, voteType cmtproto.SignedMsgType, hash []byte, header types.PartSetHeader, extEnabled bool) *types.Vote { +// Sign vote for type/hash/header. +func signVoteWithTimestamp(vs *validatorStub, voteType types.SignedMsgType, chainID string, + blockID types.BlockID, extEnabled bool, timestamp time.Time, +) *types.Vote { var ext []byte // Only non-nil precommits are allowed to carry vote extensions. 
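+	// (Hence the checks below: an extension is attached only to a precommit
+	// for a non-nil block, and a non-precommit vote with extensions enabled
+	// panics in this test helper.)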
if extEnabled { - if voteType != cmtproto.PrecommitType { - panic(fmt.Errorf("vote type is not precommit but extensions enabled")) + if voteType != types.PrecommitType { + panic(errors.New("vote type is not precommit but extensions enabled")) } - if len(hash) != 0 || !header.IsZero() { + if len(blockID.Hash) != 0 || !blockID.PartSetHeader.IsZero() { ext = []byte("extension") } } - v, err := vs.signVote(voteType, hash, header, ext, extEnabled) - + v, err := vs.signVote(voteType, chainID, blockID, ext, extEnabled, timestamp) if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) } @@ -154,16 +166,20 @@ func signVote(vs *validatorStub, voteType cmtproto.SignedMsgType, hash []byte, h return v } +func signVote(vs *validatorStub, voteType types.SignedMsgType, chainID string, blockID types.BlockID, extEnabled bool) *types.Vote { + return signVoteWithTimestamp(vs, voteType, chainID, blockID, extEnabled, vs.clock.Now()) +} + func signVotes( - voteType cmtproto.SignedMsgType, - hash []byte, - header types.PartSetHeader, + voteType types.SignedMsgType, + chainID string, + blockID types.BlockID, extEnabled bool, vss ...*validatorStub, ) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { - votes[i] = signVote(vs, voteType, hash, header, extEnabled) + votes[i] = signVote(vs, voteType, chainID, blockID, extEnabled) } return votes } @@ -171,6 +187,7 @@ func signVotes( func incrementHeight(vss ...*validatorStub) { for _, vs := range vss { vs.Height++ + vs.Round = 0 } } @@ -210,7 +227,7 @@ func (vss ValidatorStubsByPower) Swap(i, j int) { vss[j].Index = int32(j) } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // Functions for transitioning the consensus state func startTestRound(cs *State, height int64, round int32) { @@ -218,20 +235,38 @@ func startTestRound(cs *State, height int64, round int32) { cs.startRoutines(0) } +func createProposalBlockWithTime(t *testing.T, cs *State, time time.Time) (*types.Block, *types.PartSet, types.BlockID) { + t.Helper() + block, err := cs.createProposalBlock(context.Background()) + if !time.IsZero() { + block.Time = cmttime.Canonical(time) + } + assert.NoError(t, err) + blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) + assert.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} + return block, blockParts, blockID +} + +func createProposalBlock(t *testing.T, cs *State) (*types.Block, *types.PartSet, types.BlockID) { + t.Helper() + return createProposalBlockWithTime(t, cs, time.Time{}) +} + // Create proposal block from cs1 but sign it with vs. 
func decideProposal( - ctx context.Context, + _ context.Context, t *testing.T, cs1 *State, vs *validatorStub, height int64, round int32, ) (*types.Proposal, *types.Block) { + t.Helper() + cs1.mtx.Lock() - block, err := cs1.createProposalBlock(ctx) - require.NoError(t, err) - blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) - require.NoError(t, err) + block, _, propBlockID := createProposalBlock(t, cs1) + validRound := cs1.ValidRound chainID := cs1.state.ChainID cs1.mtx.Unlock() @@ -240,8 +275,7 @@ func decideProposal( } // Make proposal - polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal := types.NewProposal(height, round, polRound, propBlockID) + proposal := types.NewProposal(height, round, validRound, propBlockID, block.Header.Time) p := proposal.ToProto() if err := vs.SignProposal(chainID, p); err != nil { panic(err) @@ -260,17 +294,18 @@ func addVotes(to *State, votes ...*types.Vote) { func signAddVotes( to *State, - voteType cmtproto.SignedMsgType, - hash []byte, - header types.PartSetHeader, + voteType types.SignedMsgType, + chainID string, + blockID types.BlockID, extEnabled bool, vss ...*validatorStub, ) { - votes := signVotes(voteType, hash, header, extEnabled, vss...) + votes := signVotes(voteType, chainID, blockID, extEnabled, vss...) addVotes(to, votes...) } func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { + t.Helper() prevotes := cs.Votes.Prevotes(round) pubKey, err := privVal.GetPubKey() require.NoError(t, err) @@ -284,13 +319,16 @@ func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStu panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash)) } } else { - if !bytes.Equal(vote.BlockID.Hash, blockHash) { + if vote.BlockID.Hash == nil { + panic(fmt.Sprintf("Expected prevote to be for %X, got ", blockHash)) + } else if !bytes.Equal(vote.BlockID.Hash, blockHash) { panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash)) } } } func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { + t.Helper() votes := cs.LastCommit pv, err := privVal.GetPubKey() require.NoError(t, err) @@ -313,6 +351,7 @@ func validatePrecommit( votedBlockHash, lockedBlockHash []byte, ) { + t.Helper() precommits := cs.Votes.Precommits(thisRound) pv, err := privVal.GetPubKey() require.NoError(t, err) @@ -371,12 +410,40 @@ func subscribeToVoter(cs *State, addr []byte) <-chan cmtpubsub.Message { return ch } -//------------------------------------------------------------------------------- +func subscribeToVoterBuffered(cs *State, addr []byte) <-chan cmtpubsub.Message { + votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, 10) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v with outcapacity 10", testSubscriber, types.EventQueryVote)) + } + ch := make(chan cmtpubsub.Message, 10) + go func() { + for msg := range votesSub.Out() { + vote := msg.Data().(types.EventDataVote) + // we only fire for our own votes + if bytes.Equal(addr, vote.Vote.ValidatorAddress) { + ch <- msg + } + } + }() + return ch +} + +// ------------------------------------------------------------------------------- +// application + +func fetchAppInfo(app abci.Application) (*abci.InfoResponse, *mempl.LanesInfo) { + resp, _ := app.Info(context.Background(), proxy.InfoRequest) + lanesInfo, _ := 
mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + return resp, lanesInfo +} + +// ------------------------------------------------------------------------------- // consensus states func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { config := test.ResetTestRoot("consensus_state_test") - return newStateWithConfig(config, state, pv, app) + _, lanesInfo := fetchAppInfo(app) + return newStateWithConfig(config, state, pv, app, lanesInfo) } func newStateWithConfig( @@ -384,9 +451,10 @@ func newStateWithConfig( state sm.State, pv types.PrivValidator, app abci.Application, + laneInfo *mempl.LanesInfo, ) *State { blockDB := dbm.NewMemDB() - return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) + return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB, laneInfo) } func newStateWithConfigAndBlockStore( @@ -395,6 +463,7 @@ func newStateWithConfigAndBlockStore( pv types.PrivValidator, app abci.Application, blockDB dbm.DB, + laneInfo *mempl.LanesInfo, ) *State { // Get BlockStore blockStore := store.NewBlockStore(blockDB) @@ -410,6 +479,7 @@ func newStateWithConfigAndBlockStore( // Make Mempool mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, + laneInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -433,7 +503,7 @@ func newStateWithConfigAndBlockStore( blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) + cs.SetLogger(consensusLogger()) cs.SetPrivValidator(pv) eventBus := types.NewEventBus() @@ -446,13 +516,16 @@ func newStateWithConfigAndBlockStore( return cs } -func loadPrivValidator(config *cfg.Config) *privval.FilePV { +func loadPrivValidator(config *cfg.Config) (*privval.FilePV, error) { privValidatorKeyFile := config.PrivValidatorKeyFile() - ensureDir(filepath.Dir(privValidatorKeyFile), 0o700) + ensureDir(filepath.Dir(privValidatorKeyFile)) privValidatorStateFile := config.PrivValidatorStateFile() - privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile, nil) + if err != nil { + return nil, err + } privValidator.Reset() - return privValidator + return privValidator, nil } func randState(nValidators int) (*State, []*validatorStub) { @@ -465,9 +538,10 @@ func randStateWithAppWithHeight( height int64, ) (*State, []*validatorStub) { c := test.ConsensusParams() - c.ABCI.VoteExtensionsEnableHeight = height + c.Feature.VoteExtensionsEnableHeight = height return randStateWithAppImpl(nValidators, app, c) } + func randStateWithApp(nValidators int, app abci.Application) (*State, []*validatorStub) { c := test.ConsensusParams() return randStateWithAppImpl(nValidators, app, c) @@ -477,9 +551,18 @@ func randStateWithAppImpl( nValidators int, app abci.Application, consensusParams *types.ConsensusParams, +) (*State, []*validatorStub) { + return randStateWithAppImplGenesisTime(nValidators, app, consensusParams, cmttime.Now()) +} + +func randStateWithAppImplGenesisTime( + nValidators int, + app abci.Application, + consensusParams *types.ConsensusParams, + genesisTime time.Time, ) (*State, []*validatorStub) { // Get State - state, privVals := randGenesisState(nValidators, false, 10, consensusParams) + state, privVals := 
randGenesisStateWithTime(nValidators, consensusParams, genesisTime) vss := make([]*validatorStub, nValidators) @@ -494,14 +577,13 @@ func randStateWithAppImpl( return cs, vss } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- func ensureNoNewEvent(ch <-chan cmtpubsub.Message, timeout time.Duration, errorMessage string, ) { select { case <-time.After(timeout): - break case <-ch: panic(errorMessage) } @@ -574,25 +656,6 @@ func ensureNewTimeout(timeoutCh <-chan cmtpubsub.Message, height int64, round in "Timeout expired while waiting for NewTimeout event") } -func ensureNewProposal(proposalCh <-chan cmtpubsub.Message, height int64, round int32) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewProposal event") - case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) - if !ok { - panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", - msg.Data())) - } - if proposalEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height)) - } - if proposalEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) - } - } -} - func ensureNewValidBlock(validBlockCh <-chan cmtpubsub.Message, height int64, round int32) { ensureNewEvent(validBlockCh, height, round, ensureTimeout, "Timeout expired while waiting for NewValidBlock event") @@ -643,9 +706,9 @@ func ensureRelock(relockCh <-chan cmtpubsub.Message, height int64, round int32) "Timeout expired while waiting for RelockValue event") } -func ensureProposal(proposalCh <-chan cmtpubsub.Message, height int64, round int32, propID types.BlockID) { +func ensureProposalWithTimeout(proposalCh <-chan cmtpubsub.Message, height int64, round int32, propID *types.BlockID, timeout time.Duration) { select { - case <-time.After(ensureTimeout): + case <-time.After(timeout): panic("Timeout expired while waiting for NewProposal event") case msg := <-proposalCh: proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) @@ -659,22 +722,33 @@ func ensureProposal(proposalCh <-chan cmtpubsub.Message, height int64, round int if proposalEvent.Round != round { panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) } - if !proposalEvent.BlockID.Equals(propID) { - panic(fmt.Sprintf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID)) + if propID != nil { + if !proposalEvent.BlockID.Equals(*propID) { + panic(fmt.Sprintf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, *propID)) + } } } } +func ensureProposal(proposalCh <-chan cmtpubsub.Message, height int64, round int32, propID types.BlockID) { + ensureProposalWithTimeout(proposalCh, height, round, &propID, ensureTimeout) +} + +// For the propose, as we do not know the blockID in advance. 
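Note: `ensureProposal` above and `ensureNewProposal` just below both delegate to `ensureProposalWithTimeout`, whose `*types.BlockID` parameter encodes "skip the block-ID check" as `nil`. A minimal, self-contained sketch of that nullable-expected-value pattern (the local `BlockID` type is a stand-in, not the package's):

```go
package main

import "fmt"

// BlockID stands in for types.BlockID; only equality matters here.
type BlockID struct{ Hash string }

// checkProposal mirrors ensureProposalWithTimeout: a nil want means the
// caller asserts only height/round, because the proposed block is not
// known in advance; a non-nil want must match exactly.
func checkProposal(got BlockID, want *BlockID) error {
	if want != nil && got != *want {
		return fmt.Errorf("proposed block does not match expected block (%v != %v)", got, *want)
	}
	return nil
}

func main() {
	fmt.Println(checkProposal(BlockID{"aa"}, nil))            // <nil>: block ID unknown in advance
	fmt.Println(checkProposal(BlockID{"aa"}, &BlockID{"bb"})) // mismatch error
}
```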
+func ensureNewProposal(proposalCh <-chan cmtpubsub.Message, height int64, round int32) { + ensureProposalWithTimeout(proposalCh, height, round, nil, ensureTimeout) +} + func ensurePrecommit(voteCh <-chan cmtpubsub.Message, height int64, round int32) { - ensureVote(voteCh, height, round, cmtproto.PrecommitType) + ensureVote(voteCh, height, round, types.PrecommitType) } func ensurePrevote(voteCh <-chan cmtpubsub.Message, height int64, round int32) { - ensureVote(voteCh, height, round, cmtproto.PrevoteType) + ensureVote(voteCh, height, round, types.PrevoteType) } func ensureVote(voteCh <-chan cmtpubsub.Message, height int64, round int32, - voteType cmtproto.SignedMsgType, + voteType types.SignedMsgType, ) { select { case <-time.After(ensureTimeout): @@ -700,15 +774,15 @@ func ensureVote(voteCh <-chan cmtpubsub.Message, height int64, round int32, func ensurePrevoteMatch(t *testing.T, voteCh <-chan cmtpubsub.Message, height int64, round int32, hash []byte) { t.Helper() - ensureVoteMatch(t, voteCh, height, round, hash, cmtproto.PrevoteType) + ensureVoteMatch(t, voteCh, height, round, hash, types.PrevoteType) } func ensurePrecommitMatch(t *testing.T, voteCh <-chan cmtpubsub.Message, height int64, round int32, hash []byte) { t.Helper() - ensureVoteMatch(t, voteCh, height, round, hash, cmtproto.PrecommitType) + ensureVoteMatch(t, voteCh, height, round, hash, types.PrecommitType) } -func ensureVoteMatch(t *testing.T, voteCh <-chan cmtpubsub.Message, height int64, round int32, hash []byte, voteType cmtproto.SignedMsgType) { +func ensureVoteMatch(t *testing.T, voteCh <-chan cmtpubsub.Message, height int64, round int32, hash []byte, voteType types.SignedMsgType) { t.Helper() select { case <-time.After(ensureTimeout): @@ -730,14 +804,6 @@ func ensureVoteMatch(t *testing.T, voteCh <-chan cmtpubsub.Message, height int64 } } -func ensurePrecommitTimeout(ch <-chan cmtpubsub.Message) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for the Precommit to Timeout") - case <-ch: - } -} - func ensureNewEventOnChannel(ch <-chan cmtpubsub.Message) { select { case <-time.After(ensureTimeout * 12 / 10): // 20% leniency for goroutine scheduling uncertainty @@ -746,26 +812,18 @@ func ensureNewEventOnChannel(ch <-chan cmtpubsub.Message) { } } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // consensus nets -// consensusLogger is a TestingLogger which uses a different -// color for each validator ("validator" key must exist). 
func consensusLogger() log.Logger { - return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "validator" { - return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} - } - } - return term.FgBgColor{} - }).With("module", "consensus") + return log.TestingLogger().With("module", "consensus") } func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc func() TimeoutTicker, - appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) { + appFunc func() abci.Application, configOpts ...func(*cfg.Config), +) ([]*State, cleanupFunc) { t.Helper() - genDoc, privVals := randGenesisDoc(nValidators, false, 30, nil) + genDoc, privVals := randGenesisDoc(nValidators, 30, nil, cmttime.Now()) css := make([]*State, nValidators) logger := consensusLogger() configRootDirs := make([]string, 0, nValidators) @@ -780,15 +838,16 @@ func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc for _, opt := range configOpts { opt(thisConfig) } - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal + ensureDir(filepath.Dir(thisConfig.Consensus.WalFile())) // dir for wal app := appFunc() + _, lanesInfo := fetchAppInfo(app) vals := types.TM2PB.ValidatorUpdates(state.Validators) - _, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals}) + _, err := app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) - css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB) + css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB, lanesInfo) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) + css[i].SetLogger(logger.With("validator", i)) } return css, func() { for _, dir := range configRootDirs { @@ -797,7 +856,7 @@ func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc } } -// nPeers = nValidators + nNotValidator +// nPeers = nValidators + nNotValidator. 
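Note: `randConsensusNet` above (and `randConsensusNetWithPeers` just below) now derive mempool lane metadata from the application's `Info` response via `fetchAppInfo`. A hedged sketch of that flow with explicit error handling; the helper name `newLanesInfo` is hypothetical, though the calls it makes all appear in this diff:

```go
package consensus // illustrative placement alongside the test helpers

import (
	"context"

	abci "github.com/cometbft/cometbft/abci/types"
	mempl "github.com/cometbft/cometbft/mempool"
	"github.com/cometbft/cometbft/proxy"
)

// newLanesInfo is a hypothetical variant of fetchAppInfo that surfaces
// errors instead of discarding them: the Info response carries the lane
// priorities and default lane that NewCListMempool now requires.
func newLanesInfo(app abci.Application) (*mempl.LanesInfo, error) {
	resp, err := app.Info(context.Background(), proxy.InfoRequest)
	if err != nil {
		return nil, err
	}
	return mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane)
}
```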
func randConsensusNetWithPeers( t *testing.T, nValidators, @@ -806,8 +865,9 @@ func randConsensusNetWithPeers( tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application, ) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) { + t.Helper() c := test.ConsensusParams() - genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower, c) + genDoc, privVals := randGenesisDoc(nValidators, testMinPower, c, cmttime.Now()) css := make([]*State, nPeers) logger := consensusLogger() var peer0Config *cfg.Config @@ -818,10 +878,11 @@ func randConsensusNetWithPeers( DiscardABCIResponses: false, }) t.Cleanup(func() { _ = stateStore.Close() }) - state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) + require.NoError(t, err) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal + ensureDir(filepath.Dir(thisConfig.Consensus.WalFile())) // dir for wal if i == 0 { peer0Config = thisConfig } @@ -830,29 +891,26 @@ func randConsensusNetWithPeers( privVal = privVals[i] } else { tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") - if err != nil { - panic(err) - } + require.NoError(t, err) tempStateFile, err := os.CreateTemp("", "priv_validator_state_") - if err != nil { - panic(err) - } - - privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + require.NoError(t, err) + privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), nil) + require.NoError(t, err) } app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) + _, lanesInfo := fetchAppInfo(app) vals := types.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.Application); ok { // simulate handshake, receive app version. 
If don't do this, replay test will fail state.Version.Consensus.App = kvstore.AppVersion } - _, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals}) + _, err = app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) - css[i] = newStateWithConfig(thisConfig, state, privVal, app) + css[i] = newStateWithConfig(thisConfig, state, privVal, app, lanesInfo) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) + css[i].SetLogger(logger.With("validator", i)) } return css, genDoc, peer0Config, func() { for _, dir := range configRootDirs { @@ -867,17 +925,18 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { return i } } - panic("didnt find peer in switches") + panic("didn't find peer in switches") } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // genesis func randGenesisDoc(numValidators int, - randPower bool, minPower int64, consensusParams *types.ConsensusParams, + genesisTime time.Time, ) (*types.GenesisDoc, []types.PrivValidator) { + randPower := false validators := make([]types.GenesisValidator, numValidators) privValidators := make([]types.PrivValidator, numValidators) for i := 0; i < numValidators; i++ { @@ -890,8 +949,12 @@ func randGenesisDoc(numValidators int, } sort.Sort(types.PrivValidatorsByAddress(privValidators)) + if consensusParams == nil { + consensusParams = test.ConsensusParams() + } + return &types.GenesisDoc{ - GenesisTime: cmttime.Now(), + GenesisTime: genesisTime, InitialHeight: 1, ChainID: test.DefaultTestChainID, Validators: validators, @@ -900,30 +963,40 @@ func randGenesisDoc(numValidators int, } func randGenesisState( + numValidators int, //nolint: unparam + consensusParams *types.ConsensusParams, //nolint: unparam +) (sm.State, []types.PrivValidator) { + if consensusParams == nil { + consensusParams = test.ConsensusParams() + } + return randGenesisStateWithTime(numValidators, consensusParams, cmttime.Now()) +} + +func randGenesisStateWithTime( numValidators int, - randPower bool, - minPower int64, consensusParams *types.ConsensusParams, + genesisTime time.Time, ) (sm.State, []types.PrivValidator) { - genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower, consensusParams) + minPower := int64(10) + genDoc, privValidators := randGenesisDoc(numValidators, minPower, consensusParams, genesisTime) s0, _ := sm.MakeGenesisState(genDoc) return s0, privValidators } -//------------------------------------ +// ------------------------------------ // mock ticker func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker { return func() TimeoutTicker { return &mockTicker{ - c: make(chan timeoutInfo, 10), + c: make(chan timeoutInfo, 100), onlyOnce: onlyOnce, } } } // mock ticker only fires on RoundStepNewHeight -// and only once if onlyOnce=true +// and only once if onlyOnce=true. 
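Note: the mock ticker's channel buffer grows from 10 to 100 here, which suggests tests now schedule more timeouts between reads than before. As context for the struct defined just below, a self-contained sketch of its fire-once contract (all names are local stand-ins):

```go
package main

import "fmt"

// timeoutInfo stands in for the consensus package's timeoutInfo.
type timeoutInfo struct{ Step string }

// onceTicker sketches the mockTicker contract: it forwards only
// new-height timeouts, and at most once when onlyOnce is set.
type onceTicker struct {
	c        chan timeoutInfo
	onlyOnce bool
	fired    bool
}

func (m *onceTicker) ScheduleTimeout(ti timeoutInfo) {
	if m.onlyOnce && m.fired {
		return // already fired once; swallow further timeouts
	}
	if ti.Step != "RoundStepNewHeight" {
		return // the mock reacts only to new-height steps
	}
	m.c <- ti
	m.fired = true
}

func main() {
	t := &onceTicker{c: make(chan timeoutInfo, 100), onlyOnce: true}
	t.ScheduleTimeout(timeoutInfo{Step: "RoundStepNewHeight"})
	t.ScheduleTimeout(timeoutInfo{Step: "RoundStepNewHeight"}) // dropped
	fmt.Println(len(t.c)) // 1
}
```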
type mockTicker struct { c chan timeoutInfo @@ -932,11 +1005,11 @@ type mockTicker struct { fired bool } -func (m *mockTicker) Start() error { +func (*mockTicker) Start() error { return nil } -func (m *mockTicker) Stop() error { +func (*mockTicker) Stop() error { return nil } @@ -987,3 +1060,7 @@ func signDataIsEqual(v1 *types.Vote, v2 *cmtproto.Vote) bool { v1.ValidatorIndex == v2.GetValidatorIndex() && bytes.Equal(v1.Extension, v2.Extension) } + +func updateValTx(pubKey crypto.PubKey, power int64) []byte { + return kvstore.MakeValSetChangeTx(abci.NewValidatorUpdate(pubKey, power)) +} diff --git a/consensus/errors.go b/internal/consensus/errors.go similarity index 71% rename from consensus/errors.go rename to internal/consensus/errors.go index e37e3dc7605..7faf28245a4 100644 --- a/consensus/errors.go +++ b/internal/consensus/errors.go @@ -14,15 +14,36 @@ var ( ErrProposalWithoutPreviousCommit = errors.New("propose step; cannot propose anything without commit for the previous block") ) -// Consensus sentinel errors +// Consensus sentinel errors. var ( ErrInvalidProposalSignature = errors.New("error invalid proposal signature") ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") - ErrAddingVote = errors.New("error adding vote") ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key") ErrPubKeyIsNotSet = errors.New("pubkey is not set. Look for \"Can't get private validator pubkey\" errors") + ErrProposalTooManyParts = errors.New("proposal block has too many parts") ) +type ErrInvalidVote struct { + Reason string +} + +func (e ErrInvalidVote) Error() string { + return "invalid vote: " + e.Reason +} + +// ErrAddingVote is returned when adding a vote fails. +type ErrAddingVote struct { + Err error +} + +func (e ErrAddingVote) Error() string { + return "error adding vote: " + e.Err.Error() +} + +func (e ErrAddingVote) Unwrap() error { + return e.Err +} + type ErrConsensusMessageNotRecognized struct { Message any } @@ -36,7 +57,7 @@ type ErrDenyMessageOverflow struct { } func (e ErrDenyMessageOverflow) Error() string { - return fmt.Sprintf("denying message due to possible overflow: %s", e.Err.Error()) + return "denying message due to possible overflow: " + e.Err.Error() } func (e ErrDenyMessageOverflow) Unwrap() error { diff --git a/consensus/invalid_test.go b/internal/consensus/invalid_test.go similarity index 77% rename from consensus/invalid_test.go rename to internal/consensus/invalid_test.go index d6bcfdfda30..b1244d90c09 100644 --- a/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -4,43 +4,40 @@ import ( "testing" "time" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" cfg "github.com/cometbft/cometbft/config" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/p2p" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" ) -//---------------------------------------------- +// ---------------------------------------------- // byzantine failures // one byz val sends a precommit for a random block at each height -// Ensure a testnet makes blocks +// Ensure a testnet makes blocks. 
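Note on the `errors.go` hunk above, before the byzantine test that follows: turning `ErrAddingVote` from a sentinel into a wrapping struct with `Unwrap` lets callers match the underlying cause with `errors.Is`/`errors.As` instead of string comparison. A self-contained sketch (the inner sentinel here is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// Local copy of the wrapping error defined in the hunk above.
type ErrAddingVote struct{ Err error }

func (e ErrAddingVote) Error() string { return "error adding vote: " + e.Err.Error() }
func (e ErrAddingVote) Unwrap() error { return e.Err }

// errConflictingVotes is an illustrative cause, not a real sentinel.
var errConflictingVotes = errors.New("conflicting votes from validator")

func main() {
	err := ErrAddingVote{Err: errConflictingVotes}
	// Because ErrAddingVote implements Unwrap, errors.Is can walk the chain.
	fmt.Println(errors.Is(err, errConflictingVotes)) // true
	var eav ErrAddingVote
	fmt.Println(errors.As(err, &eav)) // true
}
```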
func TestReactorInvalidPrecommit(t *testing.T) { - N := 4 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore, + n := 4 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore, func(c *cfg.Config) { c.Consensus.TimeoutPropose = 3000 * time.Millisecond - c.Consensus.TimeoutPrevote = 1000 * time.Millisecond - c.Consensus.TimeoutPrecommit = 1000 * time.Millisecond + c.Consensus.TimeoutVote = 1000 * time.Millisecond }) defer cleanup() - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { ticker := NewTimeoutTicker() ticker.SetLogger(css[i].Logger) css[i].SetTimeoutTicker(ticker) - } - reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) + reactors, blocksSubs, eventBuses := startConsensusNet(t, css, n) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // this val sends a random precommit at each height - byzValIdx := N - 1 + byzValIdx := n - 1 byzVal := css[byzValIdx] byzR := reactors[byzValIdx] @@ -56,13 +53,14 @@ func TestReactorInvalidPrecommit(t *testing.T) { // wait for a bunch of blocks // TODO: make this tighter by ensuring the halt happens by block 2 for i := 0; i < 10; i++ { - timeoutWaitGroup(N, func(j int) { + timeoutWaitGroup(n, func(j int) { <-blocksSubs[j].Out() }) } } func invalidDoPrevoteFunc(t *testing.T, cs *State, sw *p2p.Switch, pv types.PrivValidator) { + t.Helper() // routine to: // - precommit for a random block // - send precommit to all peers @@ -80,20 +78,22 @@ func invalidDoPrevoteFunc(t *testing.T, cs *State, sw *p2p.Switch, pv types.Priv // precommit a random block blockHash := bytes.HexBytes(cmtrand.Bytes(32)) + timestamp := cs.voteTime(cs.Height) + precommit := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: valIndex, Height: cs.Height, Round: cs.Round, - Timestamp: cs.voteTime(), - Type: cmtproto.PrecommitType, + Timestamp: timestamp, + Type: types.PrecommitType, BlockID: types.BlockID{ Hash: blockHash, PartSetHeader: types.PartSetHeader{Total: 1, Hash: cmtrand.Bytes(32)}, }, } p := precommit.ToProto() - err = cs.privValidator.SignVote(cs.state.ChainID, p) + err = cs.privValidator.SignVote(cs.state.ChainID, p, true) if err != nil { t.Error(err) } @@ -101,7 +101,7 @@ func invalidDoPrevoteFunc(t *testing.T, cs *State, sw *p2p.Switch, pv types.Priv precommit.ExtensionSignature = p.ExtensionSignature cs.privValidator = nil // disable priv val so we don't do normal votes - peers := sw.Peers().List() + peers := sw.Peers().Copy() for _, peer := range peers { cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer) peer.Send(p2p.Envelope{ diff --git a/consensus/mempool_test.go b/internal/consensus/mempool_test.go similarity index 78% rename from consensus/mempool_test.go rename to internal/consensus/mempool_test.go index ea7286e0b7b..458eebbe2d1 100644 --- a/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -2,8 +2,8 @@ package consensus import ( "context" - "fmt" "os" + "strconv" "testing" "time" @@ -11,16 +11,14 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" mempl "github.com/cometbft/cometbft/mempool" - "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" ) -// for testing +// for testing. 
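Note: `assertMempool`, just below, narrows the consensus state's `txNotifier` back to a full `mempl.Mempool` via an unchecked type assertion, which is acceptable in tests where the concrete type is known. A self-contained sketch of the pattern with local stand-in interfaces:

```go
package main

import "fmt"

// txNotifier and Mempool stand in for the consensus/mempool interfaces.
type txNotifier interface{ TxsAvailable() <-chan struct{} }

type Mempool interface {
	txNotifier
	Size() int
}

type clistMempool struct{ ch chan struct{} }

func (m *clistMempool) TxsAvailable() <-chan struct{} { return m.ch }
func (m *clistMempool) Size() int                     { return 0 }

// assertMempool panics if the notifier is not backed by a full mempool,
// surfacing test-setup mistakes immediately.
func assertMempool(txn txNotifier) Mempool { return txn.(Mempool) }

func main() {
	var n txNotifier = &clistMempool{ch: make(chan struct{}, 1)}
	fmt.Println(assertMempool(n).Size()) // 0
}
```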
func assertMempool(txn txNotifier) mempl.Mempool { return txn.(mempl.Mempool) } @@ -29,12 +27,11 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(1, false, 10, nil) + state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - resp, err := app.Info(context.Background(), proxy.RequestInfo) - require.NoError(t, err) + resp, lanesInfo := fetchAppInfo(app) state.AppHash = resp.LastBlockAppHash - cs := newStateWithConfig(config, state, privVals[0], app) + cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) @@ -42,7 +39,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { ensureNewEventOnChannel(newBlockCh) // first block gets committed ensureNoNewEventOnChannel(newBlockCh) - deliverTxsRange(t, cs, 0, 1) + deliverTxsRange(t, cs, 1) ensureNewEventOnChannel(newBlockCh) // commit txs ensureNewEventOnChannel(newBlockCh) // commit updated app hash ensureNoNewEventOnChannel(newBlockCh) @@ -53,12 +50,13 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocksInterval = ensureTimeout - state, privVals := randGenesisState(1, false, 10, nil) + state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - resp, err := app.Info(context.Background(), proxy.RequestInfo) - require.NoError(t, err) + resp, lanesInfo := fetchAppInfo(app) + require.NotNil(t, resp) + require.NotNil(t, lanesInfo) state.AppHash = resp.LastBlockAppHash - cs := newStateWithConfig(config, state, privVals[0], app) + cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() @@ -74,21 +72,23 @@ func TestMempoolProgressInHigherRound(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(1, false, 10, nil) - cs := newStateWithConfig(config, state, privVals[0], kvstore.NewInMemoryApplication()) + state, privVals := randGenesisState(1, nil) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(app) + cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - cs.setProposal = func(proposal *types.Proposal) error { + cs.setProposal = func(proposal *types.Proposal, recvTime time.Time) error { if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and // go to next round cs.Logger.Info("Ignoring set proposal at height 2, round 0") return nil } - return cs.defaultSetProposal(proposal) + return cs.defaultSetProposal(proposal, recvTime) } startTestRound(cs, height, round) @@ -99,7 +99,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { round = 0 ensureNewRound(newRoundCh, height, round) // first round at next height - deliverTxsRange(t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next 
round + deliverTxsRange(t, cs, 1) // we deliver txs, but dont set a proposal so we get the next round ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) round++ // moving to the next round @@ -107,25 +107,30 @@ func TestMempoolProgressInHigherRound(t *testing.T) { ensureNewEventOnChannel(newBlockCh) // now we can commit the block } -func deliverTxsRange(t *testing.T, cs *State, start, end int) { +func deliverTxsRange(t *testing.T, cs *State, end int) { + t.Helper() + start := 0 // Deliver some txs. for i := start; i < end; i++ { - _, err := assertMempool(cs.txNotifier).CheckTx(kvstore.NewTx(fmt.Sprintf("%d", i), "true")) + reqRes, err := assertMempool(cs.txNotifier).CheckTx(kvstore.NewTx(strconv.Itoa(i), "true"), "") require.NoError(t, err) + require.False(t, reqRes.Response.GetCheckTx().IsErr()) } } func TestMempoolTxConcurrentWithCommit(t *testing.T) { - state, privVals := randGenesisState(1, false, 10, nil) + state, privVals := randGenesisState(1, nil) blockDB := dbm.NewMemDB() stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], kvstore.NewInMemoryApplication(), blockDB) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(app) + cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB, lanesInfo) err := stateStore.Save(state) require.NoError(t, err) newBlockEventsCh := subscribe(cs.eventBus, types.EventQueryNewBlockEvents) const numTxs int64 = 3000 - go deliverTxsRange(t, cs, 0, int(numTxs)) + go deliverTxsRange(t, cs, int(numTxs)) startTestRound(cs, cs.Height, cs.Round) for n := int64(0); n < numTxs; { @@ -141,22 +146,23 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { } func TestMempoolRmBadTx(t *testing.T) { - state, privVals := randGenesisState(1, false, 10, nil) + state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() blockDB := dbm.NewMemDB() stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) + _, lanesInfo := fetchAppInfo(app) + cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB, lanesInfo) err := stateStore.Save(state) require.NoError(t, err) // increment the counter by 1 txBytes := kvstore.NewTx("key", "value") - res, err := app.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) + res, err := app.FinalizeBlock(context.Background(), &abci.FinalizeBlockRequest{Txs: [][]byte{txBytes}}) require.NoError(t, err) assert.False(t, res.TxResults[0].IsErr()) - assert.True(t, len(res.AppHash) > 0) + assert.NotEmpty(t, res.AppHash) - _, err = app.Commit(context.Background(), &abci.RequestCommit{}) + _, err = app.Commit(context.Background(), &abci.CommitRequest{}) require.NoError(t, err) emptyMempoolCh := make(chan struct{}) @@ -166,7 +172,7 @@ func TestMempoolRmBadTx(t *testing.T) { // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool invalidTx := []byte("invalidTx") - reqRes, err := assertMempool(cs.txNotifier).CheckTx(invalidTx) + reqRes, err := assertMempool(cs.txNotifier).CheckTx(invalidTx, "") if err != nil { t.Errorf("error after CheckTx: %v", err) return diff --git a/consensus/metrics.gen.go b/internal/consensus/metrics.gen.go similarity index 77% rename from consensus/metrics.gen.go rename to internal/consensus/metrics.gen.go index 
fa7afad85e6..858ecc56304 100644 --- a/consensus/metrics.gen.go +++ b/internal/consensus/metrics.gen.go @@ -3,8 +3,8 @@ package consensus import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -106,6 +106,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "block_size_bytes", Help: "Size of the block.", }, labels).With(labelsAndValues...), + ChainSizeBytes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "chain_size_bytes", + Help: "Size of the chain in bytes.", + }, labels).With(labelsAndValues...), TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -178,7 +184,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Namespace: namespace, Subsystem: MetricsSubsystem, Name: "proposal_create_count", - Help: "ProposalCreationCount is the total number of proposals created by this node since process start. The metric is annotated by the status of the proposal from the application, either 'accepted' or 'rejected'.", + Help: "ProposalCreateCount is the total number of proposals created by this node since process start. The metric is annotated by the status of the proposal from the application, either 'accepted' or 'rejected'.", }, labels).With(labelsAndValues...), RoundVotingPowerPercent: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -192,39 +198,49 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "late_votes", Help: "LateVotes stores the number of votes that were received by this node that correspond to earlier heights and rounds than this node is currently in.", }, append(labels, "vote_type")).With(labelsAndValues...), + ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_timestamp_difference", + Help: "Difference in seconds between the local time when a proposal message is received and the timestamp in the proposal message.", + + Buckets: []float64{-1.5, -1.0, -0.5, -0.2, 0, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 4.0, 8.0}, + }, append(labels, "is_timely")).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - Height: discard.NewGauge(), - ValidatorLastSignedHeight: discard.NewGauge(), - Rounds: discard.NewGauge(), - RoundDurationSeconds: discard.NewHistogram(), - Validators: discard.NewGauge(), - ValidatorsPower: discard.NewGauge(), - ValidatorPower: discard.NewGauge(), - ValidatorMissedBlocks: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), - BlockIntervalSeconds: discard.NewHistogram(), - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewGauge(), - TotalTxs: discard.NewGauge(), - CommittedHeight: discard.NewGauge(), - BlockParts: discard.NewCounter(), - DuplicateBlockPart: discard.NewCounter(), - DuplicateVote: discard.NewCounter(), - StepDurationSeconds: discard.NewHistogram(), - BlockGossipPartsReceived: discard.NewCounter(), - QuorumPrevoteDelay: discard.NewGauge(), - FullPrevoteDelay: discard.NewGauge(), - 
VoteExtensionReceiveCount: discard.NewCounter(), - ProposalReceiveCount: discard.NewCounter(), - ProposalCreateCount: discard.NewCounter(), - RoundVotingPowerPercent: discard.NewGauge(), - LateVotes: discard.NewCounter(), + Height: discard.NewGauge(), + ValidatorLastSignedHeight: discard.NewGauge(), + Rounds: discard.NewGauge(), + RoundDurationSeconds: discard.NewHistogram(), + Validators: discard.NewGauge(), + ValidatorsPower: discard.NewGauge(), + ValidatorPower: discard.NewGauge(), + ValidatorMissedBlocks: discard.NewGauge(), + MissingValidators: discard.NewGauge(), + MissingValidatorsPower: discard.NewGauge(), + ByzantineValidators: discard.NewGauge(), + ByzantineValidatorsPower: discard.NewGauge(), + BlockIntervalSeconds: discard.NewHistogram(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewGauge(), + ChainSizeBytes: discard.NewCounter(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + BlockParts: discard.NewCounter(), + DuplicateBlockPart: discard.NewCounter(), + DuplicateVote: discard.NewCounter(), + StepDurationSeconds: discard.NewHistogram(), + BlockGossipPartsReceived: discard.NewCounter(), + QuorumPrevoteDelay: discard.NewGauge(), + FullPrevoteDelay: discard.NewGauge(), + VoteExtensionReceiveCount: discard.NewCounter(), + ProposalReceiveCount: discard.NewCounter(), + ProposalCreateCount: discard.NewCounter(), + RoundVotingPowerPercent: discard.NewGauge(), + LateVotes: discard.NewCounter(), + ProposalTimestampDifference: discard.NewHistogram(), } } diff --git a/consensus/metrics.go b/internal/consensus/metrics.go similarity index 67% rename from consensus/metrics.go rename to internal/consensus/metrics.go index 9be363d67ab..58602af1021 100644 --- a/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -4,10 +4,10 @@ import ( "strings" "time" - "github.com/go-kit/kit/metrics" - - cstypes "github.com/cometbft/cometbft/consensus/types" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" + "github.com/cometbft/cometbft/libs/metrics" + "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" ) const ( @@ -16,7 +16,7 @@ const ( MetricsSubsystem = "consensus" ) -//go:generate go run ../scripts/metricsgen -struct=Metrics +//go:generate go run ../../scripts/metricsgen -struct=Metrics // Metrics contains metrics exposed by this package. type Metrics struct { @@ -30,7 +30,7 @@ type Metrics struct { Rounds metrics.Gauge // Histogram of round duration. - RoundDurationSeconds metrics.Histogram `metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` + RoundDurationSeconds metrics.Histogram `metrics_bucketsizes:"0.1, 100, 8" metrics_buckettype:"exprange"` // Number of validators. Validators metrics.Gauge @@ -56,6 +56,8 @@ type Metrics struct { NumTxs metrics.Gauge // Size of the block. BlockSizeBytes metrics.Gauge + // Size of the chain in bytes. + ChainSizeBytes metrics.Counter // Total number of transactions. TotalTxs metrics.Gauge // The latest block height. @@ -71,14 +73,14 @@ type Metrics struct { DuplicateVote metrics.Counter // Histogram of durations for each step in the consensus protocol. 
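Note on the `ChainSizeBytes` metric added above: it is a `Counter`, while `BlockSizeBytes` remains a `Gauge`. A self-contained sketch of the behavioral difference, using local stand-ins for the metrics interfaces:

```go
package main

import "fmt"

// Minimal stand-ins for metrics.Gauge and metrics.Counter.
type gauge struct{ v float64 }

func (g *gauge) Set(v float64) { g.v = v }

type counter struct{ v float64 }

func (c *counter) Add(v float64) { c.v += v }

func main() {
	blockSize, chainSize := &gauge{}, &counter{}
	for _, b := range []int{512, 2048, 1024} { // three committed blocks
		blockSize.Set(float64(b)) // BlockSizeBytes: latest block only
		chainSize.Add(float64(b)) // ChainSizeBytes: cumulative growth
	}
	fmt.Println(blockSize.v, chainSize.v) // 1024 3584
}
```

The gauge answers "how big was the latest block" while the counter answers "how much has the chain grown since process start", matching the Help strings registered above.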
- StepDurationSeconds metrics.Histogram `metrics_labels:"step" metrics_buckettype:"exprange" metrics_bucketsizes:"0.1, 100, 8"` + StepDurationSeconds metrics.Histogram `metrics_bucketsizes:"0.1, 100, 8" metrics_buckettype:"exprange" metrics_labels:"step"` stepStart time.Time // Number of block parts received by the node, separated by whether the part // was relevant to the block the node is trying to gather or not. BlockGossipPartsReceived metrics.Counter `metrics_labels:"matches_current"` - // QuroumPrevoteMessageDelay is the interval in seconds between the proposal + // QuorumPrevoteDelay is the interval in seconds between the proposal // timestamp and the timestamp of the earliest prevote that achieved a quorum // during the prevote step. // @@ -87,13 +89,13 @@ type Metrics struct { // be above 2/3 of the total voting power of the network defines the endpoint // the endpoint of the interval. Subtract the proposal timestamp from this endpoint // to obtain the quorum delay. - //metrics:Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum. + // metrics:Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum. QuorumPrevoteDelay metrics.Gauge `metrics_labels:"proposer_address"` // FullPrevoteDelay is the interval in seconds between the proposal // timestamp and the timestamp of the latest prevote in a round where 100% // of the voting power on the network issued prevotes. - //metrics:Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted. + // metrics:Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted. FullPrevoteDelay metrics.Gauge `metrics_labels:"proposer_address"` // VoteExtensionReceiveCount is the number of vote extensions received by this @@ -107,7 +109,7 @@ type Metrics struct { // either 'accepted' or 'rejected'. ProposalReceiveCount metrics.Counter `metrics_labels:"status"` - // ProposalCreationCount is the total number of proposals created by this node + // ProposalCreateCount is the total number of proposals created by this node // since process start. // The metric is annotated by the status of the proposal from the application, // either 'accepted' or 'rejected'. @@ -122,6 +124,21 @@ type Metrics struct { // correspond to earlier heights and rounds than this node is currently // in. LateVotes metrics.Counter `metrics_labels:"vote_type"` + + // ProposalTimestampDifference is the difference between the local time + // of the validator at the time it receives a proposal message, and the + // timestamp of the received proposal message. + // + // The value of this metric is not expected to be negative, as it would + // mean that the proposal's timestamp is in the future. This indicates + // that the proposer's and this node's clocks are desynchronized. + // + // A positive value of this metric reflects the message delay from the + // proposer to this node, for the delivery of a Proposal message. This + // metric thus should drive the definition of values for the consensus + // parameter SynchronyParams.MessageDelay, used by the PBTS algorithm. + // metrics:Difference in seconds between the local time when a proposal message is received and the timestamp in the proposal message. 
+ ProposalTimestampDifference metrics.Histogram `metrics_bucketsizes:"-1.5, -1.0, -0.5, -0.2, 0, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 4.0, 8.0" metrics_labels:"is_timely"` } func (m *Metrics) MarkProposalProcessed(accepted bool) { @@ -140,36 +157,34 @@ func (m *Metrics) MarkVoteExtensionReceived(accepted bool) { m.VoteExtensionReceiveCount.With("status", status).Add(1) } -func (m *Metrics) MarkVoteReceived(vt cmtproto.SignedMsgType, power, totalPower int64) { +func (m *Metrics) MarkVoteReceived(vt types.SignedMsgType, power, totalPower int64) { p := float64(power) / float64(totalPower) - n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) + n := types.SignedMsgTypeToShortString(vt) m.RoundVotingPowerPercent.With("vote_type", n).Add(p) } func (m *Metrics) MarkRound(r int32, st time.Time) { m.Rounds.Set(float64(r)) - roundTime := time.Since(st).Seconds() + roundTime := cmttime.Since(st).Seconds() m.RoundDurationSeconds.Observe(roundTime) - pvt := cmtproto.PrevoteType - pvn := strings.ToLower(strings.TrimPrefix(pvt.String(), "SIGNED_MSG_TYPE_")) + pvn := types.SignedMsgTypeToShortString(types.PrevoteType) m.RoundVotingPowerPercent.With("vote_type", pvn).Set(0) - pct := cmtproto.PrecommitType - pcn := strings.ToLower(strings.TrimPrefix(pct.String(), "SIGNED_MSG_TYPE_")) + pcn := types.SignedMsgTypeToShortString(types.PrecommitType) m.RoundVotingPowerPercent.With("vote_type", pcn).Set(0) } -func (m *Metrics) MarkLateVote(vt cmtproto.SignedMsgType) { - n := strings.ToLower(strings.TrimPrefix(vt.String(), "SIGNED_MSG_TYPE_")) +func (m *Metrics) MarkLateVote(vt types.SignedMsgType) { + n := types.SignedMsgTypeToShortString(vt) m.LateVotes.With("vote_type", n).Add(1) } func (m *Metrics) MarkStep(s cstypes.RoundStepType) { if !m.stepStart.IsZero() { - stepTime := time.Since(m.stepStart).Seconds() + stepTime := cmttime.Since(m.stepStart).Seconds() stepName := strings.TrimPrefix(s.String(), "RoundStep") m.StepDurationSeconds.With("step", stepName).Observe(stepTime) } - m.stepStart = time.Now() + m.stepStart = cmttime.Now() } diff --git a/consensus/msgs.go b/internal/consensus/msgs.go similarity index 80% rename from consensus/msgs.go rename to internal/consensus/msgs.go index cc926069c92..7fd40917528 100644 --- a/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -2,103 +2,104 @@ package consensus import ( "fmt" + "time" - cmterrors "github.com/cometbft/cometbft/types/errors" "github.com/cosmos/gogoproto/proto" - cstypes "github.com/cometbft/cometbft/consensus/types" - "github.com/cometbft/cometbft/libs/bits" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/internal/bits" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" cmtmath "github.com/cometbft/cometbft/libs/math" - "github.com/cometbft/cometbft/p2p" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" ) -// MsgToProto takes a consensus message type and returns the proto defined consensus message. -// // TODO: This needs to be removed, but WALToProto depends on this. -func MsgToProto(msg Message) (proto.Message, error) { +// Takes a consensus message type and returns the proto defined consensus message, +// wrapped in the discriminating Message container. 
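Note: `MsgToWrappedProto`, defined just below, fills the `Sum` oneof of the `cmtcons.Message` container in every branch, removing the old `p2p.Wrapper` round-trip. A self-contained sketch of the discriminated-union shape gogoproto generates (local types are stand-ins for the generated ones):

```go
package main

import "fmt"

// Local stand-ins for the gogoproto oneof shape used by cmtcons.Message.
type Message struct{ Sum isMessage_Sum }

type isMessage_Sum interface{ isMessageSum() }

type Message_Vote struct{ Vote string }

func (*Message_Vote) isMessageSum() {}

type Message_HasVote struct{ Height int64 }

func (*Message_HasVote) isMessageSum() {}

// wrap mirrors MsgToWrappedProto: every branch fills Sum directly, so the
// caller receives the discriminating container instead of a bare message.
func wrap(kind string) Message {
	pb := Message{}
	switch kind {
	case "vote":
		pb.Sum = &Message_Vote{Vote: "prevote"}
	default:
		pb.Sum = &Message_HasVote{Height: 1}
	}
	return pb
}

func main() {
	switch m := wrap("vote").Sum.(type) {
	case *Message_Vote:
		fmt.Println("vote:", m.Vote)
	case *Message_HasVote:
		fmt.Println("has-vote at height", m.Height)
	}
}
```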
+func MsgToWrappedProto(msg Message) (cmtcons.Message, error) { + pb := cmtcons.Message{} if msg == nil { - return nil, ErrNilMessage + return pb, ErrNilMessage } - var pb proto.Message switch msg := msg.(type) { case *NewRoundStepMessage: - pb = &cmtcons.NewRoundStep{ + pb.Sum = &cmtcons.Message_NewRoundStep{NewRoundStep: &cmtcons.NewRoundStep{ Height: msg.Height, Round: msg.Round, Step: uint32(msg.Step), SecondsSinceStartTime: msg.SecondsSinceStartTime, LastCommitRound: msg.LastCommitRound, - } + }} case *NewValidBlockMessage: pbPartSetHeader := msg.BlockPartSetHeader.ToProto() pbBits := msg.BlockParts.ToProto() - pb = &cmtcons.NewValidBlock{ + pb.Sum = &cmtcons.Message_NewValidBlock{NewValidBlock: &cmtcons.NewValidBlock{ Height: msg.Height, Round: msg.Round, BlockPartSetHeader: pbPartSetHeader, BlockParts: pbBits, IsCommit: msg.IsCommit, - } + }} case *ProposalMessage: pbP := msg.Proposal.ToProto() - pb = &cmtcons.Proposal{ + pb.Sum = &cmtcons.Message_Proposal{Proposal: &cmtcons.Proposal{ Proposal: *pbP, - } + }} case *ProposalPOLMessage: pbBits := msg.ProposalPOL.ToProto() - pb = &cmtcons.ProposalPOL{ + pb.Sum = &cmtcons.Message_ProposalPol{ProposalPol: &cmtcons.ProposalPOL{ Height: msg.Height, ProposalPolRound: msg.ProposalPOLRound, ProposalPol: *pbBits, - } + }} case *BlockPartMessage: parts, err := msg.Part.ToProto() if err != nil { - return nil, cmterrors.ErrMsgToProto{MessageName: "Part", Err: err} + return pb, cmterrors.ErrMsgToProto{MessageName: "Part", Err: err} } - pb = &cmtcons.BlockPart{ + pb.Sum = &cmtcons.Message_BlockPart{BlockPart: &cmtcons.BlockPart{ Height: msg.Height, Round: msg.Round, Part: *parts, - } + }} case *VoteMessage: vote := msg.Vote.ToProto() - pb = &cmtcons.Vote{ + pb.Sum = &cmtcons.Message_Vote{Vote: &cmtcons.Vote{ Vote: vote, - } + }} case *HasVoteMessage: - pb = &cmtcons.HasVote{ + pb.Sum = &cmtcons.Message_HasVote{HasVote: &cmtcons.HasVote{ Height: msg.Height, Round: msg.Round, Type: msg.Type, Index: msg.Index, - } + }} case *HasProposalBlockPartMessage: - pb = &cmtcons.HasProposalBlockPart{ + pb.Sum = &cmtcons.Message_HasProposalBlockPart{HasProposalBlockPart: &cmtcons.HasProposalBlockPart{ Height: msg.Height, Round: msg.Round, Index: msg.Index, - } + }} case *VoteSetMaj23Message: bi := msg.BlockID.ToProto() - pb = &cmtcons.VoteSetMaj23{ + pb.Sum = &cmtcons.Message_VoteSetMaj23{VoteSetMaj23: &cmtcons.VoteSetMaj23{ Height: msg.Height, Round: msg.Round, Type: msg.Type, BlockID: bi, - } + }} case *VoteSetBitsMessage: bi := msg.BlockID.ToProto() @@ -115,16 +116,16 @@ func MsgToProto(msg Message) (proto.Message, error) { vsb.Votes = *bits } - pb = vsb + pb.Sum = &cmtcons.Message_VoteSetBits{VoteSetBits: vsb} default: - return nil, ErrConsensusMessageNotRecognized{msg} + return pb, ErrConsensusMessageNotRecognized{msg} } return pb, nil } -// MsgFromProto takes a consensus proto message and returns the native go type +// MsgFromProto takes a consensus proto message and returns the native go type. func MsgFromProto(p proto.Message) (Message, error) { if p == nil { return nil, ErrNilMessage @@ -249,7 +250,7 @@ func MsgFromProto(p proto.Message) (Message, error) { return pb, nil } -// WALToProto takes a WAL message and return a proto walMessage and error +// WALToProto takes a WAL message and return a proto walMessage and error. 
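Note: `WALToProto`, just below, persists the new `msgInfo.ReceiveTime` only when it was explicitly set, encoding "unset" as a nil pointer so older WAL entries round-trip unchanged. A self-contained sketch of that encoding:

```go
package main

import (
	"fmt"
	"time"
)

// receiveTimePtr mirrors the logic in WALToProto: a zero time means the
// receive time was never recorded, so the proto field is left empty.
func receiveTimePtr(rt time.Time) *time.Time {
	if rt.IsZero() {
		return nil // not recorded: omit from the serialized message
	}
	return &rt
}

func main() {
	fmt.Println(receiveTimePtr(time.Time{}))                     // <nil>
	fmt.Println(receiveTimePtr(time.Unix(1700000000, 0)) != nil) // true
}
```

Decoding mirrors this: `WALFromProto` copies the field back into `msgInfo` only when the pointer is non-nil.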
func WALToProto(msg WALMessage) (*cmtcons.WALMessage, error) { var pb cmtcons.WALMessage @@ -265,19 +266,24 @@ func WALToProto(msg WALMessage) (*cmtcons.WALMessage, error) { }, } case msgInfo: - consMsg, err := MsgToProto(msg.Msg) + cm, err := MsgToWrappedProto(msg.Msg) if err != nil { return nil, err } - if w, ok := consMsg.(p2p.Wrapper); ok { - consMsg = w.Wrap() + + var rtp *time.Time + if !msg.ReceiveTime.IsZero() { + // Only record the `ReceiveTime` if explicitly set. + rt := msg.ReceiveTime + rtp = &rt } - cm := consMsg.(*cmtcons.Message) + pb = cmtcons.WALMessage{ Sum: &cmtcons.WALMessage_MsgInfo{ MsgInfo: &cmtcons.MsgInfo{ - Msg: *cm, - PeerID: string(msg.PeerID), + Msg: cm, + PeerID: string(msg.PeerID), + ReceiveTime: rtp, }, }, } @@ -307,7 +313,7 @@ func WALToProto(msg WALMessage) (*cmtcons.WALMessage, error) { return &pb, nil } -// WALFromProto takes a proto wal message and return a consensus walMessage and error +// WALFromProto takes a proto wal message and return a consensus walMessage and error. func WALFromProto(msg *cmtcons.WALMessage) (WALMessage, error) { if msg == nil { return nil, ErrNilMessage @@ -330,10 +336,15 @@ func WALFromProto(msg *cmtcons.WALMessage) (WALMessage, error) { if err != nil { return nil, cmterrors.ErrMsgFromProto{MessageName: "MsgInfo", Err: err} } - pb = msgInfo{ + msgInfo := msgInfo{ Msg: walMsg, - PeerID: p2p.ID(msg.MsgInfo.PeerID), + PeerID: nodekey.ID(msg.MsgInfo.PeerID), + } + + if msg.MsgInfo.ReceiveTime != nil { + msgInfo.ReceiveTime = *msg.MsgInfo.ReceiveTime } + pb = msgInfo case *cmtcons.WALMessage_TimeoutInfo: tis, err := cmtmath.SafeConvertUint8(int64(msg.TimeoutInfo.Step)) diff --git a/consensus/msgs_test.go b/internal/consensus/msgs_test.go similarity index 51% rename from consensus/msgs_test.go rename to internal/consensus/msgs_test.go index b27650c6a58..bbc837cbca6 100644 --- a/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -10,13 +10,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/merkle" - "github.com/cometbft/cometbft/libs/bits" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/p2p" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/internal/bits" + cmtrand "github.com/cometbft/cometbft/internal/rand" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" ) func TestMsgToProto(t *testing.T) { @@ -47,12 +48,12 @@ func TestMsgToProto(t *testing.T) { require.NoError(t, err) proposal := types.Proposal{ - Type: cmtproto.ProposalType, + Type: types.ProposalType, Height: 1, - Round: 1, + Round: 2, POLRound: 1, BlockID: bi, - Timestamp: time.Now(), + Timestamp: cmttime.Now(), Signature: cmtrand.Bytes(20), } pbProposal := proposal.ToProto() @@ -64,9 +65,9 @@ func TestMsgToProto(t *testing.T) { 0, 1, 0, - cmtproto.PrecommitType, + types.PrecommitType, bi, - time.Now(), + cmttime.Now(), ) pbVote := vote.ToProto() @@ -76,110 +77,128 @@ func TestMsgToProto(t *testing.T) { want proto.Message wantErr bool }{ - {"successful NewRoundStepMessage", &NewRoundStepMessage{ - Height: 2, - Round: 1, - Step: 1, - SecondsSinceStartTime: 1, - LastCommitRound: 2, - }, &cmtcons.NewRoundStep{ - Height: 2, - Round: 
1, - Step: 1, - SecondsSinceStartTime: 1, - LastCommitRound: 2, - }, - - false}, + { + "successful NewRoundStepMessage", &NewRoundStepMessage{ + Height: 2, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 2, + }, &cmtcons.NewRoundStep{ + Height: 2, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 2, + }, - {"successful NewValidBlockMessage", &NewValidBlockMessage{ - Height: 1, - Round: 1, - BlockPartSetHeader: psh, - BlockParts: bits, - IsCommit: false, - }, &cmtcons.NewValidBlock{ - Height: 1, - Round: 1, - BlockPartSetHeader: pbPsh, - BlockParts: pbBits, - IsCommit: false, + false, }, - false}, - {"successful BlockPartMessage", &BlockPartMessage{ - Height: 100, - Round: 1, - Part: &parts, - }, &cmtcons.BlockPart{ - Height: 100, - Round: 1, - Part: *pbParts, + { + "successful NewValidBlockMessage", &NewValidBlockMessage{ + Height: 1, + Round: 1, + BlockPartSetHeader: psh, + BlockParts: bits, + IsCommit: false, + }, &cmtcons.NewValidBlock{ + Height: 1, + Round: 1, + BlockPartSetHeader: pbPsh, + BlockParts: pbBits, + IsCommit: false, + }, + + false, }, + { + "successful BlockPartMessage", &BlockPartMessage{ + Height: 100, + Round: 1, + Part: &parts, + }, &cmtcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }, - false}, - {"successful ProposalPOLMessage", &ProposalPOLMessage{ - Height: 1, - ProposalPOLRound: 1, - ProposalPOL: bits, - }, &cmtcons.ProposalPOL{ - Height: 1, - ProposalPolRound: 1, - ProposalPol: *pbBits, + false, }, - false}, - {"successful ProposalMessage", &ProposalMessage{ - Proposal: &proposal, - }, &cmtcons.Proposal{ - Proposal: *pbProposal, + { + "successful ProposalPOLMessage", &ProposalPOLMessage{ + Height: 1, + ProposalPOLRound: 1, + ProposalPOL: bits, + }, &cmtcons.ProposalPOL{ + Height: 1, + ProposalPolRound: 1, + ProposalPol: *pbBits, + }, + false, }, + { + "successful ProposalMessage", &ProposalMessage{ + Proposal: &proposal, + }, &cmtcons.Proposal{ + Proposal: *pbProposal, + }, - false}, - {"successful VoteMessage", &VoteMessage{ - Vote: vote, - }, &cmtcons.Vote{ - Vote: pbVote, + false, }, + { + "successful VoteMessage", &VoteMessage{ + Vote: vote, + }, &cmtcons.Vote{ + Vote: pbVote, + }, - false}, - {"successful VoteSetMaj23", &VoteSetMaj23Message{ - Height: 1, - Round: 1, - Type: 1, - BlockID: bi, - }, &cmtcons.VoteSetMaj23{ - Height: 1, - Round: 1, - Type: 1, - BlockID: pbBi, + false, }, + { + "successful VoteSetMaj23", &VoteSetMaj23Message{ + Height: 1, + Round: 1, + Type: 1, + BlockID: bi, + }, &cmtcons.VoteSetMaj23{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + }, - false}, - {"successful VoteSetBits", &VoteSetBitsMessage{ - Height: 1, - Round: 1, - Type: 1, - BlockID: bi, - Votes: bits, - }, &cmtcons.VoteSetBits{ - Height: 1, - Round: 1, - Type: 1, - BlockID: pbBi, - Votes: *pbBits, + false, }, + { + "successful VoteSetBits", &VoteSetBitsMessage{ + Height: 1, + Round: 1, + Type: 1, + BlockID: bi, + Votes: bits, + }, &cmtcons.VoteSetBits{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + Votes: *pbBits, + }, - false}, + false, + }, {"failure", nil, &cmtcons.Message{}, true}, } for _, tt := range testsCases { - tt := tt t.Run(tt.testName, func(t *testing.T) { - pb, err := MsgToProto(tt.msg) - if tt.wantErr == true { - assert.Equal(t, err != nil, tt.wantErr) + wpb, err := MsgToWrappedProto(tt.msg) + if tt.wantErr { + assert.Equal(t, tt.wantErr, err != nil) return } + require.NoError(t, err) + pb, err := wpb.Unwrap() + require.NoError(t, err) assert.EqualValues(t, tt.want, pb, tt.testName) 
msg, err := MsgFromProto(pb) @@ -196,7 +215,6 @@ func TestMsgToProto(t *testing.T) { } func TestWALMsgProto(t *testing.T) { - parts := types.Part{ Index: 1, Bytes: []byte("test"), @@ -209,12 +227,14 @@ func TestWALMsgProto(t *testing.T) { } pbParts, err := parts.ToProto() require.NoError(t, err) + now := cmttime.Now() testsCases := []struct { - testName string - msg WALMessage - want *cmtcons.WALMessage - wantErr bool + testName string + msg WALMessage + want *cmtcons.WALMessage + wantErr bool + equalValues bool // False for msgInfo, since equalValues does not see nil and time{} as equivalent }{ {"successful EventDataRoundState", types.EventDataRoundState{ Height: 2, @@ -228,14 +248,14 @@ func TestWALMsgProto(t *testing.T) { Step: "ronies", }, }, - }, false}, + }, false, true}, {"successful msgInfo", msgInfo{ Msg: &BlockPartMessage{ Height: 100, Round: 1, Part: &parts, }, - PeerID: p2p.ID("string"), + PeerID: nodekey.ID("string"), }, &cmtcons.WALMessage{ Sum: &cmtcons.WALMessage_MsgInfo{ MsgInfo: &cmtcons.MsgInfo{ @@ -251,7 +271,55 @@ func TestWALMsgProto(t *testing.T) { PeerID: "string", }, }, - }, false}, + }, false, false}, + {"successful msgInfo with receive time", msgInfo{ + Msg: &BlockPartMessage{ + Height: 100, + Round: 1, + Part: &parts, + }, + PeerID: nodekey.ID("string"), + }, &cmtcons.WALMessage{ + Sum: &cmtcons.WALMessage_MsgInfo{ + MsgInfo: &cmtcons.MsgInfo{ + Msg: cmtcons.Message{ + Sum: &cmtcons.Message_BlockPart{ + BlockPart: &cmtcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }, + }, + }, + PeerID: "string", + ReceiveTime: &time.Time{}, + }, + }, + }, false, false}, + {"successful msgInfo with receive time explicit", msgInfo{ + Msg: &BlockPartMessage{ + Height: 100, + Round: 1, + Part: &parts, + }, + PeerID: nodekey.ID("string"), + }, &cmtcons.WALMessage{ + Sum: &cmtcons.WALMessage_MsgInfo{ + MsgInfo: &cmtcons.MsgInfo{ + Msg: cmtcons.Message{ + Sum: &cmtcons.Message_BlockPart{ + BlockPart: &cmtcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }, + }, + }, + PeerID: "string", + ReceiveTime: &now, + }, + }, + }, false, false}, {"successful timeoutInfo", timeoutInfo{ Duration: time.Duration(100), Height: 1, @@ -266,7 +334,7 @@ func TestWALMsgProto(t *testing.T) { Step: 1, }, }, - }, false}, + }, false, true}, {"successful EndHeightMessage", EndHeightMessage{ Height: 1, }, &cmtcons.WALMessage{ @@ -275,18 +343,20 @@ func TestWALMsgProto(t *testing.T) { Height: 1, }, }, - }, false}, - {"failure", nil, &cmtcons.WALMessage{}, true}, + }, false, true}, + {"failure", nil, &cmtcons.WALMessage{}, true, true}, } for _, tt := range testsCases { - tt := tt t.Run(tt.testName, func(t *testing.T) { pb, err := WALToProto(tt.msg) - if tt.wantErr == true { - assert.Equal(t, err != nil, tt.wantErr) + if tt.wantErr { + assert.Equal(t, tt.wantErr, err != nil) return } - assert.EqualValues(t, tt.want, pb, tt.testName) + + if tt.equalValues { + assert.EqualValues(t, tt.want, pb, tt.testName) + } msg, err := WALFromProto(pb) @@ -331,7 +401,7 @@ func TestConsMsgsVectors(t *testing.T) { require.NoError(t, err) proposal := types.Proposal{ - Type: cmtproto.ProposalType, + Type: types.ProposalType, Height: 1, Round: 1, POLRound: 1, @@ -347,7 +417,7 @@ func TestConsMsgsVectors(t *testing.T) { Height: 1, Round: 0, Timestamp: date, - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: bi, } vpb := v.ToProto() @@ -373,41 +443,72 @@ func TestConsMsgsVectors(t *testing.T) { SecondsSinceStartTime: math.MaxInt64, LastCommitRound: math.MaxInt32, }}}, 
"0a2608ffffffffffffffff7f10ffffffff0718ffffffff0f20ffffffffffffffff7f28ffffffff07"}, - {"NewValidBlock", &cmtcons.Message{Sum: &cmtcons.Message_NewValidBlock{ - NewValidBlock: &cmtcons.NewValidBlock{ - Height: 1, Round: 1, BlockPartSetHeader: pbPsh, BlockParts: pbBits, IsCommit: false}}}, - "1231080110011a24080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d22050801120100"}, - {"Proposal", &cmtcons.Message{Sum: &cmtcons.Message_Proposal{Proposal: &cmtcons.Proposal{Proposal: *pbProposal}}}, - "1a720a7008201001180120012a480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d320608c0b89fdc053a146164645f6d6f72655f6578636c616d6174696f6e"}, - {"ProposalPol", &cmtcons.Message{Sum: &cmtcons.Message_ProposalPol{ - ProposalPol: &cmtcons.ProposalPOL{Height: 1, ProposalPolRound: 1}}}, - "2206080110011a00"}, - {"BlockPart", &cmtcons.Message{Sum: &cmtcons.Message_BlockPart{ - BlockPart: &cmtcons.BlockPart{Height: 1, Round: 1, Part: *pbParts}}}, - "2a36080110011a3008011204746573741a26080110011a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"}, - {"Vote_without_ext", &cmtcons.Message{Sum: &cmtcons.Message_Vote{ - Vote: &cmtcons.Vote{Vote: vpb}}}, - "32700a6e0802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e3801"}, - {"Vote_with_ext", &cmtcons.Message{Sum: &cmtcons.Message_Vote{ - Vote: &cmtcons.Vote{Vote: vextPb}}}, - "327b0a790802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e38014a09657874656e73696f6e"}, - {"HasVote", &cmtcons.Message{Sum: &cmtcons.Message_HasVote{ - HasVote: &cmtcons.HasVote{Height: 1, Round: 1, Type: cmtproto.PrevoteType, Index: 1}}}, - "3a080801100118012001"}, - {"HasVote", &cmtcons.Message{Sum: &cmtcons.Message_HasVote{ - HasVote: &cmtcons.HasVote{Height: math.MaxInt64, Round: math.MaxInt32, - Type: cmtproto.PrevoteType, Index: math.MaxInt32}}}, - "3a1808ffffffffffffffff7f10ffffffff07180120ffffffff07"}, - {"VoteSetMaj23", &cmtcons.Message{Sum: &cmtcons.Message_VoteSetMaj23{ - VoteSetMaj23: &cmtcons.VoteSetMaj23{Height: 1, Round: 1, Type: cmtproto.PrevoteType, BlockID: pbBi}}}, - "425008011001180122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"}, - {"VoteSetBits", &cmtcons.Message{Sum: &cmtcons.Message_VoteSetBits{ - VoteSetBits: &cmtcons.VoteSetBits{Height: 1, Round: 1, Type: cmtproto.PrevoteType, BlockID: pbBi, Votes: *pbBits}}}, - "4a5708011001180122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a050801120100"}, + { + "NewValidBlock", &cmtcons.Message{Sum: &cmtcons.Message_NewValidBlock{ + NewValidBlock: &cmtcons.NewValidBlock{ + Height: 1, Round: 1, BlockPartSetHeader: pbPsh, BlockParts: pbBits, IsCommit: false, + }, + }}, + "1231080110011a24080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d22050801120100", + }, + { + "Proposal", &cmtcons.Message{Sum: &cmtcons.Message_Proposal{Proposal: &cmtcons.Proposal{Proposal: *pbProposal}}}, + 
"1a720a7008201001180120012a480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d320608c0b89fdc053a146164645f6d6f72655f6578636c616d6174696f6e", + }, + { + "ProposalPol", &cmtcons.Message{Sum: &cmtcons.Message_ProposalPol{ + ProposalPol: &cmtcons.ProposalPOL{Height: 1, ProposalPolRound: 1}, + }}, + "2206080110011a00", + }, + { + "BlockPart", &cmtcons.Message{Sum: &cmtcons.Message_BlockPart{ + BlockPart: &cmtcons.BlockPart{Height: 1, Round: 1, Part: *pbParts}, + }}, + "2a36080110011a3008011204746573741a26080110011a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d", + }, + { + "Vote_without_ext", &cmtcons.Message{Sum: &cmtcons.Message_Vote{ + Vote: &cmtcons.Vote{Vote: vpb}, + }}, + "32700a6e0802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e3801", + }, + { + "Vote_with_ext", &cmtcons.Message{Sum: &cmtcons.Message_Vote{ + Vote: &cmtcons.Vote{Vote: vextPb}, + }}, + "327b0a790802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e38014a09657874656e73696f6e", + }, + { + "HasVote", &cmtcons.Message{Sum: &cmtcons.Message_HasVote{ + HasVote: &cmtcons.HasVote{Height: 1, Round: 1, Type: types.PrevoteType, Index: 1}, + }}, + "3a080801100118012001", + }, + { + "HasVote", &cmtcons.Message{Sum: &cmtcons.Message_HasVote{ + HasVote: &cmtcons.HasVote{ + Height: math.MaxInt64, Round: math.MaxInt32, + Type: types.PrevoteType, Index: math.MaxInt32, + }, + }}, + "3a1808ffffffffffffffff7f10ffffffff07180120ffffffff07", + }, + { + "VoteSetMaj23", &cmtcons.Message{Sum: &cmtcons.Message_VoteSetMaj23{ + VoteSetMaj23: &cmtcons.VoteSetMaj23{Height: 1, Round: 1, Type: types.PrevoteType, BlockID: pbBi}, + }}, + "425008011001180122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d", + }, + { + "VoteSetBits", &cmtcons.Message{Sum: &cmtcons.Message_VoteSetBits{ + VoteSetBits: &cmtcons.VoteSetBits{Height: 1, Round: 1, Type: types.PrevoteType, BlockID: pbBi, Votes: *pbBits}, + }}, + "4a5708011001180122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a050801120100", + }, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { bz, err := proto.Marshal(tc.cMsg) require.NoError(t, err) diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go new file mode 100644 index 00000000000..bfa2588668c --- /dev/null +++ b/internal/consensus/pbts_test.go @@ -0,0 +1,771 @@ +package consensus + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/abci/example/kvstore" + abci "github.com/cometbft/cometbft/abci/types" + abcimocks "github.com/cometbft/cometbft/abci/types/mocks" + "github.com/cometbft/cometbft/internal/test" + cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" + "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" + cmttimemocks "github.com/cometbft/cometbft/types/time/mocks" +) 
+
+const (
+	// blockTimeIota is used in the test harness as the time between
+	// blocks when not otherwise specified.
+	blockTimeIota = time.Millisecond
+)
+
+// pbtsTestHarness constructs a CometBFT network that can be used for testing the
+// implementation of the Proposer-Based Timestamps algorithm.
+// It runs a series of consensus heights and captures timing of votes and events.
+type pbtsTestHarness struct {
+	// configuration options set by the user of the test harness.
+	pbtsTestConfiguration
+
+	// The timestamp of the first block produced by the network.
+	firstBlockTime time.Time
+
+	// The CometBFT consensus state machine being run during
+	// a run of the pbtsTestHarness.
+	observedState *State
+
+	// A stub for signing votes and messages using the key
+	// from the observedState.
+	observedValidator *validatorStub
+
+	// A list of simulated validators that interact with the observedState and are
+	// fully controlled by the test harness.
+	otherValidators []*validatorStub
+
+	// The mock time source used by all of the validator stubs in the test harness.
+	// This mock clock allows the test harness to produce votes and blocks with arbitrary
+	// timestamps.
+	validatorClock *cmttimemocks.Source
+
+	chainID string
+
+	// channels for verifying that the observed validator completes certain actions.
+	ensureProposalCh, roundCh, blockCh, ensureVoteCh <-chan cmtpubsub.Message
+
+	// channel of events from the observed validator annotated with the timestamp
+	// the event was received.
+	eventCh <-chan timestampedEvent
+
+	currentHeight int64
+	currentRound  int32
+}
+
+type pbtsTestConfiguration struct {
+	// The timestamp consensus parameters to be used by the state machine under test.
+	synchronyParams types.SynchronyParams
+
+	// The setting to use for the TimeoutPropose configuration parameter.
+	timeoutPropose time.Duration
+
+	// The genesis time.
+	genesisTime time.Time
+
+	// The time offset from the height 1 block time of the block proposed at height 2.
+	height2ProposedBlockOffset time.Duration
+
+	// The time offset from the height 1 block time at which the proposal at height 2 should be delivered.
+	height2ProposalTimeDeliveryOffset time.Duration
+
+	// The time offset from the height 1 block time of the block proposed at height 4.
+	// At height 4, the proposed block and delivery offsets are the same, so
+	// that timeliness does not affect height 4.
+	height4ProposedBlockOffset time.Duration
+}
+
+func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfiguration) pbtsTestHarness {
+	t.Helper()
+	const validators = 4
+	cfg := test.ResetTestRoot("newPBTSTestHarness")
+	clock := new(cmttimemocks.Source)
+
+	if tc.genesisTime.IsZero() {
+		tc.genesisTime = cmttime.Now()
+	}
+
+	if tc.height4ProposedBlockOffset == 0 {
+		// Set a default height4ProposedBlockOffset.
+		// Use a proposed block time that is greater than the time that the
+		// block at height 2 was delivered. Height 3 is not relevant for testing
+		// and always occurs blockTimeIota before height 4. If not otherwise specified,
+		// height 4 therefore occurs 2*blockTimeIota after height 2.
+ tc.height4ProposedBlockOffset = tc.height2ProposalTimeDeliveryOffset + 2*blockTimeIota + } + cfg.Consensus.TimeoutPropose = tc.timeoutPropose + consensusParams := types.DefaultConsensusParams() + consensusParams.Synchrony = tc.synchronyParams + consensusParams.Feature.PbtsEnableHeight = 1 + + state, privVals := randGenesisStateWithTime(validators, consensusParams, tc.genesisTime) + cs := newStateWithConfig(cfg, state, privVals[0], kvstore.NewInMemoryApplication(), nil) + vss := make([]*validatorStub, validators) + for i := 0; i < validators; i++ { + vss[i] = newValidatorStub(privVals[i], int32(i)) + } + incrementHeight(vss[1:]...) + + for _, vs := range vss { + vs.clock = clock + } + pubKey, err := vss[0].PrivValidator.GetPubKey() + require.NoError(t, err) + + eventCh := timestampedCollector(ctx, t, cs.eventBus) + + return pbtsTestHarness{ + pbtsTestConfiguration: tc, + observedValidator: vss[0], + observedState: cs, + otherValidators: vss[1:], + validatorClock: clock, + currentHeight: 1, + chainID: cs.state.ChainID, + roundCh: subscribe(cs.eventBus, types.EventQueryNewRound), + ensureProposalCh: subscribe(cs.eventBus, types.EventQueryCompleteProposal), + blockCh: subscribe(cs.eventBus, types.EventQueryNewBlock), + ensureVoteCh: subscribeToVoterBuffered(cs, pubKey.Address()), + eventCh: eventCh, + } +} + +func (p *pbtsTestHarness) observedValidatorProposerHeight(t *testing.T, previousBlockTime time.Time) (heightResult, time.Time) { + t.Helper() + p.validatorClock.On("Now").Return(p.genesisTime.Add(p.height2ProposedBlockOffset)).Times(2 * len(p.otherValidators)) + + ensureNewRound(p.roundCh, p.currentHeight, p.currentRound) + + timeout := cmttime.Until(previousBlockTime.Add(ensureTimeout)) + if timeout < ensureTimeout { + timeout = ensureTimeout + } + ensureProposalWithTimeout(p.ensureProposalCh, p.currentHeight, p.currentRound, nil, timeout) + + rs := p.observedState.GetRoundState() + bid := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} + ensurePrevote(p.ensureVoteCh, p.currentHeight, p.currentRound) + signAddVotes(p.observedState, types.PrevoteType, p.chainID, bid, false, p.otherValidators...) + + ensurePrecommit(p.ensureVoteCh, p.currentHeight, p.currentRound) + signAddVotes(p.observedState, types.PrecommitType, p.chainID, bid, false, p.otherValidators...) + + ensureNewBlock(p.blockCh, p.currentHeight) + + vk, err := p.observedValidator.GetPubKey() + require.NoError(t, err) + res := collectHeightResults(t, p.eventCh, p.currentHeight, vk.Address()) + + p.currentHeight++ + incrementHeight(p.otherValidators...) 
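+	// Hand the captured results back together with the decided block's
+	// timestamp; run() stores that timestamp as firstBlockTime, the
+	// reference point for the height 2 and height 4 offsets.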
+	return res, rs.ProposalBlock.Time
+}
+
+func (p *pbtsTestHarness) height2(ctx context.Context, t *testing.T) heightResult {
+	t.Helper()
+	signer := p.otherValidators[0].PrivValidator
+	return p.nextHeight(ctx, t, signer,
+		p.firstBlockTime.Add(p.height2ProposalTimeDeliveryOffset),
+		p.firstBlockTime.Add(p.height2ProposedBlockOffset),
+		p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota))
+}
+
+func (p *pbtsTestHarness) intermediateHeights(ctx context.Context, t *testing.T) {
+	t.Helper()
+	signer := p.otherValidators[1].PrivValidator
+	p.nextHeight(ctx, t, signer,
+		p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota),
+		p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota),
+		p.firstBlockTime.Add(p.height4ProposedBlockOffset))
+
+	signer = p.otherValidators[2].PrivValidator
+	p.nextHeight(ctx, t, signer,
+		p.firstBlockTime.Add(p.height4ProposedBlockOffset),
+		p.firstBlockTime.Add(p.height4ProposedBlockOffset),
+		cmttime.Now())
+}
+
+func (p *pbtsTestHarness) height5(t *testing.T) (heightResult, time.Time) {
+	t.Helper()
+	return p.observedValidatorProposerHeight(t, p.firstBlockTime.Add(p.height4ProposedBlockOffset))
+}
+
+func (p *pbtsTestHarness) nextHeight(
+	ctx context.Context,
+	t *testing.T,
+	proposer types.PrivValidator,
+	deliverTime, proposedTime, nextProposedTime time.Time,
+) heightResult {
+	t.Helper()
+
+	p.validatorClock.On("Now").Return(nextProposedTime).Times(2 * len(p.otherValidators))
+
+	ensureNewRound(p.roundCh, p.currentHeight, p.currentRound)
+
+	b, err := p.observedState.createProposalBlock(ctx)
+	require.NoError(t, err)
+	require.Equal(t, b.Height, p.currentHeight)
+	b.Time = proposedTime
+
+	k, err := proposer.GetPubKey()
+	require.NoError(t, err)
+	b.Header.ProposerAddress = k.Address()
+	ps, err := b.MakePartSet(types.BlockPartSizeBytes)
+	require.NoError(t, err)
+	bid := types.BlockID{Hash: b.Hash(), PartSetHeader: ps.Header()}
+	prop := types.NewProposal(p.currentHeight, 0, -1, bid, proposedTime)
+	tp := prop.ToProto()
+
+	err = proposer.SignProposal(p.chainID, tp)
+	require.NoError(t, err)
+
+	time.Sleep(cmttime.Until(deliverTime))
+	prop.Signature = tp.Signature
+	err = p.observedState.SetProposalAndBlock(prop, ps, "peerID")
+	require.NoError(t, err)
+	ensureProposal(p.ensureProposalCh, p.currentHeight, 0, bid)
+
+	ensurePrevote(p.ensureVoteCh, p.currentHeight, p.currentRound)
+	signAddVotes(p.observedState, types.PrevoteType, p.chainID, bid, false, p.otherValidators...)
+
+	ensurePrecommit(p.ensureVoteCh, p.currentHeight, p.currentRound)
+	signAddVotes(p.observedState, types.PrecommitType, p.chainID, bid, false, p.otherValidators...)
+
+	vk, err := p.observedValidator.GetPubKey()
+	require.NoError(t, err)
+	res := collectHeightResults(t, p.eventCh, p.currentHeight, vk.Address())
+	ensureNewBlock(p.blockCh, p.currentHeight)
+
+	p.currentHeight++
+	incrementHeight(p.otherValidators...)
+	return res
+}
+
+func timestampedCollector(ctx context.Context, t *testing.T, eb *types.EventBus) <-chan timestampedEvent {
+	t.Helper()
+
+	// Since eventCh is not read until the end of each height, it must be large
+	// enough to hold all of the events produced during a single height.
+	eventCh := make(chan timestampedEvent, 100)
+
+	const tsCollectorClient = "timestampedCollector"
+	proposalSub, err := eb.Subscribe(ctx, tsCollectorClient, types.EventQueryCompleteProposal)
+	require.NoError(t, err)
+	// We set a capacity of 10 since there are several votes produced.
+	// With capacity 1 (the default) the tests sometimes deadlock.
+ voteSub, err := eb.Subscribe(ctx, tsCollectorClient, types.EventQueryVote, 10) + require.NoError(t, err) + + go func(ctx context.Context, t *testing.T) { + t.Helper() + for { + var msg cmtpubsub.Message + select { + case <-ctx.Done(): + return + case msg = <-proposalSub.Out(): + case msg = <-voteSub.Out(): + } + eventCh <- timestampedEvent{ + ts: cmttime.Now(), + m: msg, + } + } + }(ctx, t) + + return eventCh +} + +func collectHeightResults(t *testing.T, eventCh <-chan timestampedEvent, height int64, address []byte) heightResult { + t.Helper() + var res heightResult + for event := range eventCh { + require.False(t, event.ts.IsZero()) + switch v := event.m.Data().(type) { + case types.EventDataVote: + if v.Vote.Height > height { + t.Fatalf("received prevote from unexpected height, expected: %d, saw: %d", height, v.Vote.Height) + } + if !bytes.Equal(address, v.Vote.ValidatorAddress) { + continue + } + if v.Vote.Type != types.PrevoteType { + continue + } + if res.prevote != nil { + t.Fatalf("received duplicate prevote, previous %v, current %v", res.prevote, v.Vote) + } + res.prevote = v.Vote + res.prevoteIssuedAt = event.ts + + case types.EventDataCompleteProposal: + if v.Height > height { + t.Fatalf("received proposal from unexpected height, expected: %d, saw: %d", height, v.Height) + } + res.proposalIssuedAt = event.ts + } + if res.isComplete() { + return res + } + } + t.Fatalf("complete height result never seen for height %d", height) //nolint:revive // this is part of an unreachable code test + + panic("unreachable") +} + +type timestampedEvent struct { + ts time.Time + m cmtpubsub.Message +} + +func (p *pbtsTestHarness) run(ctx context.Context, t *testing.T) resultSet { + t.Helper() + startTestRound(p.observedState, p.currentHeight, p.currentRound) + + r1, proposalBlockTime := p.observedValidatorProposerHeight(t, p.genesisTime) + p.firstBlockTime = proposalBlockTime + r2 := p.height2(ctx, t) + p.intermediateHeights(ctx, t) + r5, _ := p.height5(t) + return resultSet{ + genesisHeight: r1, + height2: r2, + height5: r5, + } +} + +type resultSet struct { + genesisHeight heightResult + height2 heightResult + height5 heightResult +} + +type heightResult struct { + proposalIssuedAt time.Time + prevote *types.Vote + prevoteIssuedAt time.Time +} + +func (hr heightResult) isComplete() bool { + return !hr.proposalIssuedAt.IsZero() && !hr.prevoteIssuedAt.IsZero() && hr.prevote != nil +} + +// TestPBTSProposerWaitsForGenesisTime tests that a proposer will not propose a block +// until after the genesis time has passed. The test sets the genesis time in the +// future and then ensures that the observed validator waits to propose a block. +func TestPBTSProposerWaitsForGenesisTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a genesis time far (enough) in the future. + initialTime := cmttime.Now().Add(800 * time.Millisecond) + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 10 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 10 * time.Millisecond, + genesisTime: initialTime, + height2ProposalTimeDeliveryOffset: 10 * time.Millisecond, + height2ProposedBlockOffset: 10 * time.Millisecond, + height4ProposedBlockOffset: 30 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + // ensure that the proposal was issued after the genesis time. 
+ assert.True(t, results.genesisHeight.proposalIssuedAt.After(cfg.genesisTime)) +} + +// TestPBTSProposerWaitsForPreviousBlock tests that the proposer of a block waits until +// the block time of the previous height has passed to propose the next block. +// The test harness ensures that the observed validator will be the proposer at +// height 1 and height 5. The test sets the block time of height 4 in the future +// and then verifies that the observed validator waits until after the block time +// of height 4 to propose a block at height 5. +func TestPBTSProposerWaitsForPreviousBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + initialTime := cmttime.Now().Add(time.Millisecond * 50) + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 100 * time.Millisecond, + MessageDelay: 500 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + genesisTime: initialTime, + height2ProposalTimeDeliveryOffset: 150 * time.Millisecond, + height2ProposedBlockOffset: 100 * time.Millisecond, + height4ProposedBlockOffset: 800 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + // the observed validator is the proposer at height 5. + // ensure that the observed validator did not propose a block until after + // the time configured for height 4. + assert.True(t, results.height5.proposalIssuedAt.After(pbtsTest.firstBlockTime.Add(cfg.height4ProposedBlockOffset))) + + // Ensure that the validator issued a prevote for a non-nil block. + assert.NotNil(t, results.height5.prevote.BlockID.Hash) +} + +func TestPBTSProposerWaitTime(t *testing.T) { + genesisTime, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z") + require.NoError(t, err) + testCases := []struct { + name string + previousBlockTime time.Time + localTime time.Time + expectedWait time.Duration + }{ + { + name: "block time greater than local time", + previousBlockTime: genesisTime.Add(5 * time.Nanosecond), + localTime: genesisTime.Add(1 * time.Nanosecond), + expectedWait: 4 * time.Nanosecond, + }, + { + name: "local time greater than block time", + previousBlockTime: genesisTime.Add(1 * time.Nanosecond), + localTime: genesisTime.Add(5 * time.Nanosecond), + expectedWait: 0, + }, + { + name: "both times equal", + previousBlockTime: genesisTime.Add(5 * time.Nanosecond), + localTime: genesisTime.Add(5 * time.Nanosecond), + expectedWait: 0, + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + mockSource := new(cmttimemocks.Source) + mockSource.On("Now").Return(testCase.localTime) + + ti := proposerWaitTime(mockSource, testCase.previousBlockTime) + assert.Equal(t, testCase.expectedWait, ti) + }) + } +} + +func TestPBTSTimelyProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialTime := cmttime.Now() + + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 10 * time.Millisecond, + MessageDelay: 140 * time.Millisecond, + }, + timeoutPropose: 40 * time.Millisecond, + genesisTime: initialTime, + height2ProposedBlockOffset: 15 * time.Millisecond, + height2ProposalTimeDeliveryOffset: 30 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + require.NotNil(t, results.height2.prevote.BlockID.Hash) +} + +func TestPBTSTooFarInThePastProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // 
localtime > proposedBlockTime + MsgDelay + Precision
+	cfg := pbtsTestConfiguration{
+		synchronyParams: types.SynchronyParams{
+			Precision:    1 * time.Millisecond,
+			MessageDelay: 10 * time.Millisecond,
+		},
+		timeoutPropose:                    50 * time.Millisecond,
+		height2ProposedBlockOffset:        15 * time.Millisecond,
+		height2ProposalTimeDeliveryOffset: 27 * time.Millisecond,
+	}
+
+	pbtsTest := newPBTSTestHarness(ctx, t, cfg)
+	results := pbtsTest.run(ctx, t)
+
+	require.Nil(t, results.height2.prevote.BlockID.Hash)
+}
+
+func TestPBTSTooFarInTheFutureProposal(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// localtime < proposedBlockTime - Precision
+	cfg := pbtsTestConfiguration{
+		synchronyParams: types.SynchronyParams{
+			Precision:    1 * time.Millisecond,
+			MessageDelay: 10 * time.Millisecond,
+		},
+		timeoutPropose:                    50 * time.Millisecond,
+		height2ProposedBlockOffset:        100 * time.Millisecond,
+		height2ProposalTimeDeliveryOffset: 10 * time.Millisecond,
+		height4ProposedBlockOffset:        150 * time.Millisecond,
+	}
+
+	pbtsTest := newPBTSTestHarness(ctx, t, cfg)
+	results := pbtsTest.run(ctx, t)
+
+	require.Nil(t, results.height2.prevote.BlockID.Hash)
+}
+
+// TestPBTSEnableHeight tests the transition between BFT Time and PBTS.
+// The test runs multiple heights. BFT Time is used until the configured
+// PbtsEnableHeight. During some of these heights, the timestamp of votes
+// is shifted to the future to increase block timestamps. The switch to PBTS
+// is scheduled at pbtsSetHeight, via consensus param updates returned by
+// FinalizeBlock. Until the switch takes effect, some nodes already select
+// timestamps using PBTS rules, even though PBTS is not yet enabled. Once
+// PbtsEnableHeight is reached, some nodes propose bad timestamps. In the
+// end, only blocks proposed by the tested node are accepted, as their
+// timestamps are not tweaked.
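+//
+// As an illustrative timeline (relative to the starting height H; the code
+// below sets lastHeight = H+4, pbtsSetHeight = H+2, pbtsEnableHeight = H+3):
+//
+//	H, H+1: BFT Time; blocks from any proposer are accepted
+//	H+2:    FinalizeBlock schedules PbtsEnableHeight = H+3, while other
+//	        proposers already timestamp with local time, so their blocks
+//	        are rejected
+//	H+3+:   PBTS active; other proposers shift timestamps 2*Precision into
+//	        the future, so their blocks are not timely and are rejected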
+func TestPBTSEnableHeight(t *testing.T) { + numValidators := 4 + election := func(h int64, r int32) int { + return (int(h-1) + int(r)) % numValidators + } + + c := test.ConsensusParams() + c.Feature.PbtsEnableHeight = 0 // Start with PBTS disabled + + app := abcimocks.NewApplication(t) + app.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{ + Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT, + }, nil) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil) + app.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ExtendVoteResponse{}, nil) + app.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, + }, nil) + app.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + app.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() + + cs, vss := randStateWithAppImpl(numValidators, app, c) + height, round, chainID := cs.Height, cs.Round, cs.state.ChainID + + proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + voteCh := subscribe(cs.eventBus, types.EventQueryVote) + + lastHeight := height + 4 + pbtsSetHeight := height + 2 + pbtsEnableHeight := height + 3 + + startTestRound(cs, height, round) + for height <= lastHeight { + var block *types.Block + var blockID types.BlockID + + ensureNewRound(newRoundCh, height, round) + proposer := election(height, round) + pbtsEnabled := (height >= pbtsEnableHeight) + rejectProposal := false + + // Propose step + if proposer == 0 { + // Wait until we receive our own proposal + // This may take longer when switching to PBTS since + // BFT Time timestamps are shifted to the future. 
+			ensureProposalWithTimeout(proposalCh, height, round, nil, 2*time.Second)
+			rs := cs.GetRoundState()
+			block, _ = rs.ProposalBlock, rs.ProposalBlockParts
+			blockID = rs.Proposal.BlockID
+		} else {
+			var ts time.Time
+			var blockParts *types.PartSet
+
+			if height >= pbtsSetHeight && height < pbtsEnableHeight {
+				// Use PBTS logic while PBTS is not yet activated
+				ts = cmttime.Now()
+				rejectProposal = true
+			} else if height >= pbtsEnableHeight {
+				// Shift timestamp to the future 2*PRECISION => not timely
+				ts = cmttime.Now().Add(2 * c.Synchrony.Precision)
+				rejectProposal = true
+			}
+			block, blockParts, blockID = createProposalBlockWithTime(t, cs, ts)
+			proposal := types.NewProposal(height, round, -1, blockID, block.Header.Time)
+			// BFT Time should not care about Proposal's timestamps
+			if height < pbtsSetHeight {
+				proposal.Timestamp = cmttime.Now()
+			}
+			signProposal(t, proposal, chainID, vss[proposer])
+			err := cs.SetProposalAndBlock(proposal, blockParts, "p")
+			require.NoError(t, err)
+			ensureProposal(proposalCh, height, round, blockID)
+		}
+
+		delta := cmttime.Since(block.Time)
+		t.Log("BLOCK", height, round, "PROPOSER", proposer, "PBTS", pbtsEnabled,
+			"TIMESTAMP", block.Time, delta, "ACCEPTED", !rejectProposal)
+
+		// Accept proposal and decide, or reject proposal and move to next round
+		myVote := blockID.Hash
+		lockedRound := round
+		if rejectProposal {
+			myVote = nil
+			lockedRound = int32(-1)
+		} else { // We are deciding, enable FinalizeBlock mock
+			res := &abci.FinalizeBlockResponse{}
+			// Enable PBTS from pbtsEnableHeight via consensus params
+			if height == pbtsSetHeight {
+				params := types.DefaultConsensusParams()
+				params.Feature.VoteExtensionsEnableHeight = 1
+				params.Feature.PbtsEnableHeight = pbtsEnableHeight
+				paramsProto := params.ToProto()
+				res.ConsensusParamUpdates = &paramsProto
+			}
+			app.On("FinalizeBlock", mock.Anything, mock.Anything).
+				Return(res, nil).Once()
+		}
+
+		// Prevote step
+		ensurePrevote(voteCh, height, round)
+		validatePrevote(t, cs, round, vss[0], myVote)
+		for _, vs := range vss[2:] {
+			signAddVotes(cs, types.PrevoteType, chainID, blockID, false, vs)
+			ensurePrevote(voteCh, height, round)
+		}
+
+		// Precommit step
+		ensurePrecommit(voteCh, height, round)
+		validatePrecommit(t, cs, round, lockedRound, vss[0], myVote, myVote)
+		for _, vs := range vss[2:] {
+			ts := cmttime.Now()
+			// Shift the next block timestamp while running BFT Time
+			if height >= pbtsSetHeight-1 && height < pbtsEnableHeight {
+				ts = ts.Add(time.Second)
+			}
+			vote := signVoteWithTimestamp(vs, types.PrecommitType, chainID, blockID, true, ts)
+			cs.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
+			ensurePrecommit(voteCh, height, round)
+		}
+
+		if myVote != nil {
+			height, round = height+1, 0
+			incrementHeight(vss[1:]...)
+		} else {
+			round = round + 1
+			incrementRound(vss[1:]...)
+		}
+	}
+	// Last call to FinalizeBlock
+	ensureNewRound(newRoundCh, height, round)
+}
+
+// TestPbtsAdaptiveMessageDelay tests whether proposals with timestamps in the
+// past are eventually accepted by validators. The test runs multiple rounds.
+// Rounds where the tested node is the proposer are skipped. Rounds with other
+// proposers use a Proposal with a tweaked timestamp, which is too far in the
+// past. Within a maximum number of rounds, if PBTS validation is adaptive, the
+// synchrony parameters grow large enough for the proposal to be accepted.
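+//
+// Roughly, under the code below (the exact per-round growth is whatever
+// SynchronyParams.InRound implements): the tweaked proposal is aged by
+// originMaxDelta + MessageDelay/2, i.e. about Precision + 1.5*MessageDelay,
+// which exceeds the round-0 bound of Precision + MessageDelay, so early
+// rounds prevote nil; once the adaptive bound ac.Precision + ac.MessageDelay
+// outgrows that age, the tested node prevotes for the block and the height
+// decides.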
+func TestPbtsAdaptiveMessageDelay(t *testing.T) {
+	numValidators := 4
+	election := func(h int64, r int32) int {
+		return (int(h-1) + int(r)) % numValidators
+	}
+
+	c := test.ConsensusParams()
+	app := kvstore.NewInMemoryApplication()
+	genesisTime := cmttime.Now().Add(-10 * time.Second)
+	cs, vss := randStateWithAppImplGenesisTime(numValidators, app, c, genesisTime)
+
+	myPubKey, err := vss[0].GetPubKey()
+	require.NoError(t, err)
+	myAddress := myPubKey.Address()
+
+	proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
+	newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
+	voteCh := subscribe(cs.eventBus, types.EventQueryVote)
+
+	height, round := cs.Height, cs.Round
+	chainID := cs.state.ChainID
+
+	originMaxDelta := c.Synchrony.Precision + c.Synchrony.MessageDelay
+
+	maximumRound := round + 10
+	startTestRound(cs, height, round)
+
+	for ; round < maximumRound; round++ {
+		var vote types.BlockID
+		assert.True(t, vote.IsNil()) // default is to vote nil
+
+		t.Log("Starting round", round)
+		ensureNewRound(newRoundCh, height, round)
+		proposer := election(height, round)
+		ac := c.Synchrony.InRound(round)
+		maxDelta := ac.Precision + ac.MessageDelay
+
+		if proposer != 0 {
+			shift := originMaxDelta + c.Synchrony.MessageDelay/2
+			ts := cmttime.Now().Add(-shift)
+			if ts.Before(genesisTime) {
+				ts = genesisTime
+			}
+
+			// Create block and proposal with the tweaked timestamp
+			block, blockParts, blockID := createProposalBlockWithTime(t, cs, ts)
+			proposal := types.NewProposal(height, round, -1, blockID, block.Header.Time)
+			signProposal(t, proposal, chainID, vss[proposer])
+
+			require.NoError(t, cs.SetProposalAndBlock(proposal, blockParts, "p"))
+			maxReceiveTime := cmttime.Now()
+
+			ensureProposal(proposalCh, height, round, blockID)
+			ensurePrevote(voteCh, height, round)
+			vote = cs.Votes.Prevotes(round).GetByAddress(myAddress).BlockID
+
+			delta := maxReceiveTime.Sub(ts)
+			t.Log("Proposal timestamp", ts.Format(time.StampMicro),
+				"maximum receive time", maxReceiveTime.Format(time.StampMicro),
+				"delta", delta,
+				"maximum allowed delta", maxDelta)
+			t.Logf("Round %d, expected timely=%v, got timely=%v\n",
+				round, (delta < maxDelta), !vote.IsNil())
+		} else {
+			// The node will accept its own proposal.
+			// Just make everyone vote nil and skip the round.
+			ensureNewProposal(proposalCh, height, round)
+			ensurePrevote(voteCh, height, round)
+		}
+
+		for _, vs := range vss[2:] {
+			signAddVotes(cs, types.PrevoteType, chainID, vote, false, vs)
+			ensurePrevote(voteCh, height, round)
+		}
+		ensurePrecommit(voteCh, height, round)
+
+		for _, vs := range vss[2:] {
+			signAddVotes(cs, types.PrecommitType, chainID, vote, true, vs)
+			ensurePrecommit(voteCh, height, round)
+		}
+
+		if !vote.IsNil() {
+			// Decided, so we are good!
+			ensureNewRound(newRoundCh, height+1, 0)
+			t.Log("Decided at round", round)
+			return
+		}
+		// No decision, new round required
+		incrementRound(vss[1:]...)
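+		// With adaptive PBTS validation, the next round's synchrony
+		// parameters are larger, so the same proposal age may then be timely.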
+ } + t.Error("Did not decide after round", round) +} diff --git a/consensus/reactor.go b/internal/consensus/reactor.go similarity index 73% rename from consensus/reactor.go rename to internal/consensus/reactor.go index ca4b4192a12..3350b5cf532 100644 --- a/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -2,25 +2,25 @@ package consensus import ( "fmt" + "math/rand" "reflect" "sync" "sync/atomic" "time" - cmterrors "github.com/cometbft/cometbft/types/errors" - - cstypes "github.com/cometbft/cometbft/consensus/types" - "github.com/cometbft/cometbft/libs/bits" - cmtevents "github.com/cometbft/cometbft/libs/events" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" + "github.com/cometbft/cometbft/internal/bits" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" + cmtevents "github.com/cometbft/cometbft/internal/events" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -36,7 +36,7 @@ const ( votesToContributeToBecomeGoodPeer = 10000 ) -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Reactor defines a reactor for the consensus service. type Reactor struct { @@ -47,8 +47,9 @@ type Reactor struct { waitSync atomic.Bool eventBus *types.EventBus - rsMtx cmtsync.Mutex - rs *cstypes.RoundState + rsMtx cmtsync.RWMutex + rs cstypes.RoundState // copy of consensus state + initialHeight atomic.Int64 Metrics *Metrics } @@ -58,11 +59,13 @@ type ReactorOption func(*Reactor) // NewReactor returns a new Reactor with the given consensusState. func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { conR := &Reactor{ - conS: consensusState, - waitSync: atomic.Bool{}, - rs: consensusState.GetRoundState(), - Metrics: NopMetrics(), + conS: consensusState, + waitSync: atomic.Bool{}, + rs: consensusState.GetRoundState(), + initialHeight: atomic.Int64{}, + Metrics: NopMetrics(), } + conR.initialHeight.Store(consensusState.state.InitialHeight) conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) if waitSync { conR.waitSync.Store(true) @@ -86,7 +89,6 @@ func (conR *Reactor) OnStart() error { go conR.peerStatsRoutine() conR.subscribeToBroadcastEvents() - go conR.updateRoundStateRoutine() if !conR.WaitSync() { err := conR.conS.Start() @@ -150,41 +152,41 @@ conR: } } -// GetChannels implements Reactor -func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { +// StreamDescriptors implements Reactor. 
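+// Each consensus stream is described by a tcpconn.ChannelDescriptor. The
+// MessageTypeI field carries a prototype message (inferring its role from
+// the former MessageType field: the transport uses it to unmarshal incoming
+// bytes into the right protobuf type). The priorities favor block data (10)
+// over votes (7) and round-state updates (6).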
+func (*Reactor) StreamDescriptors() []p2p.StreamDescriptor { // TODO optimize - return []*p2p.ChannelDescriptor{ - { + return []p2p.StreamDescriptor{ + &tcpconn.ChannelDescriptor{ ID: StateChannel, Priority: 6, SendQueueCapacity: 100, RecvMessageCapacity: maxMsgSize, - MessageType: &cmtcons.Message{}, + MessageTypeI: &cmtcons.Message{}, }, - { + &tcpconn.ChannelDescriptor{ ID: DataChannel, // maybe split between gossiping current block and catchup stuff // once we gossip the whole block there's nothing left to send until next height or round Priority: 10, SendQueueCapacity: 100, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: maxMsgSize, - MessageType: &cmtcons.Message{}, + MessageTypeI: &cmtcons.Message{}, }, - { + &tcpconn.ChannelDescriptor{ ID: VoteChannel, Priority: 7, SendQueueCapacity: 100, RecvBufferCapacity: 100 * 100, RecvMessageCapacity: maxMsgSize, - MessageType: &cmtcons.Message{}, + MessageTypeI: &cmtcons.Message{}, }, - { + &tcpconn.ChannelDescriptor{ ID: VoteSetBitsChannel, Priority: 1, SendQueueCapacity: 2, RecvBufferCapacity: 1024, RecvMessageCapacity: maxMsgSize, - MessageType: &cmtcons.Message{}, + MessageTypeI: &cmtcons.Message{}, }, } } @@ -220,7 +222,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer is a noop. -func (conR *Reactor) RemovePeer(p2p.Peer, interface{}) { +func (conR *Reactor) RemovePeer(p2p.Peer, any) { if !conR.IsRunning() { return } @@ -237,7 +239,7 @@ func (conR *Reactor) RemovePeer(p2p.Peer, interface{}) { // Messages affect either a peer state or the consensus state. // Peer state updates can happen in parallel, but processing of // proposals, block parts, and votes are ordered by the receiveRoutine -// NOTE: blocks on consensus state for proposals, block parts, and votes +// NOTE: blocks on consensus state for proposals, block parts, and votes. func (conR *Reactor) Receive(e p2p.Envelope) { if !conR.IsRunning() { conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID) @@ -268,9 +270,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) { case StateChannel: switch msg := msg.(type) { case *NewRoundStepMessage: - conR.conS.mtx.Lock() - initialHeight := conR.conS.state.InitialHeight - conR.conS.mtx.Unlock() + initialHeight := conR.initialHeight.Load() if err = msg.ValidateHeight(initialHeight); err != nil { conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) conR.Switch.StopPeerForError(e.Src, err) @@ -284,10 +284,9 @@ func (conR *Reactor) Receive(e p2p.Envelope) { case *HasProposalBlockPartMessage: ps.ApplyHasProposalBlockPartMessage(msg) case *VoteSetMaj23Message: - cs := conR.conS - cs.mtx.Lock() - height, votes := cs.Height, cs.Votes - cs.mtx.Unlock() + // Get the updated round state as our view may be stale + rs := conR.conS.GetRoundState() + height, votes := rs.Height, rs.Votes if height != msg.Height { return } @@ -301,9 +300,9 @@ func (conR *Reactor) Receive(e p2p.Envelope) { // (and consequently shows which we don't have) var ourVotes *bits.BitArray switch msg.Type { - case cmtproto.PrevoteType: + case types.PrevoteType: ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case cmtproto.PrecommitType: + case types.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") @@ -333,13 +332,13 @@ func (conR *Reactor) Receive(e p2p.Envelope) { switch msg := msg.(type) { case *ProposalMessage: ps.SetHasProposal(msg.Proposal) - conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID(), cmttime.Now()} case *ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) - conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID(), time.Time{}} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -351,15 +350,12 @@ func (conR *Reactor) Receive(e p2p.Envelope) { } switch msg := msg.(type) { case *VoteMessage: - cs := conR.conS - cs.mtx.RLock() - height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() - cs.mtx.RUnlock() - ps.EnsureVoteBitArrays(height, valSize) - ps.EnsureVoteBitArrays(height-1, lastCommitSize) - ps.SetHasVote(msg.Vote) + rs := conR.getRoundState() + + height, valSize, lastCommitSize := rs.Height, rs.Validators.Size(), rs.LastCommit.Size() + ps.SetHasVoteFromPeer(msg.Vote, height, valSize, lastCommitSize) - cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()} + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID(), time.Time{}} default: // don't punish (leave room for soft upgrades) @@ -373,17 +369,17 @@ func (conR *Reactor) Receive(e p2p.Envelope) { } switch msg := msg.(type) { case *VoteSetBitsMessage: - cs := conR.conS - cs.mtx.Lock() - height, votes := cs.Height, cs.Votes - cs.mtx.Unlock() + // Get the updated round state as our view may be stale + rs := conR.conS.GetRoundState() + + height, votes := rs.Height, rs.Votes if height == msg.Height { var ourVotes *bits.BitArray switch msg.Type { - case cmtproto.PrevoteType: + case types.PrevoteType: ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case cmtproto.PrecommitType: + case types.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") @@ -413,7 +409,7 @@ func (conR *Reactor) WaitSync() bool { return conR.waitSync.Load() } -//-------------------------------------- +// -------------------------------------- // subscribeToBroadcastEvents subscribes for new round steps and votes // using internal pubsub defined on state to broadcast @@ -422,14 +418,24 @@ func (conR *Reactor) subscribeToBroadcastEvents() { const subscriber = "consensus-reactor" if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, func(data cmtevents.EventData) { - conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) + rs := data.(cstypes.RoundState) + + // update reactor's view of round state + conR.updateRoundState(&rs) + + conR.broadcastNewRoundStepMessage(&rs) }); err != nil { conR.Logger.Error("Error adding listener for events (NewRoundStep)", "err", err) } if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock, func(data cmtevents.EventData) { - conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) + rs := data.(cstypes.RoundState) + + // update reactor's view of round state + conR.updateRoundState(&rs) + + conR.broadcastNewValidBlockMessage(&rs) }); err != nil { conR.Logger.Error("Error adding listener for events (ValidBlock)", "err", err) } @@ -437,6 +443,13 @@ func (conR *Reactor) subscribeToBroadcastEvents() { if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, func(data cmtevents.EventData) { conR.broadcastHasVoteMessage(data.(*types.Vote)) + + // update reactor's view of round state + // NOTE this is safe to do without locking cs because the eventBus is + // synchronous. If it were not, we could pass rs in this event + // instead + rs := conR.conS.getRoundState() + conR.updateRoundState(&rs) }); err != nil { conR.Logger.Error("Error adding listener for events (Vote)", "err", err) } @@ -444,11 +457,25 @@ func (conR *Reactor) subscribeToBroadcastEvents() { if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalBlockPart, func(data cmtevents.EventData) { conR.broadcastHasProposalBlockPartMessage(data.(*BlockPartMessage)) + + // update reactor's view of round state + // NOTE this is safe to do without locking cs because the eventBus is + // synchronous. If it were not, we could pass rs in this event + // instead + rs := conR.conS.getRoundState() + conR.updateRoundState(&rs) }); err != nil { conR.Logger.Error("Error adding listener for events (ProposalBlockPart)", "err", err) } } +// Safely update the reactor's view of round state. 
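+// The reactor keeps a value copy of the RoundState guarded by rsMtx, so
+// gossip routines can read a consistent snapshot without contending on the
+// consensus state's own mutex. A minimal sketch of the read side (this is
+// what getRoundState below does):
+//
+//	conR.rsMtx.RLock()
+//	rs := conR.rs // value copy; safe to use after unlock
+//	conR.rsMtx.RUnlock()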
+func (conR *Reactor) updateRoundState(rs *cstypes.RoundState) { + conR.rsMtx.Lock() + conR.rs = *rs // copy + conR.rsMtx.Unlock() +} + func (conR *Reactor) unsubscribeFromBroadcastEvents() { const subscriber = "consensus-reactor" conR.conS.evsw.RemoveListener(subscriber) @@ -456,10 +483,12 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() { func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { nrsMsg := makeRoundStepMessage(rs) - conR.Switch.Broadcast(p2p.Envelope{ - ChannelID: StateChannel, - Message: nrsMsg, - }) + go func() { + conR.Switch.Broadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: nrsMsg, + }) + }() } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { @@ -471,10 +500,12 @@ func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { BlockParts: rs.ProposalBlockParts.BitArray().ToProto(), IsCommit: rs.Step == cstypes.RoundStepCommit, } - conR.Switch.Broadcast(p2p.Envelope{ - ChannelID: StateChannel, - Message: csMsg, - }) + go func() { + conR.Switch.Broadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: csMsg, + }) + }() } // Broadcasts HasVoteMessage to peers that care. @@ -485,13 +516,16 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { Type: vote.Type, Index: vote.ValidatorIndex, } - conR.Switch.Broadcast(p2p.Envelope{ - ChannelID: StateChannel, - Message: msg, - }) + + go func() { + conR.Switch.TryBroadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: msg, + }) + }() /* // TODO: Make this broadcast more selective. - for _, peer := range conR.Switch.Peers().List() { + for _, peer := range conR.Switch.Peers().Copy() { ps, ok := peer.Get(PeerStateKey).(*PeerState) if !ok { panic(fmt.Sprintf("Peer %v has no state", peer)) @@ -520,10 +554,12 @@ func (conR *Reactor) broadcastHasProposalBlockPartMessage(partMsg *BlockPartMess Round: partMsg.Round, Index: int32(partMsg.Part.Index), } - conR.Switch.Broadcast(p2p.Envelope{ - ChannelID: StateChannel, - Message: msg, - }) + go func() { + conR.Switch.TryBroadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: msg, + }) + }() } func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *cmtcons.NewRoundStep) { @@ -531,43 +567,37 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *cmtcons.NewRoundStep) Height: rs.Height, Round: rs.Round, Step: uint32(rs.Step), - SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), + SecondsSinceStartTime: int64(cmttime.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.GetRound(), } - return + return nrsMsg } func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.getRoundState() - nrsMsg := makeRoundStepMessage(rs) + nrsMsg := makeRoundStepMessage(&rs) peer.Send(p2p.Envelope{ ChannelID: StateChannel, Message: nrsMsg, }) } -func (conR *Reactor) updateRoundStateRoutine() { - t := time.NewTicker(100 * time.Microsecond) - defer t.Stop() - for range t.C { - if !conR.IsRunning() { - return - } - rs := conR.conS.GetRoundState() - conR.rsMtx.Lock() - conR.rs = rs - conR.rsMtx.Unlock() - } -} - -func (conR *Reactor) getRoundState() *cstypes.RoundState { - conR.rsMtx.Lock() - defer conR.rsMtx.Unlock() +func (conR *Reactor) getRoundState() cstypes.RoundState { + conR.rsMtx.RLock() + defer conR.rsMtx.RUnlock() return conR.rs } +// ----------------------------------------------------------------------------- +// Reactor gossip routines and helpers + func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { logger := 
conR.Logger.With("peer", peer) + if !peer.HasChannel(DataChannel) { + logger.Info("Peer does not implement DataChannel.") + return + } + rng := cmtrand.NewStdlibRand() OUTER_LOOP: for { @@ -580,169 +610,60 @@ OUTER_LOOP: // so we can reduce the amount of redundant block parts we send if conR.conS.config.PeerGossipIntraloopSleepDuration > 0 { // the config sets an upper bound for how long we sleep. - randDuration := cmtrand.Int63n(int64(conR.conS.config.PeerGossipIntraloopSleepDuration)) + randDuration := rng.Int63n(int64(conR.conS.config.PeerGossipIntraloopSleepDuration)) time.Sleep(time.Duration(randDuration)) } rs := conR.getRoundState() prs := ps.GetRoundState() - // Send proposal Block parts? - if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { - if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { - part := rs.ProposalBlockParts.GetPart(index) - parts, err := part.ToProto() - if err != nil { - panic(err) - } - logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) - if peer.Send(p2p.Envelope{ - ChannelID: DataChannel, - Message: &cmtcons.BlockPart{ - Height: rs.Height, // This tells peer that this part applies to us. - Round: rs.Round, // This tells peer that this part applies to us. - Part: *parts, - }, - }) { - ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) - } - continue OUTER_LOOP - } - } + // -------------------- + // Send block part? + // (Note these can match on hash so round doesn't matter) + // -------------------- - // If the peer is on a previous height that we have, help catch up. - blockStoreBase := conR.conS.blockStore.Base() - if blockStoreBase > 0 && 0 < prs.Height && prs.Height < rs.Height && prs.Height >= blockStoreBase { - heightLogger := logger.With("height", prs.Height) - - // if we never received the commit message from the peer, the block parts wont be initialized - if prs.ProposalBlockParts == nil { - blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) - if blockMeta == nil { - heightLogger.Error("Failed to load block meta", - "blockstoreBase", blockStoreBase, "blockstoreHeight", conR.conS.blockStore.Height()) - time.Sleep(conR.conS.config.PeerGossipSleepDuration) - } else { - ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) - } - // continue the loop since prs is a copy and not effected by this initialization + if part, continueLoop := pickPartToSend(logger, conR.conS.blockStore, &rs, ps, prs, rng); part != nil { + // part is not nil: we either succeed in sending it, + // or we were instructed not to sleep (busy-waiting) + if ps.SendPartSetHasPart(part, prs) || continueLoop { continue OUTER_LOOP } - conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer) + } else if continueLoop { + // part is nil but we don't want to sleep (busy-waiting) continue OUTER_LOOP } - // If height and round don't match, sleep. - if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - // logger.Info("Peer Height|Round mismatch, sleeping", - // "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) - time.Sleep(conR.conS.config.PeerGossipSleepDuration) - continue OUTER_LOOP - } + // -------------------- + // Send proposal? + // (If height and round match, and we have a proposal and they don't) + // -------------------- - // By here, height and round match. - // Proposal block parts were already matched and sent if any were wanted. - // (These can match on hash so the round doesn't matter) - // Now consider sending other things, like the Proposal itself. 
- - // Send Proposal && ProposalPOL BitArray? - if rs.Proposal != nil && !prs.Proposal { - // Proposal: share the proposal metadata with peer. - { - logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) - if peer.Send(p2p.Envelope{ - ChannelID: DataChannel, - Message: &cmtcons.Proposal{Proposal: *rs.Proposal.ToProto()}, - }) { - // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! - ps.SetHasProposal(rs.Proposal) - } - } - // ProposalPOL: lets peer know which POL votes we have so far. - // Peer must receive ProposalMessage first. - // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, - // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). - if 0 <= rs.Proposal.POLRound { - logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(p2p.Envelope{ - ChannelID: DataChannel, - Message: &cmtcons.ProposalPOL{ - Height: rs.Height, - ProposalPolRound: rs.Proposal.POLRound, - ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), - }, - }) - } + heightRoundMatch := (rs.Height == prs.Height) && (rs.Round == prs.Round) + proposalToSend := rs.Proposal != nil && !prs.Proposal + + if heightRoundMatch && proposalToSend { + ps.SendProposalSetHasProposal(logger, &rs, prs) continue OUTER_LOOP } // Nothing to do. Sleep. time.Sleep(conR.conS.config.PeerGossipSleepDuration) - continue OUTER_LOOP - } -} - -func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, - prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer, -) { - if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { - // Ensure that the peer's PartSetHeader is correct - blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) - if blockMeta == nil { - logger.Error("Failed to load block meta", "ourHeight", rs.Height, - "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) - time.Sleep(conR.conS.config.PeerGossipSleepDuration) - return - } else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) { - logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping", - "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) - time.Sleep(conR.conS.config.PeerGossipSleepDuration) - return - } - // Load the part - part := conR.conS.blockStore.LoadBlockPart(prs.Height, index) - if part == nil { - logger.Error("Could not load part", "index", index, - "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) - time.Sleep(conR.conS.config.PeerGossipSleepDuration) - return - } - // Send the part - logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) - pp, err := part.ToProto() - if err != nil { - logger.Error("Could not convert part to proto", "index", index, "error", err) - return - } - if peer.Send(p2p.Envelope{ - ChannelID: DataChannel, - Message: &cmtcons.BlockPart{ - Height: prs.Height, // Not our height, so it doesn't matter. - Round: prs.Round, // Not our height, so it doesn't matter. 
- Part: *pp, - }, - }) { - ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) - } else { - logger.Debug("Sending block part for catchup failed") - // sleep to avoid retrying too fast - time.Sleep(conR.conS.config.PeerGossipSleepDuration) - } - return } - // logger.Info("No parts to send in catch-up, sleeping") - time.Sleep(conR.conS.config.PeerGossipSleepDuration) } func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { logger := conR.Logger.With("peer", peer) + if !peer.HasChannel(VoteChannel) { + logger.Info("Peer does not implement VoteChannel.") + return + } + rng := cmtrand.NewStdlibRand() // Simple hack to throttle logs upon sleep. sleeping := 0 OUTER_LOOP: for { - // Manage disconnects from self or peer. if !peer.IsRunning() || !conR.IsRunning() { return @@ -752,7 +673,7 @@ OUTER_LOOP: // so we can reduce the amount of redundant votes we send if conR.conS.config.PeerGossipIntraloopSleepDuration > 0 { // the config sets an upper bound for how long we sleep. - randDuration := cmtrand.Int63n(int64(conR.conS.config.PeerGossipIntraloopSleepDuration)) + randDuration := rng.Int63n(int64(conR.conS.config.PeerGossipIntraloopSleepDuration)) time.Sleep(time.Duration(randDuration)) } @@ -769,52 +690,14 @@ OUTER_LOOP: // logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round, // "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step) - // If height matches, then send LastCommit, Prevotes, Precommits. - if rs.Height == prs.Height { - heightLogger := logger.With("height", prs.Height) - if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) { - continue OUTER_LOOP - } - } - - // Special catchup logic. - // If peer is lagging by height 1, send LastCommit. - if prs.Height != 0 && rs.Height == prs.Height+1 { - if ps.PickSendVote(rs.LastCommit) { - logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) - continue OUTER_LOOP - } - } - - // Catchup logic - // If peer is lagging by more than 1, send Commit. - blockStoreBase := conR.conS.blockStore.Base() - if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase { - // Load the block's extended commit for prs.Height, - // which contains precommit signatures for prs.Height. - var ec *types.ExtendedCommit - var veEnabled bool - func() { - conR.conS.mtx.RLock() - defer conR.conS.mtx.RUnlock() - veEnabled = conR.conS.state.ConsensusParams.ABCI.VoteExtensionsEnabled(prs.Height) - }() - if veEnabled { - ec = conR.conS.blockStore.LoadBlockExtendedCommit(prs.Height) - } else { - c := conR.conS.blockStore.LoadBlockCommit(prs.Height) - if c == nil { - continue - } - ec = c.WrappedExtendedCommit() - } - if ec == nil { - continue - } - if ps.PickSendVote(ec) { - logger.Debug("Picked Catchup commit to send", "height", prs.Height) + if vote := pickVoteToSend(logger, conR.conS, &rs, ps, prs, rng); vote != nil { + if ps.sendVoteSetHasVote(vote) { continue OUTER_LOOP } + logger.Debug("Failed to send vote to peer", + "height", prs.Height, + "vote", vote, + ) } if sleeping == 0 { @@ -829,66 +712,7 @@ OUTER_LOOP: } time.Sleep(conR.conS.config.PeerGossipSleepDuration) - continue OUTER_LOOP - } -} - -func (conR *Reactor) gossipVotesForHeight( - logger log.Logger, - rs *cstypes.RoundState, - prs *cstypes.PeerRoundState, - ps *PeerState, -) bool { - // If there are lastCommits to send... 
- if prs.Step == cstypes.RoundStepNewHeight { - if ps.PickSendVote(rs.LastCommit) { - logger.Debug("Picked rs.LastCommit to send") - return true - } - } - // If there are POL prevotes to send... - if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { - if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if ps.PickSendVote(polPrevotes) { - logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", - "round", prs.ProposalPOLRound) - return true - } - } - } - // If there are prevotes to send... - if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { - logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true - } - } - // If there are precommits to send... - if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) { - logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) - return true - } - } - // If there are prevotes to send...Needed because of validBlock mechanism - if prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { - logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true - } - } - // If there are POLPrevotes to send... - if prs.ProposalPOLRound != -1 { - if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if ps.PickSendVote(polPrevotes) { - logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", - "round", prs.ProposalPOLRound) - return true - } - } } - - return false } // NOTE: `queryMaj23Routine` has a simple crude design since it only comes @@ -907,13 +731,12 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(p2p.Envelope{ ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, Round: prs.Round, - Type: cmtproto.PrevoteType, + Type: types.PrevoteType, BlockID: maj23.ToProto(), }, }) @@ -933,7 +756,7 @@ OUTER_LOOP: Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, Round: prs.Round, - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: maj23.ToProto(), }, }) @@ -948,13 +771,12 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(p2p.Envelope{ ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, Round: prs.ProposalPOLRound, - Type: cmtproto.PrevoteType, + Type: types.PrevoteType, BlockID: maj23.ToProto(), }, }) @@ -977,7 +799,7 @@ OUTER_LOOP: Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, Round: commit.Round, - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: commit.BlockID.ToProto(), }, }) @@ -992,6 +814,204 @@ OUTER_LOOP: } } +// pick a block part to send if the peer has the same part set header as us or if they're catching up and we have the block. +// returns the part and a bool that signals whether to continue to the loop (true) or to sleep. +// NOTE there is one case where we don't return a part but continue the loop (ie. we return (nil, true)). 
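+// Spelled out, the four returns implemented below are:
+//
+//	(part, true):  peer follows our part set header; busy-wait if the send fails
+//	(nil, true):   peer's catchup part set was just initialized; re-enter the loop
+//	(part, false): catchup part loaded from the block store; sleep if the send fails
+//	(nil, false):  nothing to send; sleep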
+func pickPartToSend( + logger log.Logger, + blockStore sm.BlockStore, + rs *cstypes.RoundState, + ps *PeerState, + prs *cstypes.PeerRoundState, + rng *rand.Rand, +) (*types.Part, bool) { + // If the peer has the same part set header as us, send block parts. + if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) && !rs.ProposalBlockParts.IsLocked() { + if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(rng); ok { + part := rs.ProposalBlockParts.GetPart(index) + // If sending this part fails, restart the OUTER_LOOP (busy-waiting). + return part, true + } + } + + // If the peer is on a previous height that we have, help catch up. + blockStoreBase := blockStore.Base() + if blockStoreBase > 0 && + 0 < prs.Height && prs.Height < rs.Height && + prs.Height >= blockStoreBase { + heightLogger := logger.With("height", prs.Height) + + // If we never received the commit message from the peer, the block parts won't be initialized. + if prs.ProposalBlockParts == nil { + blockMeta := blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + heightLogger.Error("Failed to load block meta", + "blockstoreBase", blockStoreBase, "blockstoreHeight", blockStore.Height()) + return nil, false + } + ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) + // continue the loop since prs is a copy and not affected by this initialization + return nil, true // continue OUTER_LOOP + } + part := pickPartForCatchup(heightLogger, rs, prs, blockStore, rng) + if part != nil { + // If sending this part fails, do not restart the OUTER_LOOP; sleep instead. + return part, false + } + } + + return nil, false +} + +func pickPartForCatchup( + logger log.Logger, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, + blockStore sm.BlockStore, + rng *rand.Rand, +) *types.Part { + index, ok := prs.ProposalBlockParts.Not().PickRandom(rng) + if !ok { + return nil + } + // Ensure that the peer's PartSetHeader is correct + blockMeta := blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + logger.Error("Failed to load block meta", "ourHeight", rs.Height, + "blockstoreBase", blockStore.Base(), "blockstoreHeight", blockStore.Height()) + return nil + } else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) { + logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping", + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) + return nil + } + // Load the part + part := blockStore.LoadBlockPart(prs.Height, index) + if part == nil { + logger.Error("Could not load part", "index", index, + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) + return nil + } + return part +} + +func pickVoteToSend( + logger log.Logger, + conS *State, + rs *cstypes.RoundState, + ps *PeerState, + prs *cstypes.PeerRoundState, + rng *rand.Rand, +) *types.Vote { + // If height matches, then send LastCommit, Prevotes, Precommits. + if rs.Height == prs.Height { + heightLogger := logger.With("height", prs.Height) + return pickVoteCurrentHeight(heightLogger, rs, prs, ps, rng) + } + + // Special catchup logic. + // If peer is lagging by height 1, send LastCommit. + if prs.Height != 0 && rs.Height == prs.Height+1 { + if vote := ps.PickVoteToSend(rs.LastCommit, rng); vote != nil { + logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) + return vote + } + } + + // Catchup logic + // If peer is lagging by more than 1, send Commit.
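+ // The commit for prs.Height is loaded from the block store below: as an ExtendedCommit when vote extensions are enabled at that height, otherwise as a plain Commit wrapped via WrappedExtendedCommit().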
+ blockStoreBase := conS.blockStore.Base() + if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase { + // Load the block's extended commit for prs.Height, + // which contains precommit signatures for prs.Height. + var ec *types.ExtendedCommit + var veEnabled bool + func() { + conS.mtx.RLock() + defer conS.mtx.RUnlock() + veEnabled = conS.state.ConsensusParams.Feature.VoteExtensionsEnabled(prs.Height) + }() + if veEnabled { + ec = conS.blockStore.LoadBlockExtendedCommit(prs.Height) + } else { + c := conS.blockStore.LoadBlockCommit(prs.Height) + if c == nil { + return nil + } + ec = c.WrappedExtendedCommit() + } + if ec == nil { + return nil + } + if vote := ps.PickVoteToSend(ec, rng); vote != nil { + logger.Debug("Picked Catchup commit to send", "height", prs.Height) + return vote + } + } + return nil +} + +func pickVoteCurrentHeight( + logger log.Logger, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, + ps *PeerState, + rng *rand.Rand, +) *types.Vote { + // If there are lastCommits to send... + if prs.Step == cstypes.RoundStepNewHeight { + if vote := ps.PickVoteToSend(rs.LastCommit, rng); vote != nil { + logger.Debug("Picked rs.LastCommit to send") + return vote + } + } + // If there are POL prevotes to send... + if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if vote := ps.PickVoteToSend(polPrevotes, rng); vote != nil { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return vote + } + } + } + // If there are prevotes to send... + if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { + if vote := ps.PickVoteToSend(rs.Votes.Prevotes(prs.Round), rng); vote != nil { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return vote + } + } + // If there are precommits to send... + if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { + if vote := ps.PickVoteToSend(rs.Votes.Precommits(prs.Round), rng); vote != nil { + logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) + return vote + } + } + // If there are prevotes to send... Needed because of the validBlock mechanism. + if prs.Round != -1 && prs.Round <= rs.Round { + if vote := ps.PickVoteToSend(rs.Votes.Prevotes(prs.Round), rng); vote != nil { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return vote + } + } + // If there are POLPrevotes to send... + if prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if vote := ps.PickVoteToSend(polPrevotes, rng); vote != nil { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return vote + } + } + } + + return nil +} + +// ----------------------------------------------------------------------------- + func (conR *Reactor) peerStatsRoutine() { for { if !conR.IsRunning() { @@ -1035,32 +1055,32 @@ func (conR *Reactor) peerStatsRoutine() { // String returns a string representation of the Reactor. // NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables. // TODO: improve!
-func (conR *Reactor) String() string { +func (*Reactor) String() string { // better not to access shared variables return "ConsensusReactor" // conR.StringIndented("") } -// StringIndented returns an indented string representation of the Reactor +// StringIndented returns an indented string representation of the Reactor. func (conR *Reactor) StringIndented(indent string) string { s := "ConsensusReactor{\n" s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n" - for _, peer := range conR.Switch.Peers().List() { + conR.Switch.Peers().ForEach(func(peer p2p.Peer) { ps, ok := peer.Get(types.PeerStateKey).(*PeerState) if !ok { panic(fmt.Sprintf("Peer %v has no state", peer)) } s += indent + " " + ps.StringIndented(indent+" ") + "\n" - } + }) s += indent + "}" return s } -// ReactorMetrics sets the metrics +// ReactorMetrics sets the metrics. func ReactorMetrics(metrics *Metrics) ReactorOption { return func(conR *Reactor) { conR.Metrics = metrics } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // PeerState contains the known state of a peer, including its connection and // threadsafe access to its PeerRoundState. @@ -1086,7 +1106,7 @@ func (pss peerStateStats) String() string { pss.Votes, pss.BlockParts) } -// NewPeerState returns a new PeerState for the given Peer +// NewPeerState returns a new PeerState for the given Peer. func NewPeerState(peer p2p.Peer) *PeerState { return &PeerState{ peer: peer, @@ -1128,7 +1148,7 @@ func (ps *PeerState) MarshalJSON() ([]byte, error) { } // GetHeight returns an atomic snapshot of the PeerRoundState's height -// used by the mempool to ensure peers are caught up before broadcasting new txs +// used by the mempool to ensure peers are caught up before broadcasting new txs. func (ps *PeerState) GetHeight() int64 { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -1197,37 +1217,94 @@ func (ps *PeerState) setHasProposalBlockPart(height int64, round int32, index in ps.PRS.ProposalBlockParts.SetIndex(index, true) } -// PickSendVote picks a vote and sends it to the peer. -// Returns true if vote was sent. -func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { - if vote, ok := ps.PickVoteToSend(votes); ok { - ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) - if ps.peer.Send(p2p.Envelope{ - ChannelID: VoteChannel, - Message: &cmtcons.Vote{ - Vote: vote.ToProto(), - }, - }) { - ps.SetHasVote(vote) - return true - } +// SendPartSetHasPart sends the part to the peer. +// Returns true and marks the peer as having the part if the part was sent. +func (ps *PeerState) SendPartSetHasPart(part *types.Part, prs *cstypes.PeerRoundState) bool { + // Send the part + ps.logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round, "index", part.Index) + pp, err := part.ToProto() + if err != nil { + // NOTE: only returns error if part is nil, which it should never be by here + ps.logger.Error("Could not convert part to proto", "index", part.Index, "error", err) return false } + if ps.peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &cmtcons.BlockPart{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. 
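+ // pp is the proto form of the part, produced by part.ToProto() above.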
+ Part: *pp, + }, + }) { + ps.SetHasProposalBlockPart(prs.Height, prs.Round, int(part.Index)) + return true + } + ps.logger.Debug("Sending block part failed") + return false +} + +// SendProposalSetHasProposal sends the Proposal (and ProposalPOL if there is one) to the peer. +// If successful, it marks the peer as having the proposal. +func (ps *PeerState) SendProposalSetHasProposal( + logger log.Logger, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, +) { + // Proposal: share the proposal metadata with the peer. + logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) + if ps.peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &cmtcons.Proposal{Proposal: *rs.Proposal.ToProto()}, + }) { + // NOTE[ZM]: A peer might have received a different proposal msg, in which case this Proposal msg will be rejected! + ps.SetHasProposal(rs.Proposal) + } + + // ProposalPOL: lets the peer know which POL votes we have so far. + // Peer must receive ProposalMessage first. + // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, + // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). + if 0 <= rs.Proposal.POLRound { + logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) + ps.peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &cmtcons.ProposalPOL{ + Height: rs.Height, + ProposalPolRound: rs.Proposal.POLRound, + ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), + }, + }) + } +} + +// sendVoteSetHasVote sends the vote to the peer. +// Returns true and marks the peer as having the vote if the vote was sent. +func (ps *PeerState) sendVoteSetHasVote(vote *types.Vote) bool { + ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) + if ps.peer.Send(p2p.Envelope{ + ChannelID: VoteChannel, + Message: &cmtcons.Vote{ + Vote: vote.ToProto(), + }, + }) { + ps.SetHasVote(vote) + return true + } return false } // PickVoteToSend picks a vote to send to the peer. -// Returns true if a vote was picked. +// Returns the picked vote, or nil if no vote was picked. // NOTE: `votes` must be the correct Size() for the Height(). -func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) { +func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader, rng *rand.Rand) *types.Vote { ps.mtx.Lock() defer ps.mtx.Unlock() if votes.Size() == 0 { - return nil, false + return nil } - height, round, votesType, size := votes.GetHeight(), votes.GetRound(), cmtproto.SignedMsgType(votes.Type()), votes.Size() + height, round, votesType, size := votes.GetHeight(), votes.GetRound(), types.SignedMsgType(votes.Type()), votes.Size() // Lazily set data using 'votes'.
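// For commits, the peer's catchup-commit bit arrays are lazily ensured here before getVoteBitArray is consulted below (an assumption based on the pre-existing lazy-init logic, which this change leaves intact).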
if votes.IsCommit() { @@ -1237,15 +1314,19 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote psVotes := ps.getVoteBitArray(height, round, votesType) if psVotes == nil { - return nil, false // Not something worth sending + return nil // Not something worth sending } - if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { - return votes.GetByIndex(int32(index)), true + if index, ok := votes.BitArray().Sub(psVotes).PickRandom(rng); ok { + vote := votes.GetByIndex(int32(index)) + if vote == nil { + ps.logger.Error("votes.GetByIndex returned nil", "votes", votes, "index", index) + } + return vote } - return nil, false + return nil } -func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType cmtproto.SignedMsgType) *bits.BitArray { +func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType types.SignedMsgType) *bits.BitArray { if !types.IsVoteTypeValid(votesType) { return nil } @@ -1253,25 +1334,25 @@ if ps.PRS.Height == height { if ps.PRS.Round == round { switch votesType { - case cmtproto.PrevoteType: + case types.PrevoteType: return ps.PRS.Prevotes - case cmtproto.PrecommitType: + case types.PrecommitType: return ps.PRS.Precommits } } if ps.PRS.CatchupCommitRound == round { switch votesType { - case cmtproto.PrevoteType: + case types.PrevoteType: return nil - case cmtproto.PrecommitType: + case types.PrecommitType: return ps.PRS.CatchupCommit } } if ps.PRS.ProposalPOLRound == round { switch votesType { - case cmtproto.PrevoteType: + case types.PrevoteType: return ps.PRS.ProposalPOL - case cmtproto.PrecommitType: + case types.PrecommitType: return nil } } @@ -1280,9 +1361,9 @@ if ps.PRS.Height == height+1 { if ps.PRS.LastCommitRound == round { switch votesType { - case cmtproto.PrevoteType: + case types.PrevoteType: return nil - case cmtproto.PrecommitType: + case types.PrecommitType: return ps.PRS.LastCommit } } @@ -1389,7 +1470,7 @@ func (ps *PeerState) BlockPartsSent() int { return ps.Stats.BlockParts } -// SetHasVote sets the given vote as known by the peer +// SetHasVote sets the given vote as known by the peer. func (ps *PeerState) SetHasVote(vote *types.Vote) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -1397,7 +1478,17 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) { ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) } -func (ps *PeerState) setHasVote(height int64, round int32, voteType cmtproto.SignedMsgType, index int32) { +// SetHasVoteFromPeer sets the given vote as known by the peer, first ensuring the vote bit arrays for csHeight and csHeight-1 exist. +func (ps *PeerState) SetHasVoteFromPeer(vote *types.Vote, csHeight int64, valSize, lastCommitSize int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.ensureVoteBitArrays(csHeight, valSize) + ps.ensureVoteBitArrays(csHeight-1, lastCommitSize) + ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) +} + +func (ps *PeerState) setHasVote(height int64, round int32, voteType types.SignedMsgType, index int32) { ps.logger.Debug("setHasVote", "peerH/R", log.NewLazySprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), @@ -1545,12 +1636,12 @@ func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes * } } -// String returns a string representation of the PeerState +// String returns a string representation of the PeerState.
func (ps *PeerState) String() string { return ps.StringIndented("") } -// StringIndented returns a string representation of the PeerState +// StringIndented returns a string representation of the PeerState. func (ps *PeerState) StringIndented(indent string) string { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -1565,10 +1656,10 @@ func (ps *PeerState) StringIndented(indent string) string { indent) } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Messages -// Message is a message that can be sent and received on the Reactor +// Message is a message that can be sent and received on the Reactor. type Message interface { ValidateBasic() error } @@ -1586,10 +1677,10 @@ func init() { cmtjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") } -//------------------------------------- +// ------------------------------------- // NewRoundStepMessage is sent for every step taken in the ConsensusState. -// For every height/round/step transition +// For every height/round/step transition. type NewRoundStepMessage struct { Height int64 Round int32 @@ -1653,7 +1744,7 @@ func (m *NewRoundStepMessage) String() string { m.Height, m.Round, m.Step, m.LastCommitRound) } -//------------------------------------- +// ------------------------------------- // NewValidBlockMessage is sent when a validator observes a valid block B in some round r, // i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. @@ -1697,7 +1788,7 @@ func (m *NewValidBlockMessage) String() string { m.Height, m.Round, m.BlockPartSetHeader, m.BlockParts, m.IsCommit) } -//------------------------------------- +// ------------------------------------- // ProposalMessage is sent when a new block is proposed. type ProposalMessage struct { @@ -1714,7 +1805,7 @@ func (m *ProposalMessage) String() string { return fmt.Sprintf("[Proposal %v]", m.Proposal) } -//------------------------------------- +// ------------------------------------- // ProposalPOLMessage is sent when a previous proposal is re-proposed. type ProposalPOLMessage struct { @@ -1745,7 +1836,7 @@ func (m *ProposalPOLMessage) String() string { return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL) } -//------------------------------------- +// ------------------------------------- // BlockPartMessage is sent when gossiping a piece of the proposed block. type BlockPartMessage struct { @@ -1773,7 +1864,7 @@ func (m *BlockPartMessage) String() string { return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part) } -//------------------------------------- +// ------------------------------------- // VoteMessage is sent when voting for a proposal (or lack thereof). type VoteMessage struct { @@ -1790,13 +1881,13 @@ func (m *VoteMessage) String() string { return fmt.Sprintf("[Vote %v]", m.Vote) } -//------------------------------------- +// ------------------------------------- // HasVoteMessage is sent to indicate that a particular vote has been received.
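// It carries only the vote's coordinates (Height, Round, Type, Index), not the vote itself.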
type HasVoteMessage struct { Height int64 Round int32 - Type cmtproto.SignedMsgType + Type types.SignedMsgType Index int32 } @@ -1822,13 +1913,13 @@ func (m *HasVoteMessage) String() string { return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type) } -//------------------------------------- +// ------------------------------------- // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. type VoteSetMaj23Message struct { Height int64 Round int32 - Type cmtproto.SignedMsgType + Type types.SignedMsgType BlockID types.BlockID } @@ -1854,13 +1945,13 @@ func (m *VoteSetMaj23Message) String() string { return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID) } -//------------------------------------- +// ------------------------------------- // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID. type VoteSetBitsMessage struct { Height int64 Round int32 - Type cmtproto.SignedMsgType + Type types.SignedMsgType BlockID types.BlockID Votes *bits.BitArray } @@ -1888,7 +1979,7 @@ func (m *VoteSetBitsMessage) String() string { return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) } -//------------------------------------- +// ------------------------------------- // HasProposalBlockPartMessage is sent to indicate that a particular block part has been received. type HasProposalBlockPartMessage struct { @@ -1915,3 +2006,15 @@ func (m *HasProposalBlockPartMessage) ValidateBasic() error { func (m *HasProposalBlockPartMessage) String() string { return fmt.Sprintf("[HasProposalBlockPart PI:%v HR:{%v/%02d}]", m.Index, m.Height, m.Round) } + +var ( + _ types.Wrapper = &cmtcons.BlockPart{} + _ types.Wrapper = &cmtcons.HasVote{} + _ types.Wrapper = &cmtcons.HasProposalBlockPart{} + _ types.Wrapper = &cmtcons.NewRoundStep{} + _ types.Wrapper = &cmtcons.NewValidBlock{} + _ types.Wrapper = &cmtcons.Proposal{} + _ types.Wrapper = &cmtcons.ProposalPOL{} + _ types.Wrapper = &cmtcons.VoteSetBits{} + _ types.Wrapper = &cmtcons.VoteSetMaj23{} +) diff --git a/consensus/reactor_test.go b/internal/consensus/reactor_test.go similarity index 85% rename from consensus/reactor_test.go rename to internal/consensus/reactor_test.go index 090520fbe1c..efb96f9fe4b 100644 --- a/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -14,15 +14,15 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abcicli "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" cfg "github.com/cometbft/cometbft/config" - cstypes "github.com/cometbft/cometbft/consensus/types" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/crypto/tmhash" - "github.com/cometbft/cometbft/libs/bits" + "github.com/cometbft/cometbft/internal/bits" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" @@ -30,17 +30,16 @@ import ( mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" p2pmock "github.com/cometbft/cometbft/p2p/mock" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" 
"github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" statemocks "github.com/cometbft/cometbft/state/mocks" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" cmterrors "github.com/cometbft/cometbft/types/errors" + cmttime "github.com/cometbft/cometbft/types/time" ) -//---------------------------------------------- +// ---------------------------------------------- // in-process testnets var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) @@ -50,12 +49,13 @@ func startConsensusNet(t *testing.T, css []*State, n int) ( []types.Subscription, []*types.EventBus, ) { + t.Helper() reactors := make([]*Reactor, n) blocksSubs := make([]types.Subscription, 0) eventBuses := make([]*types.EventBus, n) for i := 0; i < n; i++ { - /*logger, err := cmtflags.ParseLogLevel("consensus:info,*:error", logger, "info") - if err != nil { t.Fatal(err)}*/ + // logger, err := cmtflags.ParseLogLevel("consensus:info,*:error", logger, "info") + // if err != nil { t.Fatal(err)} reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states reactors[i].SetLogger(css[i].Logger) @@ -108,20 +108,20 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type logger.Info("stopConsensusNet: DONE", "n", len(reactors)) } -// Ensure a testnet makes blocks +// Ensure a testnet makes blocks. func TestReactorBasic(t *testing.T) { - N := 4 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) + n := 4 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) defer cleanup() - reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) + reactors, blocksSubs, eventBuses := startConsensusNet(t, css, n) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(N, func(j int) { + timeoutWaitGroup(n, func(j int) { <-blocksSubs[j].Out() }) } -// Ensure we can process blocks with evidence +// Ensure we can process blocks with evidence. func TestReactorWithEvidence(t *testing.T) { nValidators := 4 testName := "consensus_reactor_test" @@ -132,7 +132,7 @@ func TestReactorWithEvidence(t *testing.T) { // to unroll unwieldy abstractions. 
Here we duplicate the code from: // css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) - genDoc, privVals := randGenesisDoc(nValidators, false, 30, nil) + genDoc, privVals := randGenesisDoc(nValidators, 30, nil, cmttime.Now()) css := make([]*State, nValidators) logger := consensusLogger() for i := 0; i < nValidators; i++ { @@ -143,10 +143,10 @@ func TestReactorWithEvidence(t *testing.T) { state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal + ensureDir(path.Dir(thisConfig.Consensus.WalFile())) // dir for wal app := appFunc() vals := types.TM2PB.ValidatorUpdates(state.Validators) - _, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals}) + _, err := app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) pv := privVals[i] @@ -163,8 +163,10 @@ func TestReactorWithEvidence(t *testing.T) { proxyAppConnMem := proxy.NewAppConnMempool(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics()) // Make Mempool + _, lanesInfo := fetchAppInfo(app) mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, + lanesInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -219,37 +221,37 @@ func TestReactorWithEvidence(t *testing.T) { } } -//------------------------------------ +// ------------------------------------ -// Ensure a testnet makes blocks when there are txs +// Ensure a testnet makes blocks when there are txs. func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { - N := 4 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore, + n := 4 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore, func(c *cfg.Config) { c.Consensus.CreateEmptyBlocks = false }) defer cleanup() - reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) + reactors, blocksSubs, eventBuses := startConsensusNet(t, css, n) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // send a tx - reqRes, err := assertMempool(css[3].txNotifier).CheckTx(kvstore.NewTxFromID(1)) + reqRes, err := assertMempool(css[3].txNotifier).CheckTx(kvstore.NewTxFromID(1), "") if err != nil { t.Error(err) } require.False(t, reqRes.Response.GetCheckTx().IsErr()) // wait till everyone makes the first new block - timeoutWaitGroup(N, func(j int) { + timeoutWaitGroup(n, func(j int) { <-blocksSubs[j].Out() }) } func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { - N := 1 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) + n := 1 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) defer cleanup() - reactors, _, eventBuses := startConsensusNet(t, css, N) + reactors, _, eventBuses := startConsensusNet(t, css, n) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) var ( @@ -268,7 +270,7 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { Height: 1, Round: 1, Index: 1, - Type: cmtproto.PrevoteType, + Type: types.PrevoteType, }, }) reactor.AddPeer(peer) @@ -276,10 +278,10 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { } func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t 
*testing.T) { - N := 1 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) + n := 1 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) defer cleanup() - reactors, _, eventBuses := startConsensusNet(t, css, N) + reactors, _, eventBuses := startConsensusNet(t, css, n) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) var ( @@ -298,7 +300,7 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) { Height: 1, Round: 1, Index: 1, - Type: cmtproto.PrevoteType, + Type: types.PrevoteType, }, }) }) @@ -360,7 +362,7 @@ func TestSwitchToConsensusVoteExtensions(t *testing.T) { cs.state.LastBlockHeight = testCase.storedHeight cs.state.LastValidators = cs.state.Validators.Copy() - cs.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.initialRequiredHeight + cs.state.ConsensusParams.Feature.VoteExtensionsEnableHeight = testCase.initialRequiredHeight propBlock, err := cs.createProposalBlock(ctx) require.NoError(t, err) @@ -373,11 +375,14 @@ func TestSwitchToConsensusVoteExtensions(t *testing.T) { var voteSet *types.VoteSet if testCase.includeExtensions { - voteSet = types.NewExtendedVoteSet(cs.state.ChainID, testCase.storedHeight, 0, cmtproto.PrecommitType, cs.state.Validators) + voteSet = types.NewExtendedVoteSet(cs.state.ChainID, testCase.storedHeight, 0, types.PrecommitType, cs.state.Validators) } else { - voteSet = types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, cmtproto.PrecommitType, cs.state.Validators) + voteSet = types.NewVoteSet(cs.state.ChainID, testCase.storedHeight, 0, types.PrecommitType, cs.state.Validators) } - signedVote := signVote(validator, cmtproto.PrecommitType, propBlock.Hash(), blockParts.Header(), testCase.includeExtensions) + signedVote := signVote(validator, types.PrecommitType, cs.state.ChainID, types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: blockParts.Header(), + }, testCase.includeExtensions) var veHeight int64 if testCase.includeExtensions { @@ -392,7 +397,8 @@ func TestSwitchToConsensusVoteExtensions(t *testing.T) { require.NoError(t, err) require.True(t, added) - veHeightParam := types.ABCIParams{VoteExtensionsEnableHeight: veHeight} + veHeightParam := types.DefaultFeatureParams() + veHeightParam.VoteExtensionsEnableHeight = veHeight if testCase.includeExtensions { cs.blockStore.SaveBlockWithExtendedCommit(propBlock, blockParts, voteSet.MakeExtendedCommit(veHeightParam)) } else { @@ -416,19 +422,19 @@ func TestSwitchToConsensusVoteExtensions(t *testing.T) { // Test we record stats about votes and block parts from other peers. 
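// These counters live in PeerState.Stats, are fed by the reactor's peerStatsRoutine, and are read back through accessors such as BlockPartsSent below.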
func TestReactorRecordsVotesAndBlockParts(t *testing.T) { - N := 4 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) + n := 4 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) defer cleanup() - reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) + reactors, blocksSubs, eventBuses := startConsensusNet(t, css, n) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(N, func(j int) { + timeoutWaitGroup(n, func(j int) { <-blocksSubs[j].Out() }) // Get peer - peer := reactors[1].Switch.Peers().List()[0] + peer := reactors[1].Switch.Peers().Copy()[0] // Get peer state ps := peer.Get(types.PeerStateKey).(*PeerState) @@ -436,12 +442,13 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { assert.Greater(t, ps.BlockPartsSent(), 0, "number of block parts sent should have increased") } -//------------------------------------------------------------- +// ------------------------------------------------------------- // ensure we can make blocks despite cycling a validator set func TestReactorVotingPowerChange(t *testing.T) { nVals := 4 logger := log.TestingLogger() + css, cleanup := randConsensusNet( t, nVals, @@ -466,15 +473,13 @@ func TestReactorVotingPowerChange(t *testing.T) { <-blocksSubs[j].Out() }) - //--------------------------------------------------------------------------- + // --------------------------------------------------------------------------- logger.Debug("---------------------------- Testing changing the voting power of one validator a few times") val1PubKey, err := css[0].privValidator.GetPubKey() require.NoError(t, err) - val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey) - require.NoError(t, err) - updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) + updateValidatorTx := updateValTx(val1PubKey, 25) previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) @@ -489,7 +494,7 @@ func TestReactorVotingPowerChange(t *testing.T) { css[0].GetRoundState().LastValidators.TotalVotingPower()) } - updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) + updateValidatorTx = updateValTx(val1PubKey, 2) previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) @@ -504,7 +509,7 @@ func TestReactorVotingPowerChange(t *testing.T) { css[0].GetRoundState().LastValidators.TotalVotingPower()) } - updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) + updateValidatorTx = updateValTx(val1PubKey, 26) previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) @@ -552,10 +557,8 @@ func TestReactorValidatorSetChanges(t *testing.T) { t.Run("Testing adding one validator", func(t *testing.T) { newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() - assert.NoError(t, err) - valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) - assert.NoError(t, err) - newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) + require.NoError(t, err) + newValidatorTx1 := updateValTx(newValidatorPubKey1, testMinPower) // wait till everyone makes block 2 // ensure the commit includes all validators @@ -581,9 +584,7 @@
func TestReactorValidatorSetChanges(t *testing.T) { t.Run("Testing changing the voting power of one validator", func(t *testing.T) { updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() require.NoError(t, err) - updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) - require.NoError(t, err) - updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) + updateValidatorTx1 := updateValTx(updateValidatorPubKey1, 25) previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1) @@ -601,15 +602,11 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() require.NoError(t, err) - newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) - require.NoError(t, err) - newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) + newValidatorTx2 := updateValTx(newValidatorPubKey2, testMinPower) newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() require.NoError(t, err) - newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) - require.NoError(t, err) - newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) + newValidatorTx3 := updateValTx(newValidatorPubKey3, testMinPower) t.Run("Testing adding two validators at once", func(t *testing.T) { waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3) @@ -621,8 +618,8 @@ func TestReactorValidatorSetChanges(t *testing.T) { }) t.Run("Testing removing two validators at once", func(t *testing.T) { - removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) + removeValidatorTx2 := updateValTx(newValidatorPubKey2, 0) + removeValidatorTx3 := updateValTx(newValidatorPubKey3, 0) waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3) waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3) @@ -633,21 +630,21 @@ func TestReactorValidatorSetChanges(t *testing.T) { }) } -// Check we can make blocks with skip_timeout_commit=false -func TestReactorWithTimeoutCommit(t *testing.T) { - N := 4 - css, cleanup := randConsensusNet(t, N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore) +// Check we can make blocks with timeout_commit=0. 
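+// (The SkipTimeoutCommit config override the old test relied on is gone; the test below tunes state.NextBlockDelay instead.)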
+func TestReactorWithDefaultTimeoutCommit(t *testing.T) { + n := 4 + css, cleanup := randConsensusNet(t, n, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore) defer cleanup() - // override default SkipTimeoutCommit == true for tests - for i := 0; i < N; i++ { - css[i].config.SkipTimeoutCommit = false + // override default NextBlockDelay == 0 for tests + for i := 0; i < n; i++ { + css[i].state.NextBlockDelay = 1 * time.Second } - reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1) + reactors, blocksSubs, eventBuses := startConsensusNet(t, css, n-1) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(N-1, func(j int) { + timeoutWaitGroup(n-1, func(j int) { <-blocksSubs[j].Out() }) } @@ -660,6 +657,7 @@ func waitForAndValidateBlock( css []*State, txs ...[]byte, ) { + t.Helper() timeoutWaitGroup(n, func(j int) { css[j].Logger.Debug("waitForAndValidateBlock") msg := <-blocksSubs[j].Out() @@ -670,7 +668,7 @@ func waitForAndValidateBlock( // optionally add transactions for the next block for _, tx := range txs { - reqRes, err := assertMempool(css[j].txNotifier).CheckTx(tx) + reqRes, err := assertMempool(css[j].txNotifier).CheckTx(tx, "") require.NoError(t, err) require.False(t, reqRes.Response.GetCheckTx().IsErr()) } @@ -685,6 +683,7 @@ func waitForAndValidateBlockWithTx( css []*State, txs ...[]byte, ) { + t.Helper() timeoutWaitGroup(n, func(j int) { ntxs := 0 BLOCK_TX_LOOP: @@ -718,6 +717,7 @@ func waitForBlockWithUpdatedValsAndValidateIt( blocksSubs []types.Subscription, css []*State, ) { + t.Helper() timeoutWaitGroup(n, func(j int) { var newBlock *types.Block LOOP: @@ -736,7 +736,7 @@ func waitForBlockWithUpdatedValsAndValidateIt( } err := validateBlock(newBlock, updatedVals) - assert.Nil(t, err) + require.NoError(t, err) }) } @@ -773,7 +773,7 @@ func timeoutWaitGroup(n int, f func(int)) { close(done) }() - // we're running many nodes in-process, possibly in in a virtual machine, + // we're running many nodes in-process, possibly in a virtual machine, // and spewing debug messages - making a block could take a while, timeout := time.Second * 20 @@ -784,7 +784,7 @@ func timeoutWaitGroup(n int, f func(int)) { } } -//------------------------------------------------------------- +// ------------------------------------------------------------- // Ensure basic validation of structs is functioning func TestNewRoundStepMessageValidateBasic(t *testing.T) { @@ -806,7 +806,6 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { message := NewRoundStepMessage{ Height: tc.messageHeight, @@ -841,7 +840,6 @@ func TestNewRoundStepMessageValidateHeight(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { message := NewRoundStepMessage{ Height: tc.messageHeight, @@ -865,7 +863,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { malleateFn func(*NewValidBlockMessage) expErr string }{ - {func(msg *NewValidBlockMessage) {}, ""}, + {func(_ *NewValidBlockMessage) {}, ""}, {func(msg *NewValidBlockMessage) { msg.Height = -1 }, cmterrors.ErrNegativeField{Field: "Height"}.Error()}, {func(msg *NewValidBlockMessage) { msg.Round = -1 }, cmterrors.ErrNegativeField{Field: "Round"}.Error()}, { @@ -886,7 +884,6 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("#%d", i), func(t 
*testing.T) { msg := &NewValidBlockMessage{ Height: 1, @@ -899,7 +896,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { tc.malleateFn(msg) err := msg.ValidateBasic() - if tc.expErr != "" && assert.Error(t, err) { + if tc.expErr != "" && assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), tc.expErr) } }) @@ -911,7 +908,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { malleateFn func(*ProposalPOLMessage) expErr string }{ - {func(msg *ProposalPOLMessage) {}, ""}, + {func(_ *ProposalPOLMessage) {}, ""}, {func(msg *ProposalPOLMessage) { msg.Height = -1 }, cmterrors.ErrNegativeField{Field: "Height"}.Error()}, {func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, cmterrors.ErrNegativeField{Field: "ProposalPOLRound"}.Error()}, {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, cmterrors.ErrRequiredField{Field: "ProposalPOL"}.Error()}, @@ -922,7 +919,6 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { msg := &ProposalPOLMessage{ Height: 1, @@ -932,7 +928,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { tc.malleateFn(msg) err := msg.ValidateBasic() - if tc.expErr != "" && assert.Error(t, err) { + if tc.expErr != "" && assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), tc.expErr) } }) @@ -955,7 +951,6 @@ func TestBlockPartMessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { message := BlockPartMessage{ Height: tc.messageHeight, @@ -970,13 +965,13 @@ func TestBlockPartMessageValidateBasic(t *testing.T) { message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)} message.Part.Index = 1 - assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + require.Error(t, message.ValidateBasic()) } func TestHasVoteMessageValidateBasic(t *testing.T) { const ( - validSignedMsgType cmtproto.SignedMsgType = 0x01 - invalidSignedMsgType cmtproto.SignedMsgType = 0x03 + validSignedMsgType types.SignedMsgType = 0x01 + invalidSignedMsgType types.SignedMsgType = 0x03 ) testCases := []struct { //nolint: maligned @@ -985,7 +980,7 @@ func TestHasVoteMessageValidateBasic(t *testing.T) { messageIndex int32 messageHeight int64 testName string - messageType cmtproto.SignedMsgType + messageType types.SignedMsgType }{ {false, 0, 0, 0, "Valid Message", validSignedMsgType}, {true, -1, 0, 0, "Invalid Message", validSignedMsgType}, @@ -995,7 +990,6 @@ func TestHasVoteMessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { message := HasVoteMessage{ Height: tc.messageHeight, @@ -1011,8 +1005,8 @@ func TestHasVoteMessageValidateBasic(t *testing.T) { func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { const ( - validSignedMsgType cmtproto.SignedMsgType = 0x01 - invalidSignedMsgType cmtproto.SignedMsgType = 0x03 + validSignedMsgType types.SignedMsgType = 0x01 + invalidSignedMsgType types.SignedMsgType = 0x03 ) validBlockID := types.BlockID{} @@ -1029,7 +1023,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { messageRound int32 messageHeight int64 testName string - messageType cmtproto.SignedMsgType + messageType types.SignedMsgType messageBlockID types.BlockID }{ {false, 0, 0, "Valid Message", 
validSignedMsgType, validBlockID}, @@ -1040,7 +1034,6 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { message := VoteSetMaj23Message{ Height: tc.messageHeight, @@ -1059,7 +1052,7 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { malleateFn func(*VoteSetBitsMessage) expErr string }{ - {func(msg *VoteSetBitsMessage) {}, ""}, + {func(_ *VoteSetBitsMessage) {}, ""}, {func(msg *VoteSetBitsMessage) { msg.Height = -1 }, cmterrors.ErrNegativeField{Field: "Height"}.Error()}, {func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, cmterrors.ErrInvalidField{Field: "Type"}.Error()}, {func(msg *VoteSetBitsMessage) { @@ -1078,7 +1071,6 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { msg := &VoteSetBitsMessage{ Height: 1, @@ -1090,7 +1082,7 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { tc.malleateFn(msg) err := msg.ValidateBasic() - if tc.expErr != "" && assert.Error(t, err) { + if tc.expErr != "" && assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), tc.expErr) } }) @@ -1125,3 +1117,40 @@ func TestMarshalJSONPeerState(t *testing.T) { "block_parts":"0"} }`, string(data)) } + +func TestVoteMessageValidateBasic(t *testing.T) { + cs, vss := randState(2) + chainID := cs.state.ChainID + + randBytes := cmtrand.Bytes(tmhash.Size) + blockID := types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{ + Total: 1, + Hash: randBytes, + }, + } + vote := signVote(vss[1], types.PrecommitType, chainID, blockID, true) + + testCases := []struct { + malleateFn func(*VoteMessage) + expErr string + }{ + {func(_ *VoteMessage) {}, ""}, + {func(msg *VoteMessage) { msg.Vote.ValidatorIndex = -1 }, "negative ValidatorIndex"}, + // INVALID, but passes ValidateBasic, since the method does not know the number of active validators + {func(msg *VoteMessage) { msg.Vote.ValidatorIndex = 1000 }, ""}, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + msg := &VoteMessage{vote} + + tc.malleateFn(msg) + err := msg.ValidateBasic() + if tc.expErr != "" && assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here + assert.Contains(t, err.Error(), tc.expErr) + } + }) + } +} diff --git a/consensus/replay.go b/internal/consensus/replay.go similarity index 90% rename from consensus/replay.go rename to internal/consensus/replay.go index ef94cf51ea6..66eecf1107a 100644 --- a/consensus/replay.go +++ b/internal/consensus/replay.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "hash/crc32" "io" @@ -28,10 +29,10 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli) // The former is handled by the WAL, the latter by the proxyApp Handshake on // restart, which ultimately hands off the work to the WAL. -//----------------------------------------- +// ----------------------------------------- // 1. Recover from failure during consensus // (by replaying messages from the WAL) -//----------------------------------------- +// ----------------------------------------- // Unmarshal and apply a single message to the consensus state as if it were // received in receiveRoutine. Lines that start with "#" are ignored. 
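// Notably, "#ENDHEIGHT: <height>" markers fall in this category; cs.wal.SearchForEndHeight relies on them below to find where replay should begin.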
@@ -56,9 +57,9 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) } case <-newStepSub.Canceled(): - return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled") + return errors.New("failed to read off newStepSub.Out(). newStepSub was canceled") case <-ticker: - return fmt.Errorf("failed to read off newStepSub.Out()") + return errors.New("failed to read off newStepSub.Out()") } } case msgInfo: @@ -70,7 +71,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr case *ProposalMessage: p := msg.Proposal cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", - p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID) + p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID, "receive_time", m.ReceiveTime) case *BlockPartMessage: cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) case *VoteMessage: @@ -92,7 +93,6 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr // Replay only those messages since the last block. `timeoutRoutine` should // run concurrently to read off tickChan. func (cs *State) catchupReplay(csHeight int64) error { - // Set replayMode to true so we don't log signing errors. cs.replayMode = true defer func() { cs.replayMode = false }() @@ -127,7 +127,7 @@ func (cs *State) catchupReplay(csHeight int64) error { endHeight = 0 } gr, found, err = cs.wal.SearchForEndHeight(endHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) - if err == io.EOF { + if errors.Is(err, io.EOF) { cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) } else if err != nil { return err @@ -146,7 +146,7 @@ LOOP: for { msg, err = dec.Decode() switch { - case err == io.EOF: + case errors.Is(err, io.EOF): break LOOP case IsDataCorruptionError(err): cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) @@ -166,7 +166,7 @@ LOOP: return nil } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // Parses marker lines of the form: // #ENDHEIGHT: 12345 @@ -192,11 +192,11 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc { } }*/ -//--------------------------------------------------- +// --------------------------------------------------- // 2. Recover from failure while applying the block. // (by handshaking with the app to figure out where // we were last, and using the WAL to recover there.) -//--------------------------------------------------- +// --------------------------------------------------- type Handshaker struct { stateStore sm.Store @@ -210,8 +210,8 @@ type Handshaker struct { } func NewHandshaker(stateStore sm.Store, state sm.State, - store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { - + store sm.BlockStore, genDoc *types.GenesisDoc, +) *Handshaker { return &Handshaker{ stateStore: stateStore, initialState: state, @@ -238,13 +238,10 @@ func (h *Handshaker) NBlocks() int { return h.nBlocks } -// TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error { - - // Handshake is done via ABCI Info on the query conn. 
- res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo) - if err != nil { - return fmt.Errorf("error calling Info: %v", err) +// Handshake uses the given ABCI Info response, obtained by the caller on the query conn, to sync CometBFT with the app. +func (h *Handshaker) Handshake(ctx context.Context, res *abci.InfoResponse, proxyApp proxy.AppConns) error { + if res == nil { + return errors.New("empty ABCI Info response passed to handshake") } blockHeight := res.LastBlockHeight @@ -266,7 +263,7 @@ } // Replay blocks up to the latest in the blockstore. - appHash, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) + appHash, err := h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("error on replay: %v", err) } @@ -305,12 +302,16 @@ func (h *Handshaker) ReplayBlocks( if appBlockHeight == 0 { validators := make([]*types.Validator, len(h.genDoc.Validators)) for i, val := range h.genDoc.Validators { + // Ensure that the public key type is supported. + if _, ok := types.ABCIPubKeyTypesToNames[val.PubKey.Type()]; !ok { + return nil, fmt.Errorf("unsupported public key type %s (validator name: %s)", val.PubKey.Type(), val.Name) + } validators[i] = types.NewValidator(val.PubKey, val.Power) } validatorSet := types.NewValidatorSet(validators) nextVals := types.TM2PB.ValidatorUpdates(validatorSet) pbparams := h.genDoc.ConsensusParams.ToProto() - req := &abci.RequestInitChain{ + req := &abci.InitChainRequest{ Time: h.genDoc.GenesisTime, ChainId: h.genDoc.ChainID, InitialHeight: h.genDoc.InitialHeight, @@ -342,7 +343,7 @@ func (h *Handshaker) ReplayBlocks( state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1) } else if len(h.genDoc.Validators) == 0 { // If validator set is not set in genesis and still empty after InitChain, exit. - return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") + return nil, errors.New("validator set is nil in genesis and still empty after InitChain") } if res.ConsensusParams != nil { @@ -393,13 +394,11 @@ func (h *Handshaker) ReplayBlocks( if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) - } else if appBlockHeight == storeBlockHeight { // We're good! assertAppHashEqualsOneFromState(appHash, state) return appHash, nil } - } else if storeBlockHeight == stateBlockHeight+1 { // We saved the block in the store but haven't updated the state, // so we'll need to replay a block using the WAL. @@ -416,7 +415,10 @@ func (h *Handshaker) ReplayBlocks( // but we'd have to allow the WAL to replay a block that wrote its #ENDHEIGHT h.logger.Info("Replay last block using real app") state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) - return state.AppHash, err + if err != nil { + return nil, err + } + return state.AppHash, nil case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. @@ -434,9 +436,11 @@ mockApp := newMockProxyApp(finalizeBlockResponse) h.logger.Info("Replay last block using mock app") state, err = h.replayBlock(state, storeBlockHeight, mockApp) - return state.AppHash, err + if err != nil { + return nil, err + } + return state.AppHash, nil } - } panic(fmt.Sprintf("uncovered case!
appHeight: %d, storeHeight: %d, stateHeight: %d", @@ -449,7 +453,8 @@ func (h *Handshaker) replayBlocks( proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, - mutateState bool) ([]byte, error) { + mutateState bool, +) ([]byte, error) { // App is further behind than it should be, so we need to replay blocks. // We replay all blocks from appBlockHeight+1. // @@ -478,13 +483,13 @@ func (h *Handshaker) replayBlocks( } h.logger.Info("Applying block", "height", i) - block := h.store.LoadBlock(i) + block, _ := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. if len(appHash) > 0 { assertAppHashEqualsOneFromBlock(appHash, block) } - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight) + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, storeBlockHeight) if err != nil { return nil, err } @@ -507,8 +512,7 @@ func (h *Handshaker) replayBlocks( // ApplyBlock on the proxyApp with the last block. func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { - block := h.store.LoadBlock(height) - meta := h.store.LoadBlockMeta(height) + block, meta := h.store.LoadBlock(height) // Use stubs for both mempool and evidence pool since no transactions nor // evidence are needed here - block already exists. @@ -516,7 +520,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap blockExec.SetEventBus(h.eventBus) var err error - state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + state, err = blockExec.ApplyBlock(state, meta.BlockID, block, block.Height) if err != nil { return sm.State{}, err } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go new file mode 100644 index 00000000000..ee23779e0cc --- /dev/null +++ b/internal/consensus/replay_stubs.go @@ -0,0 +1,76 @@ +package consensus + +import ( + "context" + + abcicli "github.com/cometbft/cometbft/abci/client" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/internal/clist" + mempl "github.com/cometbft/cometbft/mempool" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/proxy" + "github.com/cometbft/cometbft/types" +) + +// ----------------------------------------------------------------------------- + +type emptyMempool struct{} + +var _ mempl.Mempool = emptyMempool{} + +func (emptyMempool) Lock() {} +func (emptyMempool) Unlock() {} +func (emptyMempool) PreUpdate() {} +func (emptyMempool) Size() int { return 0 } +func (emptyMempool) SizeBytes() int64 { return 0 } +func (emptyMempool) CheckTx(types.Tx, nodekey.ID) (*abcicli.ReqRes, error) { + return nil, nil +} +func (emptyMempool) RemoveTxByKey(types.TxKey) error { return nil } +func (emptyMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return types.Txs{} } +func (emptyMempool) GetTxByHash([]byte) types.Tx { return types.Tx{} } +func (emptyMempool) ReapMaxTxs(int) types.Txs { return types.Txs{} } +func (emptyMempool) Update( + int64, + types.Txs, + []*abci.ExecTxResult, + mempl.PreCheckFunc, + mempl.PostCheckFunc, +) error { + return nil +} +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn() error { return nil } +func (emptyMempool) Contains(types.TxKey) bool { return false } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) 
TxsBytes() int64 { return 0 } +func (emptyMempool) TxsFront() *clist.CElement { return nil } +func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } + +// ----------------------------------------------------------------------------- +// newMockProxyApp uses ABCIResponses to give the right results. +// +// Useful because we don't want to call Commit() twice for the same block on +// the real app. + +func newMockProxyApp(finalizeBlockResponse *abci.FinalizeBlockResponse) proxy.AppConnConsensus { + clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ + finalizeBlockResponse: finalizeBlockResponse, + }) + cli, _ := clientCreator.NewABCIConsensusClient() + err := cli.Start() + if err != nil { + panic(err) + } + return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()) +} + +type mockProxyApp struct { + abci.BaseApplication + finalizeBlockResponse *abci.FinalizeBlockResponse +} + +func (mock *mockProxyApp) FinalizeBlock(context.Context, *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) { + return mock.finalizeBlockResponse, nil +} diff --git a/consensus/replay_test.go b/internal/consensus/replay_test.go similarity index 80% rename from consensus/replay_test.go rename to internal/consensus/replay_test.go index 19eef940d8b..e0859b4a9a5 100644 --- a/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" "os" @@ -18,18 +19,16 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/abci/types/mocks" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cfg "github.com/cometbft/cometbft/config" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/mempool" + mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" smmocks "github.com/cometbft/cometbft/state/mocks" @@ -54,12 +53,12 @@ func TestMain(m *testing.M) { // These tests ensure we can always recover from failure at any part of the consensus process. // There are two general failure scenarios: failure during consensus, and failure while applying the block. // Only the latter interacts with the app and store, -// but the former has to deal with restrictions on re-use of priv_validator keys. +// but the former has to deal with restrictions on reuse of priv_validator keys. // The `WAL Tests` are for failures during the consensus; // the `Handshake Tests` are for failures in applying the block. // With the help of the WAL, we can recover from it all! 
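// A minimal sketch of the calling convention these tests now exercise: with
// this change, Handshake no longer queries ABCI Info itself; the caller first
// fetches the Info response on the query connection and passes it in (sketch
// only, assuming proxyApp is already started; error handling abbreviated):
//
//	res, err := proxyApp.Query().Info(ctx, proxy.InfoRequest)
//	if err != nil {
//		return fmt.Errorf("error calling Info: %v", err)
//	}
//	if err := handshaker.Handshake(ctx, res, proxyApp); err != nil {
//		return fmt.Errorf("error during handshake: %v", err)
//	}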
-//------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------ // WAL Tests // TODO: It would be better to verify explicitly which states we can recover from without the wal @@ -72,22 +71,27 @@ func startNewStateAndWaitForBlock( blockDB dbm.DB, stateStore sm.Store, ) { + t.Helper() logger := log.TestingLogger() state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) - privValidator := loadPrivValidator(consensusReplayConfig) + privValidator, err := loadPrivValidator(consensusReplayConfig) + require.NoError(t, err) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, state, privValidator, - kvstore.NewInMemoryApplication(), + app, blockDB, + lanesInfo, ) cs.SetLogger(logger) bytes, _ := os.ReadFile(cs.config.WalFile()) t.Logf("====== WAL: \n\r%X\n", bytes) - err := cs.Start() + err = cs.Start() require.NoError(t, err) defer func() { if err := cs.Stop(); err != nil { @@ -117,7 +121,7 @@ func sendTxs(ctx context.Context, cs *State) { return default: tx := kvstore.NewTxFromID(i) - reqRes, err := assertMempool(cs.txNotifier).CheckTx(tx) + reqRes, err := assertMempool(cs.txNotifier).CheckTx(tx, "") if err != nil { panic(err) } @@ -139,12 +143,12 @@ func TestWALCrash(t *testing.T) { }{ { "empty block", - func(stateDB dbm.DB, cs *State, ctx context.Context) {}, + func(_ dbm.DB, _ *State, _ context.Context) {}, 1, }, { "many non-empty blocks", - func(stateDB dbm.DB, cs *State, ctx context.Context) { + func(_ dbm.DB, cs *State, ctx context.Context) { go sendTxs(ctx, cs) }, 3, @@ -152,7 +156,6 @@ func TestWALCrash(t *testing.T) { } for i, tc := range testCases { - tc := tc consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i)) t.Run(tc.name, func(t *testing.T) { crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) @@ -163,6 +166,7 @@ func TestWALCrash(t *testing.T) { func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config, initFn func(dbm.DB, *State, context.Context), heightToStop int64, ) { + t.Helper() walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -180,13 +184,17 @@ LOOP: }) state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) - privValidator := loadPrivValidator(consensusReplayConfig) + privValidator, err := loadPrivValidator(consensusReplayConfig) + require.NoError(t, err) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, state, privValidator, kvstore.NewInMemoryApplication(), blockDB, + lanesInfo, ) cs.SetLogger(logger) @@ -313,19 +321,18 @@ func (w *crashingWAL) Wait() { w.next.Wait() } const numBlocks = 6 -//--------------------------------------- +// --------------------------------------- // Test handshake/replay // 0 - all synced up // 1 - saved block but app and state are behind by one height // 2 - save block and committed (i.e. app got `Commit`) but state is behind -// 3 - same as 2 but with a truncated block store +// 3 - same as 2 but with a truncated block store. 
var modes = []uint{0, 1, 2, 3} -// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay +// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay. func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (*cfg.Config, []*types.Block, []*types.ExtendedCommit, sm.State) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + t.Helper() nPeers := 7 nVals := 4 @@ -338,12 +345,11 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* func(_ string) abci.Application { return newKVStore() }) + chainID := genDoc.ChainID genesisState, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) t.Cleanup(cleanup) - partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) @@ -359,7 +365,7 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* ensureNewRound(newRoundCh, height, 0) ensureNewProposal(proposalCh, height, round) rs := css[0].GetRoundState() - signAddVotes(css[0], cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), true, vss[1:nVals]...) + signAddVotes(css[0], types.PrecommitType, chainID, types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, true, vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) // HEIGHT 2 @@ -367,31 +373,21 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* incrementHeight(vss...) newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() require.NoError(t, err) - valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) - require.NoError(t, err) - newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - _, err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1) - assert.NoError(t, err) - propBlock, err := css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + newValidatorTx1 := updateValTx(newValidatorPubKey1, testMinPower) + _, err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, "") require.NoError(t, err) - propBlockParts, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vss[1].Height, round, -1, blockID) - p := proposal.ToProto() - if err := vss[1].SignProposal(test.DefaultTestChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + propBlock, propBlockParts, blockID := createProposalBlock(t, css[0]) // changeProposer(t, cs1, v2) + proposal := types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vss[1]) // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(proposal, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(css[0], cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), true, vss[1:nVals]...) + signAddVotes(css[0], types.PrecommitType, chainID, types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, true, vss[1:nVals]...) 
ensureNewRound(newRoundCh, height+1, 0) // HEIGHT 3 @@ -399,31 +395,21 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* incrementHeight(vss...) updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() require.NoError(t, err) - updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + updateValidatorTx1 := updateValTx(updateValidatorPubKey1, 25) + _, err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, "") require.NoError(t, err) - updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - _, err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1) - assert.NoError(t, err) - propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) - require.NoError(t, err) - propBlockParts, err = propBlock.MakePartSet(partSize) - require.NoError(t, err) - blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, round, -1, blockID) - p = proposal.ToProto() - if err := vss[2].SignProposal(test.DefaultTestChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + propBlock, propBlockParts, blockID = createProposalBlock(t, css[0]) // changeProposer(t, cs1, v2) + proposal = types.NewProposal(vss[2].Height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vss[2]) // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(proposal, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(css[0], cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), true, vss[1:nVals]...) + signAddVotes(css[0], types.PrecommitType, chainID, types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, true, vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) // HEIGHT 4 @@ -431,23 +417,17 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* incrementHeight(vss...) 
newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() require.NoError(t, err) - newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) - require.NoError(t, err) - newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - _, err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2) + newValidatorTx2 := updateValTx(newValidatorPubKey2, testMinPower) + _, err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, "") require.NoError(t, err) newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() require.NoError(t, err) - newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + newValidatorTx3 := updateValTx(newValidatorPubKey3, testMinPower) + _, err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, "") require.NoError(t, err) - newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - _, err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3) - assert.NoError(t, err) - propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) - require.NoError(t, err) - propBlockParts, err = propBlock.MakePartSet(partSize) - require.NoError(t, err) - blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + + propBlock, propBlockParts, blockID = createProposalBlock(t, css[0]) // changeProposer(t, cs1, v2) + newVss := make([]*validatorStub, nVals+1) copy(newVss, vss[:nVals+1]) sort.Sort(ValidatorStubsByPower(newVss)) @@ -460,7 +440,7 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* cssPubKey, err := css[cssIdx].privValidator.GetPubKey() require.NoError(t, err) - if vsPubKey.Equals(cssPubKey) { + if vsPubKey.Type() == cssPubKey.Type() && bytes.Equal(vsPubKey.Bytes(), cssPubKey.Bytes()) { return i } } @@ -469,29 +449,26 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* selfIndex := valIndexFn(0) - proposal = types.NewProposal(vss[3].Height, round, -1, blockID) - p = proposal.ToProto() - if err := vss[3].SignProposal(test.DefaultTestChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + proposal = types.NewProposal(vss[3].Height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vss[3]) // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(proposal, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) - removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - _, err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2) - assert.Nil(t, err) + removeValidatorTx2 := updateValTx(newValidatorPubKey2, 0) + _, err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, "") + require.NoError(t, err) rs = css[0].GetRoundState() for i := 0; i < nVals+1; i++ { if i == selfIndex { continue } - signAddVotes(css[0], cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), true, newVss[i]) + signAddVotes(css[0], types.PrecommitType, chainID, + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, true, newVss[i]) } ensureNewRound(newRoundCh, height+1, 0) @@ -510,35 +487,29 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* if i == selfIndex { continue } - signAddVotes(css[0], cmtproto.PrecommitType, rs.ProposalBlock.Hash(), 
rs.ProposalBlockParts.Header(), true, newVss[i]) + signAddVotes(css[0], types.PrecommitType, chainID, types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, true, newVss[i]) } ensureNewRound(newRoundCh, height+1, 0) // HEIGHT 6 height++ incrementHeight(vss...) - removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - _, err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3) - assert.NoError(t, err) - propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) - require.NoError(t, err) - propBlockParts, err = propBlock.MakePartSet(partSize) + removeValidatorTx3 := updateValTx(newValidatorPubKey3, 0) + _, err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, "") require.NoError(t, err) - blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + + propBlock, propBlockParts, blockID = createProposalBlock(t, css[0]) // changeProposer(t, cs1, v2) + newVss = make([]*validatorStub, nVals+3) copy(newVss, vss[:nVals+3]) sort.Sort(ValidatorStubsByPower(newVss)) selfIndex = valIndexFn(0) - proposal = types.NewProposal(vss[1].Height, round, -1, blockID) - p = proposal.ToProto() - if err := vss[1].SignProposal(test.DefaultTestChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + proposal = types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vss[1]) // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(proposal, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) @@ -547,20 +518,21 @@ func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (* if i == selfIndex { continue } - signAddVotes(css[0], cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), true, newVss[i]) + signAddVotes(css[0], types.PrecommitType, chainID, types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, true, newVss[i]) } ensureNewRound(newRoundCh, height+1, 0) chain := []*types.Block{} extCommits := []*types.ExtendedCommit{} for i := 1; i <= nBlocks; i++ { - chain = append(chain, css[0].blockStore.LoadBlock(int64(i))) + block, _ := css[0].blockStore.LoadBlock(int64(i)) + chain = append(chain, block) extCommits = append(extCommits, css[0].blockStore.LoadBlockExtendedCommit(int64(i))) } return config, chain, extCommits, genesisState } -// Sync from scratch +// Sync from scratch. func TestHandshakeReplayAll(t *testing.T) { for _, m := range modes { t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) { @@ -572,7 +544,7 @@ func TestHandshakeReplayAll(t *testing.T) { } } -// Sync many, not from scratch +// Sync many, not from scratch. func TestHandshakeReplaySome(t *testing.T) { for _, m := range modes { t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) { @@ -584,7 +556,7 @@ func TestHandshakeReplaySome(t *testing.T) { } } -// Sync from lagging by one +// Sync from lagging by one. func TestHandshakeReplayOne(t *testing.T) { for _, m := range modes { t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) { @@ -596,7 +568,7 @@ func TestHandshakeReplayOne(t *testing.T) { } } -// Sync from caught up +// Sync from caught up. 
func TestHandshakeReplayNone(t *testing.T) { for _, m := range modes { t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) { @@ -624,8 +596,9 @@ func tempWALWithData(data []byte) string { } // Make some blocks. Start a fresh app and apply nBlocks blocks. -// Then restart the app and sync it up with the remaining blocks +// Then restart the app and sync it up with the remaining blocks. func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) { + t.Helper() var ( testConfig *cfg.Config chain []*types.Block @@ -724,8 +697,10 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin } }) + abciInfoResp, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) // perform the replay protocol to sync Tendermint and the application - err = handshaker.Handshake(context.Background(), proxyApp) + err = handshaker.Handshake(context.Background(), abciInfoResp, proxyApp) if expectError { require.Error(t, err) // finish the test early @@ -734,7 +709,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin require.NoError(t, err) // get the latest app hash from the app - res, err := proxyApp.Query().Info(context.Background(), proxy.RequestInfo) + res, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) require.NoError(t, err) // block store and app height should be in sync @@ -766,21 +741,23 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin } } -func applyBlock(t *testing.T, stateStore sm.Store, mempool mempool.Mempool, evpool sm.EvidencePool, st sm.State, blk *types.Block, proxyApp proxy.AppConns, bs sm.BlockStore) sm.State { +func applyBlock(t *testing.T, stateStore sm.Store, mempool mempl.Mempool, evpool sm.EvidencePool, st sm.State, blk *types.Block, proxyApp proxy.AppConns, bs sm.BlockStore) sm.State { + t.Helper() testPartSize := types.BlockPartSizeBytes blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, bs) bps, err := blk.MakePartSet(testPartSize) require.NoError(t, err) blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()} - newState, err := blockExec.ApplyBlock(st, blkID, blk) + newState, err := blockExec.ApplyBlock(st, blkID, blk, blk.Height) require.NoError(t, err) return newState } -func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm.Store, mempool mempool.Mempool, evpool sm.EvidencePool, +func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm.Store, mempool mempl.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, nBlocks int, mode uint, bs sm.BlockStore, ) { + t.Helper() // start a new app without handshake, play nBlocks blocks if err := proxyApp.Start(); err != nil { panic(err) @@ -789,7 +766,7 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm state.Version.Consensus.App = kvstore.AppVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChain(context.Background(), &abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChain(context.Background(), &abci.InitChainRequest{ Validators: validators, }); err != nil { panic(err) @@ -815,7 +792,7 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm // update the kvstore height and apphash // as if we ran commit but not // here we expect 
a dummy state store to be used - state = applyBlock(t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, bs) + _ = applyBlock(t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, bs) } default: panic(fmt.Sprintf("unknown mode %v", mode)) @@ -826,7 +803,7 @@ func buildTMStateFromChain( t *testing.T, config *cfg.Config, stateStore sm.Store, - mempool mempool.Mempool, + mempool mempl.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, @@ -834,6 +811,7 @@ func buildTMStateFromChain( mode uint, bs sm.BlockStore, ) (sm.State, []byte) { + t.Helper() // run the whole chain against this client to build up the CometBFT state clientCreator := proxy.NewLocalClientCreator( kvstore.NewPersistentApplication( @@ -846,7 +824,7 @@ func buildTMStateFromChain( state.Version.Consensus.App = kvstore.AppVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChain(context.Background(), &abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChain(context.Background(), &abci.InitChainRequest{ Validators: validators, }); err != nil { panic(err) @@ -875,7 +853,7 @@ func buildTMStateFromChain( vals, _ := stateStore.LoadValidators(penultimateHeight) dummyStateStore.On("LoadValidators", penultimateHeight).Return(vals, nil) dummyStateStore.On("Save", mock.Anything).Return(nil) - dummyStateStore.On("SaveFinalizeBlockResponse", lastHeight, mock.MatchedBy(func(response *abci.ResponseFinalizeBlock) bool { + dummyStateStore.On("SaveFinalizeBlockResponse", lastHeight, mock.MatchedBy(func(response *abci.FinalizeBlockResponse) bool { require.NoError(t, stateStore.SaveFinalizeBlockResponse(lastHeight, response)) return true })).Return(nil) @@ -931,7 +909,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + genDoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) + require.NoError(t, err) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks blocks, err := makeBlocks(3, state, []types.PrivValidator{privVal}) @@ -957,7 +936,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(context.Background(), proxyApp); err != nil { + abciInfoResp, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + if err = h.Handshake(context.Background(), abciInfoResp, proxyApp); err != nil { t.Log(err) } }) @@ -981,7 +962,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(context.Background(), proxyApp); err != nil { + abciInfoResp, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + if err = h.Handshake(context.Background(), abciInfoResp, proxyApp); err != nil { t.Log(err) } }) @@ -996,21 +979,21 @@ type badApp struct { onlyLastHashIsWrong bool } -func (app *badApp) FinalizeBlock(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (app *badApp) FinalizeBlock(context.Context, *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) { app.height++ if app.onlyLastHashIsWrong { if app.height == app.numBlocks { - return 
&abci.ResponseFinalizeBlock{AppHash: cmtrand.Bytes(8)}, nil + return &abci.FinalizeBlockResponse{AppHash: cmtrand.Bytes(8)}, nil } - return &abci.ResponseFinalizeBlock{AppHash: []byte{app.height}}, nil + return &abci.FinalizeBlockResponse{AppHash: []byte{app.height}}, nil } else if app.allHashesAreWrong { - return &abci.ResponseFinalizeBlock{AppHash: cmtrand.Bytes(8)}, nil + return &abci.FinalizeBlockResponse{AppHash: cmtrand.Bytes(8)}, nil } panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") } -//-------------------------- +// -------------------------- // utils for making blocks func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.ExtendedCommit, error) { @@ -1038,7 +1021,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.ExtendedCommit, er dec := NewWALDecoder(gr) for { msg, err := dec.Decode() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } else if err != nil { return nil, nil, err @@ -1072,7 +1055,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.ExtendedCommit, er } commitHeight := thisBlockExtCommit.Height if commitHeight != height+1 { - panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) extCommits = append(extCommits, thisBlockExtCommit) @@ -1086,7 +1069,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.ExtendedCommit, er return nil, nil, err } case *types.Vote: - if p.Type == cmtproto.PrecommitType { + if p.Type == types.PrecommitType { thisBlockExtCommit = &types.ExtendedCommit{ Height: p.Height, Round: p.Round, @@ -1115,14 +1098,14 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.ExtendedCommit, er } commitHeight := thisBlockExtCommit.Height if commitHeight != height+1 { - panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) extCommits = append(extCommits, thisBlockExtCommit) return blocks, extCommits, nil } -func readPieceFromWAL(msg *TimedWALMessage) interface{} { +func readPieceFromWAL(msg *TimedWALMessage) any { // for logging switch m := msg.Msg.(type) { case msgInfo: @@ -1141,12 +1124,13 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { return nil } -// fresh state and mock store +// fresh state and mock store. func stateAndStore( t *testing.T, config *cfg.Config, appVersion uint64, ) (dbm.DB, sm.State, *mockBlockStore) { + t.Helper() stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, @@ -1160,7 +1144,7 @@ func stateAndStore( return stateDB, state, store } -//---------------------------------- +// ---------------------------------- // mock block store type mockBlockStore struct { @@ -1176,6 +1160,7 @@ var _ sm.BlockStore = &mockBlockStore{} // TODO: NewBlockStore(db.NewMemDB) ... 
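// A minimal sketch of the updated sm.BlockStore contract implemented by the
// mock below: LoadBlock now returns the block together with its meta, so call
// sites that previously paired LoadBlock with LoadBlockMeta make a single
// lookup (variable names are illustrative only):
//
//	block, meta := blockStore.LoadBlock(height)
//	blockID := meta.BlockID // previously fetched via LoadBlockMeta(height)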
func newMockBlockStore(t *testing.T, config *cfg.Config, params types.ConsensusParams) *mockBlockStore { + t.Helper() return &mockBlockStore{ config: config, params: params, @@ -1183,15 +1168,19 @@ func newMockBlockStore(t *testing.T, config *cfg.Config, params types.ConsensusP } } -func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } -func (bs *mockBlockStore) Base() int64 { return bs.base } -func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } -func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } -func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockByHash([]byte) *types.Block { - return bs.chain[int64(len(bs.chain))-1] +func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } +func (bs *mockBlockStore) Base() int64 { return bs.base } +func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } +func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } +func (bs *mockBlockStore) LoadBlock(height int64) (*types.Block, *types.BlockMeta) { + return bs.chain[height-1], bs.LoadBlockMeta(height) +} + +func (bs *mockBlockStore) LoadBlockByHash([]byte) (*types.Block, *types.BlockMeta) { + height := int64(len(bs.chain)) + return bs.chain[height-1], bs.LoadBlockMeta(height) } -func (bs *mockBlockStore) LoadBlockMetaByHash([]byte) *types.BlockMeta { return nil } +func (*mockBlockStore) LoadBlockMetaByHash([]byte) *types.BlockMeta { return nil } func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] bps, err := block.MakePartSet(types.BlockPartSizeBytes) @@ -1201,11 +1190,11 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { Header: block.Header, } } -func (bs *mockBlockStore) LoadBlockPart(int64, int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlockWithExtendedCommit(*types.Block, *types.PartSet, *types.ExtendedCommit) { +func (*mockBlockStore) LoadBlockPart(int64, int) *types.Part { return nil } +func (*mockBlockStore) SaveBlockWithExtendedCommit(*types.Block, *types.PartSet, *types.ExtendedCommit) { } -func (bs *mockBlockStore) SaveBlock(*types.Block, *types.PartSet, *types.Commit) { +func (*mockBlockStore) SaveBlock(*types.Block, *types.PartSet, *types.Commit) { } func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { @@ -1232,20 +1221,20 @@ func (bs *mockBlockStore) PruneBlocks(height int64, _ sm.State) (uint64, int64, return pruned, evidencePoint, nil } -func (bs *mockBlockStore) DeleteLatestBlock() error { return nil } -func (bs *mockBlockStore) Close() error { return nil } +func (*mockBlockStore) DeleteLatestBlock() error { return nil } +func (*mockBlockStore) Close() error { return nil } -//--------------------------------------- +// --------------------------------------- // Test handshake/init chain func TestHandshakeUpdatesValidators(t *testing.T) { val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) app := &mocks.Application{} - app.On("Info", mock.Anything, mock.Anything).Return(&abci.ResponseInfo{ + app.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{ LastBlockHeight: 0, }, nil) - app.On("InitChain", mock.Anything, mock.Anything).Return(&abci.ResponseInitChain{ + app.On("InitChain", mock.Anything, mock.Anything).Return(&abci.InitChainResponse{ Validators: types.TM2PB.ValidatorUpdates(vals), }, 
nil) clientCreator := proxy.NewLocalClientCreator(app) @@ -1271,7 +1260,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { t.Error(err) } }) - if err := handshaker.Handshake(context.Background(), proxyApp); err != nil { + abciInfoResp, err2 := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err2) + if err := handshaker.Handshake(context.Background(), abciInfoResp, proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } var err error diff --git a/consensus/state.go b/internal/consensus/state.go similarity index 76% rename from consensus/state.go rename to internal/consensus/state.go index 72f0ad1a99c..0aee7d073cb 100644 --- a/consensus/state.go +++ b/internal/consensus/state.go @@ -9,23 +9,24 @@ import ( "os" "runtime/debug" "sort" + "strconv" "time" "github.com/cosmos/gogoproto/proto" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cfg "github.com/cometbft/cometbft/config" - cstypes "github.com/cometbft/cometbft/consensus/types" "github.com/cometbft/cometbft/crypto" - cmtevents "github.com/cometbft/cometbft/libs/events" - "github.com/cometbft/cometbft/libs/fail" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" + cmtevents "github.com/cometbft/cometbft/internal/events" + "github.com/cometbft/cometbft/internal/fail" + cmtos "github.com/cometbft/cometbft/internal/os" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/p2p" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/p2p/nodekey" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" cmterrors "github.com/cometbft/cometbft/types/errors" @@ -34,13 +35,14 @@ import ( var msgQueueSize = 1000 -// msgs from the reactor which may update the state +// msgs from the reactor which may update the state. type msgInfo struct { - Msg Message `json:"msg"` - PeerID p2p.ID `json:"peer_key"` + Msg Message `json:"msg"` + PeerID nodekey.ID `json:"peer_key"` + ReceiveTime time.Time `json:"receive_time"` } -// internally generated messages which may update the state +// internally generated messages which may update the state. type timeoutInfo struct { Duration time.Duration `json:"duration"` Height int64 `json:"height"` @@ -52,12 +54,12 @@ func (ti *timeoutInfo) String() string { return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) } -// interface to the mempool +// interface to the mempool. type txNotifier interface { TxsAvailable() <-chan struct{} } -// interface to the evidence pool +// interface to the evidence pool. 
type evidencePool interface { // reports conflicting votes to the evidence pool to be processed into evidence ReportConflictingVotes(voteA, voteB *types.Vote) @@ -101,7 +103,7 @@ type State struct { internalMsgQueue chan msgInfo timeoutTicker TimeoutTicker - // information about about added votes and block parts are written on this channel + // information about added votes and block parts are written on this channel // so statistics can be computed by reactor statsMsgQueue chan msgInfo @@ -121,7 +123,7 @@ type State struct { // some functions can be overwritten for testing decideProposal func(height int64, round int32) doPrevote func(height int64, round int32) - setProposal func(proposal *types.Proposal) error + setProposal func(proposal *types.Proposal, t time.Time) error // closed when we finish shutting down done chan struct{} @@ -135,6 +137,10 @@ type State struct { // offline state sync height indicating to which height the node synced offline offlineStateSyncHeight int64 + + // a buffer to store the concatenated proposal block parts (serialization format) + // should only be accessed under the cs.mtx lock + serializedBlockBuffer []byte } // StateOption sets an optional parameter on the State. @@ -221,7 +227,7 @@ func OfflineStateSyncHeight(height int64) StateOption { } // String returns a string. -func (cs *State) String() string { +func (*State) String() string { // better not to access shared variables return "ConsensusState" } @@ -242,11 +248,18 @@ func (cs *State) GetLastHeight() int64 { } // GetRoundState returns a shallow copy of the internal consensus state. -func (cs *State) GetRoundState() *cstypes.RoundState { +// This function is thread-safe. +func (cs *State) GetRoundState() cstypes.RoundState { cs.mtx.RLock() - rs := cs.RoundState // copy + rs := cs.getRoundState() cs.mtx.RUnlock() - return &rs + return rs +} + +// getRoundState returns a shallow copy of the internal consensus state. +// This function is not thread-safe. Use GetRoundState for the thread-safe version. +func (cs *State) getRoundState() cstypes.RoundState { + return cs.RoundState // copy } // GetRoundStateJSON returns a json of RoundState. @@ -256,7 +269,7 @@ func (cs *State) GetRoundStateJSON() ([]byte, error) { return cmtjson.Marshal(cs.RoundState) } -// GetRoundStateSimpleJSON returns a json of RoundStateSimple +// GetRoundStateSimpleJSON returns a json of RoundStateSimple. 
func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { cs.mtx.RLock() defer cs.mtx.RUnlock() @@ -279,7 +292,7 @@ func (cs *State) SetPrivValidator(priv types.PrivValidator) { cs.privValidator = priv if err := cs.updatePrivValidatorPubKey(); err != nil { - cs.Logger.Error("failed to get private validator pubkey", "err", err) + cs.Logger.Error("Failed to get private validator pubkey", "err", err) } } @@ -336,14 +349,14 @@ func (cs *State) OnStart() error { break LOOP case !IsDataCorruptionError(err): - cs.Logger.Error("error on catchup replay; proceeding to start state anyway", "err", err) + cs.Logger.Error("Error on catchup replay; proceeding to start state anyway", "err", err) break LOOP case repairAttempted: return err } - cs.Logger.Error("the WAL file is corrupted; attempting repair", "err", err) + cs.Logger.Error("WAL file is corrupted; attempting repair", "err", err) // 1) prep work if err := cs.wal.Stop(); err != nil { @@ -353,20 +366,20 @@ func (cs *State) OnStart() error { repairAttempted = true // 2) backup original WAL file - corruptedFile := fmt.Sprintf("%s.CORRUPTED", cs.config.WalFile()) + corruptedFile := cs.config.WalFile() + ".CORRUPTED" if err := cmtos.CopyFile(cs.config.WalFile(), corruptedFile); err != nil { return err } - cs.Logger.Debug("backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile) + cs.Logger.Debug("Backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile) // 3) try to repair (WAL file will be overwritten!) if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil { - cs.Logger.Error("the WAL repair failed", "err", err) + cs.Logger.Error("WAL repair failed", "err", err) return err } - cs.Logger.Info("successful WAL repair") + cs.Logger.Info("Successfully repaired WAL file") // reload WAL file if err := cs.loadWalFile(); err != nil { @@ -389,17 +402,20 @@ func (cs *State) OnStart() error { // schedule the first round! // use GetRoundState so we don't race the receiveRoutine for access - cs.scheduleRound0(cs.GetRoundState()) + rs := cs.GetRoundState() + cs.scheduleRound0(&rs) return nil } // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan -// receiveRoutine: serializes processing of proposoals, block parts, votes; coordinates state transitions +// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions. +// + func (cs *State) startRoutines(maxSteps int) { err := cs.timeoutTicker.Start() if err != nil { - cs.Logger.Error("failed to start timeout ticker", "err", err) + cs.Logger.Error("Failed to start timeout ticker", "err", err) return } @@ -410,7 +426,7 @@ func (cs *State) startRoutines(maxSteps int) { func (cs *State) loadWalFile() error { wal, err := cs.OpenWAL(cs.config.WalFile()) if err != nil { - cs.Logger.Error("failed to load state WAL", "err", err) + cs.Logger.Error("Failed to load state WAL", "err", err) return err } @@ -421,18 +437,18 @@ func (cs *State) loadWalFile() error { // OnStop implements service.Service. func (cs *State) OnStop() { if err := cs.evsw.Stop(); err != nil { - cs.Logger.Error("failed trying to stop eventSwitch", "error", err) + cs.Logger.Error("Failed trying to stop eventSwitch", "error", err) } if err := cs.timeoutTicker.Stop(); err != nil { - cs.Logger.Error("failed trying to stop timeoutTicket", "error", err) + cs.Logger.Error("Failed trying to stop timeoutTicker", "error", err) } // WAL is stopped in receiveRoutine. } -// Wait waits for the the main routine to return. 
+// Wait waits for the main routine to return. // NOTE: be sure to Stop() the event switch and drain -// any event channels or this may deadlock +// any event channels or this may deadlock. func (cs *State) Wait() { <-cs.done } @@ -442,21 +458,21 @@ func (cs *State) Wait() { func (cs *State) OpenWAL(walFile string) (WAL, error) { wal, err := NewWAL(walFile) if err != nil { - cs.Logger.Error("failed to open WAL", "file", walFile, "err", err) + cs.Logger.Error("Failed to open WAL", "file", walFile, "err", err) return nil, err } wal.SetLogger(cs.Logger.With("wal", walFile)) if err := wal.Start(); err != nil { - cs.Logger.Error("failed to start WAL", "err", err) + cs.Logger.Error("Failed to start WAL", "err", err) return nil, err } return wal, nil } -//------------------------------------------------------------ +// ------------------------------------------------------------ // Public interface for passing messages into the consensus state, possibly causing a state transition. // If peerID == "", the msg is considered internal. // Messages are added to the appropriate queue (peer or internal). @@ -464,11 +480,11 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) { // TODO: should these return anything or let callers just use events? // AddVote inputs a vote. -func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { +func (cs *State) AddVote(vote *types.Vote, peerID nodekey.ID) (added bool, err error) { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""} + cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, "", time.Time{}} } else { - cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID} + cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID, time.Time{}} } // TODO: wait for event?! @@ -476,11 +492,11 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error } // SetProposal inputs a proposal. -func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { +func (cs *State) SetProposal(proposal *types.Proposal, peerID nodekey.ID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} + cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, "", cmttime.Now()} } else { - cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID} + cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID, cmttime.Now()} } // TODO: wait for event?! @@ -488,11 +504,11 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { } // AddProposalBlockPart inputs a part of the proposal block. -func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { +func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID nodekey.ID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} + cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, "", time.Time{}} } else { - cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID} + cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID, time.Time{}} } // TODO: wait for event?! @@ -502,9 +518,8 @@ func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Par // SetProposalAndBlock inputs the proposal and all block parts. 
func (cs *State) SetProposalAndBlock( proposal *types.Proposal, - block *types.Block, //nolint:revive parts *types.PartSet, - peerID p2p.ID, + peerID nodekey.ID, ) error { // TODO: Since the block parameter is not used, we should instead expose just a SetProposal method. if err := cs.SetProposal(proposal, peerID); err != nil { @@ -521,7 +536,7 @@ func (cs *State) SetProposalAndBlock( return nil } -//------------------------------------------------------------ +// ------------------------------------------------------------ // internal functions for managing the state func (cs *State) updateHeight(height int64) { @@ -549,12 +564,12 @@ func (cs *State) scheduleRound0(rs *cstypes.RoundState) { cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } -// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan) +// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan). func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int32, step cstypes.RoundStepType) { cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step}) } -// send a msg into the receiveRoutine regarding our own proposal, block part, or vote +// send a msg into the receiveRoutine regarding our own proposal, block part, or vote. func (cs *State) sendInternalMessage(mi msgInfo) { select { case cs.internalMsgQueue <- mi: @@ -585,7 +600,7 @@ func (cs *State) reconstructSeenCommit(state sm.State) { // the method will panic on an absent ExtendedCommit or an ExtendedCommit without // extension data. func (cs *State) reconstructLastCommit(state sm.State) { - extensionsEnabled := state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) + extensionsEnabled := state.ConsensusParams.Feature.VoteExtensionsEnabled(state.LastBlockHeight) if !extensionsEnabled { cs.reconstructSeenCommit(state) return @@ -709,19 +724,25 @@ func (cs *State) updateToState(state sm.State) { cs.updateHeight(height) cs.updateRoundStep(0, cstypes.RoundStepNewHeight) + timeoutCommit := state.NextBlockDelay + // If the ABCI app didn't set a delay, use the deprecated config value. + if timeoutCommit == 0 { + timeoutCommit = cs.config.TimeoutCommit //nolint:staticcheck + } if cs.CommitTime.IsZero() { // "Now" makes it easier to sync up dev nodes. - // We add timeoutCommit to allow transactions - // to be gathered for the first block. - // And alternative solution that relies on clocks: - // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) - cs.StartTime = cs.config.Commit(cmttime.Now()) + // + // We add timeoutCommit to allow transactions to be gathered for + // the first block. 
An alternative solution that relies on clocks: + // `cs.StartTime = state.LastBlockTime.Add(timeoutCommit)` + cs.StartTime = cmttime.Now().Add(timeoutCommit) } else { - cs.StartTime = cs.config.Commit(cs.CommitTime) + cs.StartTime = cs.CommitTime.Add(timeoutCommit) } cs.Validators = validators cs.Proposal = nil + cs.ProposalReceiveTime = time.Time{} cs.ProposalBlock = nil cs.ProposalBlockParts = nil cs.LockedRound = -1 @@ -730,7 +751,7 @@ func (cs *State) updateToState(state sm.State) { cs.ValidRound = -1 cs.ValidBlock = nil cs.ValidBlockParts = nil - if state.ConsensusParams.ABCI.VoteExtensionsEnabled(height) { + if state.ConsensusParams.Feature.VoteExtensionsEnabled(height) { cs.Votes = cstypes.NewExtendedHeightVoteSet(state.ChainID, height, validators) } else { cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) @@ -748,7 +769,7 @@ func (cs *State) updateToState(state sm.State) { func (cs *State) newStep() { rs := cs.RoundStateEvent() if err := cs.wal.Write(rs); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.Logger.Error("Failed writing to WAL", "err", err) } cs.nSteps++ @@ -756,14 +777,14 @@ func (cs *State) newStep() { // newStep is called by updateToState in NewState before the eventBus is set! if cs.eventBus != nil { if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { - cs.Logger.Error("failed publishing new round step", "err", err) + cs.Logger.Error("Failed publishing new round step", "err", err) } - cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState) + cs.evsw.FireEvent(types.EventNewRoundStep, cs.RoundState) } } -//----------------------------------------- +// ----------------------------------------- // the main go routines // receiveRoutine handles messages which may cause state transitions. @@ -779,7 +800,7 @@ func (cs *State) receiveRoutine(maxSteps int) { // close wal now that we're done writing to it if err := cs.wal.Stop(); err != nil { - cs.Logger.Error("failed trying to stop WAL", "error", err) + cs.Logger.Error("Failed trying to stop WAL", "error", err) } cs.wal.Wait() @@ -804,7 +825,7 @@ func (cs *State) receiveRoutine(maxSteps int) { for { if maxSteps > 0 { if cs.nSteps >= maxSteps { - cs.Logger.Debug("reached max steps; exiting receive routine") + cs.Logger.Debug("Reached max steps; exiting receive routine") cs.nSteps = 0 return } @@ -819,7 +840,7 @@ func (cs *State) receiveRoutine(maxSteps int) { case mi = <-cs.peerMsgQueue: if err := cs.wal.Write(mi); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.Logger.Error("Failed writing to WAL", "err", err) } // handles proposals, block parts, votes // may generate internal events (votes, complete proposals, 2/3 majorities) @@ -861,7 +882,7 @@ func (cs *State) receiveRoutine(maxSteps int) { } } -// state transitions on complete-proposal, 2/3-any, 2/3-one +// state transitions on complete-proposal, 2/3-any, 2/3-one. func (cs *State) handleMsg(mi msgInfo) { cs.mtx.Lock() defer cs.mtx.Unlock() @@ -876,13 +897,13 @@ func (cs *State) handleMsg(mi msgInfo) { case *ProposalMessage: // will not cause transition. // once proposal is set, we can receive block parts - err = cs.setProposal(msg.Proposal) + err = cs.setProposal(msg.Proposal, mi.ReceiveTime) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit added, err = cs.addProposalBlockPart(msg, peerID) - // We unlock here to yield to any routines that need to read the the RoundState. 
+ // We unlock here to yield to any routines that need to read the RoundState. // Previously, this code held the lock from the point at which the final block // part was received until the block executed against the application. // This prevented the reactor from being able to retrieve the most updated @@ -905,7 +926,7 @@ func (cs *State) handleMsg(mi msgInfo) { if err != nil && msg.Round != cs.Round { cs.Logger.Debug( - "received block part from wrong round", + "Received block part from wrong round", "height", cs.Height, "cs_round", cs.Round, "block_round", msg.Round, @@ -937,13 +958,13 @@ func (cs *State) handleMsg(mi msgInfo) { // We could make note of this and help filter in broadcastHasVoteMessage(). default: - cs.Logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) + cs.Logger.Error("Unknown msg type", "type", fmt.Sprintf("%T", msg)) return } if err != nil { cs.Logger.Error( - "failed to process message", + "Failed to process message", "height", cs.Height, "round", cs.Round, "peer", peerID, @@ -954,11 +975,11 @@ func (cs *State) handleMsg(mi msgInfo) { } func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { - cs.Logger.Debug("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // timeouts must be for current height, round, step if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) { - cs.Logger.Debug("ignoring tock because we are ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step) + cs.Logger.Debug("Ignoring tock because we are ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step) return } @@ -984,14 +1005,14 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { case cstypes.RoundStepPrevoteWait: if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout wait", "err", err) + cs.Logger.Error("Failed publishing timeout wait", "err", err) } cs.enterPrecommit(ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout wait", "err", err) + cs.Logger.Error("Failed publishing timeout wait", "err", err) } cs.enterPrecommit(ti.Height, ti.Round) @@ -1027,13 +1048,13 @@ func (cs *State) handleTxsAvailable() { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // State functions // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), // -// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// or, if NextBlockDelay==0, after receiving all precommits from (height,round-1) // // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) @@ -1044,14 +1065,14 @@ func (cs *State) enterNewRound(height int64, round int32) { if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { logger.Debug( - "entering new round with invalid args", + "Entering new round with invalid args", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), ) return } if now := cmttime.Now(); 
cs.StartTime.After(now) { - logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) + logger.Debug("Need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } prevHeight, prevRound, prevStep := cs.Height, cs.Round, cs.Step @@ -1072,13 +1093,14 @@ func (cs *State) enterNewRound(height int64, round int32) { // we might have received a proposal for round 0. propAddress := validators.GetProposer().PubKey.Address() if round != 0 { - logger.Info("resetting proposal info", "proposer", propAddress) + logger.Info("Resetting proposal info", "proposer", propAddress) cs.Proposal = nil + cs.ProposalReceiveTime = time.Time{} cs.ProposalBlock = nil cs.ProposalBlockParts = nil } - logger.Debug("entering new round", + logger.Debug("Entering new round", "previous", log.NewLazySprintf("%v/%v/%v", prevHeight, prevRound, prevStep), "proposer", propAddress, ) @@ -1087,7 +1109,7 @@ func (cs *State) enterNewRound(height int64, round int32) { cs.TriggeredTimeoutPrecommit = false if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil { - cs.Logger.Error("failed publishing new round", "err", err) + cs.Logger.Error("Failed publishing new round", "err", err) } // Wait for txs to be available in the mempool // before we enterPropose in round 0. If the last block changed the app hash, @@ -1098,13 +1120,14 @@ func (cs *State) enterNewRound(height int64, round int32) { cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound) } - } else { - cs.enterPropose(height, round) + return } + + cs.enterPropose(height, round) } // needProofBlock returns true on the first height (so the genesis app hash is signed right away) -// and where the last block (height-1) caused the app hash to change +// and where the last block (height-1) caused the app hash to change. func (cs *State) needProofBlock(height int64) bool { if height == cs.state.InitialHeight { return true @@ -1113,7 +1136,7 @@ func (cs *State) needProofBlock(height int64) bool { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) if lastBlockMeta == nil { // See https://github.com/cometbft/cometbft/issues/370 - cs.Logger.Info("short-circuited needProofBlock", "height", height, "InitialHeight", cs.state.InitialHeight) + cs.Logger.Info("Short-circuited needProofBlock", "height", height, "InitialHeight", cs.state.InitialHeight) return true } @@ -1125,7 +1148,7 @@ func (cs *State) needProofBlock(height int64) bool { // // after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // -// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool +// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool. func (cs *State) enterPropose(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) @@ -1137,7 +1160,17 @@ func (cs *State) enterPropose(height int64, round int32) { return } - logger.Debug("entering propose step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + // If this validator is the proposer of this round, and the previous block time is later than + // our local clock time, wait to propose until our local clock time has passed the block time. 
+ if cs.isPBTSEnabled(height) && cs.privValidatorPubKey != nil && cs.isProposer(cs.privValidatorPubKey.Address()) { + proposerWaitTime := proposerWaitTime(cmttime.DefaultSource{}, cs.state.LastBlockTime) + if proposerWaitTime > 0 { + cs.scheduleTimeout(proposerWaitTime, height, round, cstypes.RoundStepNewRound) + return + } + } + + logger.Debug("Entering propose step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPropose: @@ -1157,32 +1190,32 @@ func (cs *State) enterPropose(height int64, round int32) { // Nothing more to do if we're not a validator if cs.privValidator == nil { - logger.Debug("node is not a validator") + logger.Debug("Propose step; not proposing since node is not a validator") return } - logger.Debug("node is a validator") - if cs.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - logger.Error("propose step; empty priv validator public key", "err", ErrPubKeyIsNotSet) + logger.Error("Propose step; empty priv validator public key", "err", ErrPubKeyIsNotSet) return } - address := cs.privValidatorPubKey.Address() + addr := cs.privValidatorPubKey.Address() // if not a validator, we're done - if !cs.Validators.HasAddress(address) { - logger.Debug("node is not a validator", "addr", address, "vals", cs.Validators) + if !cs.Validators.HasAddress(addr) { + logger.Debug("Propose step; not proposing since node is not in the validator set", + "addr", addr, + "vals", cs.Validators) return } - if cs.isProposer(address) { - logger.Debug("propose step; our turn to propose", "proposer", address) + if cs.isProposer(addr) { + logger.Debug("Propose step; our turn to propose", "proposer", addr) cs.decideProposal(height, round) } else { - logger.Debug("propose step; not our turn to propose", "proposer", cs.Validators.GetProposer().Address) + logger.Debug("Propose step; not our turn to propose", "proposer", cs.Validators.GetProposer().Address) } } @@ -1203,7 +1236,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { var err error block, err = cs.createProposalBlock(context.TODO()) if err != nil { - cs.Logger.Error("unable to create proposal block", "error", err) + cs.Logger.Error("Unable to create proposal block", "error", err) return } else if block == nil { panic("Method createProposalBlock should not provide a nil block without errors") @@ -1224,22 +1257,22 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { // Make proposal propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) + proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID, block.Header.Time) p := proposal.ToProto() if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue - cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, "", cmttime.Now()}) for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) + cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, "", time.Time{}}) } - cs.Logger.Debug("signed proposal", "height", height, "round", round, "proposal", proposal) + cs.Logger.Debug("Signed proposal", "height", height, 
"round", round, "proposal", proposal) } else if !cs.replayMode { - cs.Logger.Error("propose step; failed signing proposal", "height", height, "round", round, "err", err) + cs.Logger.Error("Propose step; failed signing proposal", "height", height, "round", round, "err", err) } } @@ -1280,7 +1313,7 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit - lastExtCommit = cs.LastCommit.MakeExtendedCommit(cs.state.ConsensusParams.ABCI) + lastExtCommit = cs.LastCommit.MakeExtendedCommit(cs.state.ConsensusParams.Feature) default: // This shouldn't happen. return nil, ErrProposalWithoutPreviousCommit @@ -1301,19 +1334,20 @@ func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) return ret, nil } -// Enter: `timeoutPropose` after entering Propose. -// Enter: proposal block and POL is ready. -// If we received a valid proposal within this round and we are not locked on a block, -// we will prevote for block. -// Otherwise, if we receive a valid proposal that matches the block we are -// locked on or matches a block that received a POL in a round later than our -// locked round, prevote for the proposal, otherwise vote nil. +// Enter: isProposalComplete() and Step <= RoundStepPropose. +// Enter: `timeout_propose` (timeout of RoundStepPropose type) expires. +// +// If we received a valid proposal and the associated proposed block within +// this round and: (i) we are not locked on a block, or we are locked on the +// proposed block, or (ii) the proposed block received a POL in a round greater +// or equal than our locked round, we will prevote for the poroposed block ID. +// Otherwise, we prevote nil. func (cs *State) enterPrevote(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { logger.Debug( - "entering prevote step with invalid args", + "Entering prevote step with invalid args", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), ) return @@ -1325,7 +1359,7 @@ func (cs *State) enterPrevote(height int64, round int32) { cs.newStep() }() - logger.Debug("entering prevote step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("Entering prevote step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) // Sign and broadcast vote as necessary cs.doPrevote(height, round) @@ -1334,23 +1368,36 @@ func (cs *State) enterPrevote(height int64, round int32) { // (so we have more time to try and collect +2/3 prevotes for a single block) } +func (cs *State) timelyProposalMargins() (time.Duration, time.Duration) { + sp := cs.state.ConsensusParams.Synchrony.InRound(cs.Round) + + // cs.ProposalReceiveTime - cs.Proposal.Timestamp >= -1 * Precision + // cs.ProposalReceiveTime - cs.Proposal.Timestamp <= MessageDelay + Precision + return -sp.Precision, sp.MessageDelay + sp.Precision +} + +func (cs *State) proposalIsTimely() bool { + sp := cs.state.ConsensusParams.Synchrony.InRound(cs.Proposal.Round) + + return cs.Proposal.IsTimely(cs.ProposalReceiveTime, sp) +} + +// Implements doPrevote. Called by enterPrevote(height, round) provided that +// round == cs.Round, height == cs.Height, and cs.Step <= // RoundStepPropose. 
func (cs *State) defaultDoPrevote(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) - // We did not receive a proposal within this round. (and thus executing this from a timeout) - if cs.ProposalBlock == nil { - logger.Debug("prevote step: ProposalBlock is nil; prevoting nil") - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) + // We did not receive a valid proposal for this round (and thus executing this from a timeout). + if cs.Proposal == nil { + logger.Debug("Prevote step: did not receive a valid Proposal; prevoting nil") + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) return } - // Validate proposal block, from consensus' perspective - err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) - if err != nil { - // ProposalBlock is invalid, prevote nil. - logger.Error("prevote step: consensus deems this block invalid; prevoting nil", - "err", err) - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) + // We did not (fully) receive the proposed block (and thus executing this from a timeout). + if cs.ProposalBlock == nil { + logger.Debug("Prevote step: did not receive the ProposalBlock; prevoting nil") + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) return } @@ -1383,8 +1430,46 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { if cs.Proposal.POLRound == -1 { if cs.LockedRound == -1 { if cs.ValidRound != -1 && cs.ProposalBlock.HashesTo(cs.ValidBlock.Hash()) { - logger.Debug("prevote step: ProposalBlock matches our valid block; prevoting the proposal") - cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + logger.Debug("Prevote step: ProposalBlock matches our valid block; prevoting the proposal") + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + return + } + + // Timestamp validation using Proposed-Based TimeStamp (PBTS) algorithm. + // See: https://github.com/cometbft/cometbft/blob/main/spec/consensus/proposer-based-timestamp/ + if cs.isPBTSEnabled(height) { + if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) { + logger.Debug("Prevote step: proposal timestamp not equal; prevoting nil") + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) + return + } + + if !cs.proposalIsTimely() { + lowerBound, upperBound := cs.timelyProposalMargins() + // TODO: use Warn level once available. + logger.Info("Prevote step: Proposal is not timely; prevoting nil", + "timestamp", cs.Proposal.Timestamp.Format(time.RFC3339Nano), + "receive_time", cs.ProposalReceiveTime.Format(time.RFC3339Nano), + "timestamp_difference", cs.ProposalReceiveTime.Sub(cs.Proposal.Timestamp), + "lower_bound", lowerBound, + "upper_bound", upperBound) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) + return + } + + logger.Debug("Prevote step: Proposal is timely", + "timestamp", cs.Proposal.Timestamp.Format(time.RFC3339Nano), + "receive_time", cs.ProposalReceiveTime.Format(time.RFC3339Nano), + "timestamp_difference", cs.ProposalReceiveTime.Sub(cs.Proposal.Timestamp)) + } + + // Validate proposal block, from consensus' perspective + err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) + if err != nil { + // ProposalBlock is invalid, prevote nil. 
+ logger.Error("prevote step: consensus deems this block invalid; prevoting nil", + "err", err) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) return } @@ -1405,25 +1490,25 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { cs.metrics.MarkProposalProcessed(isAppValid) if !isAppValid { - logger.Error("prevote step: state machine rejected a proposed block; this should not happen:"+ + logger.Error("Prevote step: state machine rejected a proposed block; this should not happen:"+ "the proposer may be misbehaving; prevoting nil", "err", err) - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) return } - logger.Debug("prevote step: ProposalBlock is valid and there is no locked block; prevoting the proposal") - cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + logger.Debug("Prevote step: ProposalBlock is valid and there is no locked block; prevoting the proposal") + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) return } if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) { - logger.Debug("prevote step: ProposalBlock is valid (POLRound is -1) and matches our locked block; prevoting the proposal") - cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + logger.Debug("Prevote step: ProposalBlock is valid (POLRound is -1) and matches our locked block; prevoting the proposal") + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) return } - logger.Debug("prevote step: ProposalBlock is valid (POLRound is -1), but doesn't match our locked block; prevoting nil") - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) + logger.Debug("Prevote step: ProposalBlock is valid (POLRound is -1), but doesn't match our locked block; prevoting nil") + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) return } @@ -1455,23 +1540,34 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { */ blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority() ok = ok && !blockID.IsNil() - if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round { - if cs.LockedRound <= cs.Proposal.POLRound { - logger.Debug("prevote step: ProposalBlock is valid and received a 2/3" + + if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound < cs.Round { + if cs.LockedRound < cs.Proposal.POLRound { + logger.Debug("Prevote step: ProposalBlock is valid and received a 2/3" + "majority in a round later than the locked round; prevoting the proposal") - cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) return } if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) { - logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") - cs.signAddVote(cmtproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + logger.Debug("Prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) + return + } + // If v_r = lockedRound_p we expect v to match lockedValue_p. 
If it is not the case, + // we have two 2/3+ majorities for different values at round v_r, meaning that the + // assumption of a 2/3+ majority of honest processes was violated. We should at + // least log this scenario, see: https://github.com/cometbft/cometbft/issues/1309. + if cs.LockedRound == cs.Proposal.POLRound { + logger.Info("Prevote step: ProposalBlock is valid and received a 2/3 " + + "majority at our locked round, while not matching our locked value; " + + "this can only happen when 1/3 or more validators are double signing; prevoting the proposal") + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header(), nil) return } } - logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or" + + logger.Debug("Prevote step: ProposalBlock is valid but was not our locked block or " + "did not receive a more recent majority; prevoting nil") - cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}, nil) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}, nil) } // Enter: any +2/3 prevotes at next round. @@ -1480,7 +1576,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { logger.Debug( - "entering prevote wait step with invalid args", + "Entering prevote wait step with invalid args", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), ) return @@ -1488,12 +1584,12 @@ if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { panic(fmt.Sprintf( - "entering prevote wait step (%v/%v), but prevotes does not have any +2/3 votes", + "Entering prevote wait step (%v/%v), but prevotes does not have any +2/3 votes", height, round, )) } - logger.Debug("entering prevote wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("Entering prevote wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrevoteWait: @@ -1515,13 +1611,13 @@ func (cs *State) enterPrecommit(height int64, round int32) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { logger.Debug( - "entering precommit step with invalid args", + "Entering precommit step with invalid args", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), ) return } - logger.Debug("entering precommit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("Entering precommit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommit: @@ -1535,18 +1631,18 @@ func (cs *State) enterPrecommit(height int64, round int32) { // If we don't have a polka, we must precommit nil.
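The POLRound branches above are easier to follow as one decision table. A condensed sketch of when a valid proposal carrying a POL from an earlier round gets our prevote; it is illustrative only, compressing the checks above and omitting the validity, timeliness, and +2/3-at-POLRound preconditions:

package main

import "fmt"

// prevoteProposal condenses the branches above: prevote the proposal when the
// POL is newer than our lock, when the proposal matches our locked block, or
// when a conflicting POL appears at exactly our locked round (the >1/3
// double-signing case that is logged above); otherwise prevote nil.
func prevoteProposal(lockedRound, polRound int32, matchesLocked bool) bool {
	switch {
	case lockedRound < polRound:
		return true
	case matchesLocked:
		return true
	case lockedRound == polRound:
		return true // conflicting 2/3+ majorities at one round
	default:
		return false
	}
}

func main() {
	fmt.Println(prevoteProposal(-1, 0, false)) // true: we were never locked
	fmt.Println(prevoteProposal(2, 1, false))  // false: our lock is more recent; prevote nil
	fmt.Println(prevoteProposal(2, 2, true))   // true: POL for the block we locked on
}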
if !ok { if cs.LockedBlock != nil { - logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit while we are locked; precommitting nil") + logger.Debug("Precommit step; no +2/3 prevotes during enterPrecommit while we are locked; precommitting nil") } else { - logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil") + logger.Debug("Precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil") } - cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}, nil) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}, nil) return } // At this point +2/3 prevoted for a particular block or nil. if err := cs.eventBus.PublishEventPolka(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing polka", "err", err) + logger.Error("Failed publishing polka", "err", err) } // the latest POLRound should be this round. @@ -1557,23 +1653,22 @@ func (cs *State) enterPrecommit(height int64, round int32) { // +2/3 prevoted nil. Precommit nil. if blockID.IsNil() { - logger.Debug("precommit step; +2/3 prevoted for nil") - cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}, nil) + logger.Debug("Precommit step; +2/3 prevoted for nil; precommitting nil") + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}, nil) return } - // At this point, +2/3 prevoted for a particular block. // If we're already locked on that block, precommit it, and update the LockedRound if cs.LockedBlock.HashesTo(blockID.Hash) { - logger.Debug("precommit step; +2/3 prevoted locked block; relocking") + logger.Debug("Precommit step; +2/3 prevoted locked block; relocking") cs.LockedRound = round if err := cs.eventBus.PublishEventRelock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event relock", "err", err) + logger.Error("Precommit step; failed publishing event relock", "err", err) } - cs.signAddVote(cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, cs.LockedBlock) + cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartSetHeader, cs.LockedBlock) return } @@ -1581,11 +1676,11 @@ func (cs *State) enterPrecommit(height int64, round int32) { // the proposed block, update our locked block to this block and issue a // precommit vote for it. if cs.ProposalBlock.HashesTo(blockID.Hash) { - logger.Debug("precommit step; +2/3 prevoted proposal block; locking", "hash", blockID.Hash) + logger.Debug("Precommit step; +2/3 prevoted proposal block; locking", "hash", blockID.Hash) // Validate the block. if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - panic(fmt.Sprintf("precommit step; +2/3 prevoted for an invalid block: %v", err)) + panic(fmt.Sprintf("Precommit step; +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round @@ -1593,23 +1688,23 @@ func (cs *State) enterPrecommit(height int64, round int32) { cs.LockedBlockParts = cs.ProposalBlockParts if err := cs.eventBus.PublishEventLock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event lock", "err", err) + logger.Error("Precommit step; failed publishing event lock", "err", err) } - cs.signAddVote(cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, cs.ProposalBlock) + cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartSetHeader, cs.ProposalBlock) return } // There was a polka in this round for a block we don't have. // Fetch that block, and precommit nil.
- logger.Debug("precommit step; +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID) + logger.Debug("Precommit step; +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID) if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { cs.ProposalBlock = nil cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } - cs.signAddVote(cmtproto.PrecommitType, nil, types.PartSetHeader{}, nil) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}, nil) } // Enter: any +2/3 precommits for next round. @@ -1618,7 +1713,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { logger.Debug( - "entering precommit wait step with invalid args", + "Entering precommit wait step with invalid args", "triggered_timeout", cs.TriggeredTimeoutPrecommit, "current", log.NewLazySprintf("%v/%v", cs.Height, cs.Round), ) @@ -1632,7 +1727,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { )) } - logger.Debug("entering precommit wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("Entering precommit wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommitWait: @@ -1644,19 +1739,19 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait) } -// Enter: +2/3 precommits for block +// Enter: +2/3 precommits for block. func (cs *State) enterCommit(height int64, commitRound int32) { logger := cs.Logger.With("height", height, "commit_round", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { logger.Debug( - "entering commit step with invalid args", + "Entering commit step with invalid args", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), ) return } - logger.Debug("entering commit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + logger.Debug("Entering commit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { // Done enterCommit: @@ -1679,7 +1774,7 @@ func (cs *State) enterCommit(height int64, commitRound int32) { // Move them over to ProposalBlock if they match the commit hash, // otherwise they'll be cleared in updateToState. 
if cs.LockedBlock.HashesTo(blockID.Hash) { - logger.Debug("commit is for a locked block; set ProposalBlock=LockedBlock", "block_hash", blockID.Hash) + logger.Debug("Commit is for a locked block; set ProposalBlock=LockedBlock", "block_hash", blockID.Hash) cs.ProposalBlock = cs.LockedBlock cs.ProposalBlockParts = cs.LockedBlockParts } @@ -1688,8 +1783,8 @@ func (cs *State) enterCommit(height int64, commitRound int32) { if !cs.ProposalBlock.HashesTo(blockID.Hash) { if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { logger.Info( - "commit is for a block we do not know about; set ProposalBlock=nil", - "proposal", log.NewLazyBlockHash(cs.ProposalBlock), + "Commit is for a block we do not know about; set ProposalBlock=nil", + "proposal", log.NewLazyHash(cs.ProposalBlock), "commit", blockID.Hash, ) @@ -1699,10 +1794,10 @@ func (cs *State) enterCommit(height int64, commitRound int32) { cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing valid block", "err", err) + logger.Error("Failed publishing valid block", "err", err) } - cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + cs.evsw.FireEvent(types.EventValidBlock, cs.RoundState) } } } @@ -1717,7 +1812,7 @@ func (cs *State) tryFinalizeCommit(height int64) { blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() if !ok || blockID.IsNil() { - logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil") + logger.Error("Failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil") return } @@ -1725,8 +1820,8 @@ func (cs *State) tryFinalizeCommit(height int64) { // TODO: this happens every time if we're not a validator (ugly logs) // TODO: ^^ wait, why does it matter that we're a validator? logger.Debug( - "failed attempt to finalize commit; we do not have the commit block", - "proposal_block", log.NewLazyBlockHash(cs.ProposalBlock), + "Failed attempt to finalize commit; we do not have the commit block", + "proposal_block", log.NewLazyHash(cs.ProposalBlock), "commit_block", blockID.Hash, ) return @@ -1735,13 +1830,13 @@ func (cs *State) tryFinalizeCommit(height int64) { cs.finalizeCommit(height) } -// Increment height and goto cstypes.RoundStepNewHeight +// Increment height and goto cstypes.RoundStepNewHeight. 
func (cs *State) finalizeCommit(height int64) { logger := cs.Logger.With("height", height) if cs.Height != height || cs.Step != cstypes.RoundStepCommit { logger.Debug( - "entering finalize commit step", + "Entering finalize commit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step), ) return @@ -1767,12 +1862,12 @@ func (cs *State) finalizeCommit(height int64) { } logger.Info( - "finalizing commit of block", - "hash", log.NewLazyBlockHash(block), + "Finalizing commit of block", + "hash", log.NewLazyHash(block), "root", block.AppHash, "num_txs", len(block.Txs), ) - logger.Debug("committed block", "block", log.NewLazySprintf("%v", block)) + logger.Debug("Committed block", "block", log.NewLazySprintf("%v", block)) fail.Fail() // XXX @@ -1780,15 +1875,15 @@ func (cs *State) finalizeCommit(height int64) { if cs.blockStore.Height() < block.Height { // NOTE: the seenCommit is local justification to commit this block, // but may differ from the LastCommit included in the next block - seenExtendedCommit := cs.Votes.Precommits(cs.CommitRound).MakeExtendedCommit(cs.state.ConsensusParams.ABCI) - if cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(block.Height) { + seenExtendedCommit := cs.Votes.Precommits(cs.CommitRound).MakeExtendedCommit(cs.state.ConsensusParams.Feature) + if cs.state.ConsensusParams.Feature.VoteExtensionsEnabled(block.Height) { cs.blockStore.SaveBlockWithExtendedCommit(block, blockParts, seenExtendedCommit) } else { cs.blockStore.SaveBlock(block, blockParts, seenExtendedCommit.ToCommit()) } } else { // Happens during replay if we already saved the block but didn't commit - logger.Debug("calling finalizeCommit on already stored block", "height", block.Height) + logger.Debug("Calling finalizeCommit on already stored block", "height", block.Height) } fail.Fail() // XXX @@ -1820,14 +1915,16 @@ func (cs *State) finalizeCommit(height int64) { stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. - // NOTE The block.AppHash wont reflect these txs until the next block. - stateCopy, err := cs.blockExec.ApplyBlock( + // We use apply verified block here because we have verified the block in this function already. + // NOTE The block.AppHash won't reflect these txs until the next block. + stateCopy, err := cs.blockExec.ApplyVerifiedBlock( stateCopy, types.BlockID{ Hash: block.Hash(), PartSetHeader: blockParts.Header(), }, block, + block.Height, ) if err != nil { panic(fmt.Sprintf("failed to apply block; error %v", err)) @@ -1845,7 +1942,7 @@ func (cs *State) finalizeCommit(height int64) { // Private validator might have changed it's key pair => refetch pubkey. if err := cs.updatePrivValidatorPubKey(); err != nil { - logger.Error("failed to get private validator pubkey", "err", err) + logger.Error("Failed to get private validator pubkey", "err", err) } // cs.StartTime is already set. 
@@ -1909,7 +2006,6 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) } } - } } cs.metrics.MissingValidators.Set(float64(missingValidators)) @@ -1922,7 +2018,7 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { ) for _, ev := range block.Evidence.Evidence { if dve, ok := ev.(*types.DuplicateVoteEvidence); ok { - if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil { + if _, val := cs.Validators.GetByAddressMut(dve.VoteA.ValidatorAddress); val != nil { byzantineValidatorsCount++ byzantineValidatorsPower += val.VotingPower } @@ -1943,15 +2039,16 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) cs.metrics.BlockSizeBytes.Set(float64(block.Size())) + cs.metrics.ChainSizeBytes.Add(float64(block.Size())) cs.metrics.CommittedHeight.Set(float64(block.Height)) } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- -func (cs *State) defaultSetProposal(proposal *types.Proposal) error { +func (cs *State) defaultSetProposal(proposal *types.Proposal, recvTime time.Time) error { // Already have one // TODO: possibly catch double proposals - if cs.Proposal != nil { + if cs.Proposal != nil || proposal == nil { return nil } @@ -1968,35 +2065,73 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { p := proposal.ToProto() // Verify signature - pubKey := cs.Validators.GetProposer().PubKey - if !pubKey.VerifySignature( + proposer := cs.Validators.GetProposer() + if !proposer.PubKey.VerifySignature( types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature, ) { return ErrInvalidProposalSignature } + // Validate the proposed block size, derived from its PartSetHeader + maxBytes := cs.state.ConsensusParams.Block.MaxBytes + if maxBytes == -1 { + maxBytes = int64(types.MaxBlockSizeBytes) + } + if int64(proposal.BlockID.PartSetHeader.Total) > (maxBytes-1)/int64(types.BlockPartSizeBytes)+1 { + return ErrProposalTooManyParts + } + proposal.Signature = p.Signature cs.Proposal = proposal + cs.ProposalReceiveTime = recvTime + cs.calculateProposalTimestampDifferenceMetric() // We don't update cs.ProposalBlockParts if it is already set. // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! if cs.ProposalBlockParts == nil { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) + + // If we signed this Proposal, lock the PartSet until we load + // all the BlockParts that should come just after the Proposal. 
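The part-count bound added to `defaultSetProposal` above rejects a Proposal whose PartSetHeader advertises more parts than a block of Block.MaxBytes bytes can occupy. A small sketch of the ceiling arithmetic it uses, with 65536 standing in for types.BlockPartSizeBytes; values are illustrative:

package main

import "fmt"

// maxParts computes how many fixed-size parts a block of at most maxBytes
// bytes can span, using the same (n-1)/d+1 integer ceiling as the check above.
func maxParts(maxBytes, partSize int64) int64 {
	return (maxBytes-1)/partSize + 1
}

func main() {
	const partSize = 65536 // types.BlockPartSizeBytes
	fmt.Println(maxParts(65536, partSize))   // 1: exactly one part
	fmt.Println(maxParts(65537, partSize))   // 2: one byte spills into a second part
	fmt.Println(maxParts(4194304, partSize)) // 64: a 4 MiB block
}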
+ if bytes.Equal(proposer.Address, cs.privValidatorPubKey.Address()) { + cs.ProposalBlockParts.Lock() + } } - cs.Logger.Info("received proposal", "proposal", proposal, "proposer", pubKey.Address()) + cs.Logger.Info("Received proposal", "proposal", proposal, "proposer", proposer.Address) return nil } +func (cs *State) readSerializedBlockFromBlockParts() ([]byte, error) { + // reuse a serialized block buffer from cs + var serializedBlockBuffer []byte + if len(cs.serializedBlockBuffer) < int(cs.ProposalBlockParts.ByteSize()) { + serializedBlockBuffer = make([]byte, cs.ProposalBlockParts.ByteSize()) + cs.serializedBlockBuffer = serializedBlockBuffer + } else { + serializedBlockBuffer = cs.serializedBlockBuffer[:cs.ProposalBlockParts.ByteSize()] + } + + n, err := io.ReadFull(cs.ProposalBlockParts.GetReader(), serializedBlockBuffer) + if err != nil { + return nil, err + } + // Consistency check, should be impossible to fail. + if n != len(serializedBlockBuffer) { + return nil, fmt.Errorf("unexpected error in reading block parts, expected to read %d bytes, read %d", len(serializedBlockBuffer), n) + } + return serializedBlockBuffer, nil +} + // NOTE: block is not necessarily valid. // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, // once we have the full block. -func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) { +func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID nodekey.ID) (added bool, err error) { height, round, part := msg.Height, msg.Round, msg.Part // Blocks might be reused, so round mismatch is OK if cs.Height != height { - cs.Logger.Debug("received block part from wrong height", "height", height, "round", round) + cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round) cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1) return false, nil } @@ -2007,7 +2142,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add // NOTE: this can happen when we've gone to a higher round and // then receive parts from the previous round - not necessarily a bad peer. 
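`readSerializedBlockFromBlockParts` above swaps the old per-block `io.ReadAll` for a scratch buffer that is grown only when a larger block arrives. A minimal sketch of that grow-or-reslice pattern; the type and method names are illustrative:

package main

import "fmt"

// scratchReader keeps one reusable buffer: grow it when a request exceeds the
// current allocation, otherwise hand back a reslice of the same backing array.
type scratchReader struct{ buf []byte }

func (r *scratchReader) take(n int) []byte {
	if len(r.buf) < n {
		r.buf = make([]byte, n)
	}
	return r.buf[:n]
}

func main() {
	r := &scratchReader{}
	a := r.take(1024)
	b := r.take(512) // no new allocation: reslices the 1024-byte buffer
	fmt.Println(len(a), len(b), cap(b)) // 1024 512 1024
}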
cs.Logger.Debug( - "received a block part when we are not expecting any", + "Received a block part when we are not expecting any", "height", height, "round", round, "index", part.Index, @@ -2033,6 +2168,10 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add cs.evsw.FireEvent(types.EventProposalBlockPart, msg) } + count, total := cs.ProposalBlockParts.Count(), cs.ProposalBlockParts.Total() + cs.Logger.Debug("Received block part", "height", height, "round", round, + "index", part.Index, "count", count, "total", total, "from", peerID) + maxBytes := cs.state.ConsensusParams.Block.MaxBytes if maxBytes == -1 { maxBytes = int64(types.MaxBlockSizeBytes) @@ -2043,7 +2182,7 @@ ) } if added && cs.ProposalBlockParts.IsComplete() { - bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader()) + bz, err := cs.readSerializedBlockFromBlockParts() if err != nil { return added, err } @@ -2060,12 +2199,13 @@ } cs.ProposalBlock = block + cs.ProposalBlockParts.Unlock() // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal - cs.Logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) + cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil { - cs.Logger.Error("failed publishing event complete proposal", "err", err) + cs.Logger.Error("Failed publishing event complete proposal", "err", err) } } return added, nil @@ -2078,9 +2218,9 @@ func (cs *State) handleCompleteProposal(blockHeight int64) { if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) { if cs.ProposalBlock.HashesTo(blockID.Hash) { cs.Logger.Debug( - "updating valid block to new proposal block", + "Updating valid block to new proposal block", "valid_round", cs.Round, - "valid_block_hash", log.NewLazyBlockHash(cs.ProposalBlock), + "valid_block_hash", log.NewLazyHash(cs.ProposalBlock), ) cs.ValidRound = cs.Round @@ -2106,15 +2246,15 @@ } } -// Attempt to add the vote. if its a duplicate signature, dupeout the validator -func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { +// Attempt to add the vote. If it's a duplicate signature, dupeout the validator. +func (cs *State) tryAddVote(vote *types.Vote, peerID nodekey.ID) (bool, error) { added, err := cs.addVote(vote, peerID) // NOTE: some of these errors are swallowed here if err != nil { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. // If it's otherwise invalid, punish peer.
- //nolint: gocritic + if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { if cs.privValidatorPubKey == nil { return false, ErrPubKeyIsNotSet @@ -2122,7 +2262,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) { cs.Logger.Error( - "found conflicting vote from ourselves; did you unsafe_reset a validator?", + "Found conflicting vote from ourselves; did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type, @@ -2134,33 +2274,28 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { // report conflicting votes to the evidence pool cs.evpool.ReportConflictingVotes(voteErr.VoteA, voteErr.VoteB) cs.Logger.Debug( - "found and sent conflicting votes to the evidence pool", + "Found and sent conflicting votes to the evidence pool", "vote_a", voteErr.VoteA, "vote_b", voteErr.VoteB, ) return added, err - } else if errors.Is(err, types.ErrVoteNonDeterministicSignature) { - cs.Logger.Info("vote has non-deterministic signature", "err", err) - } else if errors.Is(err, types.ErrInvalidVoteExtension) { - cs.Logger.Info("vote has invalid extension") - } else { - // Either - // 1) bad peer OR - // 2) not a bad peer? this can also err sometimes with "Unexpected step" OR - // 3) tmkms use with multiple validators connecting to a single tmkms instance - // (https://github.com/tendermint/tendermint/issues/3839). - cs.Logger.Info("failed attempting to add vote", "err", err) - return added, ErrAddingVote } + + // Either + // 1) bad peer OR + // 2) not a bad peer? this can also err sometimes with "Unexpected step" OR + // 3) tmkms use with multiple validators connecting to a single tmkms instance + // (https://github.com/tendermint/tendermint/issues/3839). + return added, ErrAddingVote{Err: err} } return added, nil } -func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { +func (cs *State) addVote(vote *types.Vote, peerID nodekey.ID) (added bool, err error) { cs.Logger.Debug( - "adding vote", + "Adding vote", "vote_height", vote.Height, "vote_type", vote.Type, "val_index", vote.ValidatorIndex, @@ -2175,23 +2310,23 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error // A precommit for the previous height? 
// These come in while we wait timeoutCommit - if vote.Height+1 == cs.Height && vote.Type == cmtproto.PrecommitType { + if vote.Height+1 == cs.Height && vote.Type == types.PrecommitType { if cs.Step != cstypes.RoundStepNewHeight { // Late precommit at prior height is ignored - cs.Logger.Debug("precommit vote came in after commit timeout and has been ignored", "vote", vote) + cs.Logger.Debug("Precommit vote came in after commit timeout and has been ignored", "vote", vote) return added, err } added, err = cs.LastCommit.AddVote(vote) if !added { - // If the vote wasnt added but there's no error, its a duplicate vote + // If the vote wasn't added but there's no error, it's a duplicate vote if err == nil { cs.metrics.DuplicateVote.Add(1) } return added, err } - cs.Logger.Debug("added vote to last precommits", "last_commit", cs.LastCommit.StringShort()) + cs.Logger.Debug("Added vote to last precommits", "last_commit", cs.LastCommit.StringShort()) if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { return added, err } @@ -2199,7 +2334,8 @@ cs.evsw.FireEvent(types.EventVote, vote) // if we can skip timeoutCommit and have all the votes now, - if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { + skipTimeoutCommit := cs.state.NextBlockDelay == 0 && cs.config.TimeoutCommit == 0 //nolint:staticcheck + if skipTimeoutCommit && cs.LastCommit.HasAll() { // go straight to new round (skip timeout commit) // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) cs.enterNewRound(cs.Height, 0) @@ -2211,12 +2347,12 @@ // Height mismatch is ignored. // Not necessarily a bad peer, but not favorable behavior. if vote.Height != cs.Height { - cs.Logger.Debug("vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) + cs.Logger.Debug("Vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) return added, err } // Check to see if the chain is configured to extend votes. - extEnabled := cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(vote.Height) + extEnabled := cs.state.ConsensusParams.Feature.VoteExtensionsEnabled(vote.Height) if extEnabled { // The chain is configured to extend votes, check that the vote is // not for a nil block and verify the extensions signature against the @@ -2228,14 +2364,21 @@ } // Verify VoteExtension if precommit and not nil // https://github.com/tendermint/tendermint/issues/8487 - if vote.Type == cmtproto.PrecommitType && !vote.BlockID.IsNil() && + if vote.Type == types.PrecommitType && !vote.BlockID.IsNil() && !bytes.Equal(vote.ValidatorAddress, myAddr) { // Skip the VerifyVoteExtension call if the vote was issued by this validator. - // The core fields of the vote message were already validated in the // consensus reactor when the vote was received. // Here, we verify the signature of the vote extension included in the vote // message.
_, val := cs.state.Validators.GetByIndex(vote.ValidatorIndex) + if val == nil { // TODO: we should disconnect from this malicious peer + valsCount := cs.state.Validators.Size() + cs.Logger.Info("Peer sent us vote with invalid ValidatorIndex", + "peer", peerID, + "validator_index", vote.ValidatorIndex, + "len_validators", valsCount) + return added, ErrInvalidVote{Reason: fmt.Sprintf("ValidatorIndex %d is out of bounds [0, %d)", vote.ValidatorIndex, valsCount)} + } if err := vote.VerifyExtension(cs.state.ChainID, val.PubKey); err != nil { return false, err } @@ -2246,16 +2389,14 @@ return false, err } } - } else { + } else if len(vote.Extension) > 0 || len(vote.ExtensionSignature) > 0 { // Vote extensions are not enabled on the network. // Reject the vote, as it is malformed // // TODO punish a peer if it sent a vote with an extension when the feature // is disabled on the network. // https://github.com/tendermint/tendermint/issues/8565 - if len(vote.Extension) > 0 || len(vote.ExtensionSignature) > 0 { - return false, fmt.Errorf("received vote with vote extension for height %v (extensions disabled) from peer ID %s", vote.Height, peerID) - } + return false, fmt.Errorf("received vote with vote extension for height %v (extensions disabled) from peer ID %s", vote.Height, peerID) } height := cs.Height @@ -2263,7 +2404,7 @@ if !added { // Either duplicate, or error upon cs.Votes.AddByIndex() - // If the vote wasnt added but there's no error, its a duplicate vote + // If the vote wasn't added but there's no error, it's a duplicate vote if err == nil { cs.metrics.DuplicateVote.Add(1) } @@ -2281,9 +2422,9 @@ cs.evsw.FireEvent(types.EventVote, vote) switch vote.Type { - case cmtproto.PrevoteType: + case types.PrevoteType: prevotes := cs.Votes.Prevotes(vote.Round) - cs.Logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort()) + cs.Logger.Debug("Added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort()) // Check to see if >2/3 of the voting power on the network voted for any non-nil block. if blockID, ok := prevotes.TwoThirdsMajority(); ok && !blockID.IsNil() { @@ -2293,14 +2434,14 @@ // Update Valid* if we can.
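The `else if` rewrite above, together with the matching panic in `signAddVote` further down in this diff, enforces one invariant: for a precommit on a non-nil block, extension data must be present exactly when vote extensions are enabled at that height. A toy truth table of that rule, simplified to a single boolean per vote and illustrative only:

package main

import "fmt"

// extensionConsistent sketches the invariant: extension presence on a
// non-nil precommit must match whether extensions are enabled at its height.
func extensionConsistent(extEnabled, hasExtension bool) bool {
	return extEnabled == hasExtension
}

func main() {
	fmt.Println(extensionConsistent(true, true))  // true: enabled and present
	fmt.Println(extensionConsistent(false, true)) // false: malformed, the vote is rejected
	fmt.Println(extensionConsistent(true, false)) // false: a required extension is missing
}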
if cs.ValidRound < vote.Round && vote.Round == cs.Round { if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.Logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round) + cs.Logger.Debug("Updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round) cs.ValidRound = vote.Round cs.ValidBlock = cs.ProposalBlock cs.ValidBlockParts = cs.ProposalBlockParts } else { cs.Logger.Debug( - "valid block we do not know about; set ProposalBlock=nil", - "proposal", log.NewLazyBlockHash(cs.ProposalBlock), + "Valid block we do not know about; set ProposalBlock=nil", + "proposal", log.NewLazyHash(cs.ProposalBlock), "block_id", blockID.Hash, ) @@ -2312,7 +2453,7 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } - cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + cs.evsw.FireEvent(types.EventValidBlock, cs.RoundState) if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { return added, err } @@ -2340,9 +2481,9 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error } } - case cmtproto.PrecommitType: + case types.PrecommitType: precommits := cs.Votes.Precommits(vote.Round) - cs.Logger.Debug("added vote to precommit", + cs.Logger.Debug("Added vote to precommit", "height", vote.Height, "round", vote.Round, "validator", vote.ValidatorAddress.String(), @@ -2357,7 +2498,8 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error if !blockID.IsNil() { cs.enterCommit(height, vote.Round) - if cs.config.SkipTimeoutCommit && precommits.HasAll() { + skipTimeoutCommit := cs.state.NextBlockDelay == 0 && cs.config.TimeoutCommit == 0 //nolint:staticcheck + if skipTimeoutCommit && precommits.HasAll() { cs.enterNewRound(cs.Height, 0) } } else { @@ -2377,7 +2519,7 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error // CONTRACT: cs.privValidator is not nil. 
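The `skipTimeoutCommit` expression used above replaces the old config.SkipTimeoutCommit flag. A sketch of the combined condition, assuming NextBlockDelay and TimeoutCommit are time.Duration values as they appear in this diff; illustrative only:

package main

import (
	"fmt"
	"time"
)

// skipCommitTimeout mirrors the condition above: the commit timeout can be
// skipped only when both the consensus-level NextBlockDelay and the
// deprecated local TimeoutCommit are zero and all precommits have arrived.
func skipCommitTimeout(nextBlockDelay, timeoutCommit time.Duration, haveAllPrecommits bool) bool {
	return nextBlockDelay == 0 && timeoutCommit == 0 && haveAllPrecommits
}

func main() {
	fmt.Println(skipCommitTimeout(0, 0, true))                    // true: go straight to the next round
	fmt.Println(skipCommitTimeout(500*time.Millisecond, 0, true)) // false: honor NextBlockDelay
	fmt.Println(skipCommitTimeout(0, 0, false))                   // false: still waiting for precommits
}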
func (cs *State) signVote( - msgType cmtproto.SignedMsgType, + msgType types.SignedMsgType, hash []byte, header types.PartSetHeader, block *types.Block, @@ -2394,19 +2536,20 @@ func (cs *State) signVote( addr := cs.privValidatorPubKey.Address() valIdx, _ := cs.Validators.GetByAddress(addr) + timestamp := cs.voteTime(cs.Height) vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: valIdx, Height: cs.Height, Round: cs.Round, - Timestamp: cs.voteTime(), + Timestamp: timestamp, Type: msgType, BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } - extEnabled := cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(vote.Height) - if msgType == cmtproto.PrecommitType && !vote.BlockID.IsNil() { + extEnabled := cs.state.ConsensusParams.Feature.VoteExtensionsEnabled(vote.Height) + if msgType == types.PrecommitType && !vote.BlockID.IsNil() { // if the signedMessage type is for a non-nil precommit, add // VoteExtension if extEnabled { @@ -2418,17 +2561,21 @@ func (cs *State) signVote( } } - recoverable, err := types.SignAndCheckVote(vote, cs.privValidator, cs.state.ChainID, extEnabled && (msgType == cmtproto.PrecommitType)) + recoverable, err := types.SignAndCheckVote(vote, cs.privValidator, cs.state.ChainID, extEnabled && (msgType == types.PrecommitType)) if err != nil && !recoverable { - panic(fmt.Sprintf("non-recoverable error when signing vote (%d/%d)", vote.Height, vote.Round)) + panic(fmt.Sprintf("non-recoverable error when signing vote %v: %v", vote, err)) } return vote, err } -func (cs *State) voteTime() time.Time { +func (cs *State) voteTime(height int64) time.Time { + if cs.isPBTSEnabled(height) { + return cmttime.Now() + } now := cmttime.Now() minVoteTime := now + // Minimum time increment between blocks const timeIota = time.Millisecond // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, @@ -2448,9 +2595,9 @@ func (cs *State) voteTime() time.Time { } // sign the vote and publish on internalMsgQueue -// block information is only used to extend votes (precommit only); should be nil in all other cases +// block information is only used to extend votes (precommit only); should be nil in all other cases. 
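`voteTime` above now branches on PBTS: with PBTS enabled the local clock is used directly, while under legacy BFT Time the timestamp is clamped to stay ahead of the previous block time. A compact sketch of the two regimes; parameters are illustrative, and the real code derives the minimum from the locked or proposed block's time:

package main

import (
	"fmt"
	"time"
)

// voteTimestamp sketches the two regimes above: PBTS trusts the wall clock;
// BFT Time enforces monotonicity with a minimal 1 ms increment.
func voteTimestamp(pbtsEnabled bool, now, prevBlockTime time.Time) time.Time {
	if pbtsEnabled {
		return now
	}
	const timeIota = time.Millisecond
	if minTime := prevBlockTime.Add(timeIota); now.Before(minTime) {
		return minTime
	}
	return now
}

func main() {
	prev := time.Now()
	skewed := prev.Add(-time.Second) // local clock behind the previous block
	fmt.Println(voteTimestamp(false, skewed, prev).Equal(prev.Add(time.Millisecond))) // true: clamped
	fmt.Println(voteTimestamp(true, skewed, prev).Equal(skewed))                      // true: clock used as-is
}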
func (cs *State) signAddVote( - msgType cmtproto.SignedMsgType, + msgType types.SignedMsgType, hash []byte, header types.PartSetHeader, block *types.Block, @@ -2473,20 +2620,20 @@ func (cs *State) signAddVote( // TODO: pass pubKey to signVote vote, err := cs.signVote(msgType, hash, header, block) if err != nil { - cs.Logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) + cs.Logger.Error("Failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) return } hasExt := len(vote.ExtensionSignature) > 0 - extEnabled := cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(vote.Height) - if vote.Type == cmtproto.PrecommitType && !vote.BlockID.IsNil() && hasExt != extEnabled { + extEnabled := cs.state.ConsensusParams.Feature.VoteExtensionsEnabled(vote.Height) + if vote.Type == types.PrecommitType && !vote.BlockID.IsNil() && hasExt != extEnabled { panic(fmt.Errorf("vote extension absence/presence does not match extensions enabled %t!=%t, height %d, type %v", hasExt, extEnabled, vote.Height, vote.Type)) } - cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) - cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote) + cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, "", time.Time{}}) + cs.Logger.Debug("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote) } -// updatePrivValidatorPubKey get's the private validator public key and +// updatePrivValidatorPubKey gets the private validator public key and // memoizes it. This func returns an error if the private validator is not // responding or responds with an error. func (cs *State) updatePrivValidatorPubKey() error { @@ -2502,7 +2649,7 @@ func (cs *State) updatePrivValidatorPubKey() error { return nil } -// look back to check existence of the node's consensus votes before joining consensus +// look back to check existence of the node's consensus votes before joining consensus. 
func (cs *State) checkDoubleSigningRisk(height int64) error { if cs.privValidator != nil && cs.privValidatorPubKey != nil && cs.config.DoubleSignCheckHeight > 0 && height > 0 { valAddr := cs.privValidatorPubKey.Address() @@ -2516,7 +2663,7 @@ func (cs *State) checkDoubleSigningRisk(height int64) error { if lastCommit != nil { for sigIdx, s := range lastCommit.Signatures { if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) { - cs.Logger.Info("found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i) + cs.Logger.Info("Found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i) return ErrSignatureFoundInPastBlocks } } @@ -2541,7 +2688,7 @@ func (cs *State) calculatePrevoteMessageDelayMetrics() { var votingPowerSeen int64 for _, v := range pl { - _, val := cs.Validators.GetByAddress(v.ValidatorAddress) + _, val := cs.Validators.GetByAddressMut(v.ValidatorAddress) votingPowerSeen += val.VotingPower if votingPowerSeen >= cs.Validators.TotalVotingPower()*2/3+1 { cs.metrics.QuorumPrevoteDelay.With("proposer_address", cs.Validators.GetProposer().Address.String()).Set(v.Timestamp.Sub(cs.Proposal.Timestamp).Seconds()) @@ -2553,7 +2700,7 @@ func (cs *State) calculatePrevoteMessageDelayMetrics() { } } -//--------------------------------------------------------- +// --------------------------------------------------------- func CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2 cstypes.RoundStepType) int { if h1 < h2 { @@ -2609,3 +2756,31 @@ func repairWalFile(src, dst string) error { return nil } + +func (cs *State) calculateProposalTimestampDifferenceMetric() { + if cs.Proposal != nil && cs.Proposal.POLRound == -1 { + isTimely := cs.proposalIsTimely() + cs.metrics.ProposalTimestampDifference. + With("is_timely", strconv.FormatBool(isTimely)). + Observe(cs.ProposalReceiveTime.Sub(cs.Proposal.Timestamp).Seconds()) + } +} + +// proposerWaitTime determines how long the proposer should wait to propose its next block. +// If the result is zero, a block can be proposed immediately. +// +// Block times must be monotonically increasing, so if the block time of the previous +// block is larger than the proposer's current time, then the proposer will sleep +// until its local clock exceeds the previous block time. +func proposerWaitTime(lt cmttime.Source, bt time.Time) time.Duration { + t := lt.Now() + if bt.After(t) { + return bt.Sub(t) + } + return 0 +} + +// isPBTSEnabled returns true if PBTS is enabled at the current height. 
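`proposerWaitTime` above is a pure function, so its contract is easy to exercise in isolation. A usage sketch with a fixed clock standing in for cmttime.Source; the fixedClock type and helper name are illustrative:

package main

import (
	"fmt"
	"time"
)

// fixedClock is a stand-in for a cmttime.Source whose Now() is constant.
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time { return c.t }

// proposerWait mirrors the logic above: if the previous block's timestamp is
// ahead of our clock, wait out the difference so block times stay monotonic.
func proposerWait(now, prevBlockTime time.Time) time.Duration {
	if prevBlockTime.After(now) {
		return prevBlockTime.Sub(now)
	}
	return 0
}

func main() {
	clk := fixedClock{t: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)}
	fmt.Println(proposerWait(clk.Now(), clk.t.Add(250*time.Millisecond))) // 250ms: sleep before proposing
	fmt.Println(proposerWait(clk.Now(), clk.t.Add(-time.Minute)))         // 0s: propose immediately
}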
+func (cs *State) isPBTSEnabled(height int64) bool { + return cs.state.ConsensusParams.Feature.PbtsEnabled(height) +} diff --git a/consensus/state_test.go b/internal/consensus/state_test.go similarity index 64% rename from consensus/state_test.go rename to internal/consensus/state_test.go index 8d4d45385e5..21c43f5e4f8 100644 --- a/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -15,16 +15,16 @@ import ( "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" abcimocks "github.com/cometbft/cometbft/abci/types/mocks" - cstypes "github.com/cometbft/cometbft/consensus/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/tmhash" + cstypes "github.com/cometbft/cometbft/internal/consensus/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" cmtbytes "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/protoio" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" - cmtrand "github.com/cometbft/cometbft/libs/rand" p2pmock "github.com/cometbft/cometbft/p2p/mock" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" ) @@ -42,24 +42,26 @@ x * TestFullRound1 - 1 val, full successful round x * TestFullRoundNil - 1 val, full round of nil x * TestFullRound2 - 2 vals, both required for full round LockSuite -x * TestStateLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first. -x * TestStateLockPOLUpdateLock - 4 vals, one precommits, +x * TestStateLock_NoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first. +x * TestStateLock_POLUpdateLock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka -x * TestStateLockPOLRelock - 4 vals, polka in round 1 and polka in round 2. +x * TestStateLock_POLRelock - 4 vals, polka in round 1 and polka in round 2. Ensure validator updates locked round. -x * TestStateLockPOLDoesNotUnlock 4 vals, one precommits, other 3 polka nil at +x * TestStateLock_POLDoesNotUnlock 4 vals, one precommits, other 3 polka nil at next round, so we precommit nil but maintain lock -x * TestStateLockMissingProposalWhenPOLSeenDoesNotUpdateLock - 4 vals, 1 misses proposal but sees POL. -x * TestStateLockMissingProposalWhenPOLSeenDoesNotUnlock - 4 vals, 1 misses proposal but sees POL. - * TestStateLockMissingProposalWhenPOLForLockedBlock - 4 vals, 1 misses proposal but sees POL for locked block. -x * TestStateLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round -x * TestStateLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round -x * TestStatePrevotePOLFromPreviousRound 4 vals, prevote a proposal if a POL was seen for it in a previous round. +x * TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock - 4 vals, 1 misses proposal but sees POL. +x * TestStateLock_MissingProposalWhenPOLSeenDoesNotUnlock - 4 vals, 1 misses proposal but sees POL. + * TestStateLock_MissingProposalWhenPOLForLockedBlock - 4 vals, 1 misses proposal but sees POL for locked block. +x * TestState_MissingProposalValidBlockReceivedTimeout - 4 vals, 1 misses proposal but receives full block. +x * TestState_MissingProposalValidBlockReceivedPrecommit - 4 vals, 1 misses proposal but receives full block. +x * TestStateLock_POLSafety1 - 4 vals. 
We shouldn't change lock based on polka at earlier round +x * TestStateLock_POLSafety2 - 4 vals. We shouldn't accept a proposal with POLRound smaller than our locked round. +x * TestState_PrevotePOLFromPreviousRound 4 vals, prevote a proposal if a POL was seen for it in a previous round. * TestNetworkLock - once +1/3 precommits, network should be locked * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed SlashingSuite -x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed -x * TestSlashingPrecommits - a validator precomitting twice in a round gets slashed +x * TestStateSlashing_Prevotes - a validator prevoting twice in a round gets slashed +x * TestStateSlashing_Precommits - a validator precommitting twice in a round gets slashed CatchupSuite * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote HaltSuite x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh */ -//---------------------------------------------------------------------------------------------------- +// ---------------------------------------------------------------------------------------------------- // ProposeSuite func TestStateProposerSelection0(t *testing.T) { cs1, vss := randState(4) - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID + pv, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -84,18 +88,17 @@ func TestStateProposerSelection0(t *testing.T) { // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() - pv, err := cs1.privValidator.GetPubKey() - require.NoError(t, err) address := pv.Address() - if !bytes.Equal(prop.Address, address) { - t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) - } + require.Truef(t, bytes.Equal(prop.Address, address), "expected proposer to be validator 0 (%X). Got %X", address, prop.Address) // Wait for complete proposal. ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - signAddVotes(cs1, cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), true, vss[1:]...) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + }, true, vss[1:]...) // Wait for new round so next validator is set. ensureNewRound(newRoundCh, height+1, 0) @@ -104,15 +107,13 @@ func TestStateProposerSelection0(t *testing.T) { pv1, err := vss[1].GetPubKey() require.NoError(t, err) addr := pv1.Address() - if !bytes.Equal(prop.Address, addr) { - panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address)) - } + require.Truef(t, bytes.Equal(prop.Address, addr), "expected proposer to be validator 1 (%X). Got %X", addr, prop.Address) } -// Now let's do it all again, but starting from round 2 instead of 0 +// Now let's do it all again, but starting from round 2 instead of 0.
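The selection tests around here assert simple round-robin rotation over the four equal-power fixture validators. A sketch of the index arithmetic they check; real proposer selection also weighs voting power, so this is only the equal-power special case, and the helper name is illustrative:

package main

import "fmt"

// proposerIndex gives the round-robin proposer slot for a given round when
// all validators have equal power, as in the 4-validator test fixture.
func proposerIndex(startRound, round, numVals int) int {
	return (startRound + round) % numVals
}

func main() {
	for r := 0; r < 4; r++ {
		// starting from round 2 with 4 validators: slots 2, 3, 0, 1
		fmt.Println(r, proposerIndex(2, r, 4))
	}
}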
func TestStateProposerSelection2(t *testing.T) { cs1, vss := randState(4) // test needs more work for more than 3 validators - height := cs1.Height + height, chainID := cs1.Height, cs1.state.ChainID newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 @@ -131,20 +132,17 @@ func TestStateProposerSelection2(t *testing.T) { require.NoError(t, err) addr := pvk.Address() correctProposer := addr - if !bytes.Equal(prop.Address, correctProposer) { - panic(fmt.Sprintf( - "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", - int(i+2)%len(vss), - prop.Address)) - } - - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vss[1:]...) + require.Truef(t, bytes.Equal(prop.Address, correctProposer), + "expected RoundState.Validators.GetProposer() to be validator %d (%X). Got %X", + int(i+2)%len(vss), correctProposer, prop.Address, + ) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vss[1:]...) ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } } -// a non-validator should timeout into the prevote round +// a non-validator should timeout into the prevote round. func TestStateEnterProposeNoPrivValidator(t *testing.T) { cs, _ := randState(1) cs.SetPrivValidator(nil) @@ -199,7 +197,7 @@ func TestStateBadProposal(t *testing.T) { defer cancel() cs1, vss := randState(2) - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID vs2 := vss[1] partSize := types.BlockPartSizeBytes @@ -224,18 +222,12 @@ func TestStateBadProposal(t *testing.T) { propBlockParts, err := propBlock.MakePartSet(partSize) require.NoError(t, err) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vs2.Height, round, -1, blockID) - p := proposal.ToProto() - if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - - proposal.Signature = p.Signature + proposal := types.NewProposal(vs2.Height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vs2) // set the proposal block - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(proposal, propBlockParts, "some peer") + require.NoError(t, err) // start the machine startTestRound(cs1, height, round) @@ -248,23 +240,17 @@ func TestStateBadProposal(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it - bps, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), false, vs2) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2) ensurePrevote(voteCh, height, round) // wait for precommit ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - - bps2, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), bps2.Header(), true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs2) } func TestStateOversizedBlock(t *testing.T) { - const maxBytes = 2000 + const maxBytes = int64(types.BlockPartSizeBytes) for _, testCase := range []struct { name string @@ -282,7 +268,7 @@ func TestStateOversizedBlock(t *testing.T) { t.Run(testCase.name, func(t 
*testing.T) { cs1, vss := randState(2) cs1.state.ConsensusParams.Block.MaxBytes = maxBytes - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID vs2 := vss[1] partSize := types.BlockPartSizeBytes @@ -297,12 +283,8 @@ func TestStateOversizedBlock(t *testing.T) { incrementRound(vss[1:]...) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) - p := proposal.ToProto() - if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + proposal := types.NewProposal(height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vs2) totalBytes := 0 for i := 0; i < int(propBlockParts.Total()); i++ { @@ -310,14 +292,20 @@ func TestStateOversizedBlock(t *testing.T) { totalBytes += len(part.Bytes) } - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) + maxBlockParts := maxBytes / int64(types.BlockPartSizeBytes) + if maxBytes > maxBlockParts*int64(types.BlockPartSizeBytes) { + maxBlockParts++ } + numBlockParts := int64(propBlockParts.Total()) + + err := cs1.SetProposalAndBlock(proposal, propBlockParts, "some peer") + require.NoError(t, err) // start the machine startTestRound(cs1, height, round) - t.Log("Block Sizes;", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + t.Log("Block Sizes;", "Limit", maxBytes, "Current", totalBytes) + t.Log("Proposal Parts;", "Maximum", maxBlockParts, "Current", numBlockParts) validateHash := propBlock.Hash() lockedRound := int32(1) @@ -333,25 +321,25 @@ func TestStateOversizedBlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], validateHash) - bps, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) + // Should not accept a Proposal with too many block parts + if numBlockParts > maxBlockParts { + require.Nil(t, cs1.Proposal) + } - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), false, vs2) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2) ensurePrevote(voteCh, height, round) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, lockedRound, vss[0], validateHash, validateHash) - bps2, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), bps2.Header(), true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs2) }) } } -//---------------------------------------------------------------------------------------------------- +// ---------------------------------------------------------------------------------------------------- // FullRoundSuite -// propose, prevote, and precommit a block +// propose, prevote, and precommit a block. func TestStateFullRound1(t *testing.T) { cs, vss := randState(1) height, round := cs.Height, cs.Round @@ -391,7 +379,7 @@ func TestStateFullRound1(t *testing.T) { validateLastPrecommit(t, cs, vss[0], propBlockHash) } -// nil is proposed, so prevote and precommit nil +// nil is proposed, so prevote and precommit nil. 
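The rewritten tests above call a signProposal helper where the old code inlined the proto conversion and signing. One plausible shape for that helper, reconstructed from the removed boilerplate and assuming the validator stub still exposes SignProposal(chainID, *cmtproto.Proposal) as the removed lines used it:

// Sketch only, not copied from the PR; mirrors the removed inline code.
func signProposal(t *testing.T, proposal *types.Proposal, chainID string, vs *validatorStub) {
	t.Helper()
	p := proposal.ToProto()
	require.NoError(t, vs.SignProposal(chainID, p), "failed to sign proposal")
	proposal.Signature = p.Signature // copy the signature back onto the domain type
}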
func TestStateFullRoundNil(t *testing.T) { cs, _ := randState(1) height, round := cs.Height, cs.Round @@ -406,11 +394,11 @@ func TestStateFullRoundNil(t *testing.T) { } // run through propose, prevote, precommit commit with two validators -// where the first validator has to wait for votes from the second +// where the first validator has to wait for votes from the second. func TestStateFullRound2(t *testing.T) { cs1, vss := randState(2) vs2 := vss[1] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) @@ -422,38 +410,38 @@ func TestStateFullRound2(t *testing.T) { // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() - propBlockHash, propPartSetHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() + blockID := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} // prevote arrives from vs2: - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, propPartSetHeader, false, vs2) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2) ensurePrevote(voteCh, height, round) // prevote ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], blockID.Hash, blockID.Hash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(cs1, cmtproto.PrecommitType, propBlockHash, propPartSetHeader, true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs2) ensurePrecommit(voteCh, height, round) // wait to finish commit, propose in next height ensureNewBlock(newBlockCh, height) } -//------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------ // LockSuite // two validators, 4 rounds. -// two vals take turns proposing. val1 locks on first one, precommits nil on everything else -func TestStateLockNoPOL(t *testing.T) { +// two vals take turns proposing. val1 locks on first one, precommits nil on everything else. 
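Another recurring change: signAddVotes and signVotes now take the chain ID, because a vote's signed bytes commit to it, so the stubs need it to produce votes the state machine will accept. A rough sketch of the signing path presumably exercised (the exact stub API here is an assumption):

// Rough sketch, assuming the stub exposes SignVote(chainID, *cmtproto.Vote).
func signStubVote(vs *validatorStub, chainID string, vote *types.Vote) error {
	v := vote.ToProto()
	if err := vs.SignVote(chainID, v); err != nil { // signed bytes include chainID
		return err
	}
	vote.Signature = v.Signature
	return nil
}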
+func TestStateLock_NoPOL(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cs1, vss := randState(2) vs2 := vss[1] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -475,33 +463,39 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewProposal(proposalCh, height, round) roundState := cs1.GetRoundState() - theBlockHash := roundState.ProposalBlock.Hash() - thePartSetHeader := roundState.ProposalBlockParts.Header() + initialBlockID := types.BlockID{ + Hash: roundState.ProposalBlock.Hash(), + PartSetHeader: roundState.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, thePartSetHeader, false, vs2) + signAddVotes(cs1, types.PrevoteType, chainID, initialBlockID, false, vs2) ensurePrevote(voteCh, height, round) // prevote + validatePrevote(t, cs1, round, vss[0], initialBlockID.Hash) // the proposed block should now be locked and our precommit added ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one for a different block - hash := make([]byte, len(theBlockHash)) - copy(hash, theBlockHash) + hash := make([]byte, len(initialBlockID.Hash)) + copy(hash, initialBlockID.Hash) hash[0] = (hash[0] + 1) % 255 - signAddVotes(cs1, cmtproto.PrecommitType, hash, thePartSetHeader, true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{ + Hash: hash, + PartSetHeader: initialBlockID.PartSetHeader, + }, true, vs2) ensurePrecommit(voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - /// + // round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -524,25 +518,22 @@ func TestStateLockNoPOL(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // add a conflicting prevote from the other validator - bps, err := rs.LockedBlock.MakePartSet(partSize) + partSet, err := rs.LockedBlock.MakePartSet(partSize) require.NoError(t, err) - - signAddVotes(cs1, cmtproto.PrevoteType, hash, bps.Header(), false, vs2) + conflictingBlockID := types.BlockID{Hash: hash, PartSetHeader: partSet.Header()} + signAddVotes(cs1, types.PrevoteType, chainID, conflictingBlockID, false, vs2) ensurePrevote(voteCh, height, round) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. then wait for precommit ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - // the proposed block should still be locked block. // we should precommit nil and be locked on the proposal. 
ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // add conflicting precommit from vs2 - bps2, err := rs.LockedBlock.MakePartSet(partSize) - require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrecommitType, hash, bps2.Header(), true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, conflictingBlockID, true, vs2) ensurePrecommit(voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args @@ -562,33 +553,29 @@ func TestStateLockNoPOL(t *testing.T) { rs = cs1.GetRoundState() // now we're on a new round and are the proposer - if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { - panic(fmt.Sprintf( - "Expected proposal block to be locked block. Got %v, Expected %v", - rs.ProposalBlock, - rs.LockedBlock)) - } + require.Truef(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()), + "expected proposal block to be locked block. Got %v, Expected %v", + rs.ProposalBlock, rs.LockedBlock, + ) ensurePrevote(voteCh, height, round) // prevote validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) - - bps0, err := rs.ProposalBlock.MakePartSet(partSize) + partSet, err = rs.ProposalBlock.MakePartSet(partSize) require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrevoteType, hash, bps0.Header(), false, vs2) + newBlockID := types.BlockID{Hash: hash, PartSetHeader: partSet.Header()} + signAddVotes(cs1, types.PrevoteType, chainID, newBlockID, false, vs2) ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // precommit - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + validatePrecommit(t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but be locked on proposal - bps1, err := rs.ProposalBlock.MakePartSet(partSize) - require.NoError(t, err) signAddVotes( cs1, - cmtproto.PrecommitType, - hash, - bps1.Header(), + types.PrecommitType, + chainID, + newBlockID, true, vs2) // NOTE: conflicting precommits at same height ensurePrecommit(voteCh, height, round) @@ -598,8 +585,11 @@ func TestStateLockNoPOL(t *testing.T) { cs2, _ := randState(2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") + require.NotNil(t, propBlock, "Failed to create proposal block with vs2") + require.NotNil(t, prop, "Failed to create proposal block with vs2") + propBlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), } incrementRound(vs2) @@ -615,9 +605,8 @@ func TestStateLockNoPOL(t *testing.T) { // so set the proposal block bps3, err := propBlock.MakePartSet(partSize) require.NoError(t, err) - if err := cs1.SetProposalAndBlock(prop, propBlock, bps3, ""); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(prop, bps3, "") + require.NoError(t, err) ensureNewProposal(proposalCh, height, round) @@ -626,40 +615,35 @@ func TestStateLockNoPOL(t *testing.T) { validatePrevote(t, cs1, 3, vss[0], nil) // prevote for proposed block - bps4, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps4.Header(), 
false, vs2) + signAddVotes(cs1, types.PrevoteType, chainID, propBlockID, false, vs2) ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + validatePrecommit(t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but locked on proposal - bps5, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) signAddVotes( cs1, - cmtproto.PrecommitType, - propBlock.Hash(), - bps5.Header(), + types.PrecommitType, + chainID, + propBlockID, true, vs2) // NOTE: conflicting precommits at same height ensurePrecommit(voteCh, height, round) } -// TestStateLockPOLUpdateLock tests that a validator maintains updates its locked +// TestStateLock_POLUpdateLock tests that a validator updates its locked // block if the following conditions are met within a round: // 1. The validator received a valid proposal for the block // 2. The validator received prevotes representing greater than 2/3 of the voting // power on the network for the block. -func TestStateLockPOLUpdateLock(t *testing.T) { +func TestStateLock_POLUpdateLock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -682,26 +666,30 @@ func TestStateLockPOLUpdateLock(t *testing.T) { */ t.Log("### Starting Round 0") + // start round and wait for propose and prevote startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + initialBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, initialBlockID, false, vs2, vs3, vs4) + // check that the validator generates a Lock event. ensureLock(lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -710,7 +698,7 @@ func TestStateLockPOLUpdateLock(t *testing.T) { Round 1: Create a block, D and send a proposal for it to cs1 Send a prevote for D from each of the validators to cs1. - Send a precommit for nil from all of the validtors to cs1. + Send a precommit for nil from all of the validators to cs1. Check that cs1 is now locked on the new block, D and no longer on the old block. 
*/ @@ -724,10 +712,13 @@ func TestStateLockPOLUpdateLock(t *testing.T) { propBlockR1Parts, err := propBlockR1.MakePartSet(partSize) require.NoError(t, err) propBlockR1Hash := propBlockR1.Hash() - require.NotEqual(t, propBlockR1Hash, theBlockHash) - if err := cs1.SetProposalAndBlock(propR1, propBlockR1, propBlockR1Parts, "some peer"); err != nil { - t.Fatal(err) + r1BlockID := types.BlockID{ + Hash: propBlockR1Hash, + PartSetHeader: propBlockR1Parts.Header(), } + require.NotEqual(t, propBlockR1Hash, initialBlockID.Hash) + err = cs1.SetProposalAndBlock(propR1, propBlockR1Parts, "some peer") + require.NoError(t, err) ensureNewRound(newRoundCh, height, round) @@ -739,7 +730,7 @@ func TestStateLockPOLUpdateLock(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // Add prevotes from the remainder of the validators for the new locked block. - signAddVotes(cs1, cmtproto.PrevoteType, propBlockR1Hash, propBlockR1Parts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, r1BlockID, false, vs2, vs3, vs4) // Check that we lock on a new block. ensureLock(lockCh, height, round) @@ -751,13 +742,13 @@ func TestStateLockPOLUpdateLock(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], propBlockR1Hash, propBlockR1Hash) } -// TestStateLockPOLRelock tests that a validator updates its locked round if +// TestStateLock_POLRelock tests that a validator updates its locked round if // it receives votes representing over 2/3 of the voting power on the network // for a block that it is already locked in. -func TestStateLockPOLRelock(t *testing.T) { +func TestStateLock_POLRelock(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -774,7 +765,6 @@ func TestStateLockPOLRelock(t *testing.T) { cs1 creates a proposal for block B. Send a prevote for B from each of the validators to cs1. Send a precommit for nil from all of the validators to cs1. - This ensures that cs1 will lock on B in this round but not precommit it. */ t.Log("### Starting Round 0") @@ -785,21 +775,25 @@ func TestStateLockPOLRelock(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() theBlock := rs.ProposalBlock - theBlockHash := rs.ProposalBlock.Hash() theBlockParts := rs.ProposalBlockParts + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) + // check that the validator generates a Lock event. ensureLock(lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round. 
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -808,23 +802,17 @@ func TestStateLockPOLRelock(t *testing.T) { Round 1: Create a proposal for block B, the same block from round 1. Send a prevote for B from each of the validators to cs1. - Send a precommit for nil from all of the validtors to cs1. + Send a precommit for nil from all of the validators to cs1. Check that cs1 updates its 'locked round' value to the current round. */ t.Log("### Starting Round 1") incrementRound(vs2, vs3, vs4) round++ - propBlockID := types.BlockID{Hash: theBlockHash, PartSetHeader: theBlockParts.Header()} - propR1 := types.NewProposal(height, round, cs1.ValidRound, propBlockID) - p := propR1.ToProto() - if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil { - t.Fatalf("error signing proposal: %s", err) - } - propR1.Signature = p.Signature - if err := cs1.SetProposalAndBlock(propR1, theBlock, theBlockParts, ""); err != nil { - t.Fatal(err) - } + propR1 := types.NewProposal(height, round, cs1.ValidRound, blockID, theBlock.Header.Time) + signProposal(t, propR1, chainID, vs2) + err = cs1.SetProposalAndBlock(propR1, theBlockParts, "") + require.NoError(t, err) ensureNewRound(newRoundCh, height, round) @@ -833,10 +821,10 @@ func TestStateLockPOLRelock(t *testing.T) { // Prevote our locked block since it matches the propsal seen in this round. ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) // Add prevotes from the remainder of the validators for the locked block. - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) // Check that we relock. ensureRelock(relockCh, height, round) @@ -844,15 +832,15 @@ func TestStateLockPOLRelock(t *testing.T) { ensurePrecommit(voteCh, height, round) // We should now be locked on the same block but with an updated locked round. - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) } -// TestStateLockPrevoteNilWhenLockedAndMissProposal tests that a validator prevotes nil +// TestStateLock_PrevoteNilWhenLockedAndMissProposal tests that a validator prevotes nil // if it is locked on a block and misses the proposal in a round. 
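In the relock test above, the round-1 proposer re-proposes the round-0 block via types.NewProposal(height, round, cs1.ValidRound, blockID, theBlock.Header.Time): the third argument (POLRound) names the round whose polka justifies the block, and the block's original time is reused. As a hedged sketch, with rePropose a hypothetical wrapper:

// rePropose is hypothetical; the argument order follows the calls above.
func rePropose(height int64, round, polRound int32, blockID types.BlockID, blockTime time.Time) *types.Proposal {
	// polRound >= 0 tells peers which earlier round produced the POL for blockID.
	return types.NewProposal(height, round, polRound, blockID, blockTime)
}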
-func TestStateLockPrevoteNilWhenLockedAndMissProposal(t *testing.T) { +func TestStateLock_PrevoteNilWhenLockedAndMissProposal(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -878,22 +866,24 @@ func TestStateLockPrevoteNilWhenLockedAndMissProposal(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -901,7 +891,7 @@ func TestStateLockPrevoteNilWhenLockedAndMissProposal(t *testing.T) { /* Round 1: Send a prevote for nil from each of the validators to cs1. - Send a precommit for nil from all of the validtors to cs1. + Send a precommit for nil from all of the validators to cs1. Check that cs1 prevotes nil instead of its locked block, but ensure that it maintains its locked block. @@ -917,18 +907,17 @@ func TestStateLockPrevoteNilWhenLockedAndMissProposal(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // Add prevotes from the remainder of the validators nil. - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // We should now be locked on the same block but with an updated locked round. - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) } -// TestStateLock_PrevoteNilWhenLockedAndMissProposal tests that a validator prevotes nil -// if it is locked on a block and misses the proposal in a round. -func TestStateLockPrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { +// TestStateLock_PrevoteNilWhenLockedAndDifferentProposal tests that a validator prevotes nil +// if it is locked on a block and gets a different proposal in a round. +func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - /* All of the assertions in this test occur on the `cs1` validator. 
The test sends signed votes from the other validators to cs1 and @@ -938,7 +927,7 @@ func TestStateLockPrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -963,22 +952,24 @@ func TestStateLockPrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -987,7 +978,7 @@ func TestStateLockPrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { Round 1: Create a proposal for a new block. Send a prevote for nil from each of the validators to cs1. - Send a precommit for nil from all of the validtors to cs1. + Send a precommit for nil from all of the validators to cs1. Check that cs1 prevotes nil instead of its locked block, but ensure that it maintains its locked block. @@ -999,12 +990,10 @@ func TestStateLockPrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) propBlockR1Parts, err := propBlockR1.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) - propBlockR1Hash := propBlockR1.Hash() - require.NotEqual(t, propBlockR1Hash, theBlockHash) - if err := cs1.SetProposalAndBlock(propR1, propBlockR1, propBlockR1Parts, "some peer"); err != nil { - t.Fatal(err) - } + require.NotEqual(t, propBlockR1Hash, blockID.Hash) + err = cs1.SetProposalAndBlock(propR1, propBlockR1Parts, "some peer") + require.NoError(t, err) ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) @@ -1014,19 +1003,19 @@ func TestStateLockPrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // Add prevotes from the remainder of the validators for nil. - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) // We should now be locked on the same block but prevote nil. 
ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) } -// TestStateLockPOLDoesNotUnlock tests that a validator maintains its locked block +// TestStateLock_POLDoesNotUnlock tests that a validator maintains its locked block // despite receiving +2/3 nil prevotes and nil precommits from other validators. // Tendermint used to 'unlock' its locked block when greater than 2/3 prevotes // for a nil block were seen. This behavior has been removed and this test ensures // that it has been completely removed. -func TestStateLockPOLDoesNotUnlock(t *testing.T) { +func TestStateLock_POLDoesNotUnlock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() /* @@ -1038,7 +1027,7 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) @@ -1053,7 +1042,7 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { Round 0: Create a block, B Send a prevote for B from each of the validators to `cs1`. - Send a precommit for B from one of the validtors to `cs1`. + Send a precommit for B from one of the validators to `cs1`. This ensures that cs1 will lock on B in this round. */ @@ -1065,13 +1054,15 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) // the validator should have locked a block in this round. ensureLock(lockCh, height, round) @@ -1079,15 +1070,15 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our should be for this locked block. - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // Add precommits from the other validators. // We only issue 1/2 Precommits for the block in this round. // This ensures that the validator being tested does not commit the block. - // We do not want the validator to commit the block because we want the test + // We do not want the validator to commit the block because we want the // test to proceeds to the next consensus round. 
- signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs4) - signAddVotes(cs1, cmtproto.PrecommitType, theBlockHash, theBlockParts, true, vs3) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs3) // timeout to new round ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1105,11 +1096,9 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) - - require.NotEqual(t, propBlock.Hash(), theBlockHash) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, ""); err != nil { - t.Fatal(err) - } + require.NotEqual(t, propBlock.Hash(), blockID.Hash) + err = cs1.SetProposalAndBlock(prop, propBlockParts, "") + require.NoError(t, err) ensureNewRound(newRoundCh, height, round) @@ -1120,15 +1109,16 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // add >2/3 prevotes for nil from all other validators - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // verify that we haven't update our locked block since the first round - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + /* Round 2: The validator cs1 saw >2/3 precommits for nil in the previous round. @@ -1142,10 +1132,8 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { prop, propBlock = decideProposal(ctx, t, cs3, vs3, vs3.Height, vs3.Round) propBlockParts, err = propBlock.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) - - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, ""); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(prop, propBlockParts, "") + require.NoError(t, err) ensureNewRound(newRoundCh, height, round) @@ -1155,25 +1143,24 @@ func TestStateLockPOLDoesNotUnlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // verify that we haven't update our locked block since the first round - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) - + validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) } -// TestStateLockMissingProposalWhenPOLSeenDoesNotUnlock tests that observing -// a two thirds majority for a block does not cause a validator to upate its lock on the +// TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock tests that observing +// a two thirds majority for a block does not cause a validator to update its lock on the // new block if a proposal was not seen for that block. 
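These tests repeatedly wait on cs1.config.Propose(round) and cs1.config.Precommit(round). In CometBFT's consensus configuration these timeouts grow linearly with the round number, so each failed round gives slower validators more time; a sketch of the conventional shape, assuming the usual ConsensusConfig field names:

// Sketch of how a round-scaled timeout is conventionally computed.
func precommitTimeout(cfg *config.ConsensusConfig, round int32) time.Duration {
	return cfg.TimeoutPrecommit + time.Duration(round)*cfg.TimeoutPrecommitDelta
}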
-func TestStateLockMissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { +func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -1198,19 +1185,21 @@ func TestStateLockMissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - firstBlockHash := rs.ProposalBlock.Hash() - firstBlockParts := rs.ProposalBlockParts.Header() + firstBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) // prevote - signAddVotes(cs1, cmtproto.PrevoteType, firstBlockHash, firstBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, firstBlockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // our precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], firstBlockID.Hash, firstBlockID.Hash) // add precommits from the rest - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1227,13 +1216,15 @@ func TestStateLockMissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { round++ cs2 := newState(cs1.state, vs2, kvstore.NewInMemoryApplication()) prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") - } - secondBlockParts, err := propBlock.MakePartSet(partSize) + require.NotNil(t, propBlock, "Failed to create proposal block with vs2") + require.NotNil(t, prop, "Failed to create proposal block with vs2") + partSet, err := propBlock.MakePartSet(partSize) require.NoError(t, err) - secondBlockHash := propBlock.Hash() - require.NotEqual(t, secondBlockHash, firstBlockHash) + secondBlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } + require.NotEqual(t, secondBlockID.Hash, firstBlockID.Hash) ensureNewRound(newRoundCh, height, round) @@ -1242,19 +1233,19 @@ func TestStateLockMissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // now lets add prevotes from everyone else for the new block - signAddVotes(cs1, cmtproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, secondBlockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, firstBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, firstBlockID.Hash) } -// TestStateLockMissingProposalWhenPOLForLockedBlock tests that observing +// TestStateLock_MissingProposalWhenPOLForLockedBlock tests that observing // a two thirds majority for a block that matches the validator's locked block -// causes a validator to upate its lock round and Precommit the locked block. 
-func TestStateLockMissingProposalWhenPOLForLockedBlock(t *testing.T) { +// causes a validator to update its lock round and Precommit the locked block. +func TestStateLock_MissingProposalWhenPOLForLockedBlock(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -1278,19 +1269,21 @@ func TestStateLockMissingProposalWhenPOLForLockedBlock(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - blockHash := rs.ProposalBlock.Hash() - blockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) // prevote - signAddVotes(cs1, cmtproto.PrevoteType, blockHash, blockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // our precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], blockHash, blockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1314,28 +1307,124 @@ func TestStateLockMissingProposalWhenPOLForLockedBlock(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // now lets add prevotes from everyone else for the locked block - signAddVotes(cs1, cmtproto.PrevoteType, blockHash, blockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) + // the validator precommits the valid block (as it received 2/3+ + // prevotes) which matches its locked block (which also received 2/3+ + // prevotes in the previous round). ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) +} + +// TestState_MissingProposalValidBlockReceivedTimeout tests that a node that +// misses the round's Proposal, but receives a Polka for a block and the full +// block, does not prevote for the valid block, since the Proposal was missing.
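Taken together, the lock tests above and the two missing-proposal tests below pin down when a prevote may name a block. A condensed, non-authoritative sketch of that decision (field names follow the RoundState usage in these tests; the real logic in enterPrevote handles more cases):

// Condensed sketch only: the hash to prevote, nil meaning prevote nil.
func prevoteHash(rs *cstypes.RoundState) []byte {
	if rs.Proposal == nil || rs.ProposalBlock == nil {
		return nil // no Proposal (or no block) this round: prevote nil, per PR 1203
	}
	if rs.LockedBlock != nil && !bytes.Equal(rs.LockedBlock.Hash(), rs.ProposalBlock.Hash()) &&
		rs.Proposal.POLRound < rs.LockedRound {
		return nil // a different block whose POL predates our lock: reject (POLSafety2)
	}
	return rs.ProposalBlock.Hash()
}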
+func TestState_MissingProposalValidBlockReceivedTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := randState(4) + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID - // the validator precommits the block, because it matches its locked block, - // maintains the same locked block and updates its locked round - validatePrecommit(t, cs1, round, 1, vss[0], blockHash, blockHash) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + + // Produce a block + block, err := cs1.createProposalBlock(ctx) + require.NoError(t, err) + blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: block.Hash(), + PartSetHeader: blockParts.Header(), + } - // NOTE: this behavior is inconsistent with Tendermint consensus pseudo-code. - // In the pseudo-code, if a process does not receive the proposal (and block) for - // the current round, it cannot Precommit the proposed block ID, even thought it - // sees a POL for that block that matches the locked value (block). + // Skip round 0 and start consensus threads + round++ + incrementRound(vss[1:]...) + startTestRound(cs1, height, round) + + // Receive prevotes(height, round=1, blockID) from all other validators. + for i := 1; i < len(vss); i++ { + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss[i]) + ensurePrevote(voteCh, height, round) + } + + // We have polka for blockID so we can accept the associated full block. + for i := 0; i < int(blockParts.Total()); i++ { + err := cs1.AddProposalBlockPart(height, round, blockParts.GetPart(i), "peer") + require.NoError(t, err) + } + ensureNewValidBlock(validBlockCh, height, round) + + // We don't prevote right now because we didn't receive the round's + // Proposal. Wait for the propose timeout. + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + + rs := cs1.GetRoundState() + assert.Equal(t, rs.ValidRound, round) + assert.Equal(t, rs.ValidBlock.Hash(), blockID.Hash) + + // Since we didn't see the round's Proposal, we should prevote nil. + // NOTE: introduced by https://github.com/cometbft/cometbft/pull/1203. + // In branches v0.{34,37,38}.x, the node prevotes for the valid block. + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) } -// TestStateLockDoesNotLockOnOldProposal tests that observing +// TestState_MissingProposalValidBlockReceivedPrecommit tests if a node that +// misses the round's Proposal, but receives a Polka for a block and the full +// block, precommits the valid block even though the Proposal is missing. +func TestState_MissingProposalValidBlockReceivedPrecommit(t *testing.T) { + cs1, vss := randState(4) + height, round := cs1.Height, cs1.Round + chainID := cs1.state.ChainID + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + + // Produce a block + _, blockParts, blockID := createProposalBlock(t, cs1) + + // Skip round 0 and start consensus + round++ + incrementRound(vss[1:]...) 
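Both missing-proposal tests feed the full block to the node part by part, without ever delivering a Proposal message. A hypothetical helper capturing that loop:

// Hypothetical wrapper for the part-feeding loop the two tests repeat: deliver
// every part of a block as if gossiped by a peer, with no Proposal sent.
func addFullBlock(t *testing.T, cs *State, height int64, round int32, parts *types.PartSet) {
	t.Helper()
	for i := 0; i < int(parts.Total()); i++ {
		require.NoError(t, cs.AddProposalBlockPart(height, round, parts.GetPart(i), "peer"))
	}
}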
+ startTestRound(cs1, height, round) + + // We are late, so we already receive prevotes for the block + for i := 1; i < len(vss); i++ { + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss[i]) + ensurePrevote(voteCh, height, round) + } + // We received a Polka for blockID, which is now valid + ensureNewValidBlock(validBlockCh, height, round) + + // We don't have the Proposal, so we wait for timeout propose + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) + + // We accept the full block associated with the valid blockID + for i := 0; i < int(blockParts.Total()); i++ { + err := cs1.AddProposalBlockPart(height, round, blockParts.GetPart(i), "peer") + require.NoError(t, err) + } + + // we don't have the Proposal for blockID, but we have the full block + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) +} + +// TestStateLock_DoesNotLockOnOldProposal tests that observing // a two thirds majority for a block does not cause a validator to lock on the // block if a proposal was not seen for that block in the current round, but // was seen in a previous round. -func TestStateLockDoesNotLockOnOldProposal(t *testing.T) { +func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -1358,18 +1447,20 @@ func TestStateLockDoesNotLockOnOldProposal(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - firstBlockHash := rs.ProposalBlock.Hash() - firstBlockParts := rs.ProposalBlockParts.Header() + firstBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) // The proposed block should not have been locked. ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) @@ -1393,26 +1484,22 @@ func TestStateLockDoesNotLockOnOldProposal(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // All validators prevote for the old block. - signAddVotes(cs1, cmtproto.PrevoteType, firstBlockHash, firstBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, firstBlockID, false, vs2, vs3, vs4) // Make sure that cs1 did not lock on the block since it did not receive a proposal for it. 
ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) } -// 4 vals -// a polka at round 1 but we miss it -// then a polka at round 2 that we lock on -// then we see the polka from round 1 but shouldn't unlock -func TestStateLockPOLSafety1(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - +// TestStateLock_POLSafety1 tests that a node should not change a lock based on +// polka in a round earlier than the locked round. The node proposes a block +// in round 0; this value receives a polka that no one sees. A second block +// is proposed in round 1; we see the polka and lock it. Then we receive the +// polka from round 0. We do nothing and remain locked on round 1. +func TestStateLock_POLSafety1(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round - - partSize := types.BlockPartSizeBytes + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) @@ -1423,116 +1510,94 @@ func TestStateLockPOLSafety1(t *testing.T) { addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) - // start round and wait for propose and prevote + // block for round 1, from vs2, empty + // we build it now, to prevent timeouts + block1, blockParts1, blockID1 := createProposalBlock(t, cs1) + prop1 := types.NewProposal(vs2.Height, vs2.Round+1, -1, blockID1, block1.Time) + signProposal(t, prop1, chainID, vs2) + + // add a tx to the mempool + tx := kvstore.NewRandomTx(22) + reqRes, err := assertMempool(cs1.txNotifier).CheckTx(tx, "") + require.NoError(t, err) + require.False(t, reqRes.Response.GetCheckTx().IsErr()) + + // start the machine startTestRound(cs1, cs1.Height, round) ensureNewRound(newRoundCh, height, round) + // our proposal, it includes tx ensureNewProposal(proposalCh, height, round) - rs := cs1.GetRoundState() - propBlock := rs.ProposalBlock + blockID := cs1.GetRoundState().Proposal.BlockID + require.NotEqual(t, blockID, blockID1) ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) - // the others sign a polka but we don't see it - bps, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) + // the others sign a polka in round 0, but no one sees it + prevotes := signVotes(types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) - prevotes := signVotes(cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), false, vs2, vs3, vs4) + // the others precommit nil + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) - // we do see them precommit nil - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) - - // cs1 precommit nil + // precommit nil, locked value remains unset ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) t.Log("### ONTO ROUND 1") incrementRound(vs2, vs3, vs4) - round++ // moving to the next round - cs2 := newState(cs1.state, vs2, kvstore.NewInMemoryApplication()) - prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) + round++
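POLSafety1's central device is a polka that exists but is never delivered in its round: the round-0 prevotes are signed up front, withheld while rounds 1 and 2 play out, then injected late, and the test asserts the stale POL moves nothing. The device in miniature, condensed from the test itself:

// Round 0: sign the polka but do not deliver it.
prevotes := signVotes(types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4)
// ... rounds 1 and 2 run; cs1 locks block1 in round 1 ...
newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)
addVotes(cs1, prevotes...)      // the round-0 POL arrives two rounds late
ensureNoNewRoundStep(newStepCh) // and must not advance the state machine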
ensureNewRound(newRoundCh, height, round) + err = cs1.SetProposalAndBlock(prop1, blockParts1, "some peer") + require.NoError(t, err) - // XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - /*Round2 - // we timeout and prevote our lock - // a polka happened but we didn't see it! - */ - + // prevote for proposal for block1 ensureNewProposal(proposalCh, height, round) - - rs = cs1.GetRoundState() - - if rs.LockedBlock != nil { - t.Fatalf("was not expected to be locked on a block") - } - - // go to prevote, prevote for proposal block ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) - - // now we see the others prevote for it, so we should lock on it - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, propBlockParts.Header(), false, vs2, vs3, vs4) + validatePrevote(t, cs1, round, vss[0], blockID1.Hash) + // we see prevotes for it, so we should lock on and precommit it + signAddVotes(cs1, types.PrevoteType, chainID, blockID1, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) - // we should have precommitted - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + validatePrecommit(t, cs1, round, round, vss[0], blockID1.Hash, blockID1.Hash) + // the others don't see the polka, so precommit nil + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + t.Log("### ONTO ROUND 2") incrementRound(vs2, vs3, vs4) - round++ // moving to the next round + round++ + // new round, no proposal, prevote nil ensureNewRound(newRoundCh, height, round) - - t.Log("### ONTO ROUND 2") - /*Round3 - we see the polka from round 1 but we shouldn't unlock! - */ - - // timeout of propose ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - - // finish prevote ensurePrevote(voteCh, height, round) - // we should prevote for nil validatePrevote(t, cs1, round, vss[0], nil) + // prevotes from round 0 arrive two rounds late; nothing should change, no new round step newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) - - // before prevotes from the previous round are added - // add prevotes from the earlier round addVotes(cs1, prevotes...) - ensureNoNewRoundStep(newStepCh) -} - -// 4 vals. -// polka P0 at R0, P1 at R1, and P2 at R2, -// we lock on P0 at R0, don't see P1, and unlock using P2 at R2 - // then we should make sure we don't lock using P1 -// What we want: -// dont see P0, lock on P1 at R1, dont unlock using P0 at R2 -func TestStateLockPOLSafety2(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // receive prevotes for nil, precommit nil, locked round is the same + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, round-1, vss[0], nil, blockID1.Hash) +} +// TestStateLock_POLSafety2 tests that a node should not accept a proposal with +// POLRound lower than its locked round. The node proposes a block in round 0, +// this value receives a polka, only seen by v3. A second block is proposed in +// round 1; we see the polka and lock it.
Then we receive the polka from round +// 0 and the proposal from v3 re-proposing the block originally from round 0. +// We must reject this proposal, since we are locked on round 1. +func TestStateLock_POLSafety2(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round - - partSize := types.BlockPartSizeBytes + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) @@ -1542,92 +1607,106 @@ func TestStateLockPOLSafety2(t *testing.T) { addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) - // the block for R0: gets polkad but we miss it - // (even though we signed it, shhh) - _, propBlock0 := decideProposal(ctx, t, cs1, vss[0], height, round) - propBlockHash0 := propBlock0.Hash() - propBlockParts0, err := propBlock0.MakePartSet(partSize) - require.NoError(t, err) - propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} - - // the others sign a polka but we don't see it - prevotes := signVotes(cmtproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), false, vs2, vs3, vs4) + // block for round 1, from vs2, empty + // we build it now, to prevent timeouts + block1, blockParts1, blockID1 := createProposalBlock(t, cs1) + prop1 := types.NewProposal(vs2.Height, vs2.Round+1, -1, blockID1, block1.Time) + signProposal(t, prop1, chainID, vs2) - // the block for round 1 - prop1, propBlock1 := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash1 := propBlock1.Hash() - propBlockParts1, err := propBlock1.MakePartSet(partSize) + // add a tx to the mempool + tx := kvstore.NewRandomTx(22) + reqRes, err := assertMempool(cs1.txNotifier).CheckTx(tx, "") require.NoError(t, err) + require.False(t, reqRes.Response.GetCheckTx().IsErr()) - incrementRound(vs2, vs3, vs4) - - round++ // moving to the next round - t.Log("### ONTO Round 1") - // jump in at round 1 - startTestRound(cs1, height, round) + // start the machine + startTestRound(cs1, cs1.Height, round) ensureNewRound(newRoundCh, height, round) - if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { - t.Fatal(err) - } + // our proposal, it includes tx ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + block0, blockParts0 := rs.ProposalBlock, rs.ProposalBlockParts + blockID0 := rs.Proposal.BlockID + require.NotEqual(t, blockID0, blockID1) ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash1) + validatePrevote(t, cs1, round, vss[0], blockID0.Hash) - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), false, vs2, vs3, vs4) + // the others sign a polka in round 0 + prevotes := signVotes(types.PrevoteType, chainID, blockID0, false, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) + // v2, v4 precommit nil, as they don't see the polka + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs4) + // v3 precommits the block, it has seen the polka + signAddVotes(cs1, types.PrecommitType, chainID, blockID0, true, vs3) - // add precommits from the rest - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs4) - signAddVotes(cs1, 
cmtproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), true, vs3) + // conflicting precommits, we precommit nil, locked value remains unset + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + t.Log("### ONTO ROUND 1") incrementRound(vs2, vs3, vs4) + round++ - // timeout of precommit wait to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - - round++ // moving to the next round - // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, round, 0, propBlockID0) - p := newProp.ToProto() - if err := vs3.SignProposal(cs1.state.ChainID, p); err != nil { - t.Fatal(err) - } + ensureNewRound(newRoundCh, height, round) + err = cs1.SetProposalAndBlock(prop1, blockParts1, "some peer") + require.NoError(t, err) - newProp.Signature = p.Signature + // prevote for the proposal of block1 + ensureNewProposal(proposalCh, height, round) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], blockID1.Hash) - if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { - t.Fatal(err) - } + // we see prevotes for it, so we should lock on and precommit it + signAddVotes(cs1, types.PrevoteType, chainID, blockID1, false, vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, round, vss[0], blockID1.Hash, blockID1.Hash) - // Add the pol votes + // prevotes from round 0 are received late + newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) addVotes(cs1, prevotes...) + ensureNoNewRoundStep(newStepCh) + + // the others don't see the polka, so they precommit nil + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + + t.Log("### ONTO ROUND 2") + incrementRound(vs2, vs3, vs4) + round++ + + // v3 has seen a polka for our block in round 0 + // it re-proposes block 0 with POLRound == 0 + prop2 := types.NewProposal(vs3.Height, vs3.Round, 0, blockID0, block0.Time) + signProposal(t, prop2, chainID, vs3) ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO Round 2") - /*Round2 - // now we see the polka from round 1, but we shouldnt unlock - */ + err = cs1.SetProposalAndBlock(prop2, blockParts0, "some peer") + require.NoError(t, err) ensureNewProposal(proposalCh, height, round) + // our locked round is 1, so we reject the proposal from v3 ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash1) + validatePrevote(t, cs1, round, vss[0], nil) + + // receive precommits for nil, precommit nil, locked round is the same + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, round-1, vss[0], nil, blockID1.Hash) } -// TestStatePrevotePOLFromPreviousRound tests that a validator will prevote +// TestState_PrevotePOLFromPreviousRound tests that a validator will prevote // for a block if it is locked on a different block but saw a POL for the block // it is not locked on in a previous round. 
-func TestStatePrevotePOLFromPreviousRound(t *testing.T) { +func TestState_PrevotePOLFromPreviousRound(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -1655,22 +1734,24 @@ func TestStatePrevotePOLFromPreviousRound(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + r0BlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, r0BlockID, false, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], r0BlockID.Hash, r0BlockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1679,7 +1760,7 @@ func TestStatePrevotePOLFromPreviousRound(t *testing.T) { Round 1: Create a block, D but do not send a proposal for it to cs1. Send a prevote for D from each of the validators to cs1 so that cs1 sees a POL. - Send a precommit for nil from all of the validtors to cs1. + Send a precommit for nil from all of the validators to cs1. cs1 has now seen greater than 2/3 of the voting power prevote D in this round but cs1 did not see the proposal for D in this round so it will not prevote or precommit it. 
@@ -1694,18 +1775,20 @@ func TestStatePrevotePOLFromPreviousRound(t *testing.T) { t.Log(propR1.POLRound) propBlockR1Parts, err := propBlockR1.MakePartSet(partSize) require.NoError(t, err) - - propBlockR1Hash := propBlockR1.Hash() - require.NotEqual(t, propBlockR1Hash, theBlockHash) + r1BlockID := types.BlockID{ + Hash: propBlockR1.Hash(), + PartSetHeader: propBlockR1Parts.Header(), + } + require.NotEqual(t, r1BlockID.Hash, r0BlockID.Hash) ensureNewRound(newRoundCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, propBlockR1Hash, propBlockR1Parts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, r1BlockID, false, vs2, vs3, vs4) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) @@ -1727,18 +1810,12 @@ func TestStatePrevotePOLFromPreviousRound(t *testing.T) { t.Log("### Starting Round 2") incrementRound(vs2, vs3, vs4) round++ - propBlockID := types.BlockID{Hash: propBlockR1Hash, PartSetHeader: propBlockR1Parts.Header()} - propR2 := types.NewProposal(height, round, 1, propBlockID) - p := propR2.ToProto() - if err := vs3.SignProposal(cs1.state.ChainID, p); err != nil { - t.Fatalf("error signing proposal: %s", err) - } - propR2.Signature = p.Signature + propR2 := types.NewProposal(height, round, 1, r1BlockID, propBlockR1.Header.Time) + signProposal(t, propR2, chainID, vs3) // cs1 receives a proposal for D, the block that received a POL in round 1. - if err := cs1.SetProposalAndBlock(propR2, propBlockR1, propBlockR1Parts, ""); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(propR2, propBlockR1Parts, "") + require.NoError(t, err) ensureNewRound(newRoundCh, height, round) @@ -1747,14 +1824,14 @@ func TestStatePrevotePOLFromPreviousRound(t *testing.T) { // We should now prevote this block, despite being locked on the block from // round 0. ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockR1Hash) + validatePrevote(t, cs1, round, vss[0], r1BlockID.Hash) - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) // cs1 did not receive a POL within this round, so it should remain locked // on the block from round 0. ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, r0BlockID.Hash) } // 4 vals. 
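Nearly every hunk in this file applies the same two-part migration: vote helpers now take an explicit chain ID plus a single types.BlockID (hash and part-set header together) instead of separate hash and PartSetHeader arguments, and vote types come from the types package rather than cmtproto. A minimal sketch of the new calling convention, assembled only from helper signatures visible in this diff (signAddVotes and the trailing bool that marks extension-carrying votes are test-local helpers, not public API):

```go
// Build the BlockID once; the hash and part-set header now travel together.
partSet, err := propBlock.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
blockID := types.BlockID{
	Hash:          propBlock.Hash(),
	PartSetHeader: partSet.Header(),
}

// Votes are signed over an explicit chain ID; a nil vote is the zero BlockID.
signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4)
signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4)
```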
@@ -1765,7 +1842,7 @@ func TestStatePrevotePOLFromPreviousRound(t *testing.T) { func TestProposeValidBlock(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -1785,22 +1862,25 @@ func TestProposeValidBlock(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockHash := propBlock.Hash() + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) // the others sign a polka - bps, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, bps.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // we should have precommitted the proposed block in this round. - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1817,17 +1897,17 @@ func TestProposeValidBlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // we should have precommitted nil during this round because we received // >2/3 precommits for nil from the other validators. 
- validatePrecommit(t, cs1, round, 0, vss[0], nil, propBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) incrementRound(vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) round += 2 // increment by multiple rounds @@ -1843,9 +1923,9 @@ func TestProposeValidBlock(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs = cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), blockID.Hash)) assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) - assert.True(t, rs.Proposal.POLRound == rs.ValidRound) + assert.Equal(t, rs.Proposal.POLRound, rs.ValidRound) assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.ValidBlock.Hash())) } @@ -1854,7 +1934,7 @@ func TestProposeValidBlock(t *testing.T) { func TestSetValidBlockOnDelayedPrevote(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -1874,18 +1954,21 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockHash := propBlock.Hash() - propBlockParts, err := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) // vs2 send prevote for propBlock - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, propBlockParts.Header(), false, vs2) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2) // vs3 send prevote nil - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs3) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs3) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -1895,20 +1978,20 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { rs = cs1.GetRoundState() - assert.True(t, rs.ValidBlock == nil) - assert.True(t, rs.ValidBlockParts == nil) - assert.True(t, rs.ValidRound == -1) + assert.Nil(t, rs.ValidBlock) + assert.Nil(t, rs.ValidBlockParts) + assert.Equal(t, int32(-1), rs.ValidRound) // vs2 send (delayed) prevote for propBlock - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, propBlockParts.Header(), false, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs4) ensureNewValidBlock(validBlockCh, height, round) rs = cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) - assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) - assert.True(t, rs.ValidRound == round) + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), blockID.Hash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(blockID.PartSetHeader)) + assert.Equal(t, rs.ValidRound, round) } // What we want: @@ -1920,7 +2003,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID 
:= cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -1946,12 +2029,15 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash := propBlock.Hash() - propBlockParts, err := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // vs2, vs3 and vs4 send prevote for propBlock - signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, propBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -1959,16 +2045,17 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + partSet, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(prop, partSet, "some peer") + require.NoError(t, err) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) - assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) - assert.True(t, rs.ValidRound == round) + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), blockID.Hash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(blockID.PartSetHeader)) + assert.Equal(t, rs.ValidRound, round) } func TestProcessProposalAccept(t *testing.T) { @@ -1990,12 +2077,13 @@ func TestProcessProposalAccept(t *testing.T) { } { t.Run(testCase.name, func(t *testing.T) { m := abcimocks.NewApplication(t) - status := abci.ResponseProcessProposal_REJECT + status := abci.PROCESS_PROPOSAL_STATUS_REJECT if testCase.accept { - status = abci.ResponseProcessProposal_ACCEPT + status = abci.PROCESS_PROPOSAL_STATUS_ACCEPT } - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: status}, nil) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil).Maybe() + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{Status: status}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, _ := randStateWithApp(4, m) height, round := cs1.Height, cs1.Round @@ -2038,25 +2126,25 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { } { t.Run(testCase.name, func(t *testing.T) { m := abcimocks.NewApplication(t) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil) if testCase.enabled { - m.On("ExtendVote", mock.Anything, 
mock.Anything).Return(&abci.ResponseExtendVote{ + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ExtendVoteResponse{ VoteExtension: []byte("extension"), }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil) } - m.On("Commit", mock.Anything, mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() height := int64(1) if !testCase.enabled { height = 0 } cs1, vss := randStateWithAppWithHeight(4, m, height) - - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2072,18 +2160,17 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) rs := cs1.GetRoundState() - blockID := types.BlockID{ Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - signAddVotes(cs1, cmtproto.PrevoteType, blockID.Hash, blockID.PartSetHeader, false, vss[1:]...) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss[1:]...) ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) ensurePrecommit(voteCh, height, round) if testCase.enabled { - m.AssertCalled(t, "ExtendVote", context.TODO(), &abci.RequestExtendVote{ + m.AssertCalled(t, "ExtendVote", context.TODO(), &abci.ExtendVoteRequest{ Height: height, Hash: blockID.Hash, Time: rs.ProposalBlock.Time, @@ -2097,7 +2184,7 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) } - signAddVotes(cs1, cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, testCase.enabled, vss[1:]...) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, testCase.enabled, vss[1:]...) ensureNewRound(newRoundCh, height+1, 0) m.AssertExpectations(t) @@ -2108,7 +2195,7 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { require.NoError(t, err) addr := pv.Address() if testCase.enabled { - m.AssertCalled(t, "VerifyVoteExtension", context.TODO(), &abci.RequestVerifyVoteExtension{ + m.AssertCalled(t, "VerifyVoteExtension", context.TODO(), &abci.VerifyVoteExtensionRequest{ Hash: blockID.Hash, ValidatorAddress: addr, Height: height, @@ -2126,19 +2213,20 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { // method is not called for a validator's vote that is never delivered. 
func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { m := abcimocks.NewApplication(t) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil) + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ExtendVoteResponse{ VoteExtension: []byte("extension"), }, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil) - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() - m.On("Commit", mock.Anything, mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil).Maybe() + m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithApp(4, m) - height, round := cs1.Height, cs1.Round - cs1.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = cs1.Height + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID + cs1.state.ConsensusParams.Feature.VoteExtensionsEnableHeight = cs1.Height proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2156,12 +2244,12 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - signAddVotes(cs1, cmtproto.PrevoteType, blockID.Hash, blockID.PartSetHeader, false, vss...) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss...) ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) ensurePrecommit(voteCh, height, round) - m.AssertCalled(t, "ExtendVote", context.TODO(), &abci.RequestExtendVote{ + m.AssertCalled(t, "ExtendVote", context.TODO(), &abci.ExtendVoteRequest{ Height: height, Hash: blockID.Hash, Time: rs.ProposalBlock.Time, @@ -2172,7 +2260,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { ProposerAddress: rs.ProposalBlock.ProposerAddress, }) - signAddVotes(cs1, cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, true, vss[2:]...) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vss[2:]...) 
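+ // Note: precommits are added only from vss[2:]; the remaining validator's + // precommit is deliberately never delivered, which is what the + // AssertNotCalled check below relies on.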
ensureNewRound(newRoundCh, height+1, 0) m.AssertExpectations(t) @@ -2182,7 +2270,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { require.NoError(t, err) addr = pv.Address() - m.AssertNotCalled(t, "VerifyVoteExtension", context.TODO(), &abci.RequestVerifyVoteExtension{ + m.AssertNotCalled(t, "VerifyVoteExtension", context.TODO(), &abci.VerifyVoteExtensionRequest{ Hash: blockID.Hash, ValidatorAddress: addr, Height: height, @@ -2206,24 +2294,25 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { } m := abcimocks.NewApplication(t) - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ExtendVoteResponse{ VoteExtension: voteExtensions[0], }, nil) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil) // capture the prepare proposal request. - rpp := &abci.RequestPrepareProposal{} - m.On("PrepareProposal", mock.Anything, mock.MatchedBy(func(r *abci.RequestPrepareProposal) bool { + rpp := &abci.PrepareProposalRequest{} + m.On("PrepareProposal", mock.Anything, mock.MatchedBy(func(r *abci.PrepareProposalRequest) bool { rpp = r return true - })).Return(&abci.ResponsePrepareProposal{}, nil) + })).Return(&abci.PrepareProposalResponse{}, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil) - m.On("Commit", mock.Anything, mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT}, nil) + m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil) + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithApp(4, m) - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -2241,11 +2330,11 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - signAddVotes(cs1, cmtproto.PrevoteType, blockID.Hash, blockID.PartSetHeader, false, vss[1:]...) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss[1:]...) // create a precommit for each validator with the associated vote extension. for i, vs := range vss[1:] { - signAddPrecommitWithExtension(t, cs1, blockID.Hash, blockID.PartSetHeader, voteExtensions[i+1], vs) + signAddPrecommitWithExtension(t, cs1, chainID, blockID, voteExtensions[i+1], vs) } ensurePrevote(voteCh, height, round) @@ -2263,7 +2352,7 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { round = 3 blockID2 := types.BlockID{} - signAddVotes(cs1, cmtproto.PrecommitType, blockID2.Hash, blockID2.PartSetHeader, true, vss[1:]...) + signAddVotes(cs1, types.PrecommitType, chainID, blockID2, true, vss[1:]...) 
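+ // blockID2 is the zero BlockID, so these precommits are for nil: they end + // the round without a commit, forcing a new round whose proposal must carry + // the vote extensions collected above.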
ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) @@ -2308,24 +2397,25 @@ func TestFinalizeBlockCalled(t *testing.T) { } { t.Run(testCase.name, func(t *testing.T) { m := abcimocks.NewApplication(t) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ - Status: abci.ResponseProcessProposal_ACCEPT, + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{ + Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT, }, nil) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil) // We only expect VerifyVoteExtension to be called on non-nil precommits. // https://github.com/tendermint/tendermint/issues/8487 if !testCase.voteNil { - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ExtendVoteResponse{}, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil) } - r := &abci.ResponseFinalizeBlock{AppHash: []byte("the_hash")} + r := &abci.FinalizeBlockResponse{AppHash: []byte("the_hash")} m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() - m.On("Commit", mock.Anything, mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithApp(4, m) - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2351,10 +2441,10 @@ func TestFinalizeBlockCalled(t *testing.T) { } } - signAddVotes(cs1, cmtproto.PrevoteType, blockID.Hash, blockID.PartSetHeader, false, vss[1:]...) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss[1:]...) ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash()) - signAddVotes(cs1, cmtproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, true, vss[1:]...) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vss[1:]...) 
ensurePrecommit(voteCh, height, round) ensureNewRound(newRoundCh, nextHeight, nextRound) @@ -2425,23 +2515,24 @@ func TestVoteExtensionEnableHeight(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { numValidators := 3 m := abcimocks.NewApplication(t) - m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ - Status: abci.ResponseProcessProposal_ACCEPT, + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{ + Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT, }, nil) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil) if testCase.expectExtendCalled { - m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ExtendVoteResponse{}, nil) } if testCase.expectVerifyCalled { - m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil).Times(numValidators - 1) } - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() - m.On("Commit", mock.Anything, mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil).Maybe() + m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithAppWithHeight(numValidators, m, testCase.enableHeight) - cs1.state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.enableHeight - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID + cs1.state.ConsensusParams.Feature.VoteExtensionsEnableHeight = testCase.enableHeight timeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -2455,9 +2546,13 @@ func TestVoteExtensionEnableHeight(t *testing.T) { ensureNewRound(newRoundCh, height, round) ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } // sign all of the votes - signAddVotes(cs1, cmtproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), false, vss[1:]...) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vss[1:]...) ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash()) var ext []byte @@ -2466,7 +2561,7 @@ func TestVoteExtensionEnableHeight(t *testing.T) { } for _, vs := range vss[1:] { - vote, err := vs.signVote(cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), ext, testCase.hasExtension) + vote, err := vs.signVote(types.PrecommitType, chainID, blockID, ext, testCase.hasExtension, vs.clock.Now()) require.NoError(t, err) addVotes(cs1, vote) } @@ -2483,13 +2578,45 @@ func TestVoteExtensionEnableHeight(t *testing.T) { } } +// TestStateDoesntCrashOnInvalidVote tests that the state does not crash when +// receiving an invalid vote. 
In particular, one with the incorrect +// ValidatorIndex. +func TestStateDoesntCrashOnInvalidVote(t *testing.T) { + cs, vss := randState(2) + height, round, chainID := cs.Height, cs.Round, cs.state.ChainID + // create dummy peer + peer := p2pmock.NewPeer(nil) + + startTestRound(cs, height, round) + + randBytes := cmtrand.Bytes(tmhash.Size) + blockID := types.BlockID{ + Hash: randBytes, + } + + vote := signVote(vss[1], types.PrecommitType, chainID, blockID, true) + // Non-existent validator index + vote.ValidatorIndex = int32(len(vss)) + + voteMessage := &VoteMessage{vote} + assert.NotPanics(t, func() { + cs.handleMsg(msgInfo{voteMessage, peer.ID(), time.Time{}}) + }) + + added, err := cs.AddVote(vote, peer.ID()) + assert.False(t, added) + assert.NoError(t, err) + // TODO: uncomment once we punish peer and return an error + // assert.Equal(t, ErrInvalidVote{Reason: "ValidatorIndex 2 is out of bounds [0, 2)"}, err) +} + // 4 vals, 3 Nil Precommits at P0 // What we want: -// P0 waits for timeoutPrecommit before starting next round +// P0 waits for timeoutPrecommit before starting next round. func TestWaitingTimeoutOnNilPolka(*testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2498,7 +2625,7 @@ func TestWaitingTimeoutOnNilPolka(*testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) @@ -2506,11 +2633,11 @@ func TestWaitingTimeoutOnNilPolka(*testing.T) { // 4 vals, 3 Prevotes for nil from the higher round. // What we want: -// P0 waits for timeoutPropose in the next round before entering prevote +// P0 waits for timeoutPropose in the next round before entering prevote. func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2526,13 +2653,13 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(newRoundCh, height, round) rs := cs1.GetRoundState() - assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires + assert.Equal(t, cstypes.RoundStepPropose, rs.Step) // P0 does not prevote before timeoutPropose expires ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) @@ -2542,11 +2669,11 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { // 4 vals, 3 Precommits for nil from the higher round. 
// What we want: -// P0 jump to higher round, precommit and start precommit wait +// P0 jump to higher round, precommit and start precommit wait. func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2562,7 +2689,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -2582,7 +2709,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, int32(1) + height, round, chainID := cs1.Height, int32(1), cs1.state.ChainID timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -2596,7 +2723,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { ensureNewRound(newRoundCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, types.BlockID{}, false, vs2, vs3, vs4) ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) @@ -2605,14 +2732,14 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { } // What we want: -// P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet +// P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet. 
func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, int32(1) + height, round, chainID := cs1.Height, int32(1), cs1.state.ChainID incrementRound(vs2, vs3, vs4) @@ -2622,22 +2749,25 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) _, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts, err := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // start round in which PO is not proposer startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock - signAddVotes(cs1, cmtproto.PrecommitType, propBlockHash, propBlockParts.Header(), true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) rs := cs1.GetRoundState() - assert.True(t, rs.Step == cstypes.RoundStepCommit) - assert.True(t, rs.ProposalBlock == nil) - assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + assert.Equal(t, rs.Step, cstypes.RoundStepCommit) //nolint:testifylint // this will tell us to reverse the items being compared no matter what + assert.Nil(t, rs.ProposalBlock) + assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) } // What we want: @@ -2649,7 +2779,7 @@ func TestCommitFromPreviousRound(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, int32(1) + height, round, chainID := cs1.Height, int32(1), cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -2658,28 +2788,31 @@ func TestCommitFromPreviousRound(t *testing.T) { proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts, err := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // start round in which PO is not proposer startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock for the previous round - signAddVotes(cs1, cmtproto.PrecommitType, propBlockHash, propBlockParts.Header(), true, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) rs := cs1.GetRoundState() - assert.True(t, rs.Step == cstypes.RoundStepCommit) - assert.True(t, rs.CommitRound == vs2.Round) - assert.True(t, rs.ProposalBlock == nil) - assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) - - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + assert.Equal(t, cstypes.RoundStepCommit, rs.Step) + assert.Equal(t, vs2.Round, rs.CommitRound) + assert.Nil(t, rs.ProposalBlock) + assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) + partSet, err = propBlock.MakePartSet(partSize) + require.NoError(t, 
err) + err = cs1.SetProposalAndBlock(prop, partSet, "some peer") + require.NoError(t, err) ensureNewProposal(proposalCh, height, round) ensureNewRound(newRoundCh, height+1, 0) @@ -2699,14 +2832,14 @@ func (n *fakeTxNotifier) Notify() { // 2 vals precommit votes for a block but node times out waiting for the third. Move to next round // and third precommit arrives which leads to the commit of that header and the correct -// start of the next round +// start of the next round. func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { - config.Consensus.SkipTimeoutCommit = false cs1, vss := randState(4) + cs1.state.NextBlockDelay = 10 * time.Millisecond cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) @@ -2725,31 +2858,33 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2) - signAddVotes(cs1, cmtproto.PrecommitType, theBlockHash, theBlockParts, true, vs3) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs3) // wait till timeout occurs - ensurePrecommitTimeout(precommitTimeoutCh) + ensureNewTimeout(precommitTimeoutCh, height, round, cs1.config.TimeoutVote.Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) // majority is now reached - signAddVotes(cs1, cmtproto.PrecommitType, theBlockHash, theBlockParts, true, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs4) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) + ensureNewBlockHeader(newBlockHeader, height, blockID.Hash) cs1.txNotifier.(*fakeTxNotifier).Notify() @@ -2765,11 +2900,11 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config.Consensus.SkipTimeoutCommit = false cs1, vss := randState(4) + cs1.state.NextBlockDelay = 10 * time.Millisecond vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes @@ -2788,31 +2923,32 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := 
rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + validatePrevote(t, cs1, round, vss[0], blockID.Hash) - signAddVotes(cs1, cmtproto.PrevoteType, theBlockHash, theBlockParts, false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2) - signAddVotes(cs1, cmtproto.PrecommitType, theBlockHash, theBlockParts, true, vs3) - signAddVotes(cs1, cmtproto.PrecommitType, theBlockHash, theBlockParts, true, vs4) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs3) + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs4) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) + ensureNewBlockHeader(newBlockHeader, height, blockID.Hash) prop, propBlock := decideProposal(ctx, t, cs1, vs2, height+1, 0) propBlockParts, err := propBlock.MakePartSet(partSize) require.NoError(t, err) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(prop, propBlockParts, "some peer") + require.NoError(t, err) ensureNewProposal(proposalCh, height+1, 0) rs = cs1.GetRoundState() @@ -2822,87 +2958,10 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { "triggeredTimeoutPrecommit should be false at the beginning of each height") } -//------------------------------------------------------------------------------------------ -// SlashingSuite -// TODO: Slashing - -/* -func TestStateSlashingPrevotes(t *testing.T) { - cs1, vss := randState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, cmtproto.PrevoteType, hash, rs.ProposalBlockParts.Header(), vs2) - - <-timeoutWaitCh - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) 
- - // add the conflicting vote - signAddVotes(cs1, cmtproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} - -func TestStateSlashingPrecommits(t *testing.T) { - cs1, vss := randState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - // add prevote from vs2 - signAddVotes(cs1, cmtproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - <-voteCh // precommit - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, cmtproto.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) - - // add precommit from vs2 - signAddVotes(cs1, cmtproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} -*/ - -//------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------ // CatchupSuite -//------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------ // HaltSuite // 4 vals. 
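TestStartNextHeightCorrectlyAfterTimeout above drives the next-height transition with a stubbed transaction notifier, so the test decides exactly when the node believes new transactions are available. A sketch of that stub pattern, assuming the consensus txNotifier interface exposes TxsAvailable() (only Notify and the ch field are visible in this diff):

```go
// fakeTxNotifier lets a test control when "new transactions" arrive: the
// consensus state blocks on TxsAvailable() until the test calls Notify().
type fakeTxNotifier struct {
	ch chan struct{}
}

func (n *fakeTxNotifier) TxsAvailable() <-chan struct{} { return n.ch }

// Notify unblocks the consensus state, letting it proceed to the next height.
func (n *fakeTxNotifier) Notify() { n.ch <- struct{}{} }
```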
@@ -2910,7 +2969,7 @@ func TestStateSlashingPrecommits(t *testing.T) { func TestStateHalt1(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, cs1.Round + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -2929,22 +2988,26 @@ func TestStateHalt1(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockParts, err := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } ensurePrevote(voteCh, height, round) - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), propBlockParts.Header(), false, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, true, vs2) // didnt receive proposal - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), true, vs3) + signAddVotes(cs1, types.PrecommitType, chainID, types.BlockID{}, true, vs2) // didn't receive proposal + signAddVotes(cs1, types.PrecommitType, chainID, blockID, true, vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! - precommit4 := signVote(vs4, cmtproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), true) + precommit4 := signVote(vs4, types.PrecommitType, chainID, blockID, true) incrementRound(vs2, vs3, vs4) @@ -2956,7 +3019,7 @@ func TestStateHalt1(t *testing.T) { ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 1") - /*Round2 + /* Round2 // we timeout and prevote // a polka happened but we didn't see it! 
*/ @@ -2988,26 +3051,26 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peer.ID(), time.Time{}}) statsMessage := <-cs.statsMsgQueue require.Equal(t, msg, statsMessage.Msg, "") require.Equal(t, peer.ID(), statsMessage.PeerID, "") // sending the same part from different peer - cs.handleMsg(msgInfo{msg, "peer2"}) + cs.handleMsg(msgInfo{msg, "peer2", time.Time{}}) // sending the part with the same height, but different round msg.Round = 1 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peer.ID(), time.Time{}}) // sending the part from the smaller height msg.Height = 0 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peer.ID(), time.Time{}}) // sending the part from the bigger height msg.Height = 3 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peer.ID(), time.Time{}}) select { case <-cs.statsMsgQueue: @@ -3018,28 +3081,32 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { func TestStateOutputVoteStats(t *testing.T) { cs, vss := randState(2) + chainID := cs.state.ChainID // create dummy peer peer := p2pmock.NewPeer(nil) randBytes := cmtrand.Bytes(tmhash.Size) + blockID := types.BlockID{ + Hash: randBytes, + } - vote := signVote(vss[1], cmtproto.PrecommitType, randBytes, types.PartSetHeader{}, true) + vote := signVote(vss[1], types.PrecommitType, chainID, blockID, true) voteMessage := &VoteMessage{vote} - cs.handleMsg(msgInfo{voteMessage, peer.ID()}) + cs.handleMsg(msgInfo{voteMessage, peer.ID(), time.Time{}}) statsMessage := <-cs.statsMsgQueue require.Equal(t, voteMessage, statsMessage.Msg, "") require.Equal(t, peer.ID(), statsMessage.PeerID, "") // sending the same part from different peer - cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}) + cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2", time.Time{}}) // sending the vote for the bigger height incrementHeight(vss[1]) - vote = signVote(vss[1], cmtproto.PrecommitType, randBytes, types.PartSetHeader{}, true) + vote = signVote(vss[1], types.PrecommitType, chainID, blockID, true) - cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) + cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID(), time.Time{}}) select { case <-cs.statsMsgQueue: @@ -3049,41 +3116,132 @@ func TestStateOutputVoteStats(t *testing.T) { } func TestSignSameVoteTwice(t *testing.T) { - _, vss := randState(2) + cs, vss := randState(2) + chainID := cs.state.ChainID randBytes := cmtrand.Bytes(tmhash.Size) vote := signVote(vss[1], - cmtproto.PrecommitType, - randBytes, - types.PartSetHeader{Total: 10, Hash: randBytes}, + types.PrecommitType, + chainID, + types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randBytes}, + }, true, ) vote2 := signVote(vss[1], - cmtproto.PrecommitType, - randBytes, - types.PartSetHeader{Total: 10, Hash: randBytes}, + types.PrecommitType, + chainID, + types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randBytes}, + }, true, ) require.Equal(t, vote, vote2) } +// TestStateTimestamp_ProposalNotMatch tests that a validator does not prevote a +// proposed block if the timestamp in the block does not match the timestamp in the +// corresponding proposal message. 
+func TestStateTimestamp_ProposalNotMatch(t *testing.T) { + cs1, vss := randState(4) + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(cs1, addr) + + propBlock, propBlockParts, blockID := createProposalBlock(t, cs1) + + round++ + incrementRound(vss[1:]...) + + // Create a proposal with a timestamp that does not match the timestamp of the block. + proposal := types.NewProposal(vs2.Height, round, -1, blockID, propBlock.Header.Time.Add(time.Millisecond)) + signProposal(t, proposal, chainID, vs2) + require.NoError(t, cs1.SetProposalAndBlock(proposal, propBlockParts, "some peer")) + + startTestRound(cs1, height, round) + ensureProposal(proposalCh, height, round, blockID) + + // ensure that the validator prevotes nil. + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) + + // This is not the main concern of this test. Since 2/3+ of the + // validators have seen, validated, and prevoted for the proposal, it + // is a valid proposal, so we should lock and precommit for it. + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) +} + +// TestStateTimestamp_ProposalMatch tests that a validator prevotes a +// proposed block if the timestamp in the block matches the timestamp in the +// corresponding proposal message. +func TestStateTimestamp_ProposalMatch(t *testing.T) { + cs1, vss := randState(4) + height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(cs1, addr) + + propBlock, propBlockParts, blockID := createProposalBlock(t, cs1) + + round++ + incrementRound(vss[1:]...) + + // Create a proposal with a timestamp that matches the timestamp of the block. + proposal := types.NewProposal(vs2.Height, round, -1, blockID, propBlock.Header.Time) + signProposal(t, proposal, chainID, vs2) + require.NoError(t, cs1.SetProposalAndBlock(proposal, propBlockParts, "some peer")) + + startTestRound(cs1, height, round) + ensureProposal(proposalCh, height, round, blockID) + + signAddVotes(cs1, types.PrevoteType, chainID, blockID, false, vs2, vs3, vs4) + + // ensure that the validator prevotes the block. + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) + + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, 1, vss[0], propBlock.Hash(), propBlock.Hash()) +} + // subscribe subscribes test client to the given query and returns a channel with cap = 1. func subscribe(eventBus *types.EventBus, q cmtpubsub.Query) <-chan cmtpubsub.Message { sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + panic(fmt.Sprintf("failed to subscribe %s to %v; err %v", testSubscriber, q, err)) } return sub.Out() } +// unsubscribe unsubscribes the test client from the given query.
+func unsubscribe(eventBus *types.EventBus, q cmtpubsub.Query) { //nolint: unused + err := eventBus.Unsubscribe(context.Background(), testSubscriber, q) + if err != nil { + panic(fmt.Sprintf("failed to unsubscribe %s from %v; err %v", testSubscriber, q, err)) + } +} + // subscribe subscribes test client to the given query and returns a channel with cap = 0. func subscribeUnBuffered(eventBus *types.EventBus, q cmtpubsub.Query) <-chan cmtpubsub.Message { sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + panic(fmt.Sprintf("failed to subscribe %s to %v; err %v", testSubscriber, q, err)) } return sub.Out() } @@ -3091,17 +3249,19 @@ func subscribeUnBuffered(eventBus *types.EventBus, q cmtpubsub.Query) <-chan cmt func signAddPrecommitWithExtension( t *testing.T, cs *State, - hash []byte, - header types.PartSetHeader, + chainID string, + blockID types.BlockID, extension []byte, stub *validatorStub, ) { - v, err := stub.signVote(cmtproto.PrecommitType, hash, header, extension, true) + t.Helper() + v, err := stub.signVote(types.PrecommitType, chainID, blockID, extension, true, stub.clock.Now()) require.NoError(t, err, "failed to sign vote") addVotes(cs, v) } func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSize uint32, oversized bool) (*types.Block, *types.PartSet) { + t.Helper() var offset int64 if !oversized { offset = -2 @@ -3130,3 +3290,32 @@ func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSiz require.Fail(t, "We shouldn't hit the end of the loop") return nil, nil } + +// TestReadSerializedBlockFromBlockParts tests that the readSerializedBlockFromBlockParts function +// reads the block correctly from the block parts. +func TestReadSerializedBlockFromBlockParts(t *testing.T) { + sizes := []int{0, 5, 64, 70, 128, 200} + + // iterate through many initial buffer sizes and new block sizes. + // (Skip new block size = 0, as that is not valid construction) + // Ensure that we read back the correct block size, and the buffer is resized correctly. + for i := 0; i < len(sizes); i++ { + for j := 1; j < len(sizes); j++ { + initialSize, newBlockSize := sizes[i], sizes[j] + testName := fmt.Sprintf("initialSize=%d,newBlockSize=%d", initialSize, newBlockSize) + t.Run(testName, func(t *testing.T) { + blockData := cmtrand.Bytes(newBlockSize) + ps := types.NewPartSetFromData(blockData, 64) + cs := &State{ + serializedBlockBuffer: make([]byte, initialSize), + } + cs.ProposalBlockParts = ps + + serializedBlock, err := cs.readSerializedBlockFromBlockParts() + require.NoError(t, err) + require.Equal(t, blockData, serializedBlock) + require.Equal(t, len(cs.serializedBlockBuffer), max(initialSize, newBlockSize)) + }) + } + } +} diff --git a/consensus/ticker.go b/internal/consensus/ticker.go similarity index 65% rename from consensus/ticker.go rename to internal/consensus/ticker.go index ae5fab794ab..1abe32d4079 100644 --- a/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -7,9 +7,7 @@ import ( "github.com/cometbft/cometbft/libs/service" ) -var ( - tickTockBufferSize = 10 -) +const tickTockBufferSize = 10 // TimeoutTicker is a timer that schedules timeouts // conditional on the height/round/step in the timeoutInfo.
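Before the interface definition that follows, a usage sketch modeled on the `TestTimeoutTicker` added later in this diff; it assumes that file's imports (`time`, plus the internal consensus `types` package for the step constant):

```go
func sketchScheduleTimeout() {
	ticker := NewTimeoutTicker()
	if err := ticker.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = ticker.Stop() }()

	// Newer ticks supersede pending ones; ticks for an older
	// height/round/step are skipped rather than fired.
	ticker.ScheduleTimeout(timeoutInfo{
		Duration: 10 * time.Millisecond,
		Height:   5,
		Round:    0,
		Step:     types.RoundStepNewRound,
	})

	ti := <-ticker.Chan() // delivered once the duration elapses
	_ = ti
}
```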
@@ -20,7 +18,7 @@ type TimeoutTicker interface { Chan() <-chan timeoutInfo // on which to receive a timeout ScheduleTimeout(ti timeoutInfo) // reset the timer - SetLogger(log.Logger) + SetLogger(l log.Logger) } // timeoutTicker wraps time.Timer, @@ -31,17 +29,21 @@ type TimeoutTicker interface { type timeoutTicker struct { service.BaseService - timer *time.Timer - tickChan chan timeoutInfo // for scheduling timeouts - tockChan chan timeoutInfo // for notifying about them + timerActive bool + timer *time.Timer + tickChan chan timeoutInfo // for scheduling timeouts + tockChan chan timeoutInfo // for notifying about them } // NewTimeoutTicker returns a new TimeoutTicker. func NewTimeoutTicker() TimeoutTicker { tt := &timeoutTicker{ - timer: time.NewTimer(0), - tickChan: make(chan timeoutInfo, tickTockBufferSize), - tockChan: make(chan timeoutInfo, tickTockBufferSize), + timer: time.NewTimer(0), + // Indicates whether the timer is currently active. No mutex is needed + // because the timer is only accessed by a single goroutine. + timerActive: true, + tickChan: make(chan timeoutInfo, tickTockBufferSize), + tockChan: make(chan timeoutInfo, tickTockBufferSize), } tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt) tt.stopTimer() // don't want to fire until the first scheduled timeout @@ -50,7 +52,6 @@ func NewTimeoutTicker() TimeoutTicker { // OnStart implements service.Service. It starts the timeout routine. func (t *timeoutTicker) OnStart() error { - go t.timeoutRoutine() return nil @@ -59,7 +60,6 @@ func (t *timeoutTicker) OnStart() error { // OnStop implements service.Service. It stops the timeout routine. func (t *timeoutTicker) OnStop() { t.BaseService.OnStop() - t.stopTimer() } // Chan returns a channel on which timeouts are sent. @@ -74,23 +74,22 @@ func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) { t.tickChan <- ti } -//------------------------------------------------------------- +// ------------------------------------------------------------- -// stop the timer and drain if necessary +// if the timer is active, stop it and mark it inactive. func (t *timeoutTicker) stopTimer() { - // Stop() returns false if it was already fired or was stopped - if !t.timer.Stop() { - select { - case <-t.timer.C: - default: - t.Logger.Debug("Timer already stopped") - } + if !t.timerActive { + return } + _ = t.timer.Stop() + t.timerActive = false } // send on tickChan to start a new timer. -// timers are interupted and replaced by new ticks from later steps -// timeouts of 0 on the tickChan will be immediately relayed to the tockChan +// timers are interrupted and replaced by new ticks from later steps +// timeouts of 0 on the tickChan will be immediately relayed to the tockChan. +// NOTE: timerActive is not concurrency safe, but it's only accessed in NewTimeoutTicker and timeoutRoutine, +// so access is effectively single-threaded.
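For contrast with the `timerActive` flag introduced above, this is the drain-based idiom the old `stopTimer` used, and that bare `time.Timer` reuse normally requires; a sketch over a generic timer:

```go
// resetDraining stops a timer and drains its channel before Reset.
// Stop reports false if the timer already fired or was stopped, in which
// case a stale tick may sit in the channel and would otherwise be
// observed as a spurious timeout after the Reset.
func resetDraining(timer *time.Timer, d time.Duration) {
	if !timer.Stop() {
		select {
		case <-timer.C:
		default: // channel already drained
		}
	}
	timer.Reset(d)
}
```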
func (t *timeoutTicker) timeoutRoutine() { t.Logger.Debug("Starting timeout routine") var ti timeoutInfo @@ -99,28 +98,21 @@ func (t *timeoutTicker) timeoutRoutine() { case newti := <-t.tickChan: t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti) - // ignore tickers for old height/round/step - if newti.Height < ti.Height { + if shouldSkipTick(newti, ti) { continue - } else if newti.Height == ti.Height { - if newti.Round < ti.Round { - continue - } else if newti.Round == ti.Round { - if ti.Step > 0 && newti.Step <= ti.Step { - continue - } - } } - // stop the last timer + // stop the last timer if it exists t.stopTimer() - // update timeoutInfo and reset timer - // NOTE time.Timer allows duration to be non-positive + // update timeoutInfo, reset timer, and mark timer as active ti = newti t.timer.Reset(ti.Duration) + t.timerActive = true + t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) case <-t.timer.C: + t.timerActive = false t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // go routine here guarantees timeoutRoutine doesn't block. // Determinism comes from playback in the receiveRoutine. @@ -128,7 +120,19 @@ func (t *timeoutTicker) timeoutRoutine() { // and managing the timeouts ourselves with a millisecond ticker go func(toi timeoutInfo) { t.tockChan <- toi }(ti) case <-t.Quit(): + t.stopTimer() return } } } + +// shouldSkipTick returns true if the new timeoutInfo should be skipped. +func shouldSkipTick(newti, ti timeoutInfo) bool { + if newti.Height < ti.Height { + return true + } + if newti.Height == ti.Height && ((newti.Round < ti.Round) || (newti.Round == ti.Round && ti.Step > 0 && newti.Step <= ti.Step)) { + return true + } + return false +} diff --git a/internal/consensus/ticker_test.go b/internal/consensus/ticker_test.go new file mode 100644 index 00000000000..a1f0d27891b --- /dev/null +++ b/internal/consensus/ticker_test.go @@ -0,0 +1,40 @@ +package consensus + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/internal/consensus/types" +) + +func TestTimeoutTicker(t *testing.T) { + ticker := NewTimeoutTicker() + err := ticker.Start() + require.NoError(t, err) + defer func() { + err := ticker.Stop() + require.NoError(t, err) + }() + + c := ticker.Chan() + for i := 1; i <= 10; i++ { + height := int64(i) + + startTime := time.Now() + // Schedule a timeout for 5ms from now + negTimeout := timeoutInfo{Duration: -1 * time.Millisecond, Height: height, Round: 0, Step: types.RoundStepNewHeight} + timeout := timeoutInfo{Duration: 5 * time.Millisecond, Height: height, Round: 0, Step: types.RoundStepNewRound} + ticker.ScheduleTimeout(negTimeout) + ticker.ScheduleTimeout(timeout) + + // Wait for the timeout to be received + to := <-c + endTime := time.Now() + elapsedTime := endTime.Sub(startTime) + if timeout == to { + require.True(t, elapsedTime >= timeout.Duration, "We got the 5ms timeout. However the timeout happened too quickly. Should be >= 5ms. 
Got %dms (start time %d end time %d)", elapsedTime.Milliseconds(), startTime.UnixMilli(), endTime.UnixMilli()) + } + } +} diff --git a/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go similarity index 83% rename from consensus/types/height_vote_set.go rename to internal/consensus/types/height_vote_set.go index 2c45a29aa63..df1beddef9e 100644 --- a/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -8,8 +8,7 @@ import ( cmtjson "github.com/cometbft/cometbft/libs/json" cmtmath "github.com/cometbft/cometbft/libs/math" - "github.com/cometbft/cometbft/p2p" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/types" ) @@ -18,10 +17,8 @@ type RoundVoteSet struct { Precommits *types.VoteSet } -var ( - ErrGotVoteFromUnwantedRound = errors.New( - "peer has sent a vote that does not match our round for more than one round", - ) +var ErrGotVoteFromUnwantedRound = errors.New( + "peer has sent a vote that does not match our round for more than one round", ) /* @@ -47,7 +44,7 @@ type HeightVoteSet struct { mtx sync.Mutex round int32 // max tracked round roundVoteSets map[int32]RoundVoteSet // keys: [0...round] - peerCatchupRounds map[p2p.ID][]int32 // keys: peer.ID; values: at most 2 rounds + peerCatchupRounds map[nodekey.ID][]int32 // keys: peer.ID; values: at most 2 rounds } func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { @@ -75,7 +72,7 @@ func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { hvs.height = height hvs.valSet = valSet hvs.roundVoteSets = make(map[int32]RoundVoteSet) - hvs.peerCatchupRounds = make(map[p2p.ID][]int32) + hvs.peerCatchupRounds = make(map[nodekey.ID][]int32) hvs.addRound(0) hvs.round = 0 @@ -115,12 +112,12 @@ func (hvs *HeightVoteSet) addRound(round int32) { panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, cmtproto.PrevoteType, hvs.valSet) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) var precommits *types.VoteSet if hvs.extensionsEnabled { - precommits = types.NewExtendedVoteSet(hvs.chainID, hvs.height, round, cmtproto.PrecommitType, hvs.valSet) + precommits = types.NewExtendedVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) } else { - precommits = types.NewVoteSet(hvs.chainID, hvs.height, round, cmtproto.PrecommitType, hvs.valSet) + precommits = types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) } hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, @@ -130,41 +127,41 @@ func (hvs *HeightVoteSet) addRound(round int32) { // Duplicate votes return added=false, err=nil. // By convention, peerID is "" if origin is self. 
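The `peerCatchupRounds` map bounds how many not-yet-entered rounds a single peer can force the node to track, which the `AddVote` method below enforces. A sketch restating the `TestPeerCatchupRounds` case that appears later in this diff, reusing that test file's helpers and imports:

```go
func TestPeerCatchupRoundsSketch(t *testing.T) {
	valSet, privVals := types.RandValidatorSet(10, 1)
	hvs := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet)

	// Two unknown rounds are created on demand for the peer...
	_, err := hvs.AddVote(makeVoteHR(999, privVals), "peer1", true)
	require.NoError(t, err)
	_, err = hvs.AddVote(makeVoteHR(1000, privVals), "peer1", true)
	require.NoError(t, err)

	// ...but a third unknown round from the same peer is rejected.
	_, err = hvs.AddVote(makeVoteHR(1001, privVals), "peer1", true)
	require.ErrorIs(t, err, ErrGotVoteFromUnwantedRound)
}
```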
-func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID, extEnabled bool) (added bool, err error) { +func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID nodekey.ID, extEnabled bool) (added bool, err error) { hvs.mtx.Lock() defer hvs.mtx.Unlock() if hvs.extensionsEnabled != extEnabled { panic(fmt.Errorf("extensions enabled general param does not match the one in HeightVoteSet %t!=%t", hvs.extensionsEnabled, extEnabled)) } if !types.IsVoteTypeValid(vote.Type) { - return + return false, fmt.Errorf("invalid vote type %v", vote.Type) } voteSet := hvs.getVoteSet(vote.Round, vote.Type) if voteSet == nil { - if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 { - hvs.addRound(vote.Round) - voteSet = hvs.getVoteSet(vote.Round, vote.Type) - hvs.peerCatchupRounds[peerID] = append(rndz, vote.Round) - } else { + rndz := hvs.peerCatchupRounds[peerID] + if len(rndz) >= 2 { // punish peer err = ErrGotVoteFromUnwantedRound - return + return false, err } + hvs.addRound(vote.Round) + voteSet = hvs.getVoteSet(vote.Round, vote.Type) + hvs.peerCatchupRounds[peerID] = append(rndz, vote.Round) } added, err = voteSet.AddVote(vote) - return + return added, err } func (hvs *HeightVoteSet) Prevotes(round int32) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, cmtproto.PrevoteType) + return hvs.getVoteSet(round, types.PrevoteType) } func (hvs *HeightVoteSet) Precommits(round int32) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, cmtproto.PrecommitType) + return hvs.getVoteSet(round, types.PrecommitType) } // Last round and blockID that has +2/3 prevotes for a particular block or nil. @@ -173,7 +170,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) { hvs.mtx.Lock() defer hvs.mtx.Unlock() for r := hvs.round; r >= 0; r-- { - rvs := hvs.getVoteSet(r, cmtproto.PrevoteType) + rvs := hvs.getVoteSet(r, types.PrevoteType) polBlockID, ok := rvs.TwoThirdsMajority() if ok { return r, polBlockID @@ -182,15 +179,15 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) { return -1, types.BlockID{} } -func (hvs *HeightVoteSet) getVoteSet(round int32, voteType cmtproto.SignedMsgType) *types.VoteSet { +func (hvs *HeightVoteSet) getVoteSet(round int32, voteType types.SignedMsgType) *types.VoteSet { rvs, ok := hvs.roundVoteSets[round] if !ok { return nil } switch voteType { - case cmtproto.PrevoteType: + case types.PrevoteType: return rvs.Prevotes - case cmtproto.PrecommitType: + case types.PrecommitType: return rvs.Precommits default: panic(fmt.Sprintf("Unexpected vote type %X", voteType)) @@ -200,12 +197,13 @@ func (hvs *HeightVoteSet) getVoteSet(round int32, voteType cmtproto.SignedMsgTyp // If a peer claims that it has 2/3 majority for given blockKey, call this. // NOTE: if there are too many peers, or too much peer churn, // this can cause memory issues. -// TODO: implement ability to remove peers too +// TODO: implement ability to remove peers too.
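`POLInfo` above scans from the highest tracked round downward and returns `-1` when no proof-of-lock exists yet; a short sketch of how a caller interprets that sentinel, as an aside before `SetPeerMaj23`:

```go
// latestPOL illustrates consuming POLInfo's sentinel result.
func latestPOL(hvs *HeightVoteSet) (int32, types.BlockID, bool) {
	polRound, polBlockID := hvs.POLInfo()
	if polRound == -1 {
		// no round has +2/3 prevotes for a single block yet
		return -1, types.BlockID{}, false
	}
	// polBlockID gathered +2/3 prevotes at polRound: a proof-of-lock.
	return polRound, polBlockID, true
}
```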
func (hvs *HeightVoteSet) SetPeerMaj23( round int32, - voteType cmtproto.SignedMsgType, - peerID p2p.ID, - blockID types.BlockID) error { + voteType types.SignedMsgType, + peerID nodekey.ID, + blockID types.BlockID, +) error { hvs.mtx.Lock() defer hvs.mtx.Unlock() if !types.IsVoteTypeValid(voteType) { @@ -218,7 +216,7 @@ func (hvs *HeightVoteSet) SetPeerMaj23( return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID) } -//--------------------------------------------------------- +// --------------------------------------------------------- // string and json func (hvs *HeightVoteSet) String() string { diff --git a/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go similarity index 84% rename from consensus/types/height_vote_set_test.go rename to internal/consensus/types/height_vote_set_test.go index ec0f6123a79..f81f39d4e16 100644 --- a/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -8,9 +8,8 @@ import ( cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/tmhash" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -29,19 +28,19 @@ func TestPeerCatchupRounds(t *testing.T) { hvs := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet) - vote999_0 := makeVoteHR(1, 0, 999, privVals) + vote999_0 := makeVoteHR(999, privVals) added, err := hvs.AddVote(vote999_0, "peer1", true) if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(1, 0, 1000, privVals) + vote1000_0 := makeVoteHR(1000, privVals) added, err = hvs.AddVote(vote1000_0, "peer1", true) if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(1, 0, 1001, privVals) + vote1001_0 := makeVoteHR(1001, privVals) added, err = hvs.AddVote(vote1001_0, "peer1", true) if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -60,35 +59,35 @@ func TestInconsistentExtensionData(t *testing.T) { valSet, privVals := types.RandValidatorSet(10, 1) hvsE := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet) - voteNoExt := makeVoteHR(1, 0, 20, privVals) + voteNoExt := makeVoteHR(20, privVals) voteNoExt.Extension, voteNoExt.ExtensionSignature = nil, nil require.Panics(t, func() { _, _ = hvsE.AddVote(voteNoExt, "peer1", false) }) hvsNoE := NewHeightVoteSet(test.DefaultTestChainID, 1, valSet) - voteExt := makeVoteHR(1, 0, 20, privVals) + voteExt := makeVoteHR(20, privVals) require.Panics(t, func() { _, _ = hvsNoE.AddVote(voteExt, "peer1", true) }) } func makeVoteHR( - height int64, - valIndex, round int32, privVals []types.PrivValidator, ) *types.Vote { + height := int64(1) + valIndex := 0 privVal := privVals[valIndex] randBytes := cmtrand.Bytes(tmhash.Size) vote, err := types.MakeVote( privVal, test.DefaultTestChainID, - valIndex, + 0, height, round, - cmtproto.PrecommitType, + types.PrecommitType, types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, cmttime.Now(), ) diff --git a/consensus/types/peer_round_state.go b/internal/consensus/types/peer_round_state.go similarity index 92% rename from consensus/types/peer_round_state.go rename to 
internal/consensus/types/peer_round_state.go index 00e2e4d30fd..89a8a726af7 100644 --- a/consensus/types/peer_round_state.go +++ b/internal/consensus/types/peer_round_state.go @@ -4,11 +4,11 @@ import ( "fmt" "time" - "github.com/cometbft/cometbft/libs/bits" + "github.com/cometbft/cometbft/internal/bits" "github.com/cometbft/cometbft/types" ) -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // PeerRoundState contains the known state of a peer. // NOTE: Read-only when returned by PeerState.GetRoundState(). @@ -43,12 +43,12 @@ type PeerRoundState struct { CatchupCommit *bits.BitArray `json:"catchup_commit"` } -// String returns a string representation of the PeerRoundState +// String returns a string representation of the PeerRoundState. func (prs PeerRoundState) String() string { return prs.StringIndented("") } -// StringIndented returns a string representation of the PeerRoundState +// StringIndented returns a string representation of the PeerRoundState. func (prs PeerRoundState) StringIndented(indent string) string { return fmt.Sprintf(`PeerRoundState{ %s %v/%v/%v @%v diff --git a/consensus/types/round_state.go b/internal/consensus/types/round_state.go similarity index 85% rename from consensus/types/round_state.go rename to internal/consensus/types/round_state.go index 6749d4265a0..9c11cea2b03 100644 --- a/consensus/types/round_state.go +++ b/internal/consensus/types/round_state.go @@ -9,13 +9,13 @@ import ( "github.com/cometbft/cometbft/types" ) -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // RoundStepType enum type -// RoundStepType enumerates the state of the consensus state machine +// RoundStepType enumerates the state of the consensus state machine. type RoundStepType uint8 // These must be numeric, ordered. -// RoundStepType +// RoundStepType. const ( RoundStepNewHeight = RoundStepType(0x01) // Wait til CommitTime + timeoutCommit RoundStepNewRound = RoundStepType(0x02) // Setup new round and go to RoundStepPropose @@ -35,7 +35,7 @@ func (rs RoundStepType) IsValid() bool { return uint8(rs) >= 0x01 && uint8(rs) <= 0x08 } -// String returns a string +// String returns a string. func (rs RoundStepType) String() string { switch rs { case RoundStepNewHeight: @@ -59,11 +59,11 @@ func (rs RoundStepType) String() string { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // RoundState defines the internal consensus state. // NOTE: Not thread safe. Should only be manipulated by functions downstream -// of the cs.receiveRoutine +// of the cs.receiveRoutine. 
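Before moving on to the `RoundState` struct below, the `0x01..0x08` range that `IsValid` hard-codes above is easy to pin down in a test sketch; only the two step constants visible in this hunk are used, plus testify's `require`:

```go
func TestRoundStepTypeBoundsSketch(t *testing.T) {
	require.True(t, RoundStepNewHeight.IsValid())   // 0x01, lower bound
	require.True(t, RoundStepNewRound.IsValid())    // 0x02
	require.False(t, RoundStepType(0x00).IsValid()) // below the range
	require.False(t, RoundStepType(0x09).IsValid()) // above the range
}
```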
type RoundState struct { Height int64 `json:"height"` // Height we are working on Round int32 `json:"round"` @@ -71,14 +71,15 @@ type RoundState struct { StartTime time.Time `json:"start_time"` // Subjective time when +2/3 precommits for Block at Round were found - CommitTime time.Time `json:"commit_time"` - Validators *types.ValidatorSet `json:"validators"` - Proposal *types.Proposal `json:"proposal"` - ProposalBlock *types.Block `json:"proposal_block"` - ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` - LockedRound int32 `json:"locked_round"` - LockedBlock *types.Block `json:"locked_block"` - LockedBlockParts *types.PartSet `json:"locked_block_parts"` + CommitTime time.Time `json:"commit_time"` + Validators *types.ValidatorSet `json:"validators"` + Proposal *types.Proposal `json:"proposal"` + ProposalReceiveTime time.Time `json:"proposal_receive_time"` + ProposalBlock *types.Block `json:"proposal_block"` + ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` + LockedRound int32 `json:"locked_round"` + LockedBlock *types.Block `json:"locked_block"` + LockedBlockParts *types.PartSet `json:"locked_block_parts"` // The variables below starting with "Valid..." derive their name from // the algorithm presented in this paper: @@ -102,7 +103,7 @@ type RoundState struct { TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` } -// Compressed version of the RoundState for use in RPC +// Compressed version of the RoundState for use in RPC. type RoundStateSimple struct { HeightRoundStep string `json:"height/round/step"` StartTime time.Time `json:"start_time"` @@ -113,7 +114,7 @@ type RoundStateSimple struct { Proposer types.ValidatorInfo `json:"proposer"` } -// Compress the RoundState to RoundStateSimple +// Compress the RoundState to RoundStateSimple. func (rs *RoundState) RoundStateSimple() RoundStateSimple { votesJSON, err := rs.Votes.MarshalJSON() if err != nil { @@ -121,7 +122,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { } addr := rs.Validators.GetProposer().Address - idx, _ := rs.Validators.GetByAddress(addr) + idx, _ := rs.Validators.GetByAddressMut(addr) return RoundStateSimple{ HeightRoundStep: fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step), @@ -140,7 +141,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { // NewRoundEvent returns the RoundState with proposer information as an event. func (rs *RoundState) NewRoundEvent() types.EventDataNewRound { addr := rs.Validators.GetProposer().Address - idx, _ := rs.Validators.GetByAddress(addr) + idx, _ := rs.Validators.GetByAddressMut(addr) return types.EventDataNewRound{ Height: rs.Height, @@ -179,12 +180,12 @@ func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { } } -// String returns a string +// String returns a string. func (rs *RoundState) String() string { return rs.StringIndented("") } -// StringIndented returns a string +// StringIndented returns a string. func (rs *RoundState) StringIndented(indent string) string { return fmt.Sprintf(`RoundState{ %s H:%v R:%v S:%v @@ -217,7 +218,7 @@ func (rs *RoundState) StringIndented(indent string) string { indent) } -// StringShort returns a string +// StringShort returns a string. 
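The new `ProposalReceiveTime` field records when the proposal arrived, which is exactly the input a proposer-based timestamp (PBTS) timeliness check needs. A hypothetical illustration; the bounds, parameter names, and helper are assumptions rather than the algorithm as implemented here:

```go
// isTimely sketches a receive-time check: the proposal must arrive no
// earlier than its timestamp minus the clock precision, and no later
// than its timestamp plus the maximum message delay plus the precision.
func isTimely(proposalTime, receiveTime time.Time, precision, msgDelay time.Duration) bool {
	lowerBound := proposalTime.Add(-precision)
	upperBound := proposalTime.Add(msgDelay).Add(precision)
	return !receiveTime.Before(lowerBound) && !receiveTime.After(upperBound)
}
```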
func (rs *RoundState) StringShort() string { return fmt.Sprintf(`RoundState{H:%v R:%v S:%v ST:%v}`, rs.Height, rs.Round, rs.Step, rs.StartTime) diff --git a/consensus/wal.go b/internal/consensus/wal.go similarity index 94% rename from consensus/wal.go rename to internal/consensus/wal.go index 7eff8811e91..508828d570e 100644 --- a/consensus/wal.go +++ b/internal/consensus/wal.go @@ -11,25 +11,25 @@ import ( "github.com/cosmos/gogoproto/proto" - auto "github.com/cometbft/cometbft/libs/autofile" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" + auto "github.com/cometbft/cometbft/internal/autofile" + cmtos "github.com/cometbft/cometbft/internal/os" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/libs/service" - cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" cmterrors "github.com/cometbft/cometbft/types/errors" cmttime "github.com/cometbft/cometbft/types/time" ) const ( - // time.Time + max consensus msg size + // time.Time + max consensus msg size. maxMsgSizeBytes = maxMsgSize + 24 - // how often the WAL should be sync'd during period sync'ing + // how often the WAL should be sync'd during period sync'ing. walDefaultFlushInterval = 2 * time.Second ) -//-------------------------------------------------------- +// -------------------------------------------------------- // types and functions for savings consensus messages // TimedWALMessage wraps WALMessage and adds Time for debugging purposes. @@ -44,7 +44,7 @@ type EndHeightMessage struct { Height int64 `json:"height"` } -type WALMessage interface{} +type WALMessage any func init() { cmtjson.RegisterType(msgInfo{}, "tendermint/wal/MsgInfo") @@ -52,13 +52,13 @@ func init() { cmtjson.RegisterType(EndHeightMessage{}, "tendermint/wal/EndHeightMessage") } -//-------------------------------------------------------- +// -------------------------------------------------------- // Simple write-ahead logger // WAL is an interface for any write-ahead logger. type WAL interface { - Write(WALMessage) error - WriteSync(WALMessage) error + Write(msg WALMessage) error + WriteSync(msg WALMessage) error FlushAndSync() error SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) @@ -154,7 +154,7 @@ func (wal *BaseWAL) processFlushTicks() { } // FlushAndSync flushes and fsync's the underlying group's data to disk. -// See auto#FlushAndSync +// See auto#FlushAndSync. func (wal *BaseWAL) FlushAndSync() error { return wal.group.FlushAndSync() } @@ -181,7 +181,7 @@ func (wal *BaseWAL) Wait() { // Write is called in newStep and for each receive on the // peerMsgQueue and the timeoutTicker. -// NOTE: does not call fsync() +// NOTE: does not call fsync(). func (wal *BaseWAL) Write(msg WALMessage) error { if wal == nil { return nil @@ -198,7 +198,7 @@ func (wal *BaseWAL) Write(msg WALMessage) error { // WriteSync is called when we receive a msg from ourselves // so that we write to disk before sending signed messages. -// NOTE: calls fsync() +// NOTE: calls fsync(). 
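The record layout documented above for the WAL encoder (4 bytes CRC, 4 bytes length, then the value) can be restated standalone. A sketch assuming big-endian integers and a CRC-32C (Castagnoli) table; treat both choices as assumptions rather than a verbatim restatement of `WALEncoder`:

```go
package sketch

import (
	"encoding/binary"
	"hash/crc32"
)

var crc32c = crc32.MakeTable(crc32.Castagnoli)

// encodeFrame lays out one WAL record as crc(data) | len(data) | data.
func encodeFrame(data []byte) []byte {
	buf := make([]byte, 8+len(data))
	binary.BigEndian.PutUint32(buf[0:4], crc32.Checksum(data, crc32c))
	binary.BigEndian.PutUint32(buf[4:8], uint32(len(data)))
	copy(buf[8:], data)
	return buf
}
```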
func (wal *BaseWAL) WriteSync(msg WALMessage) error { if wal == nil { return nil @@ -252,7 +252,7 @@ func (wal *BaseWAL) SearchForEndHeight( dec := NewWALDecoder(gr) for { msg, err = dec.Decode() - if err == io.EOF { + if errors.Is(err, io.EOF) { // OPTIMISATION: no need to look for height in older files if we've seen h < height if lastHeightFound > 0 && lastHeightFound < height { gr.Close() @@ -286,7 +286,7 @@ func (wal *BaseWAL) SearchForEndHeight( // A WALEncoder writes custom-encoded WAL messages to an output stream. // -// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value +// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value. type WALEncoder struct { wr io.Writer } @@ -336,7 +336,7 @@ func IsDataCorruptionError(err error) bool { return ok } -// DataCorruptionError is an error that occures if data on disk was corrupted. +// DataCorruptionError is an error that occurs if data on disk was corrupted. type DataCorruptionError struct { cause error } diff --git a/consensus/wal_fuzz.go b/internal/consensus/wal_fuzz.go similarity index 100% rename from consensus/wal_fuzz.go rename to internal/consensus/wal_fuzz.go diff --git a/consensus/wal_generator.go b/internal/consensus/wal_generator.go similarity index 78% rename from consensus/wal_generator.go rename to internal/consensus/wal_generator.go index 14c672e8b46..291fb44b19c 100644 --- a/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -9,13 +9,13 @@ import ( "testing" "time" - db "github.com/cometbft/cometbft-db" + "github.com/stretchr/testify/require" + db "github.com/cometbft/cometbft-db" "github.com/cometbft/cometbft/abci/example/kvstore" cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/internal/test" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/privval" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" @@ -29,6 +29,8 @@ import ( // (byteBufferWAL) and waits until numBlocks are created. // If the node fails to produce given numBlocks, it returns an error. func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int, config *cfg.Config) (err error) { + t.Helper() + app := kvstore.NewPersistentApplication(filepath.Join(config.DBDir(), "wal_generator")) logger := log.TestingLogger().With("wal_generator", "wal_generator") @@ -39,32 +41,27 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int, config *cfg.C // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. 
privValidatorKeyFile := config.PrivValidatorKeyFile() privValidatorStateFile := config.PrivValidatorStateFile() - privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile, nil) + require.NoError(t, err) genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) - if err != nil { - return fmt.Errorf("failed to read genesis file: %w", err) - } + require.NoError(t, err) blockStoreDB := db.NewMemDB() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) state, err := sm.MakeGenesisState(genDoc) - if err != nil { - return fmt.Errorf("failed to make genesis state: %w", err) - } + require.NoError(t, err, "failed to make genesis state") state.Version.Consensus.App = kvstore.AppVersion - if err = stateStore.Save(state); err != nil { - t.Error(err) - } + err = stateStore.Save(state) + require.NoError(t, err) blockStore := store.NewBlockStore(blockStoreDB) proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), proxy.NopMetrics()) proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return fmt.Errorf("failed to start proxy app connections: %w", err) - } + err = proxyApp.Start() + require.NoError(t, err, "failed to start proxy app connections") t.Cleanup(func() { if err := proxyApp.Stop(); err != nil { t.Error(err) @@ -73,9 +70,8 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int, config *cfg.C eventBus := types.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { - return fmt.Errorf("failed to start event bus: %w", err) - } + err = eventBus.Start() + require.NoError(t, err, "failed to start event bus") t.Cleanup(func() { if err := eventBus.Stop(); err != nil { t.Error(err) @@ -96,32 +92,30 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int, config *cfg.C numBlocksWritten := make(chan struct{}) wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) // see wal.go#103 - if err := wal.Write(EndHeightMessage{0}); err != nil { - t.Error(err) - } + err = wal.Write(EndHeightMessage{0}) + require.NoError(t, err) consensusState.wal = wal - if err := consensusState.Start(); err != nil { - return fmt.Errorf("failed to start consensus state: %w", err) - } + err = consensusState.Start() + require.NoError(t, err, "failed to start consensus state") select { case <-numBlocksWritten: - if err := consensusState.Stop(); err != nil { - t.Error(err) - } + err := consensusState.Stop() + require.NoError(t, err) return nil case <-time.After(1 * time.Minute): - if err := consensusState.Stop(); err != nil { - t.Error(err) - } + err := consensusState.Stop() + require.NoError(t, err) return fmt.Errorf("waited too long for CometBFT to produce %d blocks (grep logs for `wal_generator`)", numBlocks) } } // WALWithNBlocks returns a WAL content with numBlocks. 
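A plausible test-side pairing of the generator that follows with the WAL decoder; this is a sketch, not code from this diff, and it assumes `NewWALDecoder` accepts any `io.Reader` (the tests here construct it from both a group reader and a `bytes.Buffer`):

```go
func TestReplayGeneratedWALSketch(t *testing.T) {
	data, err := WALWithNBlocks(t, 6, getConfig(t))
	require.NoError(t, err)

	// Replay the recorded messages through the decoder; the generator
	// writes an EndHeightMessage{0} marker first (see wal.go#103).
	dec := NewWALDecoder(bytes.NewReader(data))
	msg, err := dec.Decode()
	require.NoError(t, err)
	_ = msg.Msg // first recorded WALMessage
}
```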
func WALWithNBlocks(t *testing.T, numBlocks int, config *cfg.Config) (data []byte, err error) { + t.Helper() + var b bytes.Buffer wr := bufio.NewWriter(&b) @@ -139,23 +133,12 @@ func randPort() int { return base + cmtrand.Intn(spread) } -func makeAddrs() (string, string) { +func makeAddrs() (address, port string) { start := randPort() return fmt.Sprintf("tcp://127.0.0.1:%d", start), fmt.Sprintf("tcp://127.0.0.1:%d", start+1) } -// getConfig returns a config for test cases -func getConfig(t *testing.T) *cfg.Config { - c := test.ResetTestRoot(t.Name()) - - // and we use random ports to run in parallel - cmt, rpc := makeAddrs() - c.P2P.ListenAddress = cmt - c.RPC.ListenAddress = rpc - return c -} - // byteBufferWAL is a WAL which writes all msgs to a byte buffer. Writing stops // when the heightToStop is reached. Client will be notified via // signalWhenStopsTo channel. @@ -168,7 +151,7 @@ type byteBufferWAL struct { logger log.Logger } -// needed for determinism +// needed for determinism. var fixedTime, _ = time.Parse(time.RFC3339, "2017-01-02T15:04:05Z") func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalStop chan<- struct{}) *byteBufferWAL { @@ -212,15 +195,15 @@ func (w *byteBufferWAL) WriteSync(m WALMessage) error { return w.Write(m) } -func (w *byteBufferWAL) FlushAndSync() error { return nil } +func (*byteBufferWAL) FlushAndSync() error { return nil } -func (w *byteBufferWAL) SearchForEndHeight( +func (*byteBufferWAL) SearchForEndHeight( int64, *WALSearchOptions, ) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } -func (w *byteBufferWAL) Start() error { return nil } -func (w *byteBufferWAL) Stop() error { return nil } -func (w *byteBufferWAL) Wait() {} +func (*byteBufferWAL) Start() error { return nil } +func (*byteBufferWAL) Stop() error { return nil } +func (*byteBufferWAL) Wait() {} diff --git a/consensus/wal_test.go b/internal/consensus/wal_test.go similarity index 50% rename from consensus/wal_test.go rename to internal/consensus/wal_test.go index 32d8a7b8596..bb9fb145285 100644 --- a/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -3,20 +3,27 @@ package consensus import ( "bytes" "crypto/rand" + "encoding/hex" + "fmt" + "io" "os" "path/filepath" - - // "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/consensus/types" + dbm "github.com/cometbft/cometbft-db" + cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/merkle" - "github.com/cometbft/cometbft/libs/autofile" + "github.com/cometbft/cometbft/crypto/tmhash" + "github.com/cometbft/cometbft/internal/autofile" + "github.com/cometbft/cometbft/internal/consensus/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" + "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" + sm "github.com/cometbft/cometbft/state" cmttypes "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -26,6 +33,8 @@ const ( ) func TestWALTruncate(t *testing.T) { + const numBlocks = 60 + walDir, err := os.MkdirTemp("", "wal") require.NoError(t, err) defer os.RemoveAll(walDir) @@ -56,7 +65,7 @@ func TestWALTruncate(t *testing.T) { // 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), // when headBuf is full, truncate content will Flush to the file. at this // time, RotateFile is called, truncate content exist in each file. 
- err = WALGenerateNBlocks(t, wal.Group(), 60, getConfig(t)) + err = WALGenerateNBlocks(t, wal.Group(), numBlocks, getConfig(t)) require.NoError(t, err) time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run @@ -67,14 +76,14 @@ func TestWALTruncate(t *testing.T) { h := int64(50) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) - assert.NoError(t, err, "expected not to err on height %d", h) + require.NoError(t, err, "expected not to err on height %d", h) assert.True(t, found, "expected to find end height for %d", h) assert.NotNil(t, gr) defer gr.Close() dec := NewWALDecoder(gr) msg, err := dec.Decode() - assert.NoError(t, err, "expected to decode a message") + require.NoError(t, err, "expected to decode a message") rs, ok := msg.Msg.(cmttypes.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") assert.Equal(t, rs.Height, h+1, "wrong height") @@ -82,17 +91,42 @@ func TestWALTruncate(t *testing.T) { func TestWALEncoderDecoder(t *testing.T) { now := cmttime.Now() + + randbytes := cmtrand.Bytes(tmhash.Size) + cs1, vss := randState(1) + + block1 := cmttypes.BlockID{ + Hash: randbytes, + PartSetHeader: cmttypes.PartSetHeader{Total: 5, Hash: randbytes}, + } + + p := cmttypes.Proposal{ + Type: cmttypes.ProposalType, + Height: 42, + Round: 13, + BlockID: block1, + POLRound: 12, + Timestamp: cmttime.Canonical(now), + } + + pp := p.ToProto() + err := vss[0].SignProposal(cs1.state.ChainID, pp) + require.NoError(t, err) + + p.Signature = pp.Signature + msgs := []TimedWALMessage{ {Time: now, Msg: EndHeightMessage{0}}, {Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, {Time: now, Msg: cmttypes.EventDataRoundState{Height: 1, Round: 1, Step: ""}}, + {Time: now, Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody", ReceiveTime: now}}, + {Time: now, Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody", ReceiveTime: time.Time{}}}, + {Time: now, Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody"}}, } b := new(bytes.Buffer) for _, msg := range msgs { - msg := msg - b.Reset() enc := NewWALEncoder(b) @@ -107,6 +141,117 @@ func TestWALEncoderDecoder(t *testing.T) { } } +func TestWALEncoderDecoderMultiVersion(t *testing.T) { + now := time.Time{}.AddDate(100, 10, 20) + v038Data, _ := hex.DecodeString("a570586b000000c50a0b0880e2c3b1a4feffffff0112b50112b2010aa7011aa4010aa1010820102a180d200c2a480a2001c073624aaf3978514ef8443bb2a859c75fc3cc6af26d5aaa20926f046baa6612240805122001c073624aaf3978514ef8443bb2a859c75fc3cc6af26d5aaa20926f046baa66320b0880e2c3b1a4feffffff013a404942b2803552651e1c7e7b72557cdade0a4c5a638dcda9822ec402d42c5f75c767f62c0f3fb0d58aef7842a4e18964faaff3d17559989cf1f11dd006e31a9d0f12064e6f626f6479") + + ss, privVals := makeState(1, "execution_chain") + var pVal cmttypes.PrivValidator + for mk := range privVals { + pVal = privVals[mk] + } + vs := newValidatorStub(pVal, 1) + + cmtrand.Seed(0) + randbytes := cmtrand.Bytes(tmhash.Size) + block1 := cmttypes.BlockID{ + Hash: randbytes, + PartSetHeader: cmttypes.PartSetHeader{Total: 5, Hash: randbytes}, + } + + p := cmttypes.Proposal{ + Type: cmttypes.ProposalType, + Height: 42, + Round: 13, + BlockID: block1, + POLRound: 12, + Timestamp: cmttime.Canonical(now), + } + + pp := p.ToProto() + err := vs.SignProposal(ss.ChainID, pp) + require.NoError(t, err) + p.Signature = pp.Signature + + cases := []struct { + twm TimedWALMessage + expectFailure bool + }{ + {twm: TimedWALMessage{Time: now, 
Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody", ReceiveTime: now}}, expectFailure: true}, + {twm: TimedWALMessage{Time: now, Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody", ReceiveTime: time.Time{}}}, expectFailure: false}, + {twm: TimedWALMessage{Time: now, Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody"}}, expectFailure: false}, + } + + b := new(bytes.Buffer) + b.Reset() + + _, err = b.Write(v038Data) + require.NoError(t, err) + + dec := NewWALDecoder(b) + v038decoded, err := dec.Decode() + require.NoError(t, err) + twmV038 := v038decoded.Msg + msgInfoV038 := twmV038.(msgInfo) + + for _, tc := range cases { + if tc.expectFailure { + assert.NotEqual(t, tc.twm.Msg, msgInfoV038) + } else { + assert.Equal(t, tc.twm.Msg, msgInfoV038) + } + } +} + +func TestWALEncoder(t *testing.T) { + now := time.Time{}.AddDate(100, 10, 20) + + ss, privVals := makeState(1, "execution_chain") + var pVal cmttypes.PrivValidator + for mk := range privVals { + pVal = privVals[mk] + } + vs := newValidatorStub(pVal, 1) + + cmtrand.Seed(0) + randbytes := cmtrand.Bytes(tmhash.Size) + block1 := cmttypes.BlockID{ + Hash: randbytes, + PartSetHeader: cmttypes.PartSetHeader{Total: 5, Hash: randbytes}, + } + + p := cmttypes.Proposal{ + Type: cmttypes.ProposalType, + Height: 42, + Round: 13, + BlockID: block1, + POLRound: 12, + Timestamp: cmttime.Canonical(now), + } + + pp := p.ToProto() + err := vs.SignProposal(ss.ChainID, pp) + require.NoError(t, err) + p.Signature = pp.Signature + + b := new(bytes.Buffer) + enc := NewWALEncoder(b) + twm := TimedWALMessage{Time: now, Msg: msgInfo{Msg: &ProposalMessage{Proposal: &p}, PeerID: "Nobody"}} + err = enc.Encode(&twm) + require.NoError(t, err) + + var b1 bytes.Buffer + tee := io.TeeReader(b, &b1) + b2, err := io.ReadAll(tee) + require.NoError(t, err) + fmt.Printf("%s\n", hex.EncodeToString(b1.Bytes())) + + // Encoded string generated with v0.38 (before PBTS) + data, err := hex.DecodeString("a570586b000000c50a0b0880e2c3b1a4feffffff0112b50112b2010aa7011aa4010aa1010820102a180d200c2a480a2001c073624aaf3978514ef8443bb2a859c75fc3cc6af26d5aaa20926f046baa6612240805122001c073624aaf3978514ef8443bb2a859c75fc3cc6af26d5aaa20926f046baa66320b0880e2c3b1a4feffffff013a404942b2803552651e1c7e7b72557cdade0a4c5a638dcda9822ec402d42c5f75c767f62c0f3fb0d58aef7842a4e18964faaff3d17559989cf1f11dd006e31a9d0f12064e6f626f6479") + require.NoError(t, err) + require.Equal(t, data, b2) +} + func TestWALWrite(t *testing.T) { walDir, err := os.MkdirTemp("", "wal") require.NoError(t, err) @@ -144,7 +289,7 @@ func TestWALWrite(t *testing.T) { err = wal.Write(msgInfo{ Msg: msg, }) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), "msg is too big") } } @@ -162,14 +307,14 @@ func TestWALSearchForEndHeight(t *testing.T) { h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) - assert.NoError(t, err, "expected not to err on height %d", h) + require.NoError(t, err, "expected not to err on height %d", h) assert.True(t, found, "expected to find end height for %d", h) assert.NotNil(t, gr) defer gr.Close() dec := NewWALDecoder(gr) msg, err := dec.Decode() - assert.NoError(t, err, "expected to decode a message") + require.NoError(t, err, "expected to decode a message") rs, ok := msg.Msg.(cmttypes.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") assert.Equal(t, rs.Height, h+1, "wrong height") @@ 
-209,7 +354,7 @@ func TestWALPeriodicSync(t *testing.T) { h := int64(4) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) - assert.NoError(t, err, "expected not to err on height %d", h) + require.NoError(t, err, "expected not to err on height %d", h) assert.True(t, found, "expected to find end height for %d", h) assert.NotNil(t, gr) if gr != nil { @@ -217,6 +362,28 @@ func TestWALPeriodicSync(t *testing.T) { } } +// FIXME: this helper is very similar to the one in ../../state/helpers_test.go. +func makeState(nVals int, chainID string) (sm.State, map[string]cmttypes.PrivValidator) { + vals, privVals := test.GenesisValidatorSet(nVals) + + s, _ := sm.MakeGenesisState(&cmttypes.GenesisDoc{ + ChainID: chainID, + Validators: vals, + AppHash: nil, + ConsensusParams: test.ConsensusParams(), + }) + + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + if err := stateStore.Save(s); err != nil { + panic(err) + } + + return s, privVals +} + /* var initOnce sync.Once @@ -237,13 +404,14 @@ func nBytes(n int) []byte { } func benchmarkWalDecode(b *testing.B, n int) { + b.Helper() // registerInterfacesOnce() buf := new(bytes.Buffer) enc := NewWALEncoder(buf) data := nBytes(n) - if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil { + if err := enc.Encode(&TimedWALMessage{Msg: data, Time: cmttime.Now().Round(time.Second).UTC()}); err != nil { b.Error(err) } @@ -268,18 +436,35 @@ func BenchmarkWalDecode512B(b *testing.B) { func BenchmarkWalDecode10KB(b *testing.B) { benchmarkWalDecode(b, 10*1024) } + func BenchmarkWalDecode100KB(b *testing.B) { benchmarkWalDecode(b, 100*1024) } + func BenchmarkWalDecode1MB(b *testing.B) { benchmarkWalDecode(b, 1024*1024) } + func BenchmarkWalDecode10MB(b *testing.B) { benchmarkWalDecode(b, 10*1024*1024) } + func BenchmarkWalDecode100MB(b *testing.B) { benchmarkWalDecode(b, 100*1024*1024) } + func BenchmarkWalDecode1GB(b *testing.B) { benchmarkWalDecode(b, 1024*1024*1024) } + +// getConfig returns a config for test cases. +func getConfig(t *testing.T) *cfg.Config { + t.Helper() + c := test.ResetTestRoot(t.Name()) + + // and we use random ports to run in parallel + cmt, rpc := makeAddrs() + c.P2P.ListenAddress = cmt + c.RPC.ListenAddress = rpc + return c +} diff --git a/libs/events/Makefile b/internal/events/Makefile similarity index 100% rename from libs/events/Makefile rename to internal/events/Makefile diff --git a/libs/events/README.md b/internal/events/README.md similarity index 100% rename from libs/events/README.md rename to internal/events/README.md diff --git a/libs/events/event_cache.go b/internal/events/event_cache.go similarity index 84% rename from libs/events/event_cache.go rename to internal/events/event_cache.go index f508e873da0..6a840003737 100644 --- a/libs/events/event_cache.go +++ b/internal/events/event_cache.go @@ -1,20 +1,20 @@ package events // An EventCache buffers events for a Fireable -// All events are cached. Filtering happens on Flush +// All events are cached. Filtering happens on Flush. type EventCache struct { evsw Fireable events []eventInfo } -// Create a new EventCache with an EventSwitch as backend +// Create a new EventCache with an EventSwitch as backend. func NewEventCache(evsw Fireable) *EventCache { return &EventCache{ evsw: evsw, } } -// a cached event +// a cached event. 
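The contract described above, that nothing reaches listeners until `Flush`, in a short usage sketch (event name and payload are made up):

```go
func sketchEventCache() {
	evsw := NewEventSwitch()
	if err := evsw.Start(); err != nil {
		panic(err)
	}
	evc := NewEventCache(evsw)

	evc.FireEvent("tick", 42) // buffered only; listeners see nothing yet
	evc.Flush()               // now evsw.FireEvent runs for each cached event
}
```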
type eventInfo struct { event string data EventData @@ -27,7 +27,7 @@ func (evc *EventCache) FireEvent(event string, data EventData) { } // Fire events by running evsw.FireEvent on all cached events. Blocks. -// Clears cached events +// Clears cached events. func (evc *EventCache) Flush() { for _, ei := range evc.events { evc.evsw.FireEvent(ei.event, ei.data) diff --git a/libs/events/event_cache_test.go b/internal/events/event_cache_test.go similarity index 90% rename from libs/events/event_cache_test.go rename to internal/events/event_cache_test.go index 3f8641f93d8..d48e2e48760 100644 --- a/libs/events/event_cache_test.go +++ b/internal/events/event_cache_test.go @@ -12,7 +12,7 @@ func TestEventCache_Flush(t *testing.T) { err := evsw.Start() require.NoError(t, err) - err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { + err = evsw.AddListenerForEvent("nothingness", "", func(_ EventData) { // Check we are not initializing an empty buffer full // of zeroed eventInfos in the EventCache require.FailNow(t, "We should never receive a message on this switch since none are fired") @@ -25,7 +25,7 @@ func TestEventCache_Flush(t *testing.T) { evc.Flush() fail := true pass := false - err = evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { + err = evsw.AddListenerForEvent("somethingness", "something", func(_ EventData) { if fail { require.FailNow(t, "Shouldn't see a message until flushed") } diff --git a/libs/events/events.go b/internal/events/events.go similarity index 95% rename from libs/events/events.go rename to internal/events/events.go index 293e6fb8da9..984d54709a4 100644 --- a/libs/events/events.go +++ b/internal/events/events.go @@ -20,7 +20,7 @@ func (e ErrListenerWasRemoved) Error() string { // EventData is a generic event data can be typed and registered with // tendermint/go-amino via concrete implementation of this interface. -type EventData interface{} +type EventData any // Eventable is the interface reactors and other modules must export to become // eventable. @@ -68,11 +68,11 @@ func NewEventSwitch() EventSwitch { return evsw } -func (evsw *eventSwitch) OnStart() error { +func (*eventSwitch) OnStart() error { return nil } -func (evsw *eventSwitch) OnStop() {} +func (*eventSwitch) OnStop() {} func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) error { // Get/Create eventCell and listener. @@ -158,7 +158,7 @@ func (evsw *eventSwitch) FireEvent(event string, data EventData) { eventCell.FireEvent(data) } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // eventCell handles keeping track of listener callbacks for a given event. 
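Alongside the switch internals above (`AddListenerForEvent`, `FireEvent`), the basic subscribe-then-fire flow from a client's perspective; listener ID, event name, and payload are illustrative:

```go
func sketchEventSwitch() {
	evsw := NewEventSwitch()
	if err := evsw.Start(); err != nil {
		panic(err)
	}

	received := make(chan EventData, 1)
	err := evsw.AddListenerForEvent("listener-1", "block", func(data EventData) {
		received <- data
	})
	if err != nil {
		panic(err)
	}

	evsw.FireEvent("block", "payload") // listeners are invoked by FireEvent
	<-received
}
```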
type eventCell struct { @@ -199,7 +199,7 @@ func (cell *eventCell) FireEvent(data EventData) { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- type EventCallback func(data EventData) diff --git a/libs/events/events_test.go b/internal/events/events_test.go similarity index 97% rename from libs/events/events_test.go rename to internal/events/events_test.go index d6bbcd08c35..0aa9fa493b8 100644 --- a/libs/events/events_test.go +++ b/internal/events/events_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/libs/rand" + "github.com/cometbft/cometbft/internal/rand" ) // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single @@ -228,7 +228,7 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // we explicitly ignore errors here, since the listener will sometimes be removed // (that's what we're testing) _ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), - func(data EventData) { + func(_ EventData) { t.Errorf("should not run callback for %d.\n", index) stopInputEvent = true }) @@ -298,7 +298,7 @@ func TestAddAndRemoveListener(t *testing.T) { } } -// TestRemoveListener does basic tests on adding and removing +// TestRemoveListener does basic tests on adding and removing. func TestRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() @@ -313,13 +313,13 @@ func TestRemoveListener(t *testing.T) { sum1, sum2 := 0, 0 // add some listeners and make sure they work err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { + func(_ EventData) { sum1++ }) require.NoError(t, err) err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { + func(_ EventData) { sum2++ }) require.NoError(t, err) @@ -350,7 +350,7 @@ func TestRemoveListener(t *testing.T) { assert.Equal(t, count, sum2) } -// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two +// TestRemoveListenersAsync sets up an EventSwitch, subscribes two // listeners to three events, and fires a thousand integers for each event. // These two listeners serve as the baseline validation while other listeners // are randomly subscribed and unsubscribed. @@ -451,7 +451,7 @@ func TestRemoveListenersAsync(t *testing.T) { } } -//------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------ // Helper functions // sumReceivedNumbers takes two channels and adds all numbers received @@ -476,7 +476,8 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // sent on `doneChan` for assertion that all events have been sent, and enabling // the test to assert all events have also been received. 
func fireEvents(evsw Fireable, event string, doneChan chan uint64, - offset uint64) { + offset uint64, +) { var sentSum uint64 for i := offset; i <= offset+uint64(999); i++ { sentSum += i diff --git a/evidence/doc.go b/internal/evidence/doc.go similarity index 100% rename from evidence/doc.go rename to internal/evidence/doc.go diff --git a/evidence/errors.go b/internal/evidence/errors.go similarity index 96% rename from evidence/errors.go rename to internal/evidence/errors.go index e4f9a280c3e..3ed7e0cbbea 100644 --- a/evidence/errors.go +++ b/internal/evidence/errors.go @@ -26,7 +26,7 @@ type ( Evidence types.Evidence } - // ErrVotingPowerDoesNotMatch is returned when voting power from trusted validator set does not match voting power from evidence + // ErrVotingPowerDoesNotMatch is returned when voting power from trusted validator set does not match voting power from evidence. ErrVotingPowerDoesNotMatch struct { TrustedVotingPower int64 EvidenceVotingPower int64 @@ -37,18 +37,18 @@ type ( Height int64 } - // ErrValidatorAddressesDoNotMatch is returned when provided DuplicateVoteEvidence's votes have different validators as signers + // ErrValidatorAddressesDoNotMatch is returned when provided DuplicateVoteEvidence's votes have different validators as signers. ErrValidatorAddressesDoNotMatch struct { ValidatorA bytes.HexBytes ValidatorB bytes.HexBytes } - // ErrSameBlockIDs is returned if a duplicate vote evidence has votes from the same block id (should be different) + // ErrSameBlockIDs is returned if a duplicate vote evidence has votes from the same block id (should be different). ErrSameBlockIDs struct { BlockID types.BlockID } - // ErrInvalidEvidenceValidators is returned when evidence validation spots an error related to validator set + // ErrInvalidEvidenceValidators is returned when evidence validation spots an error related to validator set. 
ErrInvalidEvidenceValidators struct { ValError error } @@ -122,5 +122,4 @@ func (e ErrDuplicateEvidenceHRTMismatch) Error() string { return fmt.Sprintf("h/r/t does not match: %d/%d/%v vs %d/%d/%v", e.VoteA.Height, e.VoteA.Round, e.VoteA.Type, e.VoteB.Height, e.VoteB.Round, e.VoteB.Type) - } diff --git a/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go similarity index 87% rename from evidence/mocks/block_store.go rename to internal/evidence/mocks/block_store.go index 1ba72961ffe..45be790b5bf 100644 --- a/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -16,6 +16,10 @@ type BlockStore struct { func (_m *BlockStore) Height() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Height") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -30,6 +34,10 @@ func (_m *BlockStore) Height() int64 { func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -46,6 +54,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { r0 = rf(height) diff --git a/evidence/pool.go b/internal/evidence/pool.go similarity index 77% rename from evidence/pool.go rename to internal/evidence/pool.go index 85ddf428324..2efa6de6043 100644 --- a/evidence/pool.go +++ b/internal/evidence/pool.go @@ -7,25 +7,20 @@ import ( "sync/atomic" "time" - cmterrors "github.com/cometbft/cometbft/types/errors" "github.com/cosmos/gogoproto/proto" gogotypes "github.com/cosmos/gogoproto/types" + "github.com/google/orderedcode" dbm "github.com/cometbft/cometbft-db" - - clist "github.com/cometbft/cometbft/libs/clist" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/internal/clist" "github.com/cometbft/cometbft/libs/log" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" ) -const ( - baseKeyCommitted = byte(0x00) - baseKeyPending = byte(0x01) -) - -// Pool maintains a pool of valid evidence to be broadcasted and committed +// Pool maintains a pool of valid evidence to be broadcasted and committed. type Pool struct { logger log.Logger @@ -48,11 +43,63 @@ type Pool struct { pruningHeight int64 pruningTime time.Time + + dbKeyLayout KeyLayout +} + +func isEmpty(evidenceDB dbm.DB) bool { + iter, err := evidenceDB.Iterator(nil, nil) + if err != nil { + panic(err) + } + + defer iter.Close() + for ; iter.Valid(); iter.Next() { + return false + } + return true +} + +func setDBLayout(pool *Pool, dbKeyLayoutVersion string) { + if !isEmpty(pool.evidenceStore) { + var version []byte + var err error + if version, err = pool.evidenceStore.Get([]byte("version")); err != nil { + // WARN: This is because currently cometBFT DB does not return an error if the key does not exist + // If this behavior changes we need to account for that. 
+ panic(err) + } + if len(version) != 0 { + dbKeyLayoutVersion = string(version) + } + } + + switch dbKeyLayoutVersion { + case "v1", "": + pool.dbKeyLayout = &v1LegacyLayout{} + dbKeyLayoutVersion = "v1" + case "v2": + pool.dbKeyLayout = &v2Layout{} + default: + panic("unknown key layout version") + } + if err := pool.evidenceStore.SetSync([]byte("version"), []byte(dbKeyLayoutVersion)); err != nil { + panic(err) + } +} + +type PoolOptions func(*Pool) + +// WithDBKeyLayout sets the db key layout. +func WithDBKeyLayout(dbKeyLayoutV string) PoolOptions { + return func(pool *Pool) { + setDBLayout(pool, dbKeyLayoutV) + } } // NewPool creates an evidence pool. If using an existing evidence store, // it will add all pending evidence to the concurrent list. -func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, error) { +func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore, options ...PoolOptions) (*Pool, error) { state, err := stateDB.Load() if err != nil { return nil, sm.ErrCannotLoadState{Err: err} @@ -68,10 +115,18 @@ func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, consensusBuffer: make([]duplicateVoteSet, 0), } + for _, option := range options { + option(pool) + } + + if pool.dbKeyLayout == nil { + setDBLayout(pool, "v1") + } + // if pending evidence already in db, in event of prior failure, then check for expiration, // update the size and load it back to the evidenceList pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence() - evList, _, err := pool.listEvidence(baseKeyPending, -1) + evList, _, err := pool.listEvidence(pool.dbKeyLayout.PrefixToBytesPending(), -1) if err != nil { return nil, err } @@ -88,9 +143,10 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { if evpool.Size() == 0 { return []types.Evidence{}, 0 } - evidence, size, err := evpool.listEvidence(baseKeyPending, maxBytes) + evidence, size, err := evpool.listEvidence(evpool.dbKeyLayout.PrefixToBytesPending(), maxBytes) if err != nil { evpool.logger.Error("Unable to retrieve pending evidence", "err", err) + return []types.Evidence{}, 0 } return evidence, size } @@ -132,11 +188,11 @@ func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { // AddEvidence checks the evidence is valid and adds it to the pool. func (evpool *Pool) AddEvidence(ev types.Evidence) error { - evpool.logger.Debug("Attempting to add evidence", "ev", ev) + evpool.logger.Info("Attempting to add evidence", "ev", ev) // We have already verified this piece of evidence - no need to do it again if evpool.isPending(ev) { - evpool.logger.Debug("Evidence already pending, ignoring this one", "ev", ev) + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) return nil } @@ -144,7 +200,7 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { if evpool.isCommitted(ev) { // this can happen if the peer that sent us the evidence is behind so we shouldn't // punish the peer. 
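NewPool's new variadic options follow the standard Go functional-options pattern: each PoolOptions value mutates the partially built pool, and the constructor falls back to the v1 layout when no option has set one. A minimal, self-contained sketch of the mechanics (Pool here is a stand-in for the real struct, whose option additionally persists the chosen version to the evidence DB via setDBLayout):

package main

import "fmt"

// Pool is a stand-in for evidence.Pool, reduced to the one field the option
// touches; the real option also writes the chosen version under the "version" key.
type Pool struct{ layoutVersion string }

// PoolOptions mirrors the evidence.PoolOptions functional-option type.
type PoolOptions func(*Pool)

// WithDBKeyLayout mirrors evidence.WithDBKeyLayout.
func WithDBKeyLayout(v string) PoolOptions {
	return func(p *Pool) { p.layoutVersion = v }
}

// NewPool applies options in order, then falls back to "v1", matching the
// diff's `if pool.dbKeyLayout == nil { setDBLayout(pool, "v1") }` default.
func NewPool(options ...PoolOptions) *Pool {
	p := &Pool{}
	for _, option := range options {
		option(p)
	}
	if p.layoutVersion == "" {
		p.layoutVersion = "v1"
	}
	return p
}

func main() {
	fmt.Println(NewPool().layoutVersion)                      // v1
	fmt.Println(NewPool(WithDBKeyLayout("v2")).layoutVersion) // v2
}

Note that setDBLayout panics on an unrecognized version string, which the updated TestEvidencePoolBasic later exercises with require.Panics.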
- evpool.logger.Debug("Evidence was already committed, ignoring this one", "ev", ev) + evpool.logger.Info("Evidence was already committed, ignoring this one", "ev", ev) return nil } @@ -192,7 +248,6 @@ func (evpool *Pool) ReportConflictingVotes(voteA, voteB *types.Vote) { func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { hashes := make([][]byte, len(evList)) for idx, ev := range evList { - _, isLightEv := ev.(*types.LightClientAttackEvidence) // We must verify light client attack evidence regardless because there could be a @@ -229,12 +284,12 @@ func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { return nil } -// EvidenceFront goes to the first evidence in the clist +// EvidenceFront goes to the first evidence in the clist. func (evpool *Pool) EvidenceFront() *clist.CElement { return evpool.evidenceList.Front() } -// EvidenceWaitChan is a channel that closes once the first evidence in the list is there. i.e Front is not nil +// EvidenceWaitChan is a channel that closes once the first evidence in the list is there. i.e Front is not nil. func (evpool *Pool) EvidenceWaitChan() <-chan struct{} { return evpool.evidenceList.WaitChan() } @@ -261,7 +316,7 @@ func (evpool *Pool) Close() error { } // IsExpired checks whether evidence or a polc is expired by checking whether a height and time is older -// than set by the evidence consensus parameters +// than set by the evidence consensus parameters. func (evpool *Pool) isExpired(height int64, time time.Time) bool { var ( params = evpool.State().ConsensusParams.Evidence @@ -274,7 +329,7 @@ func (evpool *Pool) isExpired(height int64, time time.Time) bool { // IsCommitted returns true if we have already seen this exact evidence and it is already marked as committed. func (evpool *Pool) isCommitted(evidence types.Evidence) bool { - key := keyCommitted(evidence) + key := evpool.dbKeyLayout.CalcKeyCommitted(evidence) ok, err := evpool.evidenceStore.Has(key) if err != nil { evpool.logger.Error("Unable to find committed evidence", "err", err) @@ -284,7 +339,7 @@ func (evpool *Pool) isCommitted(evidence types.Evidence) bool { // IsPending checks whether the evidence is already pending. DB errors are passed to the logger. func (evpool *Pool) isPending(evidence types.Evidence) bool { - key := keyPending(evidence) + key := evpool.dbKeyLayout.CalcKeyPending(evidence) ok, err := evpool.evidenceStore.Has(key) if err != nil { evpool.logger.Error("Unable to find pending evidence", "err", err) @@ -303,7 +358,7 @@ func (evpool *Pool) addPendingEvidence(ev types.Evidence) error { return fmt.Errorf("unable to marshal evidence: %w", err) } - key := keyPending(ev) + key := evpool.dbKeyLayout.CalcKeyPending(ev) err = evpool.evidenceStore.Set(key, evBytes) if err != nil { @@ -314,7 +369,7 @@ func (evpool *Pool) addPendingEvidence(ev types.Evidence) error { } func (evpool *Pool) removePendingEvidence(evidence types.Evidence) { - key := keyPending(evidence) + key := evpool.dbKeyLayout.CalcKeyPending(evidence) if err := evpool.evidenceStore.Delete(key); err != nil { evpool.logger.Error("Unable to delete pending evidence", "err", err) } else { @@ -335,7 +390,7 @@ func (evpool *Pool) markEvidenceAsCommitted(evidence types.EvidenceList) { // Add evidence to the committed list. As the evidence is stored in the block store // we only need to record the height that it was saved at. 
- key := keyCommitted(ev) + key := evpool.dbKeyLayout.CalcKeyCommitted(ev) h := gogotypes.Int64Value{Value: ev.Height()} evBytes, err := proto.Marshal(&h) @@ -357,7 +412,7 @@ func (evpool *Pool) markEvidenceAsCommitted(evidence types.EvidenceList) { // listEvidence retrieves lists evidence from oldest to newest within maxBytes. // If maxBytes is -1, there's no cap on the size of returned evidence. -func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Evidence, int64, error) { +func (evpool *Pool) listEvidence(prefixKey []byte, maxBytes int64) ([]types.Evidence, int64, error) { var ( evSize int64 totalSize int64 @@ -365,7 +420,7 @@ func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Eviden evList cmtproto.EvidenceList // used for calculating the bytes size ) - iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{prefixKey}) + iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixKey) if err != nil { return nil, totalSize, fmt.Errorf("database error: %v", err) } @@ -401,7 +456,7 @@ func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Eviden } func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { - iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{baseKeyPending}) + iter, err := dbm.IteratePrefix(evpool.evidenceStore, evpool.dbKeyLayout.PrefixToBytesPending()) if err != nil { evpool.logger.Error("Unable to iterate over pending evidence", "err", err) return evpool.State().LastBlockHeight, evpool.State().LastBlockTime @@ -460,7 +515,6 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { evpool.mtx.Lock() defer evpool.mtx.Unlock() for _, voteSet := range evpool.consensusBuffer { - // Check the height of the conflicting votes and fetch the corresponding time and validator set // to produce the valid evidence var ( @@ -513,13 +567,13 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { // check if we already have this evidence if evpool.isPending(dve) { - evpool.logger.Debug("evidence already pending; ignoring", "evidence", dve) + evpool.logger.Info("evidence already pending; ignoring", "evidence", dve) continue } // check that the evidence is not already committed on chain if evpool.isCommitted(dve) { - evpool.logger.Debug("evidence already committed; ignoring", "evidence", dve) + evpool.logger.Info("evidence already committed; ignoring", "evidence", dve) continue } @@ -555,19 +609,103 @@ func evMapKey(ev types.Evidence) string { return string(ev.Hash()) } -// big endian padded hex -func bE(h int64) string { - return fmt.Sprintf("%0.16X", h) +// -------------- DB Key layout representation --------------- + +type KeyLayout interface { + CalcKeyCommitted(evidence types.Evidence) []byte + + CalcKeyPending(evidence types.Evidence) []byte + + PrefixToBytesPending() []byte + + PrefixToBytesCommitted() []byte +} + +type v1LegacyLayout struct{} + +// PrefixToBytesCommitted implements EvidenceKeyLayout. +func (v1LegacyLayout) PrefixToBytesCommitted() []byte { + return []byte{baseKeyCommitted} } -func keyCommitted(evidence types.Evidence) []byte { +// PrefixToBytesPending implements EvidenceKeyLayout. +func (v1LegacyLayout) PrefixToBytesPending() []byte { + return []byte{baseKeyPending} +} + +// CalcKeyCommitted implements EvidenceKeyLayout. +func (v1LegacyLayout) CalcKeyCommitted(evidence types.Evidence) []byte { return append([]byte{baseKeyCommitted}, keySuffix(evidence)...) } -func keyPending(evidence types.Evidence) []byte { +// CalcKeyPending implements EvidenceKeyLayout. 
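The two implementations of KeyLayout differ mainly in how keys sort: the v1 legacy layout concatenates a prefix byte with a padded-hex height suffix, while the v2 layout in the next hunk encodes (prefix, height, hash) with github.com/google/orderedcode, whose byte-wise key order matches the numeric order of the encoded values. A small sketch of the ordering property the v2 pending-evidence iteration relies on (the prefix value and hashes below are illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/google/orderedcode"
)

func main() {
	// Two pending-evidence keys shaped like v2Layout.CalcKeyPending:
	// (prefix, height, hash). orderedcode encodes int64 values so that the
	// lexicographic order of the key bytes matches the numeric order of heights.
	older, _ := orderedcode.Append(nil, int64(10), int64(9), "hashA")
	newer, _ := orderedcode.Append(nil, int64(10), int64(42), "hashB")
	fmt.Println(bytes.Compare(older, newer) < 0) // true: height 9 sorts before height 42
}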
+func (v1LegacyLayout) CalcKeyPending(evidence types.Evidence) []byte { return append([]byte{baseKeyPending}, keySuffix(evidence)...) } +var _ KeyLayout = (*v1LegacyLayout)(nil) + +type v2Layout struct{} + +// PrefixToBytesCommitted implements EvidenceKeyLayout. +func (v2Layout) PrefixToBytesCommitted() []byte { + key, err := orderedcode.Append(nil, prefixCommitted) + if err != nil { + panic(err) + } + return key +} + +// PrefixToBytesPending implements EvidenceKeyLayout. +func (v2Layout) PrefixToBytesPending() []byte { + key, err := orderedcode.Append(nil, prefixPending) + if err != nil { + panic(err) + } + return key +} + +// CalcKeyCommitted implements EvidenceKeyLayout. +func (v2Layout) CalcKeyCommitted(evidence types.Evidence) []byte { + key, err := orderedcode.Append(nil, prefixCommitted, evidence.Height(), string(evidence.Hash())) + if err != nil { + panic(err) + } + return key +} + +// CalcKeyPending implements EvidenceKeyLayout. +func (v2Layout) CalcKeyPending(evidence types.Evidence) []byte { + key, err := orderedcode.Append(nil, prefixPending, evidence.Height(), string(evidence.Hash())) + if err != nil { + panic(err) + } + return key +} + +var _ KeyLayout = (*v2Layout)(nil) + +// -------- Util --------- +// big endian padded hex. +func bE(h int64) string { + return fmt.Sprintf("%0.16X", h) +} + func keySuffix(evidence types.Evidence) []byte { return []byte(fmt.Sprintf("%s/%X", bE(evidence.Height()), evidence.Hash())) } + +// --------------------- + +// ---- v2 layout ----. +const ( + // prefixes must be unique across all db's. + prefixCommitted = int64(9) + prefixPending = int64(10) +) + +// ---- v1 layout ----. +const ( + baseKeyCommitted = byte(0x00) + baseKeyPending = byte(0x01) +) diff --git a/evidence/pool_test.go b/internal/evidence/pool_test.go similarity index 92% rename from evidence/pool_test.go rename to internal/evidence/pool_test.go index 22fd6a01e59..c585d8b0c93 100644 --- a/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -10,12 +10,11 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - - "github.com/cometbft/cometbft/evidence" - "github.com/cometbft/cometbft/evidence/mocks" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" + "github.com/cometbft/cometbft/internal/evidence" + "github.com/cometbft/cometbft/internal/evidence/mocks" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" sm "github.com/cometbft/cometbft/state" smmocks "github.com/cometbft/cometbft/state/mocks" "github.com/cometbft/cometbft/store" @@ -51,14 +50,16 @@ func TestEvidencePoolBasic(t *testing.T) { stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil) stateStore.On("Load").Return(createState(height+1, valSet), nil) - pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + require.Panics(t, func() { _, _ = evidence.NewPool(evidenceDB, stateStore, blockStore, evidence.WithDBKeyLayout("2")) }, "failed to create store") + + pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore, evidence.WithDBKeyLayout("v2")) + require.NoError(t, err) pool.SetLogger(log.TestingLogger()) // evidence not seen yet: evs, size := pool.PendingEvidence(defaultEvidenceMaxBytes) - assert.Equal(t, 0, len(evs)) - assert.Zero(t, size) + require.Empty(t, evs) + require.Zero(t, size) ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, privVals[0],
evidenceChainID) require.NoError(t, err) @@ -71,7 +74,7 @@ func TestEvidencePoolBasic(t *testing.T) { }() // evidence seen but not yet committed: - assert.NoError(t, pool.AddEvidence(ev)) + require.NoError(t, pool.AddEvidence(ev)) select { case <-evAdded: @@ -84,16 +87,16 @@ func TestEvidencePoolBasic(t *testing.T) { const evidenceBytes int64 = 372 evs, size = pool.PendingEvidence(evidenceBytes) - assert.Equal(t, 1, len(evs)) + assert.Len(t, evs, 1) assert.Equal(t, evidenceBytes, size) // check that the size of the single evidence in bytes is correct // shouldn't be able to add evidence twice - assert.NoError(t, pool.AddEvidence(ev)) + require.NoError(t, pool.AddEvidence(ev)) evs, _ = pool.PendingEvidence(defaultEvidenceMaxBytes) - assert.Equal(t, 1, len(evs)) + assert.Len(t, evs, 1) } -// Tests inbound evidence for the right time and height +// Tests inbound evidence for the right time and height. func TestAddExpiredEvidence(t *testing.T) { var ( val = types.NewMockPV() @@ -132,15 +135,14 @@ func TestAddExpiredEvidence(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.evDescription, func(t *testing.T) { ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(tc.evHeight, tc.evTime, val, evidenceChainID) require.NoError(t, err) err = pool.AddEvidence(ev) if tc.expErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } @@ -212,7 +214,7 @@ func TestEvidencePoolUpdate(t *testing.T) { // b) If we try to check this evidence again it should fail because it has already been committed err = pool.CheckEvidence(types.EvidenceList{ev}) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, evidence.ErrEvidenceAlreadyCommitted.Error(), err.(*types.ErrInvalidEvidence).Reason.Error()) } } @@ -227,7 +229,7 @@ func TestVerifyPendingEvidencePasses(t *testing.T) { require.NoError(t, err) err = pool.CheckEvidence(types.EvidenceList{ev}) - assert.NoError(t, err) + require.NoError(t, err) } func TestVerifyDuplicatedEvidenceFails(t *testing.T) { @@ -237,13 +239,13 @@ func TestVerifyDuplicatedEvidenceFails(t *testing.T) { val, evidenceChainID) require.NoError(t, err) err = pool.CheckEvidence(types.EvidenceList{ev, ev}) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, evidence.ErrDuplicateEvidence.Error(), err.(*types.ErrInvalidEvidence).Reason.Error()) } } // check that valid light client evidence is correctly validated and stored in -// evidence pool +// evidence pool. 
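The test hunks here and below systematically migrate fatal checks from assert.* to require.*: assert records a failure and keeps executing, while require stops the test, so code that depends on a nil error never runs against a broken precondition. (Where the check sits inside an `if assert.Error(...)` guard, the tests keep assert and silence testifylint instead.) A minimal illustration of the difference:

package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAssertVsRequire(t *testing.T) {
	var err error
	// assert records a failure and lets the test continue; any later code
	// that assumed err == nil would still run (and possibly panic).
	assert.NoError(t, err)
	// require calls t.FailNow on failure, so nothing below it executes
	// once the precondition is violated.
	require.NoError(t, err)
}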
func TestLightClientAttackEvidenceLifecycle(t *testing.T) { var ( height int64 = 100 @@ -251,7 +253,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { ) ev, trusted, common := makeLunaticEvidence(t, height, commonHeight, - 10, 5, 5, defaultEvidenceTime, defaultEvidenceTime.Add(1*time.Hour)) + 5, 5, defaultEvidenceTime, defaultEvidenceTime.Add(1*time.Hour)) state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour), @@ -273,7 +275,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { pool.SetLogger(log.TestingLogger()) err = pool.AddEvidence(ev) - assert.NoError(t, err) + require.NoError(t, err) hash := ev.Hash() @@ -281,7 +283,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { require.NoError(t, pool.AddEvidence(ev)) pendingEv, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) - require.Equal(t, 1, len(pendingEv)) + require.Len(t, pendingEv, 1) require.Equal(t, ev, pendingEv[0]) require.NoError(t, pool.CheckEvidence(pendingEv)) @@ -304,7 +306,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { } // Tests that restarting the evidence pool after a potential failure will recover the -// pending evidence and continue to gossip it +// pending evidence and continue to gossip it. func TestRecoverPendingEvidence(t *testing.T) { height := int64(10) val := types.NewMockPV() @@ -348,11 +350,11 @@ func TestRecoverPendingEvidence(t *testing.T) { }, }, nil) newPool, err := evidence.NewPool(evidenceDB, newStateStore, blockStore) - assert.NoError(t, err) + require.NoError(t, err) evList, _ := newPool.PendingEvidence(defaultEvidenceMaxBytes) - assert.Equal(t, 1, len(evList)) + require.Len(t, evList, 1) next := newPool.EvidenceFront() - assert.Equal(t, goodEvidence, next.Value.(types.Evidence)) + require.Equal(t, goodEvidence, next.Value.(types.Evidence)) } func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store { @@ -416,8 +418,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo block := state.MakeBlock(i, test.MakeNTxs(i, 1), lastCommit.ToCommit(), nil, state.Validators.Proposer.Address) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = cmtversion.Consensus{Block: version.BlockProtocol, App: 1} - const parts = 1 - partSet, err := block.MakePartSet(parts) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) if err != nil { return nil, err } diff --git a/evidence/reactor.go b/internal/evidence/reactor.go similarity index 91% rename from evidence/reactor.go rename to internal/evidence/reactor.go index dea6a0777bb..702757fd59d 100644 --- a/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -6,11 +6,11 @@ import ( "github.com/cosmos/gogoproto/proto" - clist "github.com/cometbft/cometbft/libs/clist" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/internal/clist" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" "github.com/cometbft/cometbft/types" ) @@ -24,7 +24,7 @@ const ( // Most evidence should be committed in the very next block that is why we wait // just over the block production rate before sending evidence again. broadcastEvidenceIntervalS = 10 - // If a message fails wait this much before sending it again + // If a message fails wait this much before sending it again. 
peerRetryMessageIntervalMS = 100 ) @@ -50,22 +50,24 @@ func (evR *Reactor) SetLogger(l log.Logger) { evR.evpool.SetLogger(l) } -// GetChannels implements Reactor. +// StreamDescriptors implements Reactor. // It returns the list of channels for this reactor. -func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { +func (*Reactor) StreamDescriptors() []p2p.StreamDescriptor { + return []p2p.StreamDescriptor{ + &tcpconn.ChannelDescriptor{ ID: EvidenceChannel, Priority: 6, RecvMessageCapacity: maxMsgSize, - MessageType: &cmtproto.EvidenceList{}, + MessageTypeI: &cmtproto.EvidenceList{}, }, } } // AddPeer implements Reactor. func (evR *Reactor) AddPeer(peer p2p.Peer) { - go evR.broadcastEvidenceRoutine(peer) + if peer.HasChannel(EvidenceChannel) { + go evR.broadcastEvidenceRoutine(peer) + } } // Receive implements Reactor. @@ -168,7 +170,6 @@ func (evR Reactor) prepareEvidenceMessage( peer p2p.Peer, ev types.Evidence, ) (evis []types.Evidence) { - // make sure the peer is up to date evHeight := ev.Height() peerState, ok := peer.Get(types.PeerStateKey).(PeerState) @@ -192,7 +193,6 @@ func (evR Reactor) prepareEvidenceMessage( if peerHeight <= evHeight { // peer is behind. sleep while he catches up return nil } else if ageNumBlocks > params.MaxAgeNumBlocks { // evidence is too old relative to the peer, skip - // NOTE: if evidence is too old for an honest peer, then we're behind and // either it already got committed or it never will! evR.Logger.Info("Not sending peer old evidence", @@ -216,8 +216,8 @@ type PeerState interface { GetHeight() int64 } -// encodemsg takes a array of evidence -// returns the byte encoding of the List Message +// evidenceListToProto takes an array of evidence +// returns the byte encoding of the List Message. func evidenceListToProto(evis []types.Evidence) (*cmtproto.EvidenceList, error) { evi := make([]cmtproto.Evidence, len(evis)) for i := 0; i < len(evis); i++ { diff --git a/evidence/reactor_test.go b/internal/evidence/reactor_test.go similarity index 88% rename from evidence/reactor_test.go rename to internal/evidence/reactor_test.go index 620c8fa7d5e..0dfe4a71084 100644 --- a/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -8,22 +8,20 @@ import ( "time" "github.com/fortytw2/leaktest" - "github.com/go-kit/log/term" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" - "github.com/cometbft/cometbft/evidence" - "github.com/cometbft/cometbft/evidence/mocks" + "github.com/cometbft/cometbft/internal/evidence" + "github.com/cometbft/cometbft/internal/evidence/mocks" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" p2pmocks "github.com/cometbft/cometbft/p2p/mocks" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" ) @@ -39,14 +37,14 @@ var ( // evidence pools. 
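AddPeer now spawns the per-peer broadcast goroutine only when the peer actually advertised the evidence channel, instead of unconditionally. A reduced sketch of that gating; the Peer interface and fakePeer are illustrative stand-ins for the much larger p2p.Peer, and 0x38 is this package's evidence channel ID:

package main

import "fmt"

// Peer is a stand-in for the subset of p2p.Peer that AddPeer consults.
type Peer interface {
	ID() string
	HasChannel(chID byte) bool
}

const EvidenceChannel = byte(0x38)

type fakePeer struct {
	id       string
	channels map[byte]bool
}

func (p fakePeer) ID() string              { return p.id }
func (p fakePeer) HasChannel(ch byte) bool { return p.channels[ch] }

// addPeer mirrors the new guard: no broadcast goroutine for peers that
// never advertised the evidence channel.
func addPeer(p Peer) {
	if !p.HasChannel(EvidenceChannel) {
		fmt.Printf("peer %s lacks the evidence channel; no broadcast routine\n", p.ID())
		return
	}
	fmt.Printf("would start broadcastEvidenceRoutine for peer %s\n", p.ID())
}

func main() {
	addPeer(fakePeer{id: "a", channels: map[byte]bool{EvidenceChannel: true}})
	addPeer(fakePeer{id: "b"}) // nil map: HasChannel reports false
}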
func TestReactorBroadcastEvidence(t *testing.T) { config := cfg.TestConfig() - N := 7 + n := 7 // create statedb for everyone - stateDBs := make([]sm.Store, N) + stateDBs := make([]sm.Store, n) val := types.NewMockPV() // we need validators saved for heights at least as high as we have evidence for height := int64(numEvidence) + 10 - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { stateDBs[i] = initializeValidatorState(val, height) } @@ -55,7 +53,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { // set the peer height on each reactor for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { ps := peerState{height} peer.Set(types.PeerStateKey, ps) } @@ -86,14 +84,14 @@ func TestReactorSelectiveBroadcast(t *testing.T) { // set the peer height on each reactor for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { ps := peerState{height1} peer.Set(types.PeerStateKey, ps) } } // update the first reactor peer's height to be very small - peer := reactors[0].Switch.Peers().List()[0] + peer := reactors[0].Switch.Peers().Copy()[0] ps := peerState{height2} peer.Set(types.PeerStateKey, ps) @@ -104,15 +102,15 @@ func TestReactorSelectiveBroadcast(t *testing.T) { waitForEvidence(t, evList[:numEvidence/2-1], []*evidence.Pool{pools[1]}) // peers should still be connected - peers := reactors[1].Switch.Peers().List() - assert.Equal(t, 1, len(peers)) + peers := reactors[1].Switch.Peers().Copy() + assert.Len(t, peers, 1) } // This test aims to ensure that reactors don't send evidence that they have committed or that are // not ready for the peer through three scenarios. // First, committed evidence to a newly connected peer // Second, evidence to a peer that is behind -// Third, evidence that was pending and became committed just before the peer caught up +// Third, evidence that was pending and became committed just before the peer caught up. func TestReactorsGossipNoCommittedEvidence(t *testing.T) { config := cfg.TestConfig() @@ -135,11 +133,11 @@ func TestReactorsGossipNoCommittedEvidence(t *testing.T) { time.Sleep(100 * time.Millisecond) - peer := reactors[0].Switch.Peers().List()[0] + peer := reactors[0].Switch.Peers().Copy()[0] ps := peerState{height - 2} peer.Set(types.PeerStateKey, ps) - peer = reactors[1].Switch.Peers().List()[0] + peer = reactors[1].Switch.Peers().Copy()[0] ps = peerState{height} peer.Set(types.PeerStateKey, ps) @@ -177,7 +175,7 @@ func TestReactorsGossipNoCommittedEvidence(t *testing.T) { // now update the state of the second reactor pools[1].Update(state, types.EvidenceList{}) - peer = reactors[0].Switch.Peers().List()[0] + peer = reactors[0].Switch.Peers().Copy()[0] ps = peerState{height} peer.Set(types.PeerStateKey, ps) @@ -208,7 +206,8 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) { // i.e. broadcastEvidenceRoutine finishes when peer is stopped defer leaktest.CheckTimeout(t, 10*time.Second)() - p.On("Send", mock.MatchedBy(func(i interface{}) bool { + p.On("HasChannel", evidence.EvidenceChannel).Maybe().Return(true) + p.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) return ok && e.ChannelID == evidence.EvidenceChannel })).Return(false) @@ -226,31 +225,18 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) { _ = sendEvidence(t, pool, val, 2) } -// evidenceLogger is a TestingLogger which uses a different -// color for each validator ("validator" key must exist).
-func evidenceLogger() log.Logger { - return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "validator" { - return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} - } - } - return term.FgBgColor{} - }) -} - -// connect N evidence reactors through N switches +// connect N evidence reactors through N switches. func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) ([]*evidence.Reactor, []*evidence.Pool, ) { - N := len(stateStores) + n := len(stateStores) - reactors := make([]*evidence.Reactor, N) - pools := make([]*evidence.Pool, N) - logger := evidenceLogger() + reactors := make([]*evidence.Reactor, n) + pools := make([]*evidence.Pool, n) + logger := log.TestingLogger() evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { evidenceDB := dbm.NewMemDB() blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( @@ -265,7 +251,7 @@ func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) reactors[i].SetLogger(logger.With("validator", i)) } - p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { + p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch { s.AddReactor("EVIDENCE", reactors[i]) return s }, p2p.Connect2Switches) @@ -273,8 +259,9 @@ func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) return reactors, pools } -// wait for all evidence on all reactors +// wait for all evidence on all reactors. func waitForEvidence(t *testing.T, evs types.EvidenceList, pools []*evidence.Pool) { + t.Helper() // wait for the evidence in all evpools wg := new(sync.WaitGroup) for i := 0; i < len(pools); i++ { @@ -296,7 +283,7 @@ func waitForEvidence(t *testing.T, evs types.EvidenceList, pools []*evidence.Poo } } -// wait for all evidence on a single evpool +// wait for all evidence on a single evpool. 
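The test helpers in this file (waitForEvidence, _waitForEvidence, sendEvidence) all gain t.Helper() calls. A hypothetical helper showing what that buys, namely failure attribution at the call site rather than inside the shared helper:

package demo

import (
	"testing"
	"time"
)

// requireEventually is an illustrative polling helper in the spirit of
// waitForEvidence. The t.Helper() call makes a failure inside this function
// be reported at the caller's line, which is what you want when many tests
// funnel through one shared helper.
func requireEventually(t *testing.T, cond func() bool) {
	t.Helper()
	for i := 0; i < 100; i++ {
		if cond() {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatal("condition never became true") // attributed to the caller
}

func TestHelperExample(t *testing.T) {
	requireEventually(t, func() bool { return true })
}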
func _waitForEvidence( t *testing.T, wg *sync.WaitGroup, @@ -304,6 +291,7 @@ func _waitForEvidence( poolIdx int, pools []*evidence.Pool, ) { + t.Helper() evpool := pools[poolIdx] var evList []types.Evidence currentPoolSize := 0 @@ -329,6 +317,7 @@ func _waitForEvidence( } func sendEvidence(t *testing.T, evpool *evidence.Pool, val types.PrivValidator, n int) types.EvidenceList { + t.Helper() evList := make([]types.Evidence, n) for i := 0; i < n; i++ { ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(int64(i+1), @@ -356,7 +345,7 @@ func exampleVote(t byte) *types.Vote { } return &types.Vote{ - Type: cmtproto.SignedMsgType(t), + Type: types.SignedMsgType(t), Height: 3, Round: 2, Timestamp: stamp, @@ -398,8 +387,6 @@ func TestEvidenceVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - evi := make([]cmtproto.Evidence, len(tc.evidenceList)) for i := 0; i < len(tc.evidenceList); i++ { ev, err := types.EvidenceToProto(tc.evidenceList[i]) @@ -415,6 +402,5 @@ func TestEvidenceVectors(t *testing.T) { require.NoError(t, err, tc.testName) require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) - } } diff --git a/evidence/services.go b/internal/evidence/services.go similarity index 77% rename from evidence/services.go rename to internal/evidence/services.go index 5ebbc5a5b71..45c330972a9 100644 --- a/evidence/services.go +++ b/internal/evidence/services.go @@ -4,7 +4,7 @@ import ( "github.com/cometbft/cometbft/types" ) -//go:generate ../scripts/mockery_generate.sh BlockStore +//go:generate ../../scripts/mockery_generate.sh BlockStore type BlockStore interface { LoadBlockMeta(height int64) *types.BlockMeta diff --git a/evidence/verify.go b/internal/evidence/verify.go similarity index 95% rename from evidence/verify.go rename to internal/evidence/verify.go index 833c66ff632..8f7523528dc 100644 --- a/evidence/verify.go +++ b/internal/evidence/verify.go @@ -15,7 +15,7 @@ import ( // - it is sufficiently recent (MaxAge) // - it is from a key who was a validator at the given height // - it is internally consistent with state -// - it was properly signed by the alleged equivocator and meets the individual evidence verification requirements +// - it was properly signed by the alleged equivocator and meets the individual evidence verification requirements. func (evpool *Pool) verify(evidence types.Evidence) error { var ( state = evpool.State() @@ -86,8 +86,7 @@ func (evpool *Pool) verify(evidence types.Evidence) error { } } - err = VerifyLightClientAttack(ev, commonHeader, trustedHeader, commonVals, state.LastBlockTime, - state.ConsensusParams.Evidence.MaxAgeDuration) + err = VerifyLightClientAttack(ev, commonHeader, trustedHeader, commonVals) if err != nil { return err } @@ -103,6 +102,7 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // the conflicting header's commit // - 2/3+ of the conflicting validator set correctly signed the conflicting block // - the nodes trusted header at the same height as the conflicting header has a different hash +// - all signatures must be checked as this will be used as evidence // // CONTRACT: must run ValidateBasic() on the evidence before verifying // @@ -111,8 +111,6 @@ func VerifyLightClientAttack( e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, commonVals *types.ValidatorSet, - now time.Time, //nolint:revive - trustPeriod time.Duration, //nolint:revive ) error { // TODO: Should the current time and trust period be used in this method? // If not, why were the parameters present? 
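The hunk just below swaps VerifyCommitLightTrusting and VerifyCommitLight for their *AllSignatures variants. The light variants may stop as soon as 2/3+ of the voting power has been tallied, which is fine for syncing but not for evidence handling, where every signature may be used against its signer. A distilled, self-contained contrast of the two strategies (sig and the tallying below are illustrative, not the real validator-set API):

package main

import "fmt"

type sig struct {
	valid bool
	power int64
}

// shortCircuit mimics the "light" strategy: stop as soon as 2/3+ of the
// total power has been tallied, leaving later signatures unexamined.
func shortCircuit(sigs []sig, total int64) bool {
	var tallied int64
	for _, s := range sigs {
		if !s.valid {
			return false
		}
		tallied += s.power
		if tallied*3 > total*2 {
			return true // quorum reached; remaining sigs never checked
		}
	}
	return false
}

// verifyAll mimics the *AllSignatures strategy: every signature must be
// checked, because each one can incriminate a validator.
func verifyAll(sigs []sig, total int64) bool {
	var tallied int64
	for _, s := range sigs {
		if !s.valid {
			return false
		}
		tallied += s.power
	}
	return tallied*3 > total*2
}

func main() {
	sigs := []sig{{true, 40}, {true, 40}, {false, 20}} // last signature is bad
	fmt.Println(shortCircuit(sigs, 100)) // true: the bad sig is never reached
	fmt.Println(verifyAll(sigs, 100))    // false: the bad sig is caught
}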
@@ -120,7 +118,7 @@ func VerifyLightClientAttack( // In the case of lunatic attack there will be a different commonHeader height. Therefore the node performs a single // verification jump between the common header and the conflicting one if commonHeader.Height != e.ConflictingBlock.Height { - err := commonVals.VerifyCommitLightTrusting(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel) + err := commonVals.VerifyCommitLightTrustingAllSignatures(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel) if err != nil { return ErrConflictingBlock{fmt.Errorf("skipping verification of conflicting block failed: %w", err)} } @@ -132,7 +130,7 @@ } // Verify that the 2/3+ commits from the conflicting validator set were for the conflicting header - if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, + if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLightAllSignatures(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, e.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil { return ErrConflictingBlock{fmt.Errorf("invalid commit from conflicting block: %w", err)} } @@ -216,7 +214,7 @@ func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet } // validateABCIEvidence validates the ABCI component of the light client attack -// evidence i.e voting power and byzantine validators +// evidence i.e. voting power and byzantine validators. func validateABCIEvidence( ev *types.LightClientAttackEvidence, commonVals *types.ValidatorSet, @@ -278,7 +276,7 @@ func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader, }, nil } -// check that the evidence hasn't expired +// check that the evidence hasn't expired.
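IsEvidenceExpired, whose body follows, treats evidence as expired only when it is too old by both block count and wall-clock duration, per the evidence consensus parameters. A standalone sketch of that predicate with made-up parameter values:

package main

import (
	"fmt"
	"time"
)

// isExpired mirrors the predicate in IsEvidenceExpired: evidence expires
// only when it is old by BOTH block count and duration.
func isExpired(heightNow, heightEv, maxAgeBlocks int64, timeNow, timeEv time.Time, maxAgeDuration time.Duration) bool {
	ageDuration := timeNow.Sub(timeEv)
	ageNumBlocks := heightNow - heightEv
	return ageNumBlocks > maxAgeBlocks && ageDuration > maxAgeDuration
}

func main() {
	now := time.Now()
	// 200 blocks old exceeds MaxAgeNumBlocks=100, but 1h does not exceed
	// MaxAgeDuration=48h, so the evidence is still usable: prints false.
	fmt.Println(isExpired(1200, 1000, 100, now, now.Add(-time.Hour), 48*time.Hour))
}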
func IsEvidenceExpired(heightNow int64, timeNow time.Time, heightEv int64, timeEv time.Time, evidenceParams types.EvidenceParams) bool { ageDuration := timeNow.Sub(timeEv) ageNumBlocks := heightNow - heightEv diff --git a/evidence/verify_test.go b/internal/evidence/verify_test.go similarity index 86% rename from evidence/verify_test.go rename to internal/evidence/verify_test.go index 57c3c4f8b8c..00c078e48db 100644 --- a/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -9,15 +9,13 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" - "github.com/cometbft/cometbft/evidence" - "github.com/cometbft/cometbft/evidence/mocks" + "github.com/cometbft/cometbft/internal/evidence" + "github.com/cometbft/cometbft/internal/evidence/mocks" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" sm "github.com/cometbft/cometbft/state" smmocks "github.com/cometbft/cometbft/state/mocks" "github.com/cometbft/cometbft/types" @@ -38,31 +36,27 @@ func TestVerifyLightClientAttack_Lunatic(t *testing.T) { attackTime := defaultEvidenceTime.Add(1 * time.Hour) // create valid lunatic evidence ev, trusted, common := makeLunaticEvidence( - t, height, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) + t, height, commonHeight, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) require.NoError(t, ev.ValidateBasic()) // good pass -> no error - err := evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet, - defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) - assert.NoError(t, err) + err := evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet) + require.NoError(t, err) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, ev.ConflictingBlock.SignedHeader, common.ValidatorSet, - defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) - assert.Error(t, err) + err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, ev.ConflictingBlock.SignedHeader, common.ValidatorSet) + require.Error(t, err) // evidence with different total validator power should fail ev.TotalVotingPower = 1 * defaultVotingPower - err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet, - defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) - assert.Error(t, err) + err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet) + require.Error(t, err) // evidence without enough malicious votes should fail ev, trusted, common = makeLunaticEvidence( - t, height, commonHeight, totalVals, byzVals-1, totalVals-byzVals, defaultEvidenceTime, attackTime) - err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet, - defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) - assert.Error(t, err) + t, height, commonHeight, byzVals-1, totalVals-byzVals, defaultEvidenceTime, attackTime) + err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet) + require.Error(t, err) } func 
TestVerify_LunaticAttackAgainstState(t *testing.T) { @@ -75,7 +69,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { attackTime := defaultEvidenceTime.Add(1 * time.Hour) // create valid lunatic evidence ev, trusted, common := makeLunaticEvidence( - t, height, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) + t, height, commonHeight, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) // now we try to test verification against state state := sm.State{ @@ -97,18 +91,18 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { evList := types.EvidenceList{ev} // check that the evidence pool correctly verifies the evidence - assert.NoError(t, pool.CheckEvidence(evList)) + require.NoError(t, pool.CheckEvidence(evList)) // as it was not originally in the pending bucket, it should now have been added pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) - assert.Equal(t, 1, len(pendingEvs)) + assert.Len(t, pendingEvs, 1) assert.Equal(t, ev, pendingEvs[0]) // if we submit evidence only against a single byzantine validator when we see there are more validators then this // should return an error ev.ByzantineValidators = ev.ByzantineValidators[:1] t.Log(evList) - assert.Error(t, pool.CheckEvidence(evList)) + require.Error(t, pool.CheckEvidence(evList)) // restore original byz vals ev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader) @@ -116,20 +110,20 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { evList = types.EvidenceList{ev, ev} pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) require.NoError(t, err) - assert.Error(t, pool.CheckEvidence(evList)) + require.Error(t, pool.CheckEvidence(evList)) // If evidence is submitted with an altered timestamp it should return an error ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute) pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) require.NoError(t, err) - assert.Error(t, pool.AddEvidence(ev)) + require.Error(t, pool.AddEvidence(ev)) ev.Timestamp = defaultEvidenceTime // Evidence submitted with a different validator power should fail ev.TotalVotingPower = 1 pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) require.NoError(t, err) - assert.Error(t, pool.AddEvidence(ev)) + require.Error(t, pool.AddEvidence(ev)) ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower() } @@ -145,7 +139,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { // create a forward lunatic attack ev, trusted, common := makeLunaticEvidence( - t, attackHeight, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) + t, attackHeight, commonHeight, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) // now we try to test verification against state state := sm.State{ @@ -172,7 +166,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { require.NoError(t, err) // check that the evidence pool correctly verifies the evidence - assert.NoError(t, pool.CheckEvidence(types.EvidenceList{ev})) + require.NoError(t, pool.CheckEvidence(types.EvidenceList{ev})) // now we use a time which isn't able to contradict the FLA - thus we can't verify the evidence oldBlockStore := &mocks.BlockStore{} @@ -188,7 +182,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, oldBlockStore) require.NoError(t, err) - assert.Error(t, pool.CheckEvidence(types.EvidenceList{ev})) + require.Error(t, 
pool.CheckEvidence(types.EvidenceList{ev})) } func TestVerifyLightClientAttack_Equivocation(t *testing.T) { @@ -207,7 +201,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { // we are simulating a duplicate vote attack where all the validators in the conflictingVals set // except the last validator vote twice blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(evidenceChainID, 10, 1, cmtproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewVoteSet(evidenceChainID, 10, 1, types.SignedMsgType(2), conflictingVals) commit, err := test.MakeCommitFromVoteSet(blockID, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) require.NoError(t, err) ev := &types.LightClientAttackEvidence{ @@ -225,7 +219,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, cmtproto.SignedMsgType(2), conflictingVals) + trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, types.SignedMsgType(2), conflictingVals) trustedCommit, err := test.MakeCommitFromVoteSet(trustedBlockID, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) trustedSignedHeader := &types.SignedHeader{ @@ -234,21 +228,18 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { } // good pass -> no error - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.NoError(t, err) + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals) + require.NoError(t, err) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.Error(t, err) + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals) + require.Error(t, err) // conflicting header has different next validators hash which should have been correctly derived from // the previous round ev.ConflictingBlock.Header.NextValidatorsHash = crypto.CRandBytes(tmhash.Size) - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.Error(t, err) + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil) + require.Error(t, err) // revert next validators hash ev.ConflictingBlock.Header.NextValidatorsHash = trustedHeader.NextValidatorsHash @@ -270,10 +261,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { evList := types.EvidenceList{ev} err = pool.CheckEvidence(evList) - assert.NoError(t, err) + require.NoError(t, err) pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) - assert.Equal(t, 1, len(pendingEvs)) + assert.Len(t, pendingEvs, 1) } func TestVerifyLightClientAttack_Amnesia(t *testing.T) { @@ -291,7 +282,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { // we are simulating an amnesia attack where all the validators in the conflictingVals set // except the last validator vote twice. However this time the commits are of different rounds. 
blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(evidenceChainID, 10, 0, cmtproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewVoteSet(evidenceChainID, 10, 0, types.SignedMsgType(2), conflictingVals) commit, err := test.MakeCommitFromVoteSet(blockID, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) ev := &types.LightClientAttackEvidence{ @@ -309,7 +300,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, cmtproto.SignedMsgType(2), conflictingVals) + trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, types.SignedMsgType(2), conflictingVals) trustedCommit, err := test.MakeCommitFromVoteSet(trustedBlockID, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) trustedSignedHeader := &types.SignedHeader{ @@ -318,14 +309,12 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { } // good pass -> no error - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.NoError(t, err) + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals) + require.NoError(t, err) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.Error(t, err) + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals) + require.Error(t, err) state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), @@ -345,10 +334,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { evList := types.EvidenceList{ev} err = pool.CheckEvidence(evList) - assert.NoError(t, err) + require.NoError(t, err) pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) - assert.Equal(t, 1, len(pendingEvs)) + assert.Len(t, pendingEvs, 1) } type voteData struct { @@ -372,11 +361,11 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { vote1 := types.MakeVoteNoError(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) v1 := vote1.ToProto() - err := val.SignVote(chainID, v1) + err := val.SignVote(chainID, v1, false) require.NoError(t, err) badVote := types.MakeVoteNoError(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) bv := badVote.ToProto() - err = val2.SignVote(chainID, bv) + err = val2.SignVote(chainID, bv, false) require.NoError(t, err) vote1.Signature = v1.Signature @@ -407,9 +396,9 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { Timestamp: defaultEvidenceTime, } if c.valid { - assert.Nil(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be valid") + require.NoError(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be valid") } else { - assert.NotNil(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be invalid") + require.Error(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be invalid") } } @@ -441,25 +430,27 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { evList := types.EvidenceList{goodEv} err = pool.CheckEvidence(evList) - assert.NoError(t, err) + require.NoError(t, err) // evidence with a 
different validator power should fail evList = types.EvidenceList{badEv} err = pool.CheckEvidence(evList) - assert.Error(t, err) + require.Error(t, err) // evidence with a different timestamp should fail evList = types.EvidenceList{badTimeEv} err = pool.CheckEvidence(evList) - assert.Error(t, err) + require.Error(t, err) } func makeLunaticEvidence( t *testing.T, height, commonHeight int64, - totalVals, byzVals, phantomVals int, + byzVals, phantomVals int, commonTime, attackTime time.Time, ) (ev *types.LightClientAttackEvidence, trusted *types.LightBlock, common *types.LightBlock) { + t.Helper() + totalVals := 10 commonValSet, commonPrivVals := types.RandValidatorSet(totalVals, defaultVotingPower) require.Greater(t, totalVals, byzVals) @@ -484,7 +475,7 @@ func makeLunaticEvidence( conflictingHeader.ValidatorsHash = conflictingVals.Hash() blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(evidenceChainID, height, 1, cmtproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewVoteSet(evidenceChainID, height, 1, types.SignedMsgType(2), conflictingVals) commit, err := test.MakeCommitFromVoteSet(blockID, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) ev = &types.LightClientAttackEvidence{ @@ -511,7 +502,7 @@ } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) trustedVals, privVals := types.RandValidatorSet(totalVals, defaultVotingPower) - trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, cmtproto.SignedMsgType(2), trustedVals) + trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, types.SignedMsgType(2), trustedVals) trustedCommit, err := test.MakeCommitFromVoteSet(trustedBlockID, trustedVoteSet, privVals, defaultEvidenceTime) require.NoError(t, err) trusted = &types.LightBlock{ @@ -570,6 +561,7 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc func orderPrivValsByValSet( t *testing.T, vals *types.ValidatorSet, privVals []types.PrivValidator, ) []types.PrivValidator { + t.Helper() output := make([]types.PrivValidator, len(privVals)) for idx, v := range vals.Validators { for _, p := range privVals { diff --git a/libs/fail/fail.go b/internal/fail/fail.go similarity index 94% rename from libs/fail/fail.go rename to internal/fail/fail.go index 38cec9a2969..70b8e169c54 100644 --- a/libs/fail/fail.go +++ b/internal/fail/fail.go @@ -22,7 +22,7 @@ func envSet() int { return callIndexToFail } -// Fail when FAIL_TEST_INDEX == callIndex +// Fail when FAIL_TEST_INDEX == callIndex. var callIndex int // indexes Fail calls func Fail() { diff --git a/libs/flowrate/README.md b/internal/flowrate/README.md similarity index 100% rename from libs/flowrate/README.md rename to internal/flowrate/README.md diff --git a/libs/flowrate/flowrate.go b/internal/flowrate/flowrate.go similarity index 94% rename from libs/flowrate/flowrate.go rename to internal/flowrate/flowrate.go index 23252f4e333..a02501169c7 100644 --- a/libs/flowrate/flowrate.go +++ b/internal/flowrate/flowrate.go @@ -32,6 +32,8 @@ type Monitor struct { tBytes int64 // Number of bytes expected in the current transfer tLast time.Duration // Time of the most recent transfer of at least 1 byte + + sleepTime time.Duration // Amount of time spent on time.Sleep() calls } // New creates a new flow control monitor.
Instantaneous transfer rate is @@ -47,6 +49,7 @@ type Monitor struct { // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, // respectively. func New(sampleRate, windowSize time.Duration) *Monitor { + ensureClockRunning() if sampleRate = clockRound(sampleRate); sampleRate <= 0 { sampleRate = 5 * clockRate } @@ -121,6 +124,8 @@ type Status struct { TimeRem time.Duration // Estimated time to completion Progress Percent // Overall transfer progress Active bool // Flag indicating an active transfer + + SleepTime time.Duration // Amount of time spent on time.Sleep() calls } // Status returns current transfer status information. The returned value @@ -142,6 +147,10 @@ func (m *Monitor) Status() Status { if s.BytesRem < 0 { s.BytesRem = 0 } + if m.sleepTime > 0 { + s.SleepTime = m.sleepTime + m.sleepTime = 0 + } if s.Duration > 0 { rAvg := float64(s.Bytes) / s.Duration.Seconds() s.AvgRate = round(rAvg) @@ -188,9 +197,17 @@ func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { // If block == true, wait until m.sBytes < limit if now := m.update(0); block { + var startTime time.Time for m.sBytes >= limit && m.active { + if startTime.IsZero() { + startTime = time.Now() + } now = m.waitNextSample(now) } + // Compute the actual time spent in waitNextSample() calls + if !startTime.IsZero() { + m.sleepTime += time.Since(startTime) + } } // Make limit <= want (unlimited if the transfer is no longer active) @@ -221,7 +238,7 @@ func (m *Monitor) SetTransferSize(bytes int64) { // sample is done. func (m *Monitor) update(n int) (now time.Duration) { if !m.active { - return + return now } if now = clock(); n > 0 { m.tLast = now @@ -243,7 +260,7 @@ } m.reset(now) } - return + return now } // reset clears the current sample state in preparation for the next sample. diff --git a/libs/flowrate/io.go b/internal/flowrate/io.go similarity index 96% rename from libs/flowrate/io.go rename to internal/flowrate/io.go index fbe0909725a..fdcb6dad841 100644 --- a/libs/flowrate/io.go +++ b/internal/flowrate/io.go @@ -46,14 +46,14 @@ func (r *Reader) Read(p []byte) (n int, err error) { if len(p) > 0 { n, err = r.IO(r.Reader.Read(p)) } - return + return n, err } // SetLimit changes the transfer rate limit to new bytes per second and returns // the previous setting. func (r *Reader) SetLimit(new int64) (old int64) { old, r.limit = r.limit, new - return + return old } // SetBlocking changes the blocking behavior and returns the previous setting. A @@ -61,7 +61,7 @@ func (r *Reader) SetLimit(new int64) (old int64) { // may be read at this time due to the rate limit. func (r *Reader) SetBlocking(new bool) (old bool) { old, r.block = r.block, new - return + return old } // Close closes the underlying reader if it implements the io.Closer interface. @@ -97,22 +97,21 @@ func (w *Writer) Write(p []byte) (n int, err error) { var c int for len(p) > 0 && err == nil { s := p[:w.Limit(len(p), w.limit, w.block)] - if len(s) > 0 { - c, err = w.IO(w.Writer.Write(s)) - } else { + if len(s) == 0 { return n, ErrLimit } + c, err = w.IO(w.Writer.Write(s)) p = p[c:] n += c } - return + return n, err } // SetLimit changes the transfer rate limit to new bytes per second and returns // the previous setting. func (w *Writer) SetLimit(new int64) (old int64) { old, w.limit = w.limit, new - return + return old } // SetBlocking changes the blocking behavior and returns the previous setting.
A @@ -120,7 +119,7 @@ func (w *Writer) SetLimit(new int64) (old int64) { // may be written at this time due to the rate limit. func (w *Writer) SetBlocking(new bool) (old bool) { old, w.block = w.block, new - return + return old } // Close closes the underlying writer if it implements the io.Closer interface. diff --git a/internal/flowrate/io_test.go b/internal/flowrate/io_test.go new file mode 100644 index 00000000000..39585f0c431 --- /dev/null +++ b/internal/flowrate/io_test.go @@ -0,0 +1,229 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "bytes" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + start = status[0].Start + + // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress + want := []Status{ + {start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true, 0}, + {start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true, 0}, + {start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true, 0}, + {start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true, 0}, + {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false, 0}, + {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false, 0}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +// TestWriter tests the behavior of the Writer in the flowrate package. 
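As orientation for the rewritten test that follows: a blocking Writer sleeps inside Limit() until the next sample window opens, and the time spent sleeping now surfaces in Status().SleepTime. A minimal in-package sketch (the package is internal after the move; the function name is hypothetical, and only APIs visible in this diff are used):

package flowrate

import "bytes"

// sleepTimeDemo writes 100 bytes through a 200 B/s limiter in blocking mode;
// the returned Status carries the total time spent blocked in Limit().
func sleepTimeDemo() (Status, error) {
	w := NewWriter(&bytes.Buffer{}, 200) // limit: 200 bytes/second
	w.SetBlocking(true)                  // sleep in Limit() instead of returning ErrLimit
	_, err := w.Write(make([]byte, 100)) // 100 B at 200 B/s spans several throttled samples
	return w.Status(), err
}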
+// It verifies that the Writer correctly implements the Limiter interface, +// and that it correctly reports its status after writing data. +func TestWriter(t *testing.T) { + const bufferSize = 100 + const limit = 200 + const writeSize = 20 + const remainingSize = 80 + const transferSize = 100 + + // Initialize a buffer with sequential bytes + b := make([]byte, bufferSize) + for i := range b { + b[i] = byte(i) + } + + // Create a new Writer with a limit of 200 bytes per second + w := NewWriter(&bytes.Buffer{}, limit) + start := time.Now() + + // Subtest to verify that the Writer implements the Limiter interface + t.Run("implements limiter interface", func(t *testing.T) { + _, ok := any(w).(Limiter) + if !ok { + t.Fatalf("Expected Writer to implement Limiter interface") + } + }) + + // Subtest for non-blocking write + t.Run("non-blocking write", func(t *testing.T) { + w.SetBlocking(false) + n, err := w.Write(b) + if n != writeSize || err != ErrLimit { + t.Fatalf("w.Write(b) expected %d (ErrLimit); got %v (%v)", writeSize, n, err) + } + if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + }) + + // Subtest for blocking write + t.Run("blocking write", func(t *testing.T) { + w.SetBlocking(true) + n, err := w.Write(b[writeSize:]) + if n != remainingSize || err != nil { + t.Fatalf("w.Write(b[%d:]) expected %d (); got %v (%v)", writeSize, remainingSize, n, err) + } + // Explanation for `rt < _300ms` (as opposed to `< _500ms`) + // + // |<-- start | | | + // + // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms|---500ms + // sends: 20|20 |20 |20 |20 |20# + // + // NOTE: The '#' symbol can thus happen before 500ms is up. + // Thus, we can only panic if rt < _300ms. + if rt := time.Since(start); rt < _300ms || rt > _500ms { + t.Fatalf("w.Write(b[%d:]) returned at unexpected time (%v)", writeSize, rt) + } + }) + + // Subtest for setting transfer size + t.Run("setting transfer size", func(t *testing.T) { + w.SetTransferSize(transferSize) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Define expected statuses + want := []Status{ + {start, remainingSize, 4, limit, limit, limit, limit, writeSize, _400ms, 0, _100ms, 80000, true, 0}, + {start, bufferSize, 5, limit, limit, limit, limit, 0, _500ms, _100ms, 0, 100000, true, 0}, + } + + // Compare actual and expected statuses + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) + } + } + }) + + // Subtest to verify that the written data matches the input + t.Run("written data matches input", func(t *testing.T) { + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } + }) +} + +// statusesAreEqual returns true if s1 is equal to s2. Equality here means +// general equality of fields except for the duration and rates, which can +// drift due to unpredictable delays (e.g. thread wakes up 25ms after +// `time.Sleep` has ended). 
+func statusesAreEqual(s1 *Status, s2 *Status) bool { + if s1.Active == s2.Active && + s1.Start.Equal(s2.Start) && + durationsAreEqual(s1.Duration, s2.Duration) && + durationsAreEqual(s1.Idle, s2.Idle) && + s1.Bytes == s2.Bytes && + s1.Samples == s2.Samples && + ratesAreEqual(s1.InstRate, s2.InstRate) && + ratesAreEqual(s1.CurRate, s2.CurRate) && + ratesAreEqual(s1.AvgRate, s2.AvgRate) && + ratesAreEqual(s1.PeakRate, s2.PeakRate) && + s1.BytesRem == s2.BytesRem && + durationsAreEqual(s1.TimeRem, s2.TimeRem) && + s1.Progress == s2.Progress && + durationsAreEqual(s1.SleepTime, s2.SleepTime) { + return true + } + return false +} + +func durationsAreEqual(d1 time.Duration, d2 time.Duration) bool { + const maxDeviation = 50 * time.Millisecond + return d2-d1 <= maxDeviation +} + +func ratesAreEqual(r1 int64, r2 int64) bool { + const maxDeviation = int64(50) + sub := r1 - r2 + if sub < 0 { + sub = -sub + } + if sub <= maxDeviation { + return true + } + return false +} diff --git a/libs/flowrate/util.go b/internal/flowrate/util.go similarity index 58% rename from libs/flowrate/util.go rename to internal/flowrate/util.go index b33ddc70138..ca8e935288d 100644 --- a/libs/flowrate/util.go +++ b/internal/flowrate/util.go @@ -7,29 +7,56 @@ package flowrate import ( "math" "strconv" + "sync/atomic" "time" ) // clockRate is the resolution and precision of clock(). const clockRate = 20 * time.Millisecond -// czero is the process start time rounded down to the nearest clockRate -// increment. -var czero = time.Now().Round(clockRate) +var ( + hasInitializedClock = atomic.Bool{} + currentClockValue = atomic.Int64{} + clockStartTime = time.Time{} +) + +// checks if the clock update timer is running. If not, sets clockStartTime and starts it. +func ensureClockRunning() { + firstRun := hasInitializedClock.CompareAndSwap(false, true) + if !firstRun { + return + } + clockStartTime = time.Now().Round(clockRate) + go runClockUpdates() +} + +// increments the current clock value every clockRate interval. +func runClockUpdates() { + // Create a ticker that sends the current time on the channel every clockRate interval + ticker := time.Tick(clockRate) + + // First tick happens after clockrate, therefore initial value is 0. + for t := range ticker { + delta := t.Sub(clockStartTime) + rounded := clockRound(delta) + currentClockValue.Store(int64(rounded)) + } +} // clock returns a low resolution timestamp relative to the process start time. func clock() time.Duration { - return time.Now().Round(clockRate).Sub(czero) + return time.Duration(currentClockValue.Load()) } // clockToTime converts a clock() timestamp to an absolute time.Time value. func clockToTime(c time.Duration) time.Time { - return czero.Add(c) + return clockStartTime.Add(c) } // clockRound returns d rounded to the nearest clockRate increment. func clockRound(d time.Duration) time.Duration { - return (d + clockRate>>1) / clockRate * clockRate + //nolint:durationcheck + return ((d + clockRate>>1) / clockRate) * clockRate } // round returns x rounded to the nearest int64 (non-negative values only). diff --git a/internal/indexer/indexer_utils.go b/internal/indexer/indexer_utils.go index c6caaee6f85..48cbbe297db 100644 --- a/internal/indexer/indexer_utils.go +++ b/internal/indexer/indexer_utils.go @@ -8,8 +8,8 @@ import ( ) // If the actual event value is a float, we get the condition and parse it as a float -// to compare against -func compareFloat(op1 *big.Float, op2 interface{}) (int, bool, error) { +// to compare against. 
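To see why the comparison stays in the float domain rather than converting the condition to an integer, consider what truncation would do. A standalone illustration using only math/big (not part of the patch):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	cond := big.NewFloat(100.2)
	asInt, _ := cond.Int(nil)               // truncates 100.2 to 100
	fmt.Println(asInt.Cmp(big.NewInt(100))) // 0: as integers, 100 "equals" 100.2

	val := new(big.Float).SetInt(big.NewInt(100)) // exact conversion, no rounding
	fmt.Println(val.Cmp(cond))                    // -1: as floats, 100 < 100.2
}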
+func compareFloat(op1 *big.Float, op2 any) (int, bool, error) { switch opVal := op2.(type) { case *big.Int: vF := new(big.Float) @@ -29,8 +29,7 @@ func compareFloat(op1 *big.Float, op2 interface{}) (int, bool, error) { // needed to represent the integer to avoid rounding issues with floats // where 100 would equal to 100.2 because 100.2 is rounded to 100, while 100.7 // would be rounded to 101. -func compareInt(op1 *big.Int, op2 interface{}) (int, bool, error) { - +func compareInt(op1 *big.Int, op2 any) (int, bool, error) { switch opVal := op2.(type) { case *big.Int: return op1.Cmp(opVal), false, nil @@ -43,7 +42,7 @@ func compareInt(op1 *big.Int, op2 interface{}) (int, bool, error) { } } -func CheckBounds(ranges indexer.QueryRange, v interface{}) (bool, error) { +func CheckBounds(ranges indexer.QueryRange, v any) (bool, error) { // These functions fetch the lower and upper bounds of the query // It is expected that for x > 5, the value of lowerBound is 6. // This is achieved by adding one to the actual lower bound. @@ -63,7 +62,7 @@ func CheckBounds(ranges indexer.QueryRange, v interface{}) (bool, error) { // *Explanation for the isFloat condition below.* // In LowerBoundValue(), for floating points, we cannot simply add 1 due to the reasons explained in - // in the comment at the beginning. The same is true for subtracting one for UpperBoundValue(). + // the comment at the beginning. The same is true for subtracting one for UpperBoundValue(). // That means that for integers, if the condition is >=, cmp will be either 0 or 1 // ( cmp == -1 should always be false). // But if the lowerBound is a float, we have not subtracted one, so returning a 0 diff --git a/inspect/doc.go b/internal/inspect/doc.go similarity index 100% rename from inspect/doc.go rename to internal/inspect/doc.go diff --git a/inspect/inspect.go b/internal/inspect/inspect.go similarity index 92% rename from inspect/inspect.go rename to internal/inspect/inspect.go index ad87551b900..0e493dd29de 100644 --- a/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -6,10 +6,12 @@ import ( "net" "os" + "golang.org/x/sync/errgroup" + "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/inspect/rpc" + "github.com/cometbft/cometbft/internal/inspect/rpc" + cmtstrings "github.com/cometbft/cometbft/internal/strings" "github.com/cometbft/cometbft/libs/log" - cmtstrings "github.com/cometbft/cometbft/libs/strings" rpccore "github.com/cometbft/cometbft/rpc/core" "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/indexer" @@ -17,11 +19,9 @@ import ( "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" - - "golang.org/x/sync/errgroup" ) -var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +var logger = log.NewLogger(os.Stdout) // Inspector manages an RPC service that exports methods to debug a failed node. // After a node shuts down due to a consensus failure, it will no longer start @@ -47,7 +47,7 @@ type Inspector struct { // The sinks are used to enable block and transaction querying via the RPC server. // The caller is responsible for starting and stopping the Inspector service. 
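An aside on the log.NewLogger(os.Stdout) call in the hunk above: per the `[libs/log]` migration to slog in this changeset, it replaces the old NewTMLogger/NewSyncWriter pair. A minimal usage sketch, assuming the usual Logger interface of leveled methods taking a message plus key-value pairs:

package main

import (
	"os"

	"github.com/cometbft/cometbft/libs/log"
)

func main() {
	// slog-backed replacement for log.NewTMLogger(log.NewSyncWriter(os.Stdout)).
	logger := log.NewLogger(os.Stdout)
	logger.Info("inspector listening", "addr", "tcp://127.0.0.1:26657")
}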
// -//nolint:lll + func New( cfg *config.RPCConfig, bs state.BlockStore, @@ -73,7 +73,7 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { if err != nil { return nil, err } - bs := store.NewBlockStore(bsDB) + bs := store.NewBlockStore(bsDB, store.WithDBKeyLayout(cfg.Storage.ExperimentalKeyLayout)) sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg}) if err != nil { return nil, err @@ -82,7 +82,7 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { if err != nil { return nil, err } - txidx, blkidx, err := block.IndexerFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID) + txidx, blkidx, _, err := block.IndexerFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID) if err != nil { return nil, err } diff --git a/inspect/inspect_test.go b/internal/inspect/inspect_test.go similarity index 93% rename from inspect/inspect_test.go rename to internal/inspect/inspect_test.go index 12b774f5797..311c7af2693 100644 --- a/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -16,7 +16,7 @@ import ( abcitypes "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/inspect" + "github.com/cometbft/cometbft/internal/inspect" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/pubsub/query" httpclient "github.com/cometbft/cometbft/rpc/client/http" @@ -67,8 +67,7 @@ func TestBlock(t *testing.T) { blockStoreMock := &statemocks.BlockStore{} blockStoreMock.On("Height").Return(testHeight) blockStoreMock.On("Base").Return(int64(0)) - blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}) - blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) + blockStoreMock.On("LoadBlock", testHeight).Return(testBlock, &types.BlockMeta{}) blockStoreMock.On("Close").Return(nil) txIndexerMock := &txindexmocks.TxIndexer{} @@ -90,7 +89,7 @@ func TestBlock(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) resultBlock, err := cli.Block(context.Background(), &testHeight) @@ -123,8 +122,8 @@ func TestTxSearch(t *testing.T) { txIndexerMock.On("Search", mock.Anything, mock.MatchedBy(func(q *query.Query) bool { return testQuery == strings.ReplaceAll(q.String(), " ", "") - })). - Return([]*abcitypes.TxResult{testTxResult}, nil) + }), mock.Anything). + Return([]*abcitypes.TxResult{testTxResult}, 1, nil) rpcConfig := config.TestRPCConfig() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) @@ -142,7 +141,7 @@ func TestTxSearch(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) @@ -190,7 +189,7 @@ func TestTx(t *testing.T) { // FIXME: used to induce context switch. 
// Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) @@ -239,7 +238,7 @@ func TestConsensusParams(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) params, err := cli.ConsensusParams(context.Background(), &testHeight) @@ -258,8 +257,8 @@ func TestBlockResults(t *testing.T) { testGasUsed := int64(100) stateStoreMock := &statemocks.Store{} stateStoreMock.On("Close").Return(nil) - // cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" - stateStoreMock.On("LoadFinalizeBlockResponse", testHeight).Return(&abcitypes.ResponseFinalizeBlock{ + // cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + stateStoreMock.On("LoadFinalizeBlockResponse", testHeight).Return(&abcitypes.FinalizeBlockResponse{ TxResults: []*abcitypes.ExecTxResult{ { GasUsed: testGasUsed, @@ -289,12 +288,12 @@ func TestBlockResults(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) res, err := cli.BlockResults(context.Background(), &testHeight) require.NoError(t, err) - require.Equal(t, res.TxsResults[0].GasUsed, testGasUsed) + require.Equal(t, res.TxResults[0].GasUsed, testGasUsed) cancel() wg.Wait() @@ -336,7 +335,7 @@ func TestCommit(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) res, err := cli.Commit(context.Background(), &testHeight) @@ -361,7 +360,7 @@ func TestBlockByHash(t *testing.T) { stateStoreMock.On("Close").Return(nil) blockStoreMock := &statemocks.BlockStore{} blockStoreMock.On("Close").Return(nil) - blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, &types.BlockMeta{ BlockID: types.BlockID{ Hash: testHash, }, @@ -369,7 +368,6 @@ func TestBlockByHash(t *testing.T) { Height: testHeight, }, }, nil) - blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() @@ -389,7 +387,7 @@ func TestBlockByHash(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) res, err := cli.BlockByHash(context.Background(), testHash) @@ -441,7 +439,7 @@ func TestBlockchain(t *testing.T) { // FIXME: used to induce context switch. 
// Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) res, err := cli.BlockchainInfo(context.Background(), 0, 100) @@ -493,7 +491,7 @@ func TestValidators(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) @@ -527,12 +525,11 @@ func TestBlockSearch(t *testing.T) { Header: types.Header{ Height: testHeight, }, - }, nil) - blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + }, &types.BlockMeta{ BlockID: types.BlockID{ Hash: testBlockHash, }, - }) + }, nil) blkIdxMock.On("Search", mock.Anything, mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). Return([]int64{testHeight}, nil) @@ -553,7 +550,7 @@ func TestBlockSearch(t *testing.T) { // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch startedWG.Wait() - requireConnect(t, rpcConfig.ListenAddress, 20) + requireConnect(t, rpcConfig.ListenAddress) cli, err := httpclient.New(rpcConfig.ListenAddress + "/v1") require.NoError(t, err) @@ -572,10 +569,12 @@ func TestBlockSearch(t *testing.T) { stateStoreMock.AssertExpectations(t) } -func requireConnect(t testing.TB, addr string, retries int) { +func requireConnect(tb testing.TB, addr string) { + tb.Helper() + retries := 20 parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { - t.Fatalf("malformed address to dial: %s", addr) + tb.Fatalf("malformed address to dial: %s", addr) } var err error for i := 0; i < retries; i++ { @@ -588,5 +587,5 @@ func requireConnect(t testing.TB, addr string, retries int) { // FIXME attempt to yield and let the other goroutine continue execution. time.Sleep(time.Microsecond * 100) } - t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err) + tb.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err) } diff --git a/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go similarity index 100% rename from inspect/rpc/rpc.go rename to internal/inspect/rpc/rpc.go diff --git a/internal/keytypes/keytypes.go b/internal/keytypes/keytypes.go new file mode 100644 index 00000000000..3b5babf491f --- /dev/null +++ b/internal/keytypes/keytypes.go @@ -0,0 +1,66 @@ +package keytypes + +import ( + "fmt" + "strings" + + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/bls12381" + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/crypto/secp256k1" +) + +var keyTypes map[string]func() (crypto.PrivKey, error) + +func init() { + keyTypes = map[string]func() (crypto.PrivKey, error){ + ed25519.KeyType: func() (crypto.PrivKey, error) { //nolint: unparam + return ed25519.GenPrivKey(), nil + }, + secp256k1.KeyType: func() (crypto.PrivKey, error) { //nolint: unparam + return secp256k1.GenPrivKey(), nil + }, + } + if bls12381.Enabled { + keyTypes[bls12381.KeyType] = func() (crypto.PrivKey, error) { + pk, err := bls12381.GenPrivKey() + if err != nil { + return nil, fmt.Errorf("failed to generate BLS key: %w", err) + } + return pk, nil + } + } +} + +// GenPrivKey generates a private key of the given type. 
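Before the implementation, a sketch of the intended call pattern for this new registry (a hypothetical caller inside the cometbft module, since the package is internal; only identifiers defined in this file or in packages it imports are used):

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/crypto/ed25519"
	"github.com/cometbft/cometbft/internal/keytypes"
)

func main() {
	fmt.Println("supported:", keytypes.SupportedKeyTypesStr()) // e.g. "ed25519", "secp256k1"
	if !keytypes.IsSupported(ed25519.KeyType) {
		panic("ed25519 is always registered")
	}
	pk, err := keytypes.GenPrivKey(ed25519.KeyType)
	if err != nil {
		panic(err)
	}
	fmt.Println("address:", pk.PubKey().Address())
}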
+func GenPrivKey(keyType string) (crypto.PrivKey, error) { + genF, ok := keyTypes[keyType] + if !ok { + return nil, fmt.Errorf("unsupported key type: %q", keyType) + } + return genF() +} + +// SupportedKeyTypesStr returns a string of supported key types. +func SupportedKeyTypesStr() string { + keyTypesSlice := make([]string, 0, len(keyTypes)) + for k := range keyTypes { + keyTypesSlice = append(keyTypesSlice, fmt.Sprintf("%q", k)) + } + return strings.Join(keyTypesSlice, ", ") +} + +// ListSupportedKeyTypes returns a list of supported key types. +func ListSupportedKeyTypes() []string { + keyTypesSlice := make([]string, 0, len(keyTypes)) + for k := range keyTypes { + keyTypesSlice = append(keyTypesSlice, k) + } + return keyTypesSlice +} + +// IsSupported returns true if the key type is supported. +func IsSupported(keyType string) bool { + _, ok := keyTypes[keyType] + return ok +} diff --git a/libs/net/net.go b/internal/net/net.go similarity index 92% rename from libs/net/net.go rename to internal/net/net.go index 54dbae90dad..4ab83a4139d 100644 --- a/libs/net/net.go +++ b/internal/net/net.go @@ -25,8 +25,8 @@ func ConnectContext(ctx context.Context, protoAddr string) (net.Conn, error) { // ProtocolAndAddress splits an address into the protocol and address components. // For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". // If the address has no protocol prefix, the default is "tcp". -func ProtocolAndAddress(listenAddr string) (string, string) { - protocol, address := "tcp", listenAddr +func ProtocolAndAddress(listenAddr string) (protocol, address string) { + protocol, address = "tcp", listenAddr parts := strings.SplitN(address, "://", 2) if len(parts) == 2 { protocol, address = parts[0], parts[1] diff --git a/libs/net/net_test.go b/internal/net/net_test.go similarity index 99% rename from libs/net/net_test.go rename to internal/net/net_test.go index 38cd58f6a24..975e4f6d0f7 100644 --- a/libs/net/net_test.go +++ b/internal/net/net_test.go @@ -7,7 +7,6 @@ import ( ) func TestProtocolAndAddress(t *testing.T) { - cases := []struct { fullAddr string proto string diff --git a/libs/os/os.go b/internal/os/os.go similarity index 97% rename from libs/os/os.go rename to internal/os/os.go index 334eaf4c896..d93117e730a 100644 --- a/libs/os/os.go +++ b/internal/os/os.go @@ -12,7 +12,7 @@ import ( ) type logger interface { - Info(msg string, keyvals ...interface{}) + Info(msg string, keyvals ...any) } // TrapSignal catches the SIGTERM/SIGINT and executes cb function. After that it exits @@ -41,7 +41,7 @@ func Kill() error { } func Exit(s string) { - fmt.Printf(s + "\n") + fmt.Println(s) os.Exit(1) } diff --git a/libs/os/os_test.go b/internal/os/os_test.go similarity index 85% rename from libs/os/os_test.go rename to internal/os/os_test.go index 4de51844423..540a509d0bf 100644 --- a/libs/os/os_test.go +++ b/internal/os/os_test.go @@ -2,7 +2,6 @@ package os import ( "bytes" - "fmt" "os" "path/filepath" "testing" @@ -21,7 +20,7 @@ func TestCopyFile(t *testing.T) { t.Fatal(err) } - copyfile := fmt.Sprintf("%s.copy", tmpfile.Name()) + copyfile := tmpfile.Name() + ".copy" if err := CopyFile(tmpfile.Name(), copyfile); err != nil { t.Fatal(err) } @@ -44,30 +43,30 @@ func TestEnsureDir(t *testing.T) { defer os.RemoveAll(tmp) // Should be possible to create a new directory. 
- err = EnsureDir(filepath.Join(tmp, "dir"), 0755) + err = EnsureDir(filepath.Join(tmp, "dir"), 0o755) require.NoError(t, err) require.DirExists(t, filepath.Join(tmp, "dir")) // Should succeed on existing directory. - err = EnsureDir(filepath.Join(tmp, "dir"), 0755) + err = EnsureDir(filepath.Join(tmp, "dir"), 0o755) require.NoError(t, err) // Should fail on file. - err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0o644) require.NoError(t, err) - err = EnsureDir(filepath.Join(tmp, "file"), 0755) + err = EnsureDir(filepath.Join(tmp, "file"), 0o755) require.Error(t, err) // Should allow symlink to dir. err = os.Symlink(filepath.Join(tmp, "dir"), filepath.Join(tmp, "linkdir")) require.NoError(t, err) - err = EnsureDir(filepath.Join(tmp, "linkdir"), 0755) + err = EnsureDir(filepath.Join(tmp, "linkdir"), 0o755) require.NoError(t, err) // Should error on symlink to file. err = os.Symlink(filepath.Join(tmp, "file"), filepath.Join(tmp, "linkfile")) require.NoError(t, err) - err = EnsureDir(filepath.Join(tmp, "linkfile"), 0755) + err = EnsureDir(filepath.Join(tmp, "linkfile"), 0o755) require.Error(t, err) } @@ -83,7 +82,7 @@ func TestTrickedTruncation(t *testing.T) { originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") - if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + if err := os.WriteFile(originalWALPath, originalWALContent, 0o755); err != nil { t.Fatal(err) } diff --git a/libs/progressbar/progressbar.go b/internal/progressbar/progressbar.go similarity index 97% rename from libs/progressbar/progressbar.go rename to internal/progressbar/progressbar.go index 072804c7625..bd3aa09c15c 100644 --- a/libs/progressbar/progressbar.go +++ b/internal/progressbar/progressbar.go @@ -36,6 +36,6 @@ func (bar *Bar) Play(cur int64) { fmt.Printf("\r[%-50s]%3d%% %8d/%d", bar.rate, bar.percent, bar.cur, bar.total) } -func (bar *Bar) Finish() { +func (*Bar) Finish() { fmt.Println() } diff --git a/libs/progressbar/progressbar_test.go b/internal/progressbar/progressbar_test.go similarity index 100% rename from libs/progressbar/progressbar_test.go rename to internal/progressbar/progressbar_test.go diff --git a/libs/rand/random.go b/internal/rand/random.go similarity index 87% rename from libs/rand/random.go rename to internal/rand/random.go index 053e03e15e8..5a9f04bf81c 100644 --- a/libs/rand/random.go +++ b/internal/rand/random.go @@ -37,14 +37,28 @@ func NewRand() *Rand { return rand } -func (r *Rand) init() { +// Make a new stdlib rand source. It's up to the caller to ensure +// that the rand source is not called in parallel. +// The failure mode of calling the returned rand multiple times in parallel is +// repeated values across threads.
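Concretely, the safe pattern is one source per goroutine, never shared; a minimal in-package sketch mirroring the TestStdlibRngConcurrencySafety test added later in this diff (the function name is hypothetical):

package rand

import "sync"

// parallelRandDemo shows the intended use of NewStdlibRand: every goroutine
// owns its own independently seeded source, so no synchronization is needed.
func parallelRandDemo() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			r := NewStdlibRand() // never share r across goroutines
			_ = r.Intn(100)
		}()
	}
	wg.Wait()
}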
+func NewStdlibRand() *mrand.Rand { + // G404: Use of weak random number generator (math/rand instead of crypto/rand) + //nolint:gosec + return mrand.New(mrand.NewSource(newSeed())) +} + +func newSeed() int64 { bz := cRandBytes(8) var seed uint64 for i := 0; i < 8; i++ { seed |= uint64(bz[i]) seed <<= 8 } - r.reset(int64(seed)) + return int64(seed) +} + +func (r *Rand) init() { + r.reset(newSeed()) } func (r *Rand) reset(seed int64) { @@ -53,7 +67,7 @@ func (r *Rand) reset(seed int64) { r.rand = mrand.New(mrand.NewSource(seed)) } -//---------------------------------------- +// ---------------------------------------- // Global functions func Seed(seed int64) { @@ -140,7 +154,7 @@ func Perm(n int) []int { return grand.Perm(n) } -//---------------------------------------- +// ---------------------------------------- // Rand methods func (r *Rand) Seed(seed int64) { @@ -284,7 +298,7 @@ func (r *Rand) Intn(n int) int { return i } -// Bool returns a uniformly random boolean +// Bool returns a uniformly random boolean. func (r *Rand) Bool() bool { // See https://github.com/golang/go/issues/23804#issuecomment-365370418 // for reasoning behind computing like this @@ -302,6 +316,8 @@ func (r *Rand) Perm(n int) []int { // NOTE: This relies on the os's random number generator. // For real security, we should salt that with some seed. // See github.com/cometbft/cometbft/crypto for a more secure reader. +// This function is thread safe, see: +// https://stackoverflow.com/questions/75685374/is-golang-crypto-rand-thread-safe func cRandBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) diff --git a/libs/rand/random_test.go b/internal/rand/random_test.go similarity index 76% rename from libs/rand/random_test.go rename to internal/rand/random_test.go index ec4aa327185..91515413fcc 100644 --- a/libs/rand/random_test.go +++ b/internal/rand/random_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "log" "sync" "testing" "time" @@ -14,20 +15,20 @@ import ( func TestRandStr(t *testing.T) { l := 243 s := Str(l) - assert.Equal(t, l, len(s)) + assert.Len(t, s, l) } func TestRandBytes(t *testing.T) { l := 243 b := Bytes(l) - assert.Equal(t, l, len(b)) + assert.Len(t, b, l) } func TestRandIntn(t *testing.T) { n := 243 for i := 0; i < 100; i++ { x := Intn(n) - assert.True(t, x < n) + assert.Less(t, x, n) } } @@ -54,7 +55,10 @@ func testThemAll() string { // Use it. out := new(bytes.Buffer) perm := Perm(10) - blob, _ := json.Marshal(perm) + blob, err := json.Marshal(perm) + if err != nil { + log.Fatalf("couldn't marshal perm: %v", err) + } fmt.Fprintf(out, "perm: %s\n", blob) fmt.Fprintf(out, "randInt: %d\n", Int()) fmt.Fprintf(out, "randUint: %d\n", Uint()) @@ -83,6 +87,24 @@ func TestRngConcurrencySafety(_ *testing.T) { wg.Wait() } +// Makes a new stdlib random instance 100 times concurrently. +// Ensures that it is concurrent safe to create rand instances, and call independent rand +// sources in parallel.
+func TestStdlibRngConcurrencySafety(_ *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + r := NewStdlibRand() + _ = r.Uint64() + <-time.After(time.Millisecond * time.Duration(Intn(100))) + _ = r.Perm(3) + }() + } + wg.Wait() +} + func BenchmarkRandBytes10B(b *testing.B) { benchmarkRandBytes(b, 10) } @@ -104,10 +126,12 @@ func BenchmarkRandBytes100KiB(b *testing.B) { } func BenchmarkRandBytes1MiB(b *testing.B) { + b.Helper() benchmarkRandBytes(b, 1024*1024) } func benchmarkRandBytes(b *testing.B, n int) { + b.Helper() for i := 0; i < b.N; i++ { _ = Bytes(n) } diff --git a/internal/rpctrace/rpctrace.go b/internal/rpctrace/rpctrace.go index b24c91f488b..de1cdd1aca6 100644 --- a/internal/rpctrace/rpctrace.go +++ b/internal/rpctrace/rpctrace.go @@ -1,13 +1,10 @@ package rpctrace -import "github.com/gofrs/uuid" +import "github.com/google/uuid" // New returns a randomly generated string which can be used to assist in // tracing RPC errors. func New() (string, error) { - id, err := uuid.NewV4() - if err != nil { - return "", err - } + id := uuid.New() return id.String(), nil } diff --git a/libs/strings/string.go b/internal/strings/string.go similarity index 97% rename from libs/strings/string.go rename to internal/strings/string.go index f012d761b0e..9f93247cb9f 100644 --- a/libs/strings/string.go +++ b/internal/strings/string.go @@ -82,7 +82,7 @@ func ASCIITrim(s string) string { return string(r) } -// StringSliceEqual checks if string slices a and b are equal +// StringSliceEqual checks if string slices a and b are equal. func StringSliceEqual(a, b []string) bool { if len(a) != len(b) { return false diff --git a/libs/strings/string_test.go b/internal/strings/string_test.go similarity index 90% rename from libs/strings/string_test.go rename to internal/strings/string_test.go index 1ec7b0d56be..afd32c87bcb 100644 --- a/libs/strings/string_test.go +++ b/internal/strings/string_test.go @@ -3,9 +3,8 @@ package strings import ( "testing" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStringInSlice(t *testing.T) { @@ -31,10 +30,10 @@ func TestIsASCIIText(t *testing.T) { } func TestASCIITrim(t *testing.T) { - assert.Equal(t, ASCIITrim(" "), "") - assert.Equal(t, ASCIITrim(" a"), "a") - assert.Equal(t, ASCIITrim("a "), "a") - assert.Equal(t, ASCIITrim(" a "), "a") + assert.Equal(t, "", ASCIITrim(" ")) + assert.Equal(t, "a", ASCIITrim(" a")) + assert.Equal(t, "a", ASCIITrim("a ")) + assert.Equal(t, "a", ASCIITrim(" a ")) assert.Panics(t, func() { ASCIITrim("\xC2\xA2") }) } diff --git a/libs/tempfile/tempfile.go b/internal/tempfile/tempfile.go similarity index 96% rename from libs/tempfile/tempfile.go rename to internal/tempfile/tempfile.go index f79cd0e1632..eb6bf02f0ec 100644 --- a/libs/tempfile/tempfile.go +++ b/internal/tempfile/tempfile.go @@ -15,18 +15,18 @@ import ( const ( atomicWriteFilePrefix = "write-file-atomic-" // Maximum number of atomic write file conflicts before we start reseeding - // (reduced from golang's default 10 due to using an increased randomness space) + // (reduced from golang's default 10 due to using an increased randomness space). atomicWriteFileMaxNumConflicts = 5 // Maximum number of attempts to make at writing the write file before giving up - // (reduced from golang's default 10000 due to using an increased randomness space) + // (reduced from golang's default 10000 due to using an increased randomness space). 
atomicWriteFileMaxNumWriteAttempts = 1000 // LCG constants from Donald Knuth MMIX - // This LCG's has a period equal to 2**64 + // This LCG has a period equal to 2**64. lcgA = 6364136223846793005 lcgC = 1442695040888963407 // Create in case it doesn't exist and force kernel // flush, which still leaves the potential of lingering disk cache. - // Never overwrites files + // Never overwrites files. atomicWriteFileFlag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC | os.O_EXCL ) @@ -65,7 +65,7 @@ func randWriteFileSuffix() string { suffix := strconv.Itoa(int(r)) if string(suffix[0]) == "-" { // Replace first "-" with "0". This is purely for UI clarity, - // as otherwhise there would be two `-` in a row. + // as otherwise there would be two `-` in a row. suffix = strings.Replace(suffix, "-", "0", 1) } return suffix diff --git a/libs/tempfile/tempfile_test.go b/internal/tempfile/tempfile_test.go similarity index 88% rename from libs/tempfile/tempfile_test.go rename to internal/tempfile/tempfile_test.go index 4ff18863f4a..e2f8fa5385a 100644 --- a/libs/tempfile/tempfile_test.go +++ b/internal/tempfile/tempfile_test.go @@ -6,18 +6,18 @@ import ( "bytes" "fmt" "os" - testing "testing" + "testing" "github.com/stretchr/testify/require" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func TestWriteFileAtomic(t *testing.T) { var ( data = []byte(cmtrand.Str(cmtrand.Intn(2048))) old = cmtrand.Bytes(cmtrand.Intn(2048)) - perm os.FileMode = 0600 + perm os.FileMode = 0o600 ) f, err := os.CreateTemp("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) - if err = os.WriteFile(f.Name(), old, 0600); err != nil { + if err = os.WriteFile(f.Name(), old, 0o600); err != nil { t.Fatal(err) } @@ -68,7 +68,7 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { firstFileRand := randWriteFileSuffix() atomicWriteFileRand = defaultSeed fname := "/tmp/" + atomicWriteFilePrefix + firstFileRand - f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) + f, err := os.OpenFile(fname, atomicWriteFileFlag, 0o777) defer os.Remove(fname) // Defer here, in case there is a panic in WriteFileAtomic. defer os.Remove(fileToWrite) require.NoError(t, err) _, err = f.WriteString(testString) require.NoError(t, err) - err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0o777) require.NoError(t, err) // Check that the first atomic file was untouched firstAtomicFileBytes, err := os.ReadFile(fname) @@ -112,8 +112,8 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) - require.Nil(t, err) + f, err := os.OpenFile(fname, atomicWriteFileFlag, 0o777) + require.NoError(t, err) _, err = f.WriteString(fmt.Sprintf(testString, i)) require.NoError(t, err) defer os.Remove(fname) @@ -123,7 +123,7 @@ // Defer here, in case there is a panic in WriteFileAtomic.
defer os.Remove(fileToWrite) - err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0o777) require.NoError(t, err) // Check that all intermittent atomic file were untouched atomicWriteFileRand = defaultSeed @@ -131,13 +131,13 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand firstAtomicFileBytes, err := os.ReadFile(fname) - require.Nil(t, err, "Error reading first atomic file") + require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes, "atomic write file %d was overwritten", i) } // Check that the resultant file is correct resultantFileBytes, err := os.ReadFile(fileToWrite) - require.Nil(t, err, "Error reading resultant file") + require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") } diff --git a/internal/test/block.go b/internal/test/block.go index 31587cf1712..16491a0e4cb 100644 --- a/internal/test/block.go +++ b/internal/test/block.go @@ -16,9 +16,7 @@ const ( DefaultTestChainID = "test-chain" ) -var ( - DefaultTestTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) -) +var DefaultTestTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) func RandomAddress() []byte { return crypto.CRandBytes(crypto.AddressSize) @@ -43,7 +41,7 @@ func MakeBlockIDWithHash(hash []byte) types.BlockID { } // MakeHeader fills the rest of the contents of the header such that it passes -// validate basic +// validate basic. func MakeHeader(t *testing.T, h *types.Header) *types.Header { t.Helper() if h.Version.Block == 0 { diff --git a/internal/test/commit.go b/internal/test/commit.go index 599d56d3012..b17ad155e44 100644 --- a/internal/test/commit.go +++ b/internal/test/commit.go @@ -4,7 +4,6 @@ import ( "fmt" "time" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" ) @@ -20,14 +19,14 @@ func MakeCommitFromVoteSet(blockID types.BlockID, voteSet *types.VoteSet, valida ValidatorIndex: int32(i), Height: voteSet.GetHeight(), Round: voteSet.GetRound(), - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: blockID, Timestamp: now, } v := vote.ToProto() - if err := validators[i].SignVote(voteSet.ChainID(), v); err != nil { + if err := validators[i].SignVote(voteSet.ChainID(), v, false); err != nil { return nil, err } vote.Signature = v.Signature @@ -36,7 +35,7 @@ func MakeCommitFromVoteSet(blockID types.BlockID, voteSet *types.VoteSet, valida } } - return voteSet.MakeExtendedCommit(types.ABCIParams{VoteExtensionsEnableHeight: 0}).ToCommit(), nil + return voteSet.MakeExtendedCommit(types.DefaultFeatureParams()).ToCommit(), nil } func MakeCommit(blockID types.BlockID, height int64, round int32, valSet *types.ValidatorSet, privVals []types.PrivValidator, chainID string, now time.Time) (*types.Commit, error) { @@ -52,7 +51,7 @@ func MakeCommit(blockID types.BlockID, height int64, round int32, valSet *types. } addr := pk.Address() - idx, _ := valSet.GetByAddress(addr) + idx, _ := valSet.GetByAddressMut(addr) if idx < 0 { return nil, fmt.Errorf("validator with address %s not in validator set", addr) } @@ -62,14 +61,14 @@ func MakeCommit(blockID types.BlockID, height int64, round int32, valSet *types. 
ValidatorIndex: idx, Height: height, Round: round, - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: blockID, Timestamp: now, } v := vote.ToProto() - if err := privVal.SignVote(chainID, v); err != nil { + if err := privVal.SignVote(chainID, v, false); err != nil { return nil, err } diff --git a/internal/test/config.go b/internal/test/config.go index 2685584e178..f0d54f9de28 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -6,14 +6,22 @@ import ( "path/filepath" "github.com/cometbft/cometbft/config" - cmtos "github.com/cometbft/cometbft/libs/os" + cmtos "github.com/cometbft/cometbft/internal/os" ) func ResetTestRoot(testName string) *config.Config { - return ResetTestRootWithChainID(testName, "") + return resetTestRoot(testName, "", true) } func ResetTestRootWithChainID(testName string, chainID string) *config.Config { + return resetTestRoot(testName, chainID, true) +} + +func ResetTestRootWithChainIDNoOverwritePrivval(testName string, chainID string) *config.Config { + return resetTestRoot(testName, chainID, false) +} + +func resetTestRoot(testName string, chainID string, overwritePrivKey bool) *config.Config { // create a unique, concurrency-safe test directory under os.TempDir() rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { @@ -32,11 +40,12 @@ func ResetTestRootWithChainID(testName string, chainID string) *config.Config { chainID = DefaultTestChainID } testGenesis := fmt.Sprintf(testGenesisFmt, chainID) - cmtos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + cmtos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0o644) } - // we always overwrite the priv val - cmtos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) - cmtos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) + if overwritePrivKey { + cmtos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0o644) + } + cmtos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0o644) config := config.TestConfig().SetRoot(rootDir) return config @@ -46,12 +55,16 @@ var testGenesisFmt = `{ "genesis_time": "2018-10-10T08:20:13.695936996Z", "chain_id": "%s", "initial_height": "1", - "consensus_params": { + "consensus_params": { "block": { "max_bytes": "22020096", "max_gas": "-1", "time_iota_ms": "10" }, + "synchrony": { + "message_delay": "500000000", + "precision": "10000000" + }, "evidence": { "max_age_num_blocks": "100000", "max_age_duration": "172800000000000", @@ -65,8 +78,12 @@ var testGenesisFmt = `{ "abci": { "vote_extensions_enable_height": "0" }, - "version": {} - }, + "version": {}, + "feature": { + "vote_extensions_enable_height": "0", + "pbts_enable_height": "1" + } + }, "validators": [ { "pub_key": { diff --git a/internal/test/genesis.go b/internal/test/genesis.go index 22b8028b8e1..7e71f1d5207 100644 --- a/internal/test/genesis.go +++ b/internal/test/genesis.go @@ -12,7 +12,6 @@ func GenesisDoc( consensusParams *types.ConsensusParams, chainID string, ) *types.GenesisDoc { - genesisValidators := make([]types.GenesisValidator, len(validators)) for i := range validators { diff --git a/internal/test/params.go b/internal/test/params.go index c4421d53cc4..136267aa27b 100644 --- a/internal/test/params.go +++ b/internal/test/params.go @@ -5,10 +5,12 @@ import ( ) // ConsensusParams returns a default set of ConsensusParams that are suitable -// for use in testing +// for use in testing. 
func ConsensusParams() *types.ConsensusParams { c := types.DefaultConsensusParams() // enable vote extensions - c.ABCI.VoteExtensionsEnableHeight = 1 + c.Feature.VoteExtensionsEnableHeight = 1 + // enable PBTS + c.Feature.PbtsEnableHeight = 1 return c } diff --git a/internal/test/validator.go b/internal/test/validator.go index ddc471ee8e6..4c436dce89e 100644 --- a/internal/test/validator.go +++ b/internal/test/validator.go @@ -2,11 +2,13 @@ package test import ( "context" + "fmt" "sort" "testing" "github.com/stretchr/testify/require" + "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/types" ) @@ -22,11 +24,12 @@ func Validator(_ context.Context, votingPower int64) (*types.Validator, types.Pr } func ValidatorSet(ctx context.Context, t *testing.T, numValidators int, votingPower int64) (*types.ValidatorSet, []types.PrivValidator) { + t.Helper() + var ( valz = make([]*types.Validator, numValidators) privValidators = make([]types.PrivValidator, numValidators) ) - t.Helper() for i := 0; i < numValidators; i++ { val, privValidator, err := Validator(ctx, votingPower) @@ -39,3 +42,22 @@ func ValidatorSet(ctx context.Context, t *testing.T, numValidators int, votingPo return types.NewValidatorSet(valz), privValidators } + +func GenesisValidatorSet(nVals int) ([]types.GenesisValidator, map[string]types.PrivValidator) { + vals := make([]types.GenesisValidator, nVals) + privVals := make(map[string]types.PrivValidator, nVals) + for i := 0; i < nVals; i++ { + secret := []byte(fmt.Sprintf("test%d", i)) + pk := ed25519.GenPrivKeyFromSecret(secret) + valAddr := pk.PubKey().Address() + vals[i] = types.GenesisValidator{ + Address: valAddr, + PubKey: pk.PubKey(), + Power: 1000, + Name: fmt.Sprintf("test%d", i), + } + privVals[valAddr.String()] = types.NewMockPVWithParams(pk, false, false) + } + + return vals, privVals +} diff --git a/libs/timer/throttle_timer.go b/internal/timer/throttle_timer.go similarity index 88% rename from libs/timer/throttle_timer.go rename to internal/timer/throttle_timer.go index d27269f4e53..efebb72a4f2 100644 --- a/libs/timer/throttle_timer.go +++ b/internal/timer/throttle_timer.go @@ -24,9 +24,9 @@ type ThrottleTimer struct { } func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { - var ch = make(chan struct{}) - var quit = make(chan struct{}) - var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} + ch := make(chan struct{}) + quit := make(chan struct{}) + t := &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} t.mtx.Lock() t.timer = time.AfterFunc(dur, t.fireRoutine) t.mtx.Unlock() @@ -64,7 +64,7 @@ func (t *ThrottleTimer) Unset() { } // For ease of .Stop()'ing services before .Start()'ing them, -// we ignore .Stop()'s on nil ThrottleTimers +// we ignore .Stop()'s on nil ThrottleTimers. func (t *ThrottleTimer) Stop() bool { if t == nil { return false diff --git a/libs/timer/throttle_timer_test.go b/internal/timer/throttle_timer_test.go similarity index 94% rename from libs/timer/throttle_timer_test.go rename to internal/timer/throttle_timer_test.go index 527c89ecea8..6e3bf76640b 100644 --- a/libs/timer/throttle_timer_test.go +++ b/internal/timer/throttle_timer_test.go @@ -4,8 +4,6 @@ import ( "testing" "time" - // make govet noshadow happy...
- asrt "github.com/stretchr/testify/assert" cmtsync "github.com/cometbft/cometbft/libs/sync" @@ -31,7 +29,7 @@ func (c *thCounter) Count() int { } // Read should run in a go-routine and -// updates count by one every time a packet comes in +// updates count by one every time a packet comes in. func (c *thCounter) Read() { for range c.input { c.Increment() diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index 95b4cc35fca..7c5037eaab6 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -9,12 +9,12 @@ import ( // HexBytes enables HEX-encoding for json/encoding. type HexBytes []byte -// Marshal needed for protobuf compatibility +// Marshal needed for protobuf compatibility. func (bz HexBytes) Marshal() ([]byte, error) { return bz, nil } -// Unmarshal needed for protobuf compatibility +// Unmarshal needed for protobuf compatibility. func (bz *HexBytes) Unmarshal(data []byte) error { *bz = data return nil @@ -58,8 +58,12 @@ func (bz HexBytes) String() string { func (bz HexBytes) Format(s fmt.State, verb rune) { switch verb { case 'p': - s.Write([]byte(fmt.Sprintf("%p", bz))) + if _, err := s.Write([]byte(fmt.Sprintf("%p", bz))); err != nil { + panic(err) + } default: - s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + if _, err := s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))); err != nil { + panic(err) + } } } diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go index db882f1c1a5..5e926d055d0 100644 --- a/libs/bytes/bytes_test.go +++ b/libs/bytes/bytes_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // This is a trivial test for protobuf compatibility. @@ -14,20 +15,20 @@ func TestMarshal(t *testing.T) { bz := []byte("hello world") dataB := HexBytes(bz) bz2, err := dataB.Marshal() - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, bz, bz2) var dataB2 HexBytes err = (&dataB2).Unmarshal(bz) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, dataB, dataB2) } // Test that the hex encoding works. func TestJSONMarshal(t *testing.T) { type TestStruct struct { - B1 []byte - B2 HexBytes + B1 []byte `json:"B1" yaml:"B1"` // normal bytes + B2 HexBytes `json:"B2" yaml:"B2"` // hex bytes } cases := []struct { @@ -40,7 +41,6 @@ func TestJSONMarshal(t *testing.T) { } for i, tc := range cases { - tc := tc t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { ts := TestStruct{B1: tc.input, B2: tc.input} @@ -49,7 +49,7 @@ func TestJSONMarshal(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, string(jsonBytes), tc.expected) + assert.Equal(t, tc.expected, string(jsonBytes)) // TODO do fuzz testing to ensure that unmarshal fails @@ -65,6 +65,7 @@ func TestJSONMarshal(t *testing.T) { } } +// Test that the hex encoding works. 
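For context on the HexBytes behavior these tests exercise: a plain []byte field goes through encoding/json's default base64 encoding, while a HexBytes field marshals to an uppercase hex string. A small standalone illustration (the struct and values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/cometbft/cometbft/libs/bytes"
)

func main() {
	type pair struct {
		B1 []byte         `json:"B1"` // default encoding/json: base64
		B2 bytes.HexBytes `json:"B2"` // HexBytes: uppercase hex
	}
	out, err := json.Marshal(pair{B1: []byte{0x1A, 0x2B}, B2: []byte{0x1A, 0x2B}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"B1":"Gis=","B2":"1A2B"}
}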
func TestHexBytes_String(t *testing.T) { hs := HexBytes([]byte("test me")) if _, err := strconv.ParseInt(hs.String(), 16, 64); err != nil { diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go index 34c00f61922..2abfee5d735 100644 --- a/libs/cli/flags/log_level.go +++ b/libs/cli/flags/log_level.go @@ -73,7 +73,6 @@ func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) ( list) } options = append(options, option) - } } diff --git a/libs/cli/flags/log_level_test.go b/libs/cli/flags/log_level_test.go index 17af292e7cd..ad44e72976f 100644 --- a/libs/cli/flags/log_level_test.go +++ b/libs/cli/flags/log_level_test.go @@ -15,7 +15,7 @@ const ( func TestParseLogLevel(t *testing.T) { var buf bytes.Buffer - jsonLogger := log.NewTMJSONLoggerNoTS(&buf) + jsonLogger := log.NewJSONLoggerNoTS(&buf) correctLogLevels := []struct { lvl string @@ -24,23 +24,26 @@ func TestParseLogLevel(t *testing.T) { {"mempool:error", []string{ ``, // if no default is given, assume info ``, - `{"_msg":"Mesmero","level":"error","module":"mempool"}`, - `{"_msg":"Mind","level":"info","module":"state"}`, // if no default is given, assume info - ``}}, + `{"level":"ERROR","msg":"Mesmero","module":"mempool"}`, + `{"level":"INFO","msg":"Mind","module":"state"}`, // if no default is given, assume info + ``, + }}, {"mempool:error,*:debug", []string{ - `{"_msg":"Kingpin","level":"debug","module":"wire"}`, + `{"level":"DEBUG","msg":"Kingpin","module":"mempool","module":"wire"}`, ``, - `{"_msg":"Mesmero","level":"error","module":"mempool"}`, - `{"_msg":"Mind","level":"info","module":"state"}`, - `{"_msg":"Gideon","level":"debug"}`}}, + `{"level":"ERROR","msg":"Mesmero","module":"mempool"}`, + `{"level":"INFO","msg":"Mind","module":"state"}`, + `{"level":"DEBUG","msg":"Gideon"}`, + }}, {"*:debug,wire:none", []string{ ``, - `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, - `{"_msg":"Mesmero","level":"error","module":"mempool"}`, - `{"_msg":"Mind","level":"info","module":"state"}`, - `{"_msg":"Gideon","level":"debug"}`}}, + `{"level":"INFO","msg":"Kitty Pryde","module":"mempool"}`, + `{"level":"ERROR","msg":"Mesmero","module":"mempool"}`, + `{"level":"INFO","msg":"Mind","module":"state"}`, + `{"level":"DEBUG","msg":"Gideon"}`, + }}, } for _, c := range correctLogLevels { diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 37fe34fc9ff..0bd0b4be668 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -18,11 +18,11 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return os.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0o600) } // RunWithArgs executes the given command with the specified command line args -// and environmental variables set. It returns any error returned from cmd.Execute() +// and environmental variables set. It returns any error returned from cmd.Execute(). func RunWithArgs(cmd Executable, args []string, env map[string]string) error { oargs := os.Args oenv := map[string]string{} @@ -52,7 +52,7 @@ func RunWithArgs(cmd Executable, args []string, env map[string]string) error { // RunCaptureWithArgs executes the given command with the specified command // line args and environmental variables set. It returns string fields // representing output written to stdout and stderr, additionally any error -// from cmd.Execute() is also returned +// from cmd.Execute() is also returned. 
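Before the implementation, a usage sketch for the capture helper documented above (the command, env prefix, and home directory are all hypothetical; the first args element is the command name, mirroring the convention in setup_test.go):

package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/cometbft/cometbft/libs/cli"
)

func main() {
	demo := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("hello")
			return nil
		},
	}
	cmd := cli.PrepareBaseCmd(demo, "DEMO", "/tmp/demo-home")
	stdout, stderr, err := cli.RunCaptureWithArgs(cmd, []string{"demo"}, nil)
	fmt.Printf("stdout=%q stderr=%q err=%v\n", stdout, stderr, err)
}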
func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout rOut, wOut, _ := os.Pipe() diff --git a/libs/cli/setup.go b/libs/cli/setup.go index 9154fa9860c..ebb590d881f 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -19,12 +19,12 @@ const ( ) // Executable is the minimal interface to *corba.Command, so we can -// wrap if desired before the test +// wrap if desired before the test. type Executable interface { Execute() error } -// PrepareBaseCmd is meant for CometBFT and other servers +// PrepareBaseCmd is meant for CometBFT and other servers. func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { cobra.OnInitialize(func() { initEnv(envPrefix) }) cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data") @@ -55,7 +55,7 @@ func initEnv(prefix string) { } // This copies all variables like TMROOT to TM_ROOT, -// so we can support both formats for the user +// so we can support both formats for the user. func copyEnvVars(prefix string) { prefix = strings.ToUpper(prefix) ps := prefix + "_" @@ -71,7 +71,7 @@ func copyEnvVars(prefix string) { } } -// Executor wraps the cobra Command with a nicer Execute method +// Executor wraps the cobra Command with a nicer Execute method. type Executor struct { *cobra.Command Exit func(int) // this is os.Exit by default, override in tests @@ -81,7 +81,7 @@ type ExitCoder interface { ExitCode() int } -// execute adds all child commands to the root command sets flags appropriately. +// Execute adds all child commands to the root command sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func (e Executor) Execute() error { e.SilenceUsage = true @@ -110,7 +110,7 @@ func (e Executor) Execute() error { type cobraCmdFunc func(cmd *cobra.Command, args []string) error // Returns a single function that calls each argument function in sequence -// RunE, PreRunE, PersistentPreRunE, etc. all have this same signature +// RunE, PreRunE, PersistentPreRunE, etc. all have this same signature. func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { return func(cmd *cobra.Command, args []string) error { for _, f := range fs { @@ -124,7 +124,7 @@ func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { } } -// Bind all flags and read the config into viper +// Bind all flags and read the config into viper. func bindFlagsLoadViper(cmd *cobra.Command, _ []string) error { // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index fec49e5c1ed..3303fd6ab93 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -27,8 +27,11 @@ func TestSetupEnv(t *testing.T) { {nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"}, {nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"}, // and that cli overrides env... 
- {[]string{"--foobar", "important"}, - map[string]string{"DEMO_FOOBAR": "ignored"}, "important"}, + { + []string{"--foobar", "important"}, + map[string]string{"DEMO_FOOBAR": "ignored"}, + "important", + }, } for idx, tc := range cases { @@ -37,7 +40,7 @@ func TestSetupEnv(t *testing.T) { var foo string demo := &cobra.Command{ Use: "demo", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { foo = viper.GetString("foobar") return nil }, @@ -49,7 +52,7 @@ func TestSetupEnv(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) } } @@ -68,7 +71,7 @@ func TestSetupConfig(t *testing.T) { cval1 := "fubble" conf1 := tempDir() err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) - require.Nil(t, err) + require.NoError(t, err) cases := []struct { args []string @@ -95,7 +98,7 @@ func TestSetupConfig(t *testing.T) { var foo, two string boo := &cobra.Command{ Use: "reader", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { foo = viper.GetString("boo") two = viper.GetString("two-words") return nil @@ -109,7 +112,7 @@ func TestSetupConfig(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) assert.Equal(t, tc.expectedTwo, two, i) } @@ -127,11 +130,11 @@ func TestSetupUnmarshal(t *testing.T) { cval1, cval2 := "someone", "else" conf1 := tempDir() err := WriteConfigVals(conf1, map[string]string{"name": cval1}) - require.Nil(t, err) + require.NoError(t, err) // even with some ignored fields, should be no problem conf2 := tempDir() err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) - require.Nil(t, err) + require.NoError(t, err) // unused is not declared on a flag and remains from base base := DemoConfig{ @@ -174,7 +177,7 @@ func TestSetupUnmarshal(t *testing.T) { cfg := base marsh := &cobra.Command{ Use: "marsh", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return viper.Unmarshal(&cfg) }, } @@ -188,7 +191,7 @@ func TestSetupUnmarshal(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + require.NoError(t, err, i) assert.Equal(t, tc.expected, cfg, i) } } @@ -211,7 +214,7 @@ func TestSetupTrace(t *testing.T) { // test command that store value of foobar in local variable trace := &cobra.Command{ Use: "trace", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return fmt.Errorf("trace flag = %t", viper.GetBool(TraceFlag)) }, } @@ -221,14 +224,14 @@ func TestSetupTrace(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) - require.NotNil(t, err, i) + require.Error(t, err, i) require.Equal(t, "", stdout, i) require.NotEqual(t, "", stderr, i) msg := strings.Split(stderr, "\n") - desired := fmt.Sprintf("ERROR: %s", tc.expected) + desired := "ERROR: " + tc.expected assert.Equal(t, desired, msg[0], i) t.Log(msg) - if tc.long && assert.True(t, len(msg) > 2, i) { + if tc.long && assert.Greater(t, len(msg), 2, i) { // the next line starts the stack trace... 
assert.Contains(t, stderr, "TestSetupTrace", i) assert.Contains(t, stderr, "setup_test.go", i) diff --git a/libs/flowrate/io_test.go b/libs/flowrate/io_test.go deleted file mode 100644 index 4d7de417e46..00000000000 --- a/libs/flowrate/io_test.go +++ /dev/null @@ -1,197 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "bytes" - "testing" - "time" -) - -const ( - _50ms = 50 * time.Millisecond - _100ms = 100 * time.Millisecond - _200ms = 200 * time.Millisecond - _300ms = 300 * time.Millisecond - _400ms = 400 * time.Millisecond - _500ms = 500 * time.Millisecond -) - -func nextStatus(m *Monitor) Status { - samples := m.samples - for i := 0; i < 30; i++ { - if s := m.Status(); s.Samples != samples { - return s - } - time.Sleep(5 * time.Millisecond) - } - return m.Status() -} - -func TestReader(t *testing.T) { - in := make([]byte, 100) - for i := range in { - in[i] = byte(i) - } - b := make([]byte, 100) - r := NewReader(bytes.NewReader(in), 100) - start := time.Now() - - // Make sure r implements Limiter - _ = Limiter(r) - - // 1st read of 10 bytes is performed immediately - if n, err := r.Read(b); n != 10 || err != nil { - t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - // No new Reads allowed in the current sample - r.SetBlocking(false) - if n, err := r.Read(b); n != 0 || err != nil { - t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - status := [6]Status{0: r.Status()} // No samples in the first status - - // 2nd read of 10 bytes blocks until the next sample - r.SetBlocking(true) - if n, err := r.Read(b[10:]); n != 10 || err != nil { - t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _100ms { - t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) - } - - status[1] = r.Status() // 1st sample - status[2] = nextStatus(r.Monitor) // 2nd sample - status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample - - if n := r.Done(); n != 20 { - t.Fatalf("r.Done() expected 20; got %v", n) - } - - status[4] = r.Status() - status[5] = nextStatus(r.Monitor) // Timeout - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true}, - {start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true}, - {start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true}, - {start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - } - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) - } - } - if !bytes.Equal(b[:20], in[:20]) { - t.Errorf("r.Read() input doesn't match output") - } -} - -func TestWriter(t *testing.T) { - b := make([]byte, 100) - for i := range b { - b[i] = byte(i) - } - w := NewWriter(&bytes.Buffer{}, 200) - start := time.Now() - - // Make sure w implements Limiter - _ = Limiter(w) - - // Non-blocking 20-byte write for the first sample returns ErrLimit - w.SetBlocking(false) - if n, err := w.Write(b); n != 20 || err != ErrLimit { - t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) - } 
else if rt := time.Since(start); rt > _50ms { - t.Fatalf("w.Write(b) took too long (%v)", rt) - } - - // Blocking 80-byte write - w.SetBlocking(true) - if n, err := w.Write(b[20:]); n != 80 || err != nil { - t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _300ms { - // Explanation for `rt < _300ms` (as opposed to `< _400ms`) - // - // |<-- start | | - // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms - // sends: 20|20 |20 |20 |20# - // - // NOTE: The '#' symbol can thus happen before 400ms is up. - // Thus, we can only panic if rt < _300ms. - t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) - } - - w.SetTransferSize(100) - status := []Status{w.Status(), nextStatus(w.Monitor)} - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 80, 4, 200, 200, 200, 200, 20, _400ms, 0, _100ms, 80000, true}, - {start, 100, 5, 200, 200, 200, 200, 0, _500ms, _100ms, 0, 100000, true}, - } - - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) - } - } - if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { - t.Errorf("w.Write() input doesn't match output") - } -} - -const maxDeviationForDuration = 50 * time.Millisecond -const maxDeviationForRate int64 = 50 - -// statusesAreEqual returns true if s1 is equal to s2. Equality here means -// general equality of fields except for the duration and rates, which can -// drift due to unpredictable delays (e.g. thread wakes up 25ms after -// `time.Sleep` has ended). -func statusesAreEqual(s1 *Status, s2 *Status) bool { - if s1.Active == s2.Active && - s1.Start == s2.Start && - durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && - s1.Idle == s2.Idle && - s1.Bytes == s2.Bytes && - s1.Samples == s2.Samples && - ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && - ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && - ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && - ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && - s1.BytesRem == s2.BytesRem && - durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && - s1.Progress == s2.Progress { - return true - } - return false -} - -func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { - return d2-d1 <= maxDeviation -} - -func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { - sub := r1 - r2 - if sub < 0 { - sub = -sub - } - if sub <= maxDeviation { - return true - } - return false -} diff --git a/libs/json/decoder.go b/libs/json/decoder.go index 86ff27d3935..1aca2ca223b 100644 --- a/libs/json/decoder.go +++ b/libs/json/decoder.go @@ -10,11 +10,11 @@ import ( // Unmarshal unmarshals JSON into the given value, using Amino-compatible JSON encoding (strings // for 64-bit numbers, and type wrappers for registered types). 
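The Amino-compatible encoding mentioned in the comment above means 64-bit integers travel as JSON strings rather than bare numbers. A minimal round-trip sketch, assuming only the behaviour the comment documents:

package main

import (
	"fmt"

	cmtjson "github.com/cometbft/cometbft/libs/json"
)

func main() {
	// int64 is emitted as a quoted string, so the value survives clients
	// that parse JSON numbers as float64.
	bz, err := cmtjson.Marshal(int64(1) << 60)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // "1152921504606846976" (note the quotes)

	// Unmarshal accepts the quoted form back into an int64.
	var v int64
	if err := cmtjson.Unmarshal(bz, &v); err != nil {
		panic(err)
	}
	fmt.Println(v == int64(1)<<60) // true
}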
-func Unmarshal(bz []byte, v interface{}) error { +func Unmarshal(bz []byte, v any) error { return decode(bz, v) } -func decode(bz []byte, v interface{}) error { +func decode(bz []byte, v any) error { if len(bz) == 0 { return errors.New("cannot decode empty bytes") } @@ -115,7 +115,6 @@ func decodeReflectList(bz []byte, rv reflect.Value) error { return fmt.Errorf("got %v bytes, expected %v", len(buf), rv.Len()) } reflect.Copy(rv, reflect.ValueOf(buf)) - } else if err := decodeStdlib(bz, rv); err != nil { return err } diff --git a/libs/json/decoder_test.go b/libs/json/decoder_test.go index 9a33bf0e2ad..2dd0e97497f 100644 --- a/libs/json/decoder_test.go +++ b/libs/json/decoder_test.go @@ -21,7 +21,7 @@ func TestUnmarshal(t *testing.T) { testcases := map[string]struct { json string - value interface{} + value any err bool }{ "bool true": {"true", true, false}, @@ -131,7 +131,6 @@ func TestUnmarshal(t *testing.T) { "invalid type": {`"foo"`, Struct{}, true}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { // Create a target variable as a pointer to the zero value of the tc.value type, // and wrap it in an empty interface. Decode into that interface. diff --git a/libs/json/doc.go b/libs/json/doc.go index a4fb461db5f..18a4c97cee9 100644 --- a/libs/json/doc.go +++ b/libs/json/doc.go @@ -90,7 +90,7 @@ // // type Struct struct { // Car *Car -// Vehicle Vehicle +// Vehicle // } // // Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} diff --git a/libs/json/encoder.go b/libs/json/encoder.go index 11990e2af6c..86d71a5869b 100644 --- a/libs/json/encoder.go +++ b/libs/json/encoder.go @@ -19,7 +19,7 @@ var ( // Marshal marshals the value as JSON, using Amino-compatible JSON encoding (strings for // 64-bit numbers, and type wrappers for registered types). -func Marshal(v interface{}) ([]byte, error) { +func Marshal(v any) ([]byte, error) { buf := new(bytes.Buffer) err := encode(buf, v) if err != nil { @@ -29,7 +29,7 @@ func Marshal(v interface{}) ([]byte, error) { } // MarshalIndent marshals the value as JSON, using the given prefix and indentation. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { +func MarshalIndent(v any, prefix, indent string) ([]byte, error) { bz, err := Marshal(v) if err != nil { return nil, err @@ -42,7 +42,7 @@ func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { return buf.Bytes(), nil } -func encode(w io.Writer, v interface{}) error { +func encode(w *bytes.Buffer, v any) error { // Bare nil values can't be reflected, so we must handle them here. if v == nil { return writeStr(w, "null") @@ -60,7 +60,7 @@ func encode(w io.Writer, v interface{}) error { return encodeReflect(w, rv) } -func encodeReflect(w io.Writer, rv reflect.Value) error { +func encodeReflect(w *bytes.Buffer, rv reflect.Value) error { if !rv.IsValid() { return errors.New("invalid reflect value") } @@ -115,7 +115,7 @@ func encodeReflect(w io.Writer, rv reflect.Value) error { } } -func encodeReflectList(w io.Writer, rv reflect.Value) error { +func encodeReflectList(w *bytes.Buffer, rv reflect.Value) error { // Emit nil slices as null. 
if rv.Kind() == reflect.Slice && rv.IsNil() { return writeStr(w, "null") @@ -150,7 +150,7 @@ func encodeReflectList(w io.Writer, rv reflect.Value) error { return writeStr(w, "]") } -func encodeReflectMap(w io.Writer, rv reflect.Value) error { +func encodeReflectMap(w *bytes.Buffer, rv reflect.Value) error { if rv.Type().Key().Kind() != reflect.String { return errors.New("map key must be string") } @@ -181,7 +181,7 @@ func encodeReflectMap(w io.Writer, rv reflect.Value) error { return writeStr(w, "}") } -func encodeReflectStruct(w io.Writer, rv reflect.Value) error { +func encodeReflectStruct(w *bytes.Buffer, rv reflect.Value) error { sInfo := makeStructInfo(rv.Type()) if err := writeStr(w, "{"); err != nil { return err @@ -212,7 +212,7 @@ func encodeReflectStruct(w io.Writer, rv reflect.Value) error { return writeStr(w, "}") } -func encodeReflectInterface(w io.Writer, rv reflect.Value) error { +func encodeReflectInterface(w *bytes.Buffer, rv reflect.Value) error { // Get concrete value and dereference pointers. for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { if rv.IsNil() { @@ -237,14 +237,17 @@ func encodeReflectInterface(w io.Writer, rv reflect.Value) error { return writeStr(w, "}") } -func encodeStdlib(w io.Writer, v interface{}) error { - // Doesn't stream the output because that adds a newline, as per: - // https://golang.org/pkg/encoding/json/#Encoder.Encode - blob, err := json.Marshal(v) +func encodeStdlib(w *bytes.Buffer, v any) error { + // Stream the output of the JSON marshaling directly into the buffer. + // The stdlib encoder will write a newline, so we must truncate it, + // which is why we pass in a bytes.Buffer throughout, not io.Writer. + enc := json.NewEncoder(w) + err := enc.Encode(v) if err != nil { return err } - _, err = w.Write(blob) + // Remove the last byte from the buffer + w.Truncate(w.Len() - 1) return err } diff --git a/libs/json/encoder_test.go b/libs/json/encoder_test.go index e6eb18a1225..c39d219d3fb 100644 --- a/libs/json/encoder_test.go +++ b/libs/json/encoder_test.go @@ -19,7 +19,7 @@ func TestMarshal(t *testing.T) { boat := Boat{Sail: true} testcases := map[string]struct { - value interface{} + value any output string }{ "nil": {nil, `null`}, @@ -94,7 +94,6 @@ func TestMarshal(t *testing.T) { }, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { bz, err := json.Marshal(tc.value) require.NoError(t, err) @@ -102,3 +101,20 @@ func TestMarshal(t *testing.T) { }) } } + +func BenchmarkJsonMarshalStruct(b *testing.B) { + s := "string" + sPtr := &s + i64 := int64(64) + ti := time.Date(2020, 6, 2, 18, 5, 13, 4346374, time.FixedZone("UTC+2", 2*60*60)) + car := &Car{Wheels: 4} + boat := Boat{Sail: true} + for i := 0; i < b.N; i++ { + _, _ = json.Marshal(Struct{ + Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, + String: "foo", StringPtrPtr: &sPtr, Bytes: []byte{1, 2, 3}, + Time: ti, Car: car, Boat: boat, Vehicles: []Vehicle{car, boat}, + Child: &Struct{Bool: false, String: "child"}, private: "private", + }) + } +} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go index 1776c9723cb..4f6666557da 100644 --- a/libs/json/helpers_test.go +++ b/libs/json/helpers_test.go @@ -23,14 +23,14 @@ type Car struct { Wheels int32 } -func (c *Car) Drive() error { return nil } +func (*Car) Drive() error { return nil } // Boat is a value implementation of Vehicle. 
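For context on the encodeStdlib rewrite above: json.Encoder.Encode always appends a trailing newline, which is why the buffer is truncated by one byte and why *bytes.Buffer rather than io.Writer is now threaded through the encoder. The trick in isolation, as a standard-library-only sketch:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(map[string]int{"n": 1}); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String()) // "{\"n\":1}\n" (Encode appended '\n')

	// Drop the newline so the value can be embedded mid-stream, e.g. inside
	// a surrounding JSON object being assembled in the same buffer.
	buf.Truncate(buf.Len() - 1)
	fmt.Printf("%q\n", buf.String()) // "{\"n\":1}"
}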
type Boat struct { Sail bool } -func (b Boat) Drive() error { return nil } +func (Boat) Drive() error { return nil } // These are public and private encryption keys. type ( @@ -43,7 +43,7 @@ type CustomPtr struct { Value string } -func (c *CustomPtr) MarshalJSON() ([]byte, error) { +func (*CustomPtr) MarshalJSON() ([]byte, error) { return []byte("\"custom\""), nil } @@ -58,11 +58,11 @@ type CustomValue struct { Value string } -func (c CustomValue) MarshalJSON() ([]byte, error) { +func (CustomValue) MarshalJSON() ([]byte, error) { return []byte("\"custom\""), nil } -func (c CustomValue) UnmarshalJSON(_ []byte) error { +func (CustomValue) UnmarshalJSON(_ []byte) error { return nil } diff --git a/libs/json/structs.go b/libs/json/structs.go index 8c717e3c83b..259abd058e2 100644 --- a/libs/json/structs.go +++ b/libs/json/structs.go @@ -9,10 +9,8 @@ import ( cmtsync "github.com/cometbft/cometbft/libs/sync" ) -var ( - // cache caches struct info. - cache = newStructInfoCache() -) +// cache caches struct info. +var cache = newStructInfoCache() // structCache is a cache of struct info. type structInfoCache struct { diff --git a/libs/json/types.go b/libs/json/types.go index 4d9a0e229c8..39c24a26942 100644 --- a/libs/json/types.go +++ b/libs/json/types.go @@ -8,10 +8,8 @@ import ( cmtsync "github.com/cometbft/cometbft/libs/sync" ) -var ( - // typeRegistry contains globally registered types for JSON encoding/decoding. - typeRegistry = newTypes() -) +// typeRegistry contains globally registered types for JSON encoding/decoding. +var typeRegistry = newTypes() // RegisterType registers a type for Amino-compatible interface encoding in the global type // registry. These types will be encoded with a type wrapper `{"type":"","value":}` @@ -20,7 +18,7 @@ var ( // the a value or pointer based on the registered type. // // Should only be called in init() functions, as it panics on error. -func RegisterType(_type interface{}, name string) { +func RegisterType(_type any, name string) { if _type == nil { panic("cannot register nil type") } diff --git a/libs/log/debug_off.go b/libs/log/debug_off.go new file mode 100644 index 00000000000..c582b54415a --- /dev/null +++ b/libs/log/debug_off.go @@ -0,0 +1,6 @@ +//go:build nodebug + +package log + +// LogDebug determines whether debug logs are stripped at compile time +const LogDebug = false diff --git a/libs/log/debug_on.go b/libs/log/debug_on.go new file mode 100644 index 00000000000..d37f3a96563 --- /dev/null +++ b/libs/log/debug_on.go @@ -0,0 +1,6 @@ +//go:build !nodebug + +package log + +// LogDebug determines whether debug logs are stripped at compile time. +const LogDebug = true diff --git a/libs/log/filter.go b/libs/log/filter.go index 4b7ed981cd8..e82c168f871 100644 --- a/libs/log/filter.go +++ b/libs/log/filter.go @@ -8,6 +8,7 @@ const ( levelDebug level = 1 << iota levelInfo levelError + levelWarn ) type filter struct { @@ -18,8 +19,8 @@ type filter struct { } type keyval struct { - key interface{} - value interface{} + key any + value any } // NewFilter wraps next and implements filtering. See the commentary on the @@ -38,28 +39,38 @@ func NewFilter(next Logger, options ...Option) Logger { return l } -func (l *filter) Info(msg string, keyvals ...interface{}) { - levelAllowed := l.allowed&levelInfo != 0 +func (l *filter) Error(msg string, keyvals ...any) { + levelAllowed := l.allowed&levelError != 0 if !levelAllowed { return } - l.next.Info(msg, keyvals...) + l.next.Error(msg, keyvals...) 
} -func (l *filter) Debug(msg string, keyvals ...interface{}) { - levelAllowed := l.allowed&levelDebug != 0 +func (l *filter) Warn(msg string, keyvals ...any) { + levelAllowed := l.allowed&levelWarn != 0 if !levelAllowed { return } - l.next.Debug(msg, keyvals...) + l.next.Warn(msg, keyvals...) } -func (l *filter) Error(msg string, keyvals ...interface{}) { - levelAllowed := l.allowed&levelError != 0 +func (l *filter) Info(msg string, keyvals ...any) { + levelAllowed := l.allowed&levelInfo != 0 if !levelAllowed { return } - l.next.Error(msg, keyvals...) + l.next.Info(msg, keyvals...) +} + +func (l *filter) Debug(msg string, keyvals ...any) { + if LogDebug { + levelAllowed := l.allowed&levelDebug != 0 + if !levelAllowed { + return + } + l.next.Debug(msg, keyvals...) + } } // With implements Logger by constructing a new filter with a keyvals appended @@ -82,7 +93,7 @@ func (l *filter) Error(msg string, keyvals ...interface{}) { // log.AllowError(), // log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) // logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" -func (l *filter) With(keyvals ...interface{}) Logger { +func (l *filter) With(keyvals ...any) Logger { keyInAllowedKeyvals := false for i := len(keyvals) - 2; i >= 0; i -= 2 { @@ -124,7 +135,7 @@ func (l *filter) With(keyvals ...interface{}) Logger { } } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // Option sets a parameter for the filter. type Option func(*filter) @@ -133,16 +144,18 @@ type Option func(*filter) // for such level. func AllowLevel(lvl string) (Option, error) { switch lvl { - case "debug": - return AllowDebug(), nil - case "info": - return AllowInfo(), nil case "error": return AllowError(), nil + case "warn": + return AllowWarn(), nil + case "info": + return AllowInfo(), nil + case "debug": + return AllowDebug(), nil case "none": return AllowNone(), nil default: - return nil, fmt.Errorf("expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl) + return nil, fmt.Errorf("expected either \"error\", \"warn\", \"info\", \"debug\" or \"none\" level, given %s", lvl) } } @@ -151,19 +164,24 @@ func AllowAll() Option { return AllowDebug() } -// AllowDebug allows error, info and debug level log events to pass. -func AllowDebug() Option { - return allowed(levelError | levelInfo | levelDebug) +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowWarn allows error and warning level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) } -// AllowInfo allows error and info level log events to pass. +// AllowInfo allows error, warning and info level log events to pass. func AllowInfo() Option { - return allowed(levelError | levelInfo) + return allowed(levelError | levelWarn | levelInfo) } -// AllowError allows only error level log events to pass. -func AllowError() Option { - return allowed(levelError) +// AllowDebug allows all log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) } // AllowNone allows no leveled log events to pass.
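With the new warn level, AllowLevel now recognizes five strings. A minimal sketch of applying a configured level, assuming the slog-backed NewLogger introduced later in this changeset:

package main

import (
	"os"

	"github.com/cometbft/cometbft/libs/log"
)

func main() {
	// One of "error", "warn", "info", "debug" or "none".
	opt, err := log.AllowLevel("warn")
	if err != nil {
		panic(err)
	}
	logger := log.NewFilter(log.NewLogger(os.Stdout), opt)
	logger.Info("suppressed: info is below the warn floor")
	logger.Warn("emitted", "key", "value")
	logger.Error("emitted as well")
}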
@@ -175,22 +193,29 @@ func allowed(allowed level) Option { return func(l *filter) { l.allowed = allowed } } -// AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair. -func AllowDebugWith(key interface{}, value interface{}) Option { - return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } +// AllowErrorWith allows ONLY error level log events to pass for a specific key value pair. +func AllowErrorWith(key any, value any) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError } +} + +// AllowWarnWith allows error and warning level log events to pass for a specific key value pair. +func AllowWarnWith(key any, value any) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelWarn } } -// AllowInfoWith allows error and info level log events to pass for a specific key value pair. -func AllowInfoWith(key interface{}, value interface{}) Option { - return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo } +// AllowInfoWith allows error, warning and info level log events to pass for a specific key value pair. +func AllowInfoWith(key any, value any) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelWarn | levelInfo } } -// AllowErrorWith allows only error level log events to pass for a specific key value pair. -func AllowErrorWith(key interface{}, value interface{}) Option { - return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError +// AllowDebugWith allows all log events to pass for a specific key value pair. +func AllowDebugWith(key any, value any) Option { + return func(l *filter) { + l.allowedKeyvals[keyval{key, value}] = levelError | levelWarn | levelInfo | levelDebug + } } // AllowNoneWith allows no leveled log events to pass for a specific key value pair.
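The *With options compose with the global floor, per the With documentation earlier in this file. A short sketch of that composition (the module and user values are illustrative):

package main

import (
	"os"

	"github.com/cometbft/cometbft/libs/log"
)

func main() {
	logger := log.NewFilter(
		log.NewLogger(os.Stdout),
		log.AllowError(),                      // global floor: ERROR only
		log.AllowInfoWith("module", "crypto"), // crypto may also log INFO and WARN
		log.AllowNoneWith("user", "Sam"),      // Sam is silenced outright
	)
	logger.With("module", "crypto").Info("emitted")
	logger.With("module", "mempool").Info("filtered out")
	logger.With("user", "Sam").With("module", "crypto").Info("filtered out")
}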
-func AllowNoneWith(key interface{}, value interface{}) Option { +func AllowNoneWith(key any, value any) Option { return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } } diff --git a/libs/log/filter_test.go b/libs/log/filter_test.go index f98fd6e71e2..05e8bacff25 100644 --- a/libs/log/filter_test.go +++ b/libs/log/filter_test.go @@ -18,33 +18,36 @@ func TestVariousLevels(t *testing.T) { "AllowAll", log.AllowAll(), strings.Join([]string{ - `{"_msg":"here","level":"debug","this is":"debug log"}`, - `{"_msg":"here","level":"info","this is":"info log"}`, - `{"_msg":"here","level":"error","this is":"error log"}`, + `{"level":"DEBUG","msg":"here","this is":"debug log"}`, + `{"level":"INFO","msg":"here","this is":"info log"}`, + `{"level":"WARN","msg":"here","this is":"warn log"}`, + `{"level":"ERROR","msg":"here","this is":"error log"}`, }, "\n"), }, { - "AllowDebug", - log.AllowDebug(), + "AllowError", + log.AllowError(), strings.Join([]string{ - `{"_msg":"here","level":"debug","this is":"debug log"}`, - `{"_msg":"here","level":"info","this is":"info log"}`, - `{"_msg":"here","level":"error","this is":"error log"}`, + `{"level":"ERROR","msg":"here","this is":"error log"}`, }, "\n"), }, { "AllowInfo", log.AllowInfo(), strings.Join([]string{ - `{"_msg":"here","level":"info","this is":"info log"}`, - `{"_msg":"here","level":"error","this is":"error log"}`, + `{"level":"INFO","msg":"here","this is":"info log"}`, + `{"level":"WARN","msg":"here","this is":"warn log"}`, + `{"level":"ERROR","msg":"here","this is":"error log"}`, }, "\n"), }, { - "AllowError", - log.AllowError(), + "AllowDebug", + log.AllowDebug(), strings.Join([]string{ - `{"_msg":"here","level":"error","this is":"error log"}`, + `{"level":"DEBUG","msg":"here","this is":"debug log"}`, + `{"level":"INFO","msg":"here","this is":"info log"}`, + `{"level":"WARN","msg":"here","this is":"warn log"}`, + `{"level":"ERROR","msg":"here","this is":"error log"}`, }, "\n"), }, { @@ -55,13 +58,13 @@ func TestVariousLevels(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { var buf bytes.Buffer - logger := log.NewFilter(log.NewTMJSONLoggerNoTS(&buf), tc.allowed) + logger := log.NewFilter(log.NewJSONLoggerNoTS(&buf), tc.allowed) logger.Debug("here", "this is", "debug log") logger.Info("here", "this is", "info log") + logger.Warn("here", "this is", "warn log") logger.Error("here", "this is", "error log") if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { @@ -74,13 +77,13 @@ func TestVariousLevels(t *testing.T) { func TestLevelContext(t *testing.T) { var buf bytes.Buffer - logger := log.NewTMJSONLoggerNoTS(&buf) + logger := log.NewJSONLoggerNoTS(&buf) logger = log.NewFilter(logger, log.AllowError()) logger = logger.With("context", "value") logger.Error("foo", "bar", "baz") - want := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}` + want := `{"level":"ERROR","msg":"foo","context":"value","bar":"baz"}` have := strings.TrimSpace(buf.String()) if want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) @@ -96,12 +99,12 @@ func TestLevelContext(t *testing.T) { func TestVariousAllowWith(t *testing.T) { var buf bytes.Buffer - logger := log.NewTMJSONLoggerNoTS(&buf) + logger := log.NewJSONLoggerNoTS(&buf) logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) logger1.With("context", "value").Info("foo", "bar", "baz") - want := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}` + want := 
`{"level":"INFO","msg":"foo","context":"value","bar":"baz"}` have := strings.TrimSpace(buf.String()) if want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) @@ -132,7 +135,7 @@ func TestVariousAllowWith(t *testing.T) { logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") - want = `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}` + want = `{"level":"INFO","msg":"foo","user":"Sam","context":"value","bar":"baz"}` have = strings.TrimSpace(buf.String()) if want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) diff --git a/libs/log/lazy.go b/libs/log/lazy.go index a8fb74f03aa..e38d710ee9b 100644 --- a/libs/log/lazy.go +++ b/libs/log/lazy.go @@ -8,13 +8,13 @@ import ( type LazySprintf struct { format string - args []interface{} + args []any } // NewLazySprintf defers fmt.Sprintf until the Stringer interface is invoked. // This is particularly useful for avoiding calling Sprintf when debugging is not // active. -func NewLazySprintf(format string, args ...interface{}) *LazySprintf { +func NewLazySprintf(format string, args ...any) *LazySprintf { return &LazySprintf{format, args} } @@ -22,21 +22,25 @@ func (l *LazySprintf) String() string { return fmt.Sprintf(l.format, l.args...) } -type LazyBlockHash struct { - block hashable +// LazyHash is a wrapper around a hashable object that defers the Hash call +// until the Stringer interface is invoked. +// This is particularly useful for avoiding calling Sprintf when debugging is +// not active. +type LazyHash struct { + inner hashable } type hashable interface { Hash() cmtbytes.HexBytes } -// NewLazyBlockHash defers block Hash until the Stringer interface is invoked. +// NewLazyHash defers calling `Hash()` until the Stringer interface is invoked. // This is particularly useful for avoiding calling Sprintf when debugging is not // active. -func NewLazyBlockHash(block hashable) *LazyBlockHash { - return &LazyBlockHash{block} +func NewLazyHash(inner hashable) *LazyHash { + return &LazyHash{inner} } -func (l *LazyBlockHash) String() string { - return l.block.Hash().String() +func (l *LazyHash) String() string { + return l.inner.Hash().String() } diff --git a/libs/log/lazy_test.go b/libs/log/lazy_test.go new file mode 100644 index 00000000000..6cdb62d6e73 --- /dev/null +++ b/libs/log/lazy_test.go @@ -0,0 +1,24 @@ +package log_test + +import ( + "testing" + + "github.com/cometbft/cometbft/internal/test" + "github.com/cometbft/cometbft/libs/log" +) + +func TestLazyHash_Txs(t *testing.T) { + const height = 2 + const numTxs = 5 + txs := test.MakeNTxs(height, numTxs) + + for i := 0; i < numTxs; i++ { + lazyHash := log.NewLazyHash(txs[i]) + if lazyHash.String() != txs[i].Hash().String() { + t.Fatalf("expected %s, got %s", txs[i].Hash().String(), lazyHash.String()) + } + if len(lazyHash.String()) <= 0 { + t.Fatalf("expected non-empty hash, got empty hash") + } + } +} diff --git a/libs/log/logger.go b/libs/log/logger.go index 22ed68f1a1a..d32446c62be 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -1,30 +1,148 @@ package log import ( + "context" + "fmt" "io" + "log/slog" - kitlog "github.com/go-kit/log" + "github.com/lmittmann/tint" ) -// Logger is what any CometBFT library should take. +// Logger is the CometBFT logging interface. type Logger interface { - Debug(msg string, keyvals ...interface{}) - Info(msg string, keyvals ...interface{}) - Error(msg string, keyvals ...interface{}) + // Error logs a message at level ERROR. 
+ Error(msg string, keyvals ...any) + // Warn logs a message at level WARN. + Warn(msg string, keyvals ...any) + // Info logs a message at level INFO. + Info(msg string, keyvals ...any) + // Debug logs a message at level DEBUG. + Debug(msg string, keyvals ...any) - With(keyvals ...interface{}) Logger + // With returns a new contextual logger with keyvals prepended to those + // passed to calls to Info, Warn, Debug or Error. + With(keyvals ...any) Logger } -// NewSyncWriter returns a new writer that is safe for concurrent use by -// multiple goroutines. Writes to the returned writer are passed on to w. If -// another write is already in progress, the calling goroutine blocks until -// the writer is available. +type baseLogger struct { + srcLogger *slog.Logger +} + +// Interface assertions. +var _ Logger = (*baseLogger)(nil) + +// NewLogger returns a logger that writes msg and keyvals to w using slog as an +// underlying logger. +// +// The github.com/lmittmann/tint library is used to colorize the output. // -// If w implements the following interface, so does the returned writer. +// NOTE: +// - the underlying logger could be swapped with something else in the future +// - w must be safe for concurrent use by multiple goroutines if the returned +// Logger will be used concurrently. +func NewLogger(w io.Writer) Logger { + return NewLoggerWithColor(w, true) +} + +// NewLoggerWithColor returns a logger that writes msg and keyvals to w using +// slog as an underlying logger, with an option to define whether the logs +// should be colored. +func NewLoggerWithColor(w io.Writer, color bool) Logger { + logger := slog.New(tint.NewHandler(w, &tint.Options{ + Level: slog.LevelDebug, + TimeFormat: "2006-01-02T15:04:05.000", + ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { + if err, ok := a.Value.Any().(error); ok { + aErr := tint.Err(err) + aErr.Key = a.Key + return aErr + } + return a + }, + NoColor: !color, + }, + )) + return &baseLogger{slog.New(&tabHandler{h: logger.Handler()})} +} + +func (l *baseLogger) Error(msg string, keyvals ...any) { + l.srcLogger.Error(msg, keyvals...) +} + +func (l *baseLogger) Warn(msg string, keyvals ...any) { + l.srcLogger.Warn(msg, keyvals...) +} + +func (l *baseLogger) Info(msg string, keyvals ...any) { + l.srcLogger.Info(msg, keyvals...) +} + +func (l *baseLogger) Debug(msg string, keyvals ...any) { + if LogDebug { + l.srcLogger.Debug(msg, keyvals...) + } +} + +func (l *baseLogger) With(keyvals ...any) Logger { + return &baseLogger{l.srcLogger.With(keyvals...)} +} + +// NewJSONLogger returns a Logger that writes msg and keyvals to w using +// slog (slog.NewJSONHandler). // -// interface { -// Fd() uintptr -// } -func NewSyncWriter(w io.Writer) io.Writer { - return kitlog.NewSyncWriter(w) +// NOTE: +// - the underlying logger could be swapped with something else in the future +// - w must be safe for concurrent use by multiple goroutines if the returned +// Logger will be used concurrently. +func NewJSONLogger(w io.Writer) Logger { + logger := slog.New(slog.NewJSONHandler(w, &slog.HandlerOptions{Level: slog.LevelDebug})) + return &baseLogger{logger} +} + +// NewJSONLoggerNoTS is the same as NewJSONLogger, but without the timestamp. +// Used for testing purposes. +func NewJSONLoggerNoTS(w io.Writer) Logger { + logger := slog.New(slog.NewJSONHandler(w, &slog.HandlerOptions{ + Level: slog.LevelDebug, + ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr { + // Remove time from the output for predictable test output.
+ if a.Key == slog.TimeKey { + return slog.Attr{} + } + + return a + }, + })) + return &baseLogger{logger} +} + +// tabHandler is a slog.Handler that pads the message to a fixed width so the attributes line up. +type tabHandler struct { + h slog.Handler +} + +func (th tabHandler) Handle(ctx context.Context, r slog.Record) error { + // Format the message with some spaces between the message and the attributes. + formattedMsg := fmt.Sprintf("%-44s", r.Message) + + // Create a new Record with the formatted message. + record := slog.NewRecord(r.Time, r.Level, formattedMsg, r.PC) + r.Attrs(func(a slog.Attr) bool { + record.Add(a) + return true + }) + return th.h.Handle(ctx, record) +} + +func (th *tabHandler) Enabled(ctx context.Context, lvl slog.Level) bool { + return th.h.Enabled(ctx, lvl) +} + +func (th *tabHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &tabHandler{h: th.h.WithAttrs(attrs)} +} + +func (th *tabHandler) WithGroup(name string) slog.Handler { + return &tabHandler{h: th.h.WithGroup(name)} } diff --git a/libs/log/tm_logger_test.go b/libs/log/logger_test.go similarity index 69% rename from libs/log/tm_logger_test.go rename to libs/log/logger_test.go index 95b4fd5379d..7a1ae7cc5c0 100644 --- a/libs/log/tm_logger_test.go +++ b/libs/log/logger_test.go @@ -12,7 +12,7 @@ import ( func TestLoggerLogsItsErrors(t *testing.T) { var buf bytes.Buffer - logger := log.NewTMLogger(&buf) + logger := log.NewLogger(&buf) logger.Info("foo", "baz baz", "bar") msg := strings.TrimSpace(buf.String()) if !strings.Contains(msg, "foo") { @@ -23,7 +23,7 @@ func TestLoggerLogsItsErrors(t *testing.T) { func TestInfo(t *testing.T) { var bufInfo bytes.Buffer - l := log.NewTMLogger(&bufInfo) + l := log.NewLogger(&bufInfo) l.Info("Client initialized with old header (trusted is more recent)", "old", 42, "trustedHeight", "forty two", @@ -33,7 +33,7 @@ func TestInfo(t *testing.T) { // Remove the timestamp information to allow // us to test against the expected message. - receivedmsg := strings.Split(msg, "] ")[1] + receivedmsg := strings.Split(msg, " ")[1] const expectedmsg = `Client initialized with old header (trusted is more recent) old=42 trustedHeight="forty two" @@ -46,7 +46,7 @@ func TestInfo(t *testing.T) { func TestDebug(t *testing.T) { var bufDebug bytes.Buffer - ld := log.NewTMLogger(&bufDebug) + ld := log.NewLogger(&bufDebug) ld.Debug("Client initialized with old header (trusted is more recent)", "old", 42, "trustedHeight", "forty two", @@ -56,7 +56,30 @@ func TestDebug(t *testing.T) { // Remove the timestamp information to allow // us to test against the expected message. - receivedmsg := strings.Split(msg, "] ")[1] + receivedmsg := strings.Split(msg, " ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + +func TestWarn(t *testing.T) { + var bufErr bytes.Buffer + + le := log.NewLogger(&bufErr) + le.Warn("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufErr.String()) + + // Remove the timestamp information to allow + // us to test against the expected message.
+ receivedmsg := strings.Split(msg, " ")[1] const expectedmsg = `Client initialized with old header (trusted is more recent) old=42 trustedHeight="forty two" @@ -69,7 +92,7 @@ func TestDebug(t *testing.T) { func TestError(t *testing.T) { var bufErr bytes.Buffer - le := log.NewTMLogger(&bufErr) + le := log.NewLogger(&bufErr) le.Error("Client initialized with old header (trusted is more recent)", "old", 42, "trustedHeight", "forty two", @@ -79,7 +102,7 @@ func TestError(t *testing.T) { // Remove the timestamp information to allow // us to test against the expected message. - receivedmsg := strings.Split(msg, "] ")[1] + receivedmsg := strings.Split(msg, " ")[1] const expectedmsg = `Client initialized with old header (trusted is more recent) old=42 trustedHeight="forty two" @@ -89,15 +112,16 @@ } } -func BenchmarkTMLoggerSimple(b *testing.B) { - benchmarkRunner(b, log.NewTMLogger(io.Discard), baseInfoMessage) +func BenchmarkLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewLogger(io.Discard), baseInfoMessage) } -func BenchmarkTMLoggerContextual(b *testing.B) { - benchmarkRunner(b, log.NewTMLogger(io.Discard), withInfoMessage) +func BenchmarkLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewLogger(io.Discard), withInfoMessage) } func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + b.Helper() lc := logger.With("common_key", "common_value") b.ReportAllocs() b.ResetTimer() diff --git a/libs/log/nop_logger.go b/libs/log/nop_logger.go index 12d75abe6b7..b039e1da951 100644 --- a/libs/log/nop_logger.go +++ b/libs/log/nop_logger.go @@ -2,16 +2,14 @@ package log type nopLogger struct{} -// Interface assertions +// Interface assertions. var _ Logger = (*nopLogger)(nil) // NewNopLogger returns a logger that doesn't do anything. func NewNopLogger() Logger { return &nopLogger{} } -func (nopLogger) Info(string, ...interface{}) {} -func (nopLogger) Debug(string, ...interface{}) {} -func (nopLogger) Error(string, ...interface{}) {} - -func (l *nopLogger) With(...interface{}) Logger { - return l -} +func (nopLogger) Error(string, ...any) {} +func (nopLogger) Warn(string, ...any) {} +func (nopLogger) Info(string, ...any) {} +func (nopLogger) Debug(string, ...any) {} +func (l *nopLogger) With(...any) Logger { return l } diff --git a/libs/log/testing_logger.go b/libs/log/testing_logger.go index 7c6f661a745..a5649987a4c 100644 --- a/libs/log/testing_logger.go +++ b/libs/log/testing_logger.go @@ -1,57 +1,25 @@ package log import ( - "io" "os" "testing" - - "github.com/go-kit/log/term" ) -var ( - // reuse the same logger across all tests - _testingLogger Logger -) +// reuse the same logger across all tests. +var _testingLogger Logger -// TestingLogger returns a TMLogger which writes to STDOUT if testing being run +// TestingLogger returns a Logger which writes to STDOUT if testing is being run // with the verbose (-v) flag, NopLogger otherwise. // -// Note that the call to TestingLogger() must be made -// inside a test (not in the init func) because -// verbose flag only set at the time of testing. +// Note that the call to TestingLogger() must be made inside a test (not in the +// init func) because the verbose flag is only set at the time of testing. func TestingLogger() Logger { - return TestingLoggerWithOutput(os.Stdout) -} - -// TestingLoggerWOutput returns a TMLogger which writes to (w io.Writer) if testing being run -// with the verbose (-v) flag, NopLogger otherwise.
-// -// Note that the call to TestingLoggerWithOutput(w io.Writer) must be made -// inside a test (not in the init func) because -// verbose flag only set at the time of testing. -func TestingLoggerWithOutput(w io.Writer) Logger { - if _testingLogger != nil { - return _testingLogger - } - - if testing.Verbose() { - _testingLogger = NewTMLogger(NewSyncWriter(w)) - } else { - _testingLogger = NewNopLogger() - } - - return _testingLogger -} - -// TestingLoggerWithColorFn allow you to provide your own color function. See -// TestingLogger for documentation. -func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { if _testingLogger != nil { return _testingLogger } if testing.Verbose() { - _testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn) + _testingLogger = NewLogger(os.Stdout) } else { _testingLogger = NewNopLogger() } diff --git a/libs/log/tm_json_logger.go b/libs/log/tm_json_logger.go deleted file mode 100644 index 786b618da86..00000000000 --- a/libs/log/tm_json_logger.go +++ /dev/null @@ -1,24 +0,0 @@ -package log - -import ( - "io" - - kitlog "github.com/go-kit/log" -) - -// NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. -func NewTMJSONLogger(w io.Writer) Logger { - logger := kitlog.NewJSONLogger(w) - logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC) - return &tmLogger{logger} -} - -// NewTMJSONLoggerNoTS is the same as NewTMJSONLogger, but without the -// timestamp. -func NewTMJSONLoggerNoTS(w io.Writer) Logger { - logger := kitlog.NewJSONLogger(w) - return &tmLogger{logger} -} diff --git a/libs/log/tm_logger.go b/libs/log/tm_logger.go deleted file mode 100644 index ac0d08adb00..00000000000 --- a/libs/log/tm_logger.go +++ /dev/null @@ -1,86 +0,0 @@ -package log - -import ( - "fmt" - "io" - - kitlog "github.com/go-kit/log" - kitlevel "github.com/go-kit/log/level" - "github.com/go-kit/log/term" -) - -const ( - msgKey = "_msg" // "_" prefixed to avoid collisions - moduleKey = "module" -) - -type tmLogger struct { - srcLogger kitlog.Logger -} - -// Interface assertions -var _ Logger = (*tmLogger)(nil) - -// NewTMLogger returns a logger that encodes msg and keyvals to the Writer -// using go-kit's log as an underlying logger and our custom formatter. Note -// that underlying logger could be swapped with something else. -func NewTMLogger(w io.Writer) Logger { - // Color by level value - colorFn := func(keyvals ...interface{}) term.FgBgColor { - if keyvals[0] != kitlevel.Key() { - panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) - } - switch keyvals[1].(kitlevel.Value).String() { - case "debug": - return term.FgBgColor{Fg: term.DarkGray} - case "error": - return term.FgBgColor{Fg: term.Red} - default: - return term.FgBgColor{} - } - } - - return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} -} - -// NewTMLoggerWithColorFn allows you to provide your own color function. See -// NewTMLogger for documentation. -func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { - return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} -} - -// Info logs a message at level Info. 
-func (l *tmLogger) Info(msg string, keyvals ...interface{}) { - lWithLevel := kitlevel.Info(l.srcLogger) - - if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { - errLogger := kitlevel.Error(l.srcLogger) - kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again - } -} - -// Debug logs a message at level Debug. -func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { - lWithLevel := kitlevel.Debug(l.srcLogger) - - if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { - errLogger := kitlevel.Error(l.srcLogger) - kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again - } -} - -// Error logs a message at level Error. -func (l *tmLogger) Error(msg string, keyvals ...interface{}) { - lWithLevel := kitlevel.Error(l.srcLogger) - - lWithMsg := kitlog.With(lWithLevel, msgKey, msg) - if err := lWithMsg.Log(keyvals...); err != nil { - lWithMsg.Log("err", err) //nolint:errcheck // no need to check error again - } -} - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Info, Debug or Error. -func (l *tmLogger) With(keyvals ...interface{}) Logger { - return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} -} diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go deleted file mode 100644 index 1d8cb80aac9..00000000000 --- a/libs/log/tmfmt_logger.go +++ /dev/null @@ -1,141 +0,0 @@ -package log - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "strings" - "sync" - "time" - - kitlog "github.com/go-kit/log" - kitlevel "github.com/go-kit/log/level" - "github.com/go-logfmt/logfmt" -) - -type tmfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *tmfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var tmfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc tmfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type tmfmtLogger struct { - w io.Writer -} - -// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in -// CometBFT custom format. Note complex types (structs, maps, slices) -// formatted as "%+v". -// -// Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. 
-func NewTMFmtLogger(w io.Writer) kitlog.Logger { - return &tmfmtLogger{w} -} - -func (l tmfmtLogger) Log(keyvals ...interface{}) error { - enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) - enc.Reset() - defer tmfmtEncoderPool.Put(enc) - - const unknown = "unknown" - lvl := "none" - msg := unknown - module := unknown - - // indexes of keys to skip while encoding later - excludeIndexes := make([]int, 0) - - for i := 0; i < len(keyvals)-1; i += 2 { - // Extract level - switch keyvals[i] { - case kitlevel.Key(): - excludeIndexes = append(excludeIndexes, i) - switch keyvals[i+1].(type) { //nolint:gocritic - case string: - lvl = keyvals[i+1].(string) - case kitlevel.Value: - lvl = keyvals[i+1].(kitlevel.Value).String() - default: - panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) - } - // and message - case msgKey: - excludeIndexes = append(excludeIndexes, i) - msg = keyvals[i+1].(string) - // and module (could be multiple keyvals; if such case last keyvalue wins) - case moduleKey: - excludeIndexes = append(excludeIndexes, i) - module = keyvals[i+1].(string) - } - - // Print []byte as a hexadecimal string (uppercased) - if b, ok := keyvals[i+1].([]byte); ok { - keyvals[i+1] = strings.ToUpper(hex.EncodeToString(b)) - } - - // Realize stringers - if s, ok := keyvals[i+1].(fmt.Stringer); ok { - keyvals[i+1] = s.String() - } - - } - - // Form a custom CometBFT line - // - // Example: - // D[2016-05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) - // - // Description: - // D - first character of the level, uppercase (ASCII only) - // [2016-05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) - // Stopping ... - message - enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2006-01-02|15:04:05.000"), msg)) - - if module != unknown { - enc.buf.WriteString("module=" + module + " ") - } - -KeyvalueLoop: - for i := 0; i < len(keyvals)-1; i += 2 { - for _, j := range excludeIndexes { - if i == j { - continue KeyvalueLoop - } - } - - err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) - if err == logfmt.ErrUnsupportedValueType { - enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) //nolint:errcheck // no need to check error again - } else if err != nil { - return err - } - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. 
- if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go deleted file mode 100644 index d4e8f8bfec2..00000000000 --- a/libs/log/tmfmt_logger_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package log_test - -import ( - "bytes" - "errors" - "io" - "math" - "regexp" - "testing" - - kitlog "github.com/go-kit/log" - "github.com/stretchr/testify/assert" - - "github.com/cometbft/cometbft/libs/log" -) - -func TestTMFmtLogger(t *testing.T) { - t.Parallel() - buf := &bytes.Buffer{} - logger := log.NewTMFmtLogger(buf) - - if err := logger.Log("hello", "world"); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) - - buf.Reset() - if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) - - buf.Reset() - if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) - - buf.Reset() - if err := logger.Log("level", "error"); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) - - buf.Reset() - if err := logger.Log("_msg", "Hello"); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) - - buf.Reset() - if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) - - buf.Reset() - if err := logger.Log("hash", []byte("test me")); err != nil { - t.Fatal(err) - } - assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hash=74657374206D65\n$`), buf.String()) -} - -func BenchmarkTMFmtLoggerSimple(b *testing.B) { - benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), baseMessage) -} - -func BenchmarkTMFmtLoggerContextual(b *testing.B) { - benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), withMessage) -} - -func TestTMFmtLoggerConcurrency(t *testing.T) { - t.Parallel() - testConcurrency(t, log.NewTMFmtLogger(io.Discard), 10000) -} - -func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { - lc := kitlog.With(logger, "common_key", "common_value") - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - f(lc) - } -} - -var ( - baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } //nolint:errcheck - withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } //nolint:errcheck -) - -// These test are designed to be run with the race detector. 
- -func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { - n := int(math.Sqrt(float64(total))) - share := total / n - - errC := make(chan error, n) - - for i := 0; i < n; i++ { - go func() { - errC <- spam(logger, share) - }() - } - - for i := 0; i < n; i++ { - err := <-errC - if err != nil { - t.Fatalf("concurrent logging error: %v", err) - } - } -} - -func spam(logger kitlog.Logger, count int) error { - for i := 0; i < count; i++ { - err := logger.Log("key", i) - if err != nil { - return err - } - } - return nil -} - -type mymap map[int]int - -func (m mymap) String() string { return "special_behavior" } diff --git a/libs/log/tracing_logger.go b/libs/log/tracing_logger.go index d2a6ff44e5e..2046fc27f62 100644 --- a/libs/log/tracing_logger.go +++ b/libs/log/tracing_logger.go @@ -28,24 +28,30 @@ type tracingLogger struct { next Logger } -func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { - l.next.Info(msg, formatErrors(keyvals)...) +func (l *tracingLogger) Error(msg string, keyvals ...any) { + l.next.Error(msg, formatErrors(keyvals)...) } -func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { - l.next.Debug(msg, formatErrors(keyvals)...) +func (l *tracingLogger) Warn(msg string, keyvals ...any) { + l.next.Warn(msg, formatErrors(keyvals)...) } -func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { - l.next.Error(msg, formatErrors(keyvals)...) +func (l *tracingLogger) Info(msg string, keyvals ...any) { + l.next.Info(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Debug(msg string, keyvals ...any) { + if LogDebug { + l.next.Debug(msg, formatErrors(keyvals)...) + } } -func (l *tracingLogger) With(keyvals ...interface{}) Logger { +func (l *tracingLogger) With(keyvals ...any) Logger { return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} } -func formatErrors(keyvals []interface{}) []interface{} { - newKeyvals := make([]interface{}, len(keyvals)) +func formatErrors(keyvals []any) []any { + newKeyvals := make([]any, len(keyvals)) copy(newKeyvals, keyvals) for i := 0; i < len(newKeyvals)-1; i += 2 { if err, ok := newKeyvals[i+1].(stackTracer); ok { diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go index 9af2fd0ef4d..d0251779f53 100644 --- a/libs/log/tracing_logger_test.go +++ b/libs/log/tracing_logger_test.go @@ -15,7 +15,7 @@ import ( func TestTracingLogger(t *testing.T) { var buf bytes.Buffer - logger := log.NewTMJSONLoggerNoTS(&buf) + logger := log.NewJSONLoggerNoTS(&buf) logger1 := log.NewTracingLogger(logger) err1 := errors.New("courage is grace under pressure") @@ -24,11 +24,11 @@ func TestTracingLogger(t *testing.T) { want := strings.ReplaceAll( strings.ReplaceAll( - `{"_msg":"foo","err1":"`+ + `{"level":"INFO","msg":"foo","err1":"`+ fmt.Sprintf("%+v", err1)+ `","err2":"`+ fmt.Sprintf("%+v", err2)+ - `","level":"info"}`, + `"}`, "\t", "", ), "\n", "") have := strings.ReplaceAll(strings.ReplaceAll(strings.TrimSpace(buf.String()), "\\n", ""), "\\t", "") @@ -44,10 +44,7 @@ func TestTracingLogger(t *testing.T) { "foo", "err2", stderr.New("once you choose hope, anything's possible"), ) - want = `{"_msg":"foo",` + - `"err1":"opportunities don't happen. You create them",` + - `"err2":"once you choose hope, anything's possible",` + - `"level":"info"}` + want = `{"level":"INFO","msg":"foo","err1":"opportunities don't happen. 
You create them","err2":"once you choose hope, anything's possible"}` have = strings.TrimSpace(buf.String()) if want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) @@ -57,7 +54,7 @@ func TestTracingLogger(t *testing.T) { logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") - want = `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}` + want = `{"level":"INFO","msg":"foo","user":"Sam","context":"value","bar":"baz"}` have = strings.TrimSpace(buf.String()) if want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) diff --git a/libs/math/fraction.go b/libs/math/fraction.go index a8d28559243..ffa58dd71ef 100644 --- a/libs/math/fraction.go +++ b/libs/math/fraction.go @@ -21,13 +21,13 @@ func (fr Fraction) String() string { return fmt.Sprintf("%d/%d", fr.Numerator, fr.Denominator) } -// ParseFractions takes the string of a fraction as input i.e "2/3" and converts this +// ParseFraction takes the string of a fraction as input i.e "2/3" and converts this // to the equivalent fraction else returns an error. The format of the string must be // one number followed by a slash (/) and then the other number. func ParseFraction(f string) (Fraction, error) { o := strings.Split(f, "/") if len(o) != 2 { - return Fraction{}, errors.New("incorrect formating: should have a single slash i.e. \"1/3\"") + return Fraction{}, errors.New("incorrect formatting: should have a single slash i.e. \"1/3\"") } numerator, err := strconv.ParseUint(o[0], 10, 64) if err != nil { diff --git a/libs/math/fraction_test.go b/libs/math/fraction_test.go index 73ca0f6c83f..550cf9162c8 100644 --- a/libs/math/fraction_test.go +++ b/libs/math/fraction_test.go @@ -4,10 +4,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestParseFraction(t *testing.T) { - testCases := []struct { f string exp Fraction @@ -76,11 +76,10 @@ func TestParseFraction(t *testing.T) { for idx, tc := range testCases { output, err := ParseFraction(tc.f) if tc.err { - assert.Error(t, err, idx) + require.Error(t, err, idx) } else { - assert.NoError(t, err, idx) + require.NoError(t, err, idx) } assert.Equal(t, tc.exp, output, idx) } - } diff --git a/libs/math/math.go b/libs/math/math.go index cf567a97a59..fa33d63e14b 100644 --- a/libs/math/math.go +++ b/libs/math/math.go @@ -14,7 +14,7 @@ func MaxInt(a, b int) int { return b } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- func MinInt64(a, b int64) int64 { if a < b { diff --git a/libs/math/safemath.go b/libs/math/safemath.go index ff7f0908f94..0464c35b254 100644 --- a/libs/math/safemath.go +++ b/libs/math/safemath.go @@ -5,12 +5,14 @@ import ( "math" ) -var ErrOverflowInt32 = errors.New("int32 overflow") -var ErrOverflowUint8 = errors.New("uint8 overflow") -var ErrOverflowInt8 = errors.New("int8 overflow") +var ( + ErrOverflowInt32 = errors.New("int32 overflow") + ErrOverflowUint8 = errors.New("uint8 overflow") + ErrOverflowInt8 = errors.New("int8 overflow") +) // SafeAddInt32 adds two int32 integers -// If there is an overflow this will panic +// If there is an overflow this will panic. func SafeAddInt32(a, b int32) int32 { if b > 0 && (a > math.MaxInt32-b) { panic(ErrOverflowInt32) @@ -21,7 +23,7 @@ func SafeAddInt32(a, b int32) int32 { } // SafeSubInt32 subtracts two int32 integers -// If there is an overflow this will panic +// If there is an overflow this will panic. 
func SafeSubInt32(a, b int32) int32 { if b > 0 && (a < math.MinInt32+b) { panic(ErrOverflowInt32) @@ -32,7 +34,7 @@ func SafeSubInt32(a, b int32) int32 { } // SafeConvertInt32 takes a int and checks if it overflows -// If there is an overflow this will panic +// If there is an overflow this will panic. func SafeConvertInt32(a int64) int32 { if a > math.MaxInt32 { panic(ErrOverflowInt32) @@ -43,7 +45,7 @@ func SafeConvertInt32(a int64) int32 { } // SafeConvertUint8 takes an int64 and checks if it overflows -// If there is an overflow it returns an error +// If there is an overflow it returns an error. func SafeConvertUint8(a int64) (uint8, error) { if a > math.MaxUint8 { return 0, ErrOverflowUint8 @@ -54,7 +56,7 @@ func SafeConvertUint8(a int64) (uint8, error) { } // SafeConvertInt8 takes an int64 and checks if it overflows -// If there is an overflow it returns an error +// If there is an overflow it returns an error. func SafeConvertInt8(a int64) (int8, error) { if a > math.MaxInt8 { return 0, ErrOverflowInt8 diff --git a/libs/metrics/discard/discard.go b/libs/metrics/discard/discard.go new file mode 100644 index 00000000000..c5060e8b392 --- /dev/null +++ b/libs/metrics/discard/discard.go @@ -0,0 +1,62 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package discard provides a no-op metrics backend. +package discard + +import "github.com/cometbft/cometbft/libs/metrics" + +type counter struct{} + +// NewCounter returns a new no-op counter. +func NewCounter() metrics.Counter { return counter{} } + +// With implements Counter. +func (c counter) With(...string) metrics.Counter { return c } + +// Add implements Counter. +func (counter) Add(float64) {} + +type gauge struct{} + +// NewGauge returns a new no-op gauge. +func NewGauge() metrics.Gauge { return gauge{} } + +// With implements Gauge. +func (g gauge) With(...string) metrics.Gauge { return g } + +// Set implements Gauge. +func (gauge) Set(float64) {} + +// Add implements metrics.Gauge. +func (gauge) Add(float64) {} + +type histogram struct{} + +// NewHistogram returns a new no-op histogram. +func NewHistogram() metrics.Histogram { return histogram{} } + +// With implements Histogram. +func (h histogram) With(...string) metrics.Histogram { return h } + +// Observe implements histogram. 
+func (histogram) Observe(float64) {} diff --git a/libs/metrics/doc.go b/libs/metrics/doc.go new file mode 100644 index 00000000000..95e5b93acce --- /dev/null +++ b/libs/metrics/doc.go @@ -0,0 +1,118 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package metrics provides a framework for application instrumentation. It's +// primarily designed to help you get started with good and robust +// instrumentation, and to help you migrate from a less-capable system like +// Graphite to a more-capable system like Prometheus. If your organization has +// already standardized on an instrumentation system like Prometheus, and has no +// plans to change, it may make sense to use that system's instrumentation +// library directly. +// +// This package provides three core metric abstractions (Counter, Gauge, and +// Histogram) and implementations for almost all common instrumentation +// backends. Each metric has an observation method (Add, Set, or Observe, +// respectively) used to record values, and a With method to "scope" the +// observation by various parameters. For example, you might have a Histogram to +// record request durations, parameterized by the method that's being called. +// +// var requestDuration metrics.Histogram +// // ... +// requestDuration.With("method", "MyMethod").Observe(time.Since(begin)) +// +// This allows a single high-level metrics object (requestDuration) to work with +// many code paths somewhat dynamically. The concept of With is fully supported +// in some backends like Prometheus, and not supported in other backends like +// Graphite. So, With may be a no-op, depending on the concrete implementation +// you choose. Please check the implementation to know for sure. For +// implementations that don't provide With, it's necessary to fully parameterize +// each metric in the metric name, e.g. +// +// // Statsd +// c := statsd.NewCounter("request_duration_MyMethod_200") +// c.Add(1) +// +// // Prometheus +// c := prometheus.NewCounter(stdprometheus.CounterOpts{ +// Name: "request_duration", +// ... +// }, []string{"method", "status_code"}) +// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1) +// +// # Usage +// +// Metrics are dependencies, and should be passed to the components that need +// them in the same way you'd construct and pass a database handle, or reference +// to another component. Metrics should *not* be created in the global scope. 
+// Instead, instantiate metrics in your func main, using whichever concrete +// implementation is appropriate for your organization. +// +// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ +// Namespace: "myteam", +// Subsystem: "foosvc", +// Name: "request_latency_seconds", +// Help: "Incoming request latency in seconds.", +// }, []string{"method", "status_code"}) +// +// Write your components to take the metrics they will use as parameters to +// their constructors. Use the interface types, not the concrete types. That is, +// +// // NewAPI takes metrics.Histogram, not *prometheus.Summary +// func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { +// // ... +// } +// +// func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { +// begin := time.Now() +// // ... +// a.latency.Observe(time.Since(begin).Seconds()) +// } +// +// Finally, pass the metrics as dependencies when building your object graph. +// This should happen in func main, not in the global scope. +// +// api := NewAPI(store, logger, latency) +// http.ListenAndServe("/", api) +// +// Note that metrics are "write-only" interfaces. +// +// # Implementation details +// +// All metrics are safe for concurrent use. Considerable design influence has +// been taken from https://github.com/codahale/metrics and +// https://prometheus.io. +// +// Each telemetry system has different semantics for label values, push vs. +// pull, support for histograms, etc. These properties influence the design of +// their respective packages. This table attempts to summarize the key points of +// distinction. +// +// SYSTEM DIM COUNTERS GAUGES HISTOGRAMS +// dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each +// statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each +// graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate +// expvar 1 atomic atomic synthetic, batch, in-place expose +// influx n custom custom custom +// prometheus n native native native +// pcp 1 native native native +// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate +package metrics diff --git a/libs/metrics/lv/labelvalues.go b/libs/metrics/lv/labelvalues.go new file mode 100644 index 00000000000..7a3cc9bc50a --- /dev/null +++ b/libs/metrics/lv/labelvalues.go @@ -0,0 +1,36 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
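+
+// A rough usage sketch of the With method defined below (the names here are
+// illustrative only, not part of the package): each call returns a new
+// aggregate slice without mutating the receiver, and an odd-length argument
+// list is padded with "unknown".
+//
+//	base := lv.LabelValues{"method", "MyMethod"}
+//	both := base.With("status_code", "200") // method=MyMethod status_code=200
+//	odd := base.With("dangling")            // method=MyMethod dangling=unknown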
+ +package lv + +// LabelValues is a type alias that provides validation on its With method. +// Metrics may include it as a member to help them satisfy With semantics and +// save some code duplication. +type LabelValues []string + +// With validates the input, and returns a new aggregate labelValues. +func (lvs LabelValues) With(labelValues ...string) LabelValues { + if len(labelValues)%2 != 0 { + labelValues = append(labelValues, "unknown") + } + return append(lvs, labelValues...) +} diff --git a/libs/metrics/lv/labelvalues_test.go b/libs/metrics/lv/labelvalues_test.go new file mode 100644 index 00000000000..42af4c59a7a --- /dev/null +++ b/libs/metrics/lv/labelvalues_test.go @@ -0,0 +1,44 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package lv + +import ( + "strings" + "testing" +) + +func TestWith(t *testing.T) { + var a LabelValues + b := a.With("a", "1") + c := a.With("b", "2", "c", "3") + + if want, have := "", strings.Join(a, ""); want != have { + t.Errorf("With appears to mutate the original LabelValues: want %q, have %q", want, have) + } + if want, have := "a1", strings.Join(b, ""); want != have { + t.Errorf("With does not appear to return the right thing: want %q, have %q", want, have) + } + if want, have := "b2c3", strings.Join(c, ""); want != have { + t.Errorf("With does not appear to return the right thing: want %q, have %q", want, have) + } +} diff --git a/libs/metrics/metrics.go b/libs/metrics/metrics.go new file mode 100644 index 00000000000..f98e568d704 --- /dev/null +++ b/libs/metrics/metrics.go @@ -0,0 +1,47 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package metrics + +// Counter describes a metric that accumulates values monotonically. +// An example of a counter is the number of received HTTP requests. +type Counter interface { + With(labelValues ...string) Counter + Add(delta float64) +} + +// Gauge describes a metric that takes specific values over time. +// An example of a gauge is the current depth of a job queue. +type Gauge interface { + With(labelValues ...string) Gauge + Set(value float64) + Add(delta float64) +} + +// Histogram describes a metric that takes repeated observations of the same +// kind of thing, and produces a statistical summary of those observations, +// typically expressed as quantiles or buckets. An example of a histogram is +// HTTP request latencies. +type Histogram interface { + With(labelValues ...string) Histogram + Observe(value float64) +} diff --git a/libs/metrics/prometheus/prometheus.go b/libs/metrics/prometheus/prometheus.go new file mode 100644 index 00000000000..c985a6645d5 --- /dev/null +++ b/libs/metrics/prometheus/prometheus.go @@ -0,0 +1,187 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package prometheus provides Prometheus implementations for metrics. +// Individual metrics are mapped to their Prometheus counterparts, and +// (depending on the constructor used) may be automatically registered in the +// global Prometheus metrics registry. +package prometheus + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/cometbft/cometbft/libs/metrics" + "github.com/cometbft/cometbft/libs/metrics/lv" +) + +// Counter implements Counter, via a Prometheus CounterVec. +type Counter struct { + cv *prometheus.CounterVec + lvs lv.LabelValues +} + +// NewCounterFrom constructs and registers a Prometheus CounterVec, +// and returns a usable Counter object. +func NewCounterFrom(opts prometheus.CounterOpts, labelNames []string) *Counter { + cv := prometheus.NewCounterVec(opts, labelNames) + prometheus.MustRegister(cv) + return NewCounter(cv) +} + +// NewCounter wraps the CounterVec and returns a usable Counter object. 
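+// It does not register cv: either construct the metric with NewCounterFrom,
+// or register the underlying CounterVec yourself (for example with
+// prometheus.MustRegister) before it is scraped.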
+func NewCounter(cv *prometheus.CounterVec) *Counter { + return &Counter{ + cv: cv, + } +} + +// With implements Counter. +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + cv: c.cv, + lvs: c.lvs.With(labelValues...), + } +} + +// Add implements Counter. +func (c *Counter) Add(delta float64) { + c.cv.With(makeLabels(c.lvs...)).Add(delta) +} + +// Gauge implements Gauge, via a Prometheus GaugeVec. +type Gauge struct { + gv *prometheus.GaugeVec + lvs lv.LabelValues +} + +// NewGaugeFrom constructs and registers a Prometheus GaugeVec, +// and returns a usable Gauge object. +func NewGaugeFrom(opts prometheus.GaugeOpts, labelNames []string) *Gauge { + gv := prometheus.NewGaugeVec(opts, labelNames) + prometheus.MustRegister(gv) + return NewGauge(gv) +} + +// NewGauge wraps the GaugeVec and returns a usable Gauge object. +func NewGauge(gv *prometheus.GaugeVec) *Gauge { + return &Gauge{ + gv: gv, + } +} + +// With implements Gauge. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + gv: g.gv, + lvs: g.lvs.With(labelValues...), + } +} + +// Set implements Gauge. +func (g *Gauge) Set(value float64) { + g.gv.With(makeLabels(g.lvs...)).Set(value) +} + +// Add is supported by Prometheus GaugeVecs. +func (g *Gauge) Add(delta float64) { + g.gv.With(makeLabels(g.lvs...)).Add(delta) +} + +// Summary implements Histogram, via a Prometheus SummaryVec. The difference +// between a Summary and a Histogram is that Summaries don't require predefined +// quantile buckets, but cannot be statistically aggregated. +type Summary struct { + sv *prometheus.SummaryVec + lvs lv.LabelValues +} + +// NewSummaryFrom constructs and registers a Prometheus SummaryVec, +// and returns a usable Summary object. +func NewSummaryFrom(opts prometheus.SummaryOpts, labelNames []string) *Summary { + sv := prometheus.NewSummaryVec(opts, labelNames) + prometheus.MustRegister(sv) + return NewSummary(sv) +} + +// NewSummary wraps the SummaryVec and returns a usable Summary object. +func NewSummary(sv *prometheus.SummaryVec) *Summary { + return &Summary{ + sv: sv, + } +} + +// With implements Histogram. +func (s *Summary) With(labelValues ...string) metrics.Histogram { + return &Summary{ + sv: s.sv, + lvs: s.lvs.With(labelValues...), + } +} + +// Observe implements Histogram. +func (s *Summary) Observe(value float64) { + s.sv.With(makeLabels(s.lvs...)).Observe(value) +} + +// Histogram implements Histogram via a Prometheus HistogramVec. The difference +// between a Histogram and a Summary is that Histograms require predefined +// quantile buckets, and can be statistically aggregated. +type Histogram struct { + hv *prometheus.HistogramVec + lvs lv.LabelValues +} + +// NewHistogramFrom constructs and registers a Prometheus HistogramVec, +// and returns a usable Histogram object. +func NewHistogramFrom(opts prometheus.HistogramOpts, labelNames []string) *Histogram { + hv := prometheus.NewHistogramVec(opts, labelNames) + prometheus.MustRegister(hv) + return NewHistogram(hv) +} + +// NewHistogram wraps the HistogramVec and returns a usable Histogram object. +func NewHistogram(hv *prometheus.HistogramVec) *Histogram { + return &Histogram{ + hv: hv, + } +} + +// With implements Histogram. +func (h *Histogram) With(labelValues ...string) metrics.Histogram { + return &Histogram{ + hv: h.hv, + lvs: h.lvs.With(labelValues...), + } +} + +// Observe implements Histogram. 
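+// Prometheus buckets are cumulative, so a sample is counted by every bucket
+// whose upper bound is greater than or equal to the observed value.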
+func (h *Histogram) Observe(value float64) { + h.hv.With(makeLabels(h.lvs...)).Observe(value) +} + +func makeLabels(labelValues ...string) prometheus.Labels { + labels := prometheus.Labels{} + for i := 0; i < len(labelValues); i += 2 { + labels[labelValues[i]] = labelValues[i+1] + } + return labels +} diff --git a/libs/metrics/prometheus/prometheus_test.go b/libs/metrics/prometheus/prometheus_test.go new file mode 100644 index 00000000000..86ead05e217 --- /dev/null +++ b/libs/metrics/prometheus/prometheus_test.go @@ -0,0 +1,245 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package prometheus + +import ( + "io" + "math" + "math/rand" + "net/http" + "net/http/httptest" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + "time" + + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/libs/metrics/teststat" +) + +func TestCounter(t *testing.T) { + s := newServer() + defer s.Close() + + namespace, subsystem, name := "ns", "ss", "foo" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{alpha="alpha-value",beta="beta-value"} ([0-9\.]+)`) + + counter := NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is the help string.", + }, []string{"alpha", "beta"}).With("beta", "beta-value", "alpha", "alpha-value") // order shouldn't matter + + // minimal delay to allow the prometheus server to come up with results and avoid errors during test + time.Sleep(100 * time.Millisecond) + + value := func() float64 { + matches := re.FindStringSubmatch(scrape(t, s)) + require.Greater(t, len(matches), 0) + f, _ := strconv.ParseFloat(matches[1], 64) + return f + } + + if err := teststat.TestCounter(counter, value); err != nil { + t.Fatal(err) + } +} + +func TestGauge(t *testing.T) { + s := newServer() + defer s.Close() + + namespace, subsystem, name := "aaa", "bbb", "ccc" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{foo="bar"} ([0-9\.]+)`) + + gauge := NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is a different help string.", + }, []string{"foo"}).With("foo", "bar") + + // minimal delay to allow the prometheus server to come up with results and avoid errors during test + time.Sleep(100 * 
time.Millisecond) + + value := func() []float64 { + matches := re.FindStringSubmatch(scrape(t, s)) + require.Greater(t, len(matches), 0) + f, _ := strconv.ParseFloat(matches[1], 64) + return []float64{f} + } + + if err := teststat.TestGauge(gauge, value); err != nil { + t.Fatal(err) + } +} + +func TestSummary(t *testing.T) { + s := newServer() + defer s.Close() + + namespace, subsystem, name := "test", "prometheus", "summary" + re50 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{a="a",b="b",quantile="0.5"} ([0-9\.]+)`) + re90 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{a="a",b="b",quantile="0.9"} ([0-9\.]+)`) + re99 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{a="a",b="b",quantile="0.99"} ([0-9\.]+)`) + + summary := NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is the help string for the summary.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, []string{"a", "b"}).With("b", "b").With("a", "a") + + // minimal delay to allow the prometheus server to come up with results and avoid errors during test + time.Sleep(100 * time.Millisecond) + + quantiles := func() (float64, float64, float64, float64) { + buf := scrape(t, s) + match50 := re50.FindStringSubmatch(buf) + p50, _ := strconv.ParseFloat(match50[1], 64) + match90 := re90.FindStringSubmatch(buf) + p90, _ := strconv.ParseFloat(match90[1], 64) + match99 := re99.FindStringSubmatch(buf) + p99, _ := strconv.ParseFloat(match99[1], 64) + return p50, p90, 0, p99 + } + + if err := teststat.TestHistogram(summary, quantiles, 0.01); err != nil { + t.Fatal(err) + } +} + +func TestHistogram(t *testing.T) { + // Prometheus reports histograms as a count of observations that fell into + // each predefined bucket, with the bucket value representing a global upper + // limit. That is, the count monotonically increases over the buckets. This + // requires a different strategy to test. + + s := newServer() + defer s.Close() + + namespace, subsystem, name := "test", "prometheus", "histogram" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `_bucket{x="1",le="([0-9]+|\+Inf)"} ([0-9\.]+)`) + + numStdev := 3 + bucketMin := (teststat.Mean - (numStdev * teststat.Stdev)) + bucketMax := (teststat.Mean + (numStdev * teststat.Stdev)) + if bucketMin < 0 { + bucketMin = 0 + } + bucketCount := 10 + bucketDelta := (bucketMax - bucketMin) / bucketCount + buckets := []float64{} + for i := bucketMin; i <= bucketMax; i += bucketDelta { + buckets = append(buckets, float64(i)) + } + + histogram := NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is the help string for the histogram.", + Buckets: buckets, + }, []string{"x"}).With("x", "1") + + // minimal delay to allow the prometheus server to come up with results and avoid errors during test + time.Sleep(100 * time.Millisecond) + + // Can't TestHistogram, because Prometheus Histograms don't dynamically + // compute quantiles. Instead, they fill up buckets. So, let's populate the + // histogram kind of manually. + teststat.PopulateNormalHistogram(histogram, rand.Int()) + + // Then, we use ExpectedObservationsLessThan to validate. 
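+	// For a normal distribution, the number of observations at or below a
+	// bucket b is approximately Count * Φ((b-Mean)/Stdev), where Φ is the
+	// standard normal CDF; ExpectedObservationsLessThan (teststat.go)
+	// computes exactly this via math.Erf.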
+ for _, line := range strings.Split(scrape(t, s), "\n") { + match := re.FindStringSubmatch(line) + if match == nil { + continue + } + + bucket, _ := strconv.ParseInt(match[1], 10, 64) + have, _ := strconv.ParseFloat(match[2], 64) + + want := teststat.ExpectedObservationsLessThan(bucket) + if match[1] == "+Inf" { + want = int64(teststat.Count) // special case + } + + // Unfortunately, we observe experimentally that Prometheus is quite + // imprecise at the extremes. I'm setting a very high tolerance for now. + // It would be great to dig in and figure out whether that's a problem + // with my Expected calculation, or in Prometheus. + tolerance := 0.5 + if delta := math.Abs(float64(want) - float64(have)); (delta / float64(want)) > tolerance { + t.Errorf("Bucket %d: want %d, have %d (%.1f%%)", bucket, want, int(have), (100.0 * delta / float64(want))) + } + } +} + +func TestInconsistentLabelCardinality(t *testing.T) { + defer func() { + x := recover() + if x == nil { + t.Fatal("expected panic, got none") + } + err, ok := x.(error) + if !ok { + t.Fatalf("expected error, got %s", reflect.TypeOf(x)) + } + if want, have := "inconsistent label cardinality", err.Error(); !strings.HasPrefix(have, want) { + t.Fatalf("want %q, have %q", want, have) + } + }() + + NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "test", + Subsystem: "inconsistent_label_cardinality", + Name: "foobar", + Help: "This is the help string for the metric.", + }, []string{"a", "b"}).With( + "a", "1", "b", "2", "c", "KABOOM!", + ).Add(123) +} + +func newServer() *httptest.Server { + return httptest.NewServer(promhttp.HandlerFor(stdprometheus.DefaultGatherer, promhttp.HandlerOpts{})) +} + +func scrape(t *testing.T, s *httptest.Server) string { + t.Helper() + + resp, err := http.Get(s.URL) + require.NoError(t, err) + buf, err := io.ReadAll(resp.Body) + require.NoError(t, err) + err = resp.Body.Close() + require.NoError(t, err) + return string(buf) +} diff --git a/libs/metrics/teststat/populate.go b/libs/metrics/teststat/populate.go new file mode 100644 index 00000000000..cbc45a0d956 --- /dev/null +++ b/libs/metrics/teststat/populate.go @@ -0,0 +1,95 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package teststat + +import ( + "math" + "math/rand" + + "github.com/cometbft/cometbft/libs/metrics" +) + +// PopulateNormalHistogram makes a series of normal random observations into the +// histogram. 
The number of observations is determined by Count. The randomness +// is determined by Mean, Stdev, and the seed parameter. +// +// This is a low-level function, exported only for metrics that don't perform +// dynamic quantile computation, like a Prometheus Histogram (cf. Summary). In +// most cases, you don't need to use this function, and can use TestHistogram +// instead. +func PopulateNormalHistogram(h metrics.Histogram, seed int) { + r := rand.New(rand.NewSource(int64(seed))) //nolint:gosec + for i := 0; i < Count; i++ { + sample := r.NormFloat64()*float64(Stdev) + float64(Mean) + if sample < 0 { + sample = 0 + } + h.Observe(sample) + } +} + +func normalQuantiles() (p50, p90, p95, p99 float64) { + return nvq(50), nvq(90), nvq(95), nvq(99) +} + +func nvq(quantile int) float64 { + // https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function + return float64(Mean) + float64(Stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1) +} + +func erfinv(y float64) float64 { + // https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function + if y < -1.0 || y > 1.0 { + panic("invalid input") + } + + var ( + a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} + b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} + c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} + d = [2]float64{3.543889200, 1.637067800} + ) + + const y0 = 0.7 + var x, z float64 + + switch { + case math.Abs(y) == 1.0: + x = -y * math.Log(0.0) + case y < -y0: + z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) + x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) + default: + if y < y0 { + z = y * y + x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[2])*z+b[1])*z+b[0])*z + 1.0) + } else { + z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) + x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) + } + x -= (math.Erf(x) - y) / (2.0 / math.SqrtPi * math.Exp(-x*x)) + x -= (math.Erf(x) - y) / (2.0 / math.SqrtPi * math.Exp(-x*x)) + } + + return x +} diff --git a/libs/metrics/teststat/teststat.go b/libs/metrics/teststat/teststat.go new file mode 100644 index 00000000000..a2c9307550c --- /dev/null +++ b/libs/metrics/teststat/teststat.go @@ -0,0 +1,155 @@ +// The MIT License (MIT) + +// Copyright (c) 2015 Peter Bourgon + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package teststat provides helpers for testing metrics backends. 
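+//
+// A minimal sketch of the intended call pattern (the backend names are
+// hypothetical, for illustration only):
+//
+//	counter := mybackend.NewCounter("requests")
+//	value := func() float64 { return mybackend.CurrentValue("requests") }
+//	if err := teststat.TestCounter(counter, value); err != nil {
+//		t.Fatal(err)
+//	}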
+package teststat + +import ( + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "sort" + "strings" + + "github.com/cometbft/cometbft/libs/metrics" +) + +// TestCounter puts some deltas through the counter, and then calls the value +// func to check that the counter has the correct final value. +func TestCounter(counter metrics.Counter, value func() float64) error { + want := FillCounter(counter) + if have := value(); want != have { + return fmt.Errorf("want %f, have %f", want, have) + } + + return nil +} + +// FillCounter puts some deltas through the counter and returns the total value. +func FillCounter(counter metrics.Counter) float64 { + a := rand.Perm(100) + n := rand.Intn(len(a)) //nolint:gosec + + var want float64 + for i := 0; i < n; i++ { + f := float64(a[i]) + counter.Add(f) + want += f + } + return want +} + +// TestGauge puts some values through the gauge, and then calls the value func +// to check that the gauge has the correct final value. +func TestGauge(gauge metrics.Gauge, value func() []float64) error { + a := rand.Perm(100) + n := rand.Intn(len(a)) //nolint:gosec + + var want []float64 + for i := 0; i < n; i++ { + f := float64(a[i]) + gauge.Set(f) + want = append(want, f) + } + + for i := 0; i < n; i++ { + f := float64(a[i]) + gauge.Add(f) + want = append(want, want[len(want)-1]+f) + } + + have := value() + + switch len(have) { + case 0: + return errors.New("got 0 values") + case 1: // provider doesn't support multi-value gauges + if have[0] != want[len(want)-1] { + return fmt.Errorf("want %f, have %f", want, have) + } + default: // provider supports multi-value gauges + sort.Float64s(want) + sort.Float64s(have) + if !reflect.DeepEqual(want, have) { + return fmt.Errorf("want %f, have %f", want, have) + } + } + + return nil +} + +// TestHistogram puts some observations through the histogram, and then calls +// the quantiles func to check that the histogram has computed the correct +// quantiles within some tolerance. +func TestHistogram(histogram metrics.Histogram, quantiles func() (p50, p90, p95, p99 float64), tolerance float64) error { + PopulateNormalHistogram(histogram, rand.Int()) //nolint:gosec + + want50, want90, want95, want99 := normalQuantiles() + have50, have90, have95, have99 := quantiles() + + var errs []string + if want, have := want50, have50; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p50: want %f, have %f", want, have)) + } + if want, have := want90, have90; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p90: want %f, have %f", want, have)) + } + if have95 > 0 { // prometheus doesn't compute p95 + if want, have := want95, have95; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p95: want %f, have %f", want, have)) + } + } + if want, have := want99, have99; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p99: want %f, have %f", want, have)) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "; ")) + } + + return nil +} + +var ( + // Count is the number of observations. + Count = 12345 + + // Mean is the center of the normal distribution of observations. + Mean = 500 + + // Stdev of the normal distribution of observations. + Stdev = 25 +) + +// ExpectedObservationsLessThan returns the number of observations that should +// have a value less than or equal to the given value, given a normal +// distribution of observations described by Count, Mean, and Stdev. 
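+//
+// For instance, with the defaults above (Count=12345, Mean=500, Stdev=25),
+// ExpectedObservationsLessThan(500) sits at the median and returns roughly
+// Count/2, i.e. 6172.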
+func ExpectedObservationsLessThan(bucket int64) int64 { + // https://code.google.com/p/gostat/source/browse/stat/normal.go + cdf := ((1.0 / 2.0) * (1 + math.Erf((float64(bucket)-float64(Mean))/(float64(Stdev)*math.Sqrt2)))) + return int64(cdf * float64(Count)) +} + +func cmp(want, have, tol float64) bool { + return (math.Abs(want-have) / want) <= tol +} diff --git a/libs/protoio/io.go b/libs/protoio/io.go index b23545f10c5..4630ea45cb4 100644 --- a/libs/protoio/io.go +++ b/libs/protoio/io.go @@ -28,6 +28,8 @@ // // Modified to return number of bytes written by Writer.WriteMsg(), and added byteReader. +// Package protoio may be internalized (made private) in future releases. +// XXX Deprecated. package protoio import ( @@ -37,7 +39,7 @@ import ( ) type Writer interface { - WriteMsg(proto.Message) (int, error) + WriteMsg(msg proto.Message) (int, error) } type WriteCloser interface { @@ -58,7 +60,7 @@ type marshaler interface { MarshalTo(data []byte) (n int, err error) } -func getSize(v interface{}) (int, bool) { +func getSize(v any) (int, bool) { if sz, ok := v.(interface { Size() (n int) }); ok { @@ -96,3 +98,7 @@ func (r *byteReader) ReadByte() (byte, error) { } return r.buf[0], nil } + +func (r *byteReader) resetBytesRead() { + r.bytesRead = 0 +} diff --git a/libs/protoio/io_test.go b/libs/protoio/io_test.go index b95c187df0f..e5ba2a103ef 100644 --- a/libs/protoio/io_test.go +++ b/libs/protoio/io_test.go @@ -131,7 +131,7 @@ func TestVarintNoClose(t *testing.T) { require.NoError(t, err) } -// issue 32 +// issue 32. func TestVarintMaxSize(t *testing.T) { buf := newBuffer() writer := protoio.NewDelimitedWriter(buf) diff --git a/libs/protoio/reader.go b/libs/protoio/reader.go index 95b8d345585..054a114df8b 100644 --- a/libs/protoio/reader.go +++ b/libs/protoio/reader.go @@ -49,24 +49,25 @@ func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { if c, ok := r.(io.Closer); ok { closer = c } - return &varintReader{r, nil, maxSize, closer} + return &varintReader{r, newByteReader(r), nil, maxSize, closer} } type varintReader struct { - r io.Reader - buf []byte - maxSize int - closer io.Closer -} - -func (r *varintReader) ReadMsg(msg proto.Message) (int, error) { + r io.Reader // ReadUvarint needs an io.ByteReader, and we also need to keep track of the // number of bytes read, so we use our own byteReader. This can't be // buffered, so the caller should pass a buffered io.Reader to avoid poor // performance. - byteReader := newByteReader(r.r) - l, err := binary.ReadUvarint(byteReader) - n := byteReader.bytesRead + byteReader *byteReader + buf []byte + maxSize int + closer io.Closer +} + +func (r *varintReader) ReadMsg(msg proto.Message) (int, error) { + r.byteReader.resetBytesRead() + l, err := binary.ReadUvarint(r.byteReader) + n := r.byteReader.bytesRead if err != nil { return n, err } diff --git a/libs/protoio/writer.go b/libs/protoio/writer.go index 0eb65850cfd..d1f6f03d1bc 100644 --- a/libs/protoio/writer.go +++ b/libs/protoio/writer.go @@ -42,7 +42,7 @@ import ( // equivalent to the gogoproto NewDelimitedWriter, except WriteMsg() also returns the // number of bytes written, which is necessary in the p2p package. 
func NewDelimitedWriter(w io.Writer) WriteCloser { - return &varintWriter{w, make([]byte, binary.MaxVarintLen64), nil} + return &varintWriter{w, nil, nil} } type varintWriter struct { @@ -69,6 +69,9 @@ func (w *varintWriter) WriteMsg(msg proto.Message) (int, error) { } // fallback + if w.lenBuf == nil { + w.lenBuf = make([]byte, binary.MaxVarintLen64) + } data, err := proto.Marshal(msg) if err != nil { return 0, err diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index 33e9109861e..5c2f391af54 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/libs/pubsub" "github.com/cometbft/cometbft/libs/pubsub/query" ) diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index c2a1ec060aa..444a20be5d4 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -31,6 +31,9 @@ // return subscription.Err() // } // } +// +// Package pubsub may be internalized (made private) in future releases. +// XXX Deprecated. package pubsub import ( @@ -81,7 +84,7 @@ type cmd struct { clientID string // publish - msg interface{} + msg any events map[string][]string } @@ -150,7 +153,8 @@ func (s *Server) Subscribe( ctx context.Context, clientID string, query Query, - outCapacity ...int) (*Subscription, error) { + outCapacity ...int, +) (*Subscription, error) { outCap := 1 if len(outCapacity) > 0 { if outCapacity[0] <= 0 { @@ -266,14 +270,14 @@ func (s *Server) NumClientSubscriptions(clientID string) int { // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. -func (s *Server) Publish(ctx context.Context, msg interface{}) error { +func (s *Server) Publish(ctx context.Context, msg any) error { return s.PublishWithEvents(ctx, msg, make(map[string][]string)) } // PublishWithEvents publishes the given message with the set of events. The set // is matched with clients queries. If there is a match, the message is sent to // the client. -func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events map[string][]string) error { +func (s *Server) PublishWithEvents(ctx context.Context, msg any, events map[string][]string) error { select { case s.cmds <- cmd{op: pub, msg: msg, events: events}: return nil @@ -289,7 +293,7 @@ func (s *Server) OnStop() { s.cmds <- cmd{op: shutdown} } -// NOTE: not goroutine safe +// NOTE: not goroutine safe. type state struct { // query string -> client -> subscription subscriptions map[string]map[string]*Subscription @@ -313,8 +317,8 @@ func (s *Server) OnStart() error { return nil } -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { +// OnReset implements Service.OnReset. 
+func (*Server) OnReset() error { return nil } @@ -403,7 +407,7 @@ func (state *state) removeAll(reason error) { } } -func (state *state) send(msg interface{}, events map[string][]string) error { +func (state *state) send(msg any, events map[string][]string) error { for qStr, clientSubscriptions := range state.subscriptions { q := state.queries[qStr].q diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index f14e9eff4a6..a2cdb319e70 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/libs/pubsub" "github.com/cometbft/cometbft/libs/pubsub/query" ) @@ -47,13 +46,13 @@ func TestSubscribe(t *testing.T) { defer close(published) err := s.Publish(ctx, "Quicksilver") - assert.NoError(t, err) + require.NoError(t, err) err = s.Publish(ctx, "Asylum") - assert.NoError(t, err) + require.NoError(t, err) err = s.Publish(ctx, "Ivan") - assert.NoError(t, err) + require.NoError(t, err) }() select { @@ -112,10 +111,10 @@ func TestSubscribeUnbuffered(t *testing.T) { defer close(published) err := s.Publish(ctx, "Ultron") - assert.NoError(t, err) + require.NoError(t, err) err = s.Publish(ctx, "Darkhawk") - assert.NoError(t, err) + require.NoError(t, err) }() select { @@ -206,7 +205,7 @@ func TestSubscribeDuplicateKeys(t *testing.T) { testCases := []struct { query string - expected interface{} + expected any }{ { "withdraw.rewards='17'", @@ -392,20 +391,21 @@ func TestBufferCapacity(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) defer cancel() err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, context.DeadlineExceeded, err) } } -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } +func Benchmark10Clients(b *testing.B) { benchmarkNClients(b, 10) } +func Benchmark100Clients(b *testing.B) { benchmarkNClients(b, 100) } +func Benchmark1000Clients(b *testing.B) { benchmarkNClients(b, 1000) } -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } +func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(b, 10) } +func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(b, 100) } +func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(b, 1000) } -func benchmarkNClients(n int, b *testing.B) { +func benchmarkNClients(b *testing.B, n int) { + b.Helper() s := pubsub.NewServer() err := s.Start() require.NoError(b, err) @@ -420,7 +420,7 @@ func benchmarkNClients(n int, b *testing.B) { for i := 0; i < n; i++ { subscription, err := s.Subscribe( ctx, - clientID, + fmt.Sprintf("%s-%d", clientID, i+1), query.MustCompile(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ) if err != nil { @@ -450,7 +450,8 @@ func benchmarkNClients(n int, b *testing.B) { } } -func benchmarkNClientsOneQuery(n int, b *testing.B) { +func benchmarkNClientsOneQuery(b *testing.B, n int) { + b.Helper() s := pubsub.NewServer() err := s.Start() require.NoError(b, err) @@ -463,7 +464,7 @@ func 
benchmarkNClientsOneQuery(n int, b *testing.B) { ctx := context.Background() q := query.MustCompile("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") for i := 0; i < n; i++ { - subscription, err := s.Subscribe(ctx, clientID, q) + subscription, err := s.Subscribe(ctx, fmt.Sprintf("%s-%d", clientID, i+1), q) if err != nil { b.Fatal(err) } @@ -482,18 +483,21 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - err = s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, - "abci.Invoices.Number": {"1"}}) + err = s.PublishWithEvents(ctx, "Gamora", map[string][]string{ + "abci.Account.Owner": {"Ivan"}, + "abci.Invoices.Number": {"1"}, + }) require.NoError(b, err) } } // HELPERS -func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { +func assertReceive(t *testing.T, expected any, ch <-chan pubsub.Message) { + t.Helper() select { case actual := <-ch: - assert.Equal(t, expected, actual.Data(), msgAndArgs...) + assert.Equal(t, expected, actual.Data()) case <-time.After(1 * time.Second): t.Errorf("expected to receive %v from the channel, got nothing after 1s", expected) debug.PrintStack() @@ -501,6 +505,7 @@ func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, } func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) { + t.Helper() _, ok := <-subscription.Canceled() assert.False(t, ok) assert.Equal(t, err, subscription.Err()) diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 8382d40ce3f..9e3549ff25c 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -197,7 +197,7 @@ func compileCondition(cond syntax.Condition) (condition, error) { // Precompile the argument value matcher. argType := cond.Arg.Type - var argValue interface{} + var argValue any switch argType { case syntax.TString: @@ -234,7 +234,6 @@ func parseNumber(s string) (*big.Float, error) { } f, _, err := big.ParseFloat(extractNum.FindString(s), 10, uint(intVal.BitLen()), big.ToNearestEven) return f, err - } // A map of operator ⇒ argtype ⇒ match-constructor. @@ -243,31 +242,31 @@ func parseNumber(s string) (*big.Float, error) { // Disable the dupl lint for this map. The result isn't even correct. 
// //nolint:dupl -var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string) bool{ +var opTypeMap = map[syntax.Token]map[syntax.Token]func(any) func(string) bool{ syntax.TContains: { - syntax.TString: func(v interface{}) func(string) bool { + syntax.TString: func(v any) func(string) bool { return func(s string) bool { return strings.Contains(s, v.(string)) } }, }, syntax.TEq: { - syntax.TString: func(v interface{}) func(string) bool { + syntax.TString: func(v any) func(string) bool { return func(s string) bool { return s == v.(string) } }, - syntax.TNumber: func(v interface{}) func(string) bool { + syntax.TNumber: func(v any) func(string) bool { return func(s string) bool { w, err := parseNumber(s) return err == nil && w.Cmp(v.(*big.Float)) == 0 } }, - syntax.TDate: func(v interface{}) func(string) bool { + syntax.TDate: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseDate(s) return err == nil && ts.Equal(v.(time.Time)) } }, - syntax.TTime: func(v interface{}) func(string) bool { + syntax.TTime: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseTime(s) return err == nil && ts.Equal(v.(time.Time)) @@ -275,19 +274,19 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string) }, }, syntax.TLt: { - syntax.TNumber: func(v interface{}) func(string) bool { + syntax.TNumber: func(v any) func(string) bool { return func(s string) bool { w, err := parseNumber(s) return err == nil && w.Cmp(v.(*big.Float)) < 0 } }, - syntax.TDate: func(v interface{}) func(string) bool { + syntax.TDate: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseDate(s) return err == nil && ts.Before(v.(time.Time)) } }, - syntax.TTime: func(v interface{}) func(string) bool { + syntax.TTime: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseTime(s) return err == nil && ts.Before(v.(time.Time)) @@ -295,19 +294,19 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string) }, }, syntax.TLeq: { - syntax.TNumber: func(v interface{}) func(string) bool { + syntax.TNumber: func(v any) func(string) bool { return func(s string) bool { w, err := parseNumber(s) return err == nil && w.Cmp(v.(*big.Float)) <= 0 } }, - syntax.TDate: func(v interface{}) func(string) bool { + syntax.TDate: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseDate(s) return err == nil && !ts.After(v.(time.Time)) } }, - syntax.TTime: func(v interface{}) func(string) bool { + syntax.TTime: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseTime(s) return err == nil && !ts.After(v.(time.Time)) @@ -315,19 +314,19 @@ var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string) }, }, syntax.TGt: { - syntax.TNumber: func(v interface{}) func(string) bool { + syntax.TNumber: func(v any) func(string) bool { return func(s string) bool { w, err := parseNumber(s) return err == nil && w.Cmp(v.(*big.Float)) > 0 } }, - syntax.TDate: func(v interface{}) func(string) bool { + syntax.TDate: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseDate(s) return err == nil && ts.After(v.(time.Time)) } }, - syntax.TTime: func(v interface{}) func(string) bool { + syntax.TTime: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseTime(s) return err == nil && ts.After(v.(time.Time)) @@ -335,19 +334,19 @@ var opTypeMap = 
map[syntax.Token]map[syntax.Token]func(interface{}) func(string) }, }, syntax.TGeq: { - syntax.TNumber: func(v interface{}) func(string) bool { + syntax.TNumber: func(v any) func(string) bool { return func(s string) bool { w, err := parseNumber(s) return err == nil && w.Cmp(v.(*big.Float)) >= 0 } }, - syntax.TDate: func(v interface{}) func(string) bool { + syntax.TDate: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseDate(s) return err == nil && !ts.Before(v.(time.Time)) } }, - syntax.TTime: func(v interface{}) func(string) bool { + syntax.TTime: func(v any) func(string) bool { return func(s string) bool { ts, err := syntax.ParseTime(s) return err == nil && !ts.Before(v.(time.Time)) diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index 2c8fcc557ed..81e8aacb793 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -174,7 +174,6 @@ var apiTypeEvents = []types.Event{ } func TestBigNumbers(t *testing.T) { - apiBigNumTest := map[string][]string{ "big.value": { "99999999999999999999", @@ -195,40 +194,61 @@ func TestBigNumbers(t *testing.T) { events map[string][]string matches bool }{ - // Test cases for values that exceed the capacity if int64/float64. - {`big.value >= 99999999999999999999`, + { + `big.value >= 99999999999999999999`, apiBigNumTest, - true}, - {`big.value > 99999999999999999998`, + true, + }, + { + `big.value > 99999999999999999998`, apiBigNumTest, - true}, - {`big2.value <= 18446744073709551615`, - apiBigNumTest, true}, - {`big.floatvalue >= 99999999999999999999`, + true, + }, + { + `big2.value <= 18446744073709551615`, + apiBigNumTest, true, + }, + { + `big.floatvalue >= 99999999999999999999`, apiBigNumTest, - true}, - {`big.floatvalue > 99999999999999999998.10`, + true, + }, + { + `big.floatvalue > 99999999999999999998.10`, apiBigNumTest, - true}, - {`big.floatvalue > 99999999999999999998`, + true, + }, + { + `big.floatvalue > 99999999999999999998`, apiBigNumTest, - true}, - {`big2.floatvalue <= 18446744073709551615.6`, + true, + }, + { + `big2.floatvalue <= 18446744073709551615.6`, apiBigNumTest, - true}, - {`big2.floatvalue <= 18446744073709551615.6`, + true, + }, + { + `big2.floatvalue <= 18446744073709551615.6`, apiBigNumTest, - true}, - {`big2.floatvalue >= 18446744073709551615`, + true, + }, + { + `big2.floatvalue >= 18446744073709551615`, apiBigNumTest, - true}, - {`big2.floatvalue >= 12.5`, + true, + }, + { + `big2.floatvalue >= 12.5`, apiBigNumTest, - true}, - {`big.value >= 10`, + true, + }, + { + `big.value >= 10`, apiBigNumTest, - true}, + true, + }, } for i, tc := range testCases { @@ -263,121 +283,201 @@ func TestCompiledMatches(t *testing.T) { events map[string][]string matches bool }{ - {`tm.events.type='NewBlock'`, + { + `tm.events.type='NewBlock'`, newTestEvents(`tm|events.type=NewBlock`), - true}, - {`tx.gas > 7`, + true, + }, + { + `tx.gas > 7`, newTestEvents(`tx|gas=8`), - true}, - {`transfer.amount > 7`, + true, + }, + { + `transfer.amount > 7`, newTestEvents(`transfer|amount=8stake`), - true}, - {`transfer.amount > 7`, + true, + }, + { + `transfer.amount > 7`, newTestEvents(`transfer|amount=8.045`), - true}, - {`transfer.amount > 7.043`, + true, + }, + { + `transfer.amount > 7.043`, newTestEvents(`transfer|amount=8.045stake`), - true}, - {`transfer.amount > 8.045`, + true, + }, + { + `transfer.amount > 8.045`, newTestEvents(`transfer|amount=8.045stake`), - false}, - {`tx.gas > 7 AND tx.gas < 9`, + false, + }, + { + `tx.gas > 7 AND tx.gas < 9`, 
newTestEvents(`tx|gas=8`), - true}, - {`body.weight >= 3.5`, + true, + }, + { + `body.weight >= 3.5`, newTestEvents(`body|weight=3.5`), - true}, - {`account.balance < 1000.0`, + true, + }, + { + `account.balance < 1000.0`, newTestEvents(`account|balance=900`), - true}, - {`apples.kg <= 4`, + true, + }, + { + `apples.kg <= 4`, newTestEvents(`apples|kg=4.0`), - true}, - {`body.weight >= 4.5`, + true, + }, + { + `body.weight >= 4.5`, newTestEvents(`body|weight=4.5`), - true}, - {`oranges.kg < 4 AND watermellons.kg > 10`, + true, + }, + { + `oranges.kg < 4 AND watermellons.kg > 10`, newTestEvents(`oranges|kg=3`, `watermellons|kg=12`), - true}, - {`peaches.kg < 4`, + true, + }, + { + `peaches.kg < 4`, newTestEvents(`peaches|kg=5`), - false}, - {`tx.date > DATE 2017-01-01`, + false, + }, + { + `tx.date > DATE 2017-01-01`, newTestEvents(`tx|date=` + time.Now().Format(syntax.DateFormat)), - true}, - {`tx.date = DATE 2017-01-01`, + true, + }, + { + `tx.date = DATE 2017-01-01`, newTestEvents(`tx|date=` + txDate), - true}, - {`tx.date = DATE 2018-01-01`, + true, + }, + { + `tx.date = DATE 2018-01-01`, newTestEvents(`tx|date=` + txDate), - false}, - {`tx.time >= TIME 2013-05-03T14:45:00Z`, + false, + }, + { + `tx.time >= TIME 2013-05-03T14:45:00Z`, newTestEvents(`tx|time=` + time.Now().Format(syntax.TimeFormat)), - true}, - {`tx.time = TIME 2013-05-03T14:45:00Z`, + true, + }, + { + `tx.time = TIME 2013-05-03T14:45:00Z`, newTestEvents(`tx|time=` + txTime), - false}, - {`abci.owner.name CONTAINS 'Igor'`, + false, + }, + { + `abci.owner.name CONTAINS 'Igor'`, newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), - true}, - {`abci.owner.name CONTAINS 'Igor'`, + true, + }, + { + `abci.owner.name CONTAINS 'Igor'`, newTestEvents(`abci|owner.name=Pavel|owner.name=Ivan`), - false}, - {`abci.owner.name = 'Igor'`, + false, + }, + { + `abci.owner.name = 'Igor'`, newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), - true}, - {`abci.owner.name = 'Ivan'`, + true, + }, + { + `abci.owner.name = 'Ivan'`, newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), - true}, - {`abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'`, + true, + }, + { + `abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'`, newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), - true}, - {`abci.owner.name = 'Ivan' AND abci.owner.name = 'John'`, + true, + }, + { + `abci.owner.name = 'Ivan' AND abci.owner.name = 'John'`, newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), - false}, - {`tm.events.type='NewBlock'`, + false, + }, + { + `tm.events.type='NewBlock'`, newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), - true}, - {`app.name = 'fuzzed'`, + true, + }, + { + `app.name = 'fuzzed'`, newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), - true}, - {`tm.events.type='NewBlock' AND app.name = 'fuzzed'`, + true, + }, + { + `tm.events.type='NewBlock' AND app.name = 'fuzzed'`, newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), - true}, - {`tm.events.type='NewHeader' AND app.name = 'fuzzed'`, + true, + }, + { + `tm.events.type='NewHeader' AND app.name = 'fuzzed'`, newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), - false}, - {`slash EXISTS`, + false, + }, + { + `slash EXISTS`, newTestEvents(`slash|reason=missing_signature|power=6000`), - true}, - {`slash EXISTS`, + true, + }, + { + `slash EXISTS`, newTestEvents(`transfer|recipient=cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz|sender=cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5`), - false}, - {`slash.reason EXISTS AND slash.power > 1000`, + false, + }, 
+ { + `slash.reason EXISTS AND slash.power > 1000`, newTestEvents(`slash|reason=missing_signature|power=6000`), - true}, - {`slash.reason EXISTS AND slash.power > 1000`, + true, + }, + { + `slash.reason EXISTS AND slash.power > 1000`, newTestEvents(`slash|reason=missing_signature|power=500`), - false}, - {`slash.reason EXISTS`, + false, + }, + { + `slash.reason EXISTS`, newTestEvents(`transfer|recipient=cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz|sender=cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5`), - false}, + false, + }, // Test cases based on the OpenAPI examples. - {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'`, - apiEvents, true}, - {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'SrcY'`, - apiEvents, true}, - {`tm.event = 'Tx' AND transfer.sender = 'AddrA'`, - apiEvents, false}, - {`tm.event = 'Tx' AND transfer.sender = 'AddrC'`, - apiEvents, true}, - {`tm.event = 'Tx' AND transfer.sender = 'AddrZ'`, - apiEvents, false}, - {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'`, - apiEvents, false}, - {`tm.event = 'Tx' AND rewards.withdraw.source = 'W'`, - apiEvents, false}, + { + `tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'`, + apiEvents, true, + }, + { + `tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'SrcY'`, + apiEvents, true, + }, + { + `tm.event = 'Tx' AND transfer.sender = 'AddrA'`, + apiEvents, false, + }, + { + `tm.event = 'Tx' AND transfer.sender = 'AddrC'`, + apiEvents, true, + }, + { + `tm.event = 'Tx' AND transfer.sender = 'AddrZ'`, + apiEvents, false, + }, + { + `tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'`, + apiEvents, false, + }, + { + `tm.event = 'Tx' AND rewards.withdraw.source = 'W'`, + apiEvents, false, + }, } // NOTE: The original implementation allowed arbitrary prefix matches on diff --git a/libs/pubsub/query/syntax/doc.go b/libs/pubsub/query/syntax/doc.go index b9fb1afede2..e60423abfcc 100644 --- a/libs/pubsub/query/syntax/doc.go +++ b/libs/pubsub/query/syntax/doc.go @@ -17,7 +17,7 @@ // The lexical terms are defined here using RE2 regular expression notation: // // // The name of an event attribute (type.value) -// tag = #'\w+(\.\w+)*' +// tag = #`^[\w]+[\.-\w]?$` // // // A datestamp (YYYY-MM-DD) // date = #'DATE \d{4}-\d{2}-\d{2}' diff --git a/libs/pubsub/query/syntax/parser.go b/libs/pubsub/query/syntax/parser.go index 26c8554908a..e64f94b7fc4 100644 --- a/libs/pubsub/query/syntax/parser.go +++ b/libs/pubsub/query/syntax/parser.go @@ -95,7 +95,6 @@ func (a *Arg) Number() *big.Float { return nil } return f - } // Time returns the value of the argument text as a time, or the zero value if diff --git a/libs/pubsub/query/syntax/scanner.go b/libs/pubsub/query/syntax/scanner.go index 332e3f7b145..b0b9a433691 100644 --- a/libs/pubsub/query/syntax/scanner.go +++ b/libs/pubsub/query/syntax/scanner.go @@ -99,7 +99,7 @@ func (s *Scanner) Next() error { } if '0' <= ch && ch <= '9' { return s.scanNumber(ch) - } else if isTagRune(ch) { + } else if isFirstTagRune(ch) { return s.scanTagLike(ch) } switch ch { @@ -126,7 +126,7 @@ func (s *Scanner) Pos() int { return s.pos } func (s *Scanner) Err() error { return s.err } // scanNumber scans for numbers with optional fractional parts. -// Examples: 0, 1, 3.14 +// Examples: 0, 1, 3.14. 
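The new `doc.go` regex and the `isFirstTagRune` check in `Next()` encode a single rule: a tag may contain `-` and `.` after its first rune, but may not begin with either. A self-contained sketch of the rule (the predicates are reproduced from the `scanner.go` hunk just below):

```go
package main

import (
	"fmt"
	"unicode"
)

// Mirrors the scanner's predicates: '-' and '.' are legal inside a tag
// but not as its first rune.
func isTagRune(r rune) bool {
	return r == '.' || r == '_' || r == '-' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

func isFirstTagRune(r rune) bool {
	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

func isValidTag(s string) bool {
	for i, r := range s {
		if i == 0 && !isFirstTagRune(r) {
			return false
		}
		if i > 0 && !isTagRune(r) {
			return false
		}
	}
	return len(s) > 0
}

func main() {
	fmt.Println(isValidTag("cosm-wasm.transfer_amount")) // true, per the new TestParseValid case
	fmt.Println(isValidTag("-leading-dash"))             // false: '-' cannot start a tag
}
```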
func (s *Scanner) scanNumber(first rune) error { s.buf.WriteRune(first) if err := s.scanWhile(isDigit); err != nil { @@ -266,13 +266,14 @@ func (s *Scanner) scanDatestamp() error { func (s *Scanner) scanWhile(ok func(rune) bool) error { for { ch, err := s.rune() - if err == io.EOF { + switch { + case err == io.EOF: return nil - } else if err != nil { - return s.fail(err) - } else if !ok(ch) { + case !ok(ch): s.unrune() return nil + case err != nil: + return s.fail(err) } s.buf.WriteRune(ch) } @@ -302,7 +303,11 @@ func (s *Scanner) invalid(ch rune) error { func isDigit(r rune) bool { return '0' <= r && r <= '9' } func isTagRune(r rune) bool { - return r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) + return r == '.' || r == '_' || r == '-' || unicode.IsLetter(r) || unicode.IsDigit(r) +} + +func isFirstTagRune(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) } func isTimeRune(r rune) bool { diff --git a/libs/pubsub/query/syntax/syntax_test.go b/libs/pubsub/query/syntax/syntax_test.go index 29a85aa9ec6..a097500ff7a 100644 --- a/libs/pubsub/query/syntax/syntax_test.go +++ b/libs/pubsub/query/syntax/syntax_test.go @@ -25,6 +25,8 @@ func TestScanner(t *testing.T) { // Tags {`foo foo.bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, + {`foo foo-foo.bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, + {`foo foo._bar_bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, // Strings (values) {` '' x 'x' 'x y'`, []syntax.Token{syntax.TString, syntax.TTag, syntax.TString, syntax.TString}}, @@ -167,6 +169,8 @@ func TestParseValid(t *testing.T) { {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, + + {"cosm-wasm.transfer_amount=100", true}, } for _, test := range tests { diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 8de570fefdd..3f80411c1b9 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -19,7 +19,7 @@ var ( // consists of three things: // 1) channel onto which messages and events are published // 2) channel which is closed if a client is too slow or choose to unsubscribe -// 3) err indicating the reason for (2) +// 3) err indicating the reason for (2). type Subscription struct { out chan Message @@ -72,16 +72,16 @@ func (s *Subscription) cancel(err error) { // Message glues data and events together. type Message struct { - data interface{} + data any events map[string][]string } -func NewMessage(data interface{}, events map[string][]string) Message { +func NewMessage(data any, events map[string][]string) Message { return Message{data, events} } // Data returns an original data published. -func (msg Message) Data() interface{} { +func (msg Message) Data() any { return msg.data } diff --git a/libs/service/service.go b/libs/service/service.go index 7a46e5e20b4..4cad55d2caf 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -1,3 +1,5 @@ +// Package service may be internalized (made private) in future releases. +// XXX Deprecated. package service import ( @@ -49,7 +51,7 @@ type Service interface { String() string // SetLogger sets a logger. - SetLogger(log.Logger) + SetLogger(l log.Logger) } /* @@ -159,8 +161,8 @@ func (bs *BaseService) Start() error { // OnStart implements Service by doing nothing. // NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStart() -func (bs *BaseService) OnStart() error { return nil } +// that way users don't need to call BaseService.OnStart(). 
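One caution on the rewritten `scanWhile`: the new `switch` evaluates `!ok(ch)` before `err != nil`, so a non-`io.EOF` read error (where `ch` is the zero rune and typically fails `ok`) is silently turned into a clean return instead of reaching `s.fail`. A sketch of the same loop with the error cases checked first (an observation on the hunk above, not part of the patch):

```go
// Sketch: inspect err fully before ch, since ch is meaningless when
// rune() fails with anything other than io.EOF.
for {
	ch, err := s.rune()
	switch {
	case err == io.EOF:
		return nil
	case err != nil:
		return s.fail(err)
	case !ok(ch):
		s.unrune()
		return nil
	}
	s.buf.WriteRune(ch)
}
```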
+func (*BaseService) OnStart() error { return nil } // Stop implements Service by calling OnStop (if defined) and closing quit // channel. An error will be returned if the service is already stopped. @@ -192,8 +194,8 @@ func (bs *BaseService) Stop() error { // OnStop implements Service by doing nothing. // NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStop() -func (bs *BaseService) OnStop() {} +// that way users don't need to call BaseService.OnStop(). +func (*BaseService) OnStop() {} // Reset implements Service by calling OnReset callback (if defined). An error // will be returned if the service is running. @@ -215,7 +217,7 @@ func (bs *BaseService) Reset() error { } // OnReset implements Service by panicking. -func (bs *BaseService) OnReset() error { +func (*BaseService) OnReset() error { panic("The service cannot be reset") } diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 7abc6f4fba8..f32525cfaa0 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -44,7 +44,7 @@ func TestBaseServiceReset(t *testing.T) { require.NoError(t, err) err = ts.Reset() - require.Error(t, err, "expected cant reset service error") + require.Error(t, err, "expected can't reset service error") err = ts.Stop() require.NoError(t, err) diff --git a/libs/sync/sync.go b/libs/sync/sync.go index c6e7101c606..c1256badd8c 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -1,6 +1,8 @@ //go:build !deadlock // +build !deadlock +// Package sync may be internalized (made private) in future releases. +// XXX Deprecated. package sync import "sync" diff --git a/libs/test/mutate.go b/libs/test/mutate.go index 3a0d58301be..4e2e6a1a397 100644 --- a/libs/test/mutate.go +++ b/libs/test/mutate.go @@ -1,10 +1,10 @@ package test import ( - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) -// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +// Contract: !bytes.Equal(input, output) && len(input) >= len(output). func MutateByteSlice(bytez []byte) []byte { // If bytez is empty, panic if len(bytez) == 0 { diff --git a/libs/time/mocks/source.go b/libs/time/mocks/source.go new file mode 100644 index 00000000000..a8e49b314ed --- /dev/null +++ b/libs/time/mocks/source.go @@ -0,0 +1,28 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// Source is an autogenerated mock type for the Source type +type Source struct { + mock.Mock +} + +// Now provides a mock function with given fields: +func (_m *Source) Now() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} diff --git a/light/client.go b/light/client.go index d155c993f1e..f96088814cd 100644 --- a/light/client.go +++ b/light/client.go @@ -118,7 +118,7 @@ func MaxClockDrift(d time.Duration) Option { // As an example, say the light client received block B at a time // 12:05 (this is the real time) and the time on the block // was 12:00. Then the lag here is 5 minutes. -// Default: 10s +// Default: 10s. func MaxBlockLag(d time.Duration) Option { return func(c *Client) { c.maxBlockLag = d @@ -129,7 +129,7 @@ func MaxBlockLag(d time.Duration) Option { // light blocks from a primary provider, verifies them either sequentially or by // skipping some and stores them in a trusted store (usually, a local FS). 
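The no-op `OnStart`/`OnStop` defaults above exist so embedders override only the hooks they need; `Start` and `Stop` on `BaseService` handle the state machine and the quit channel. A minimal sketch of the embedding pattern (type and constructor names are hypothetical; the package is marked deprecated above, so this documents the existing contract rather than encouraging new use):

```go
// counter is a hypothetical Service built on BaseService.
type counter struct {
	service.BaseService
	n int
}

func newCounter(logger log.Logger) *counter {
	c := &counter{}
	c.BaseService = *service.NewBaseService(logger, "Counter", c)
	return c
}

// OnStart is invoked by BaseService.Start; per the comments above, there is
// no base behavior to call through to.
func (c *counter) OnStart() error { c.n = 0; return nil }

// OnStop is invoked by BaseService.Stop; the quit channel is closed by the base.
func (c *counter) OnStop() {}
```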
// -// Default verification: SkippingVerification(DefaultTrustLevel) +// Default verification: SkippingVerification(DefaultTrustLevel). type Client struct { chainID string trustingPeriod time.Duration // see TrustOptions.Period @@ -178,14 +178,14 @@ func NewClient( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - options ...Option) (*Client, error) { - + options ...Option, +) (*Client, error) { if err := trustOptions.ValidateBasic(); err != nil { - return nil, fmt.Errorf("invalid TrustOptions: %w", err) + return nil, ErrInvalidTrustOptions{Err: err} } c, err := NewClientFromTrustedStore(chainID, trustOptions.Period, primary, witnesses, trustedStore, options...) - if err != nil { + if err != nil && !errors.Is(err, ErrEmptyTrustedStore) { return nil, err } @@ -203,20 +203,20 @@ func NewClient( } } - return c, err + return c, nil } // NewClientFromTrustedStore initializes existing client from the trusted store. // -// See NewClient +// See NewClient. func NewClientFromTrustedStore( chainID string, trustingPeriod time.Duration, primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - options ...Option) (*Client, error) { - + options ...Option, +) (*Client, error) { c := &Client{ chainID: chainID, trustingPeriod: trustingPeriod, @@ -229,7 +229,7 @@ func NewClientFromTrustedStore( witnesses: witnesses, trustedStore: trustedStore, pruningSize: defaultPruningSize, - confirmationFn: func(action string) bool { return true }, + confirmationFn: func(_ string) bool { return true }, quit: make(chan struct{}), logger: log.NewNopLogger(), } @@ -246,8 +246,7 @@ func NewClientFromTrustedStore( // Verify witnesses are all on the same chain. for i, w := range witnesses { if w.ChainID() != chainID { - return nil, fmt.Errorf("witness #%d: %v is on another chain %s, expected %s", - i, w, w.ChainID(), chainID) + return nil, ErrUnexpectedChainID{Index: i, Witness: w, Actual: w.ChainID(), Expected: chainID} } } @@ -256,29 +255,25 @@ func NewClientFromTrustedStore( return nil, err } - if err := c.restoreTrustedLightBlock(); err != nil { - return nil, err - } - - return c, nil + return c, c.restoreTrustedLightBlock() } -// restoreTrustedLightBlock loads the latest trusted light block from the store +// restoreTrustedLightBlock loads the latest trusted light block from the store. 
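With the hunks above, an empty trusted store is no longer fatal: `NewClientFromTrustedStore` returns the constructed client together with the sentinel `ErrEmptyTrustedStore`, which `NewClient` tolerates before initializing from `TrustOptions`. A caller-side sketch of the same distinction, as exercised by the new `TestClient_NewClientFromEmptyTrustedStore` further below:

```go
c, err := light.NewClientFromTrustedStore(chainID, trustPeriod, primary, []provider.Provider{witness}, db)
switch {
case errors.Is(err, light.ErrEmptyTrustedStore):
	// No trusted block persisted yet: bootstrap via light.NewClient with
	// TrustOptions instead of failing outright.
case err != nil:
	return err // a real storage or configuration failure
}
_ = c
```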
func (c *Client) restoreTrustedLightBlock() error { lastHeight, err := c.trustedStore.LastLightBlockHeight() if err != nil { - return fmt.Errorf("can't get last trusted light block height: %w", err) + return ErrGetTrustedBlockHeight{Err: err} } - - if lastHeight > 0 { - trustedBlock, err := c.trustedStore.LightBlock(lastHeight) - if err != nil { - return fmt.Errorf("can't get last trusted light block: %w", err) - } - c.latestTrustedBlock = trustedBlock - c.logger.Info("Restored trusted light block", "height", lastHeight) + if lastHeight == -1 { + return ErrEmptyTrustedStore } + trustedBlock, err := c.trustedStore.LightBlock(lastHeight) + if err != nil { + return ErrGetTrustedBlock{Err: err} + } + c.latestTrustedBlock = trustedBlock + c.logger.Info("Restored trusted light block", "height", lastHeight) return nil } @@ -325,7 +320,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(ctx context.Context, options Tru // remove all the headers (options.Height, trustedHeader.Height] err := c.cleanupAfter(options.Height) if err != nil { - return fmt.Errorf("cleanupAfter(%d): %w", options.Height, err) + return ErrCleanupAfter{Height: options.Height, Err: err} } c.logger.Info("Rolled back to older header (newer headers were removed)", @@ -344,13 +339,13 @@ func (c *Client) checkTrustedHeaderUsingOptions(ctx context.Context, options Tru action := fmt.Sprintf( "Prev. trusted header's hash %X doesn't match hash %X from primary provider. Remove all the stored light blocks?", c.latestTrustedBlock.Hash(), primaryHash) - if c.confirmationFn(action) { - err := c.Cleanup() - if err != nil { - return fmt.Errorf("failed to cleanup: %w", err) - } - } else { - return errors.New("refused to remove the stored light blocks despite hashes mismatch") + if !c.confirmationFn(action) { + return ErrRemoveStoredBlocksRefused + } + + err := c.Cleanup() + if err != nil { + return ErrCleanup{Err: err} } } @@ -374,17 +369,17 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp } if !bytes.Equal(l.Hash(), options.Hash) { - return fmt.Errorf("expected header's hash %X, but got %X", options.Hash, l.Hash()) + return ErrHeaderHashMismatch{Expected: options.Hash, Actual: l.Hash()} } // 2) Ensure that +2/3 of validators signed correctly. err = l.ValidatorSet.VerifyCommitLight(c.chainID, l.Commit.BlockID, l.Height, l.Commit) if err != nil { - return fmt.Errorf("invalid commit: %w", err) + return ErrInvalidCommit{Err: err} } // 3) Cross-verify with witnesses to ensure everybody has the same state. 
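The `fmt.Errorf` wrappers above become structured error types (`ErrGetTrustedBlockHeight`, `ErrGetTrustedBlock`, `ErrCleanupAfter`, ...). Assuming they follow the usual convention for such structs (an `Error()` method on the value plus `Unwrap()` returning the embedded `Err`), callers can branch on the failure kind rather than matching message strings; a sketch:

```go
if err := c.restoreTrustedLightBlock(); err != nil {
	var heightErr ErrGetTrustedBlockHeight
	switch {
	case errors.Is(err, ErrEmptyTrustedStore):
		// the store simply has no blocks yet
	case errors.As(err, &heightErr):
		// storage-level failure; heightErr.Err carries the root cause
	}
}
```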
- if err := c.compareFirstHeaderWithWitnesses(ctx, l.SignedHeader); err != nil { + if err := c.compareFirstLightBlockWithWitnesses(ctx, l); err != nil { return err } @@ -412,19 +407,19 @@ func (c *Client) TrustedLightBlock(height int64) (*types.LightBlock, error) { func (c *Client) compareWithLatestHeight(height int64) (int64, error) { latestHeight, err := c.LastTrustedHeight() if err != nil { - return 0, fmt.Errorf("can't get last trusted height: %w", err) + return 0, ErrGetLastTrustedHeight{Err: err} } if latestHeight == -1 { - return 0, errors.New("no headers exist") + return 0, ErrNoHeadersExist } switch { case height > latestHeight: - return 0, fmt.Errorf("unverified header/valset requested (latest: %d)", latestHeight) + return 0, ErrUnverifiedHeight{Height: latestHeight} case height == 0: return latestHeight, nil case height < 0: - return 0, errors.New("negative height") + return 0, ErrNegativeHeight } return height, nil @@ -436,7 +431,7 @@ func (c *Client) compareWithLatestHeight(height int64) (int64, error) { func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) { lastTrustedHeight, err := c.LastTrustedHeight() if err != nil { - return nil, fmt.Errorf("can't get last trusted height: %w", err) + return nil, ErrGetLastTrustedHeight{Err: err} } if lastTrustedHeight == -1 { @@ -470,10 +465,10 @@ func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock, // It returns provider.ErrlightBlockNotFound if light block is not found by // primary. // -// It will replace the primary provider if an error from a request to the provider occurs +// It will replace the primary provider if an error from a request to the provider occurs. func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { if height <= 0 { - return nil, errors.New("negative or zero height") + return nil, ErrNegativeOrZeroHeight } // Check if the light block already verified. @@ -524,10 +519,10 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // restart. func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now time.Time) error { if newHeader == nil { - return errors.New("nil header") + return ErrNilHeader } if newHeader.Height <= 0 { - return errors.New("negative or zero height") + return ErrNegativeOrZeroHeight } // Check if newHeader already verified. @@ -535,7 +530,7 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now if err == nil { // Make sure it's the same header. if !bytes.Equal(l.Hash(), newHeader.Hash()) { - return fmt.Errorf("existing trusted header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) + return ErrExistingHeaderHashMismatch{Existing: l.Hash(), New: newHeader.Hash()} } c.logger.Info("Header has already been verified", "height", newHeader.Height, "hash", newHeader.Hash()) @@ -545,11 +540,11 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now // Request the header and the vals. 
l, err = c.lightBlockFromPrimary(ctx, newHeader.Height) if err != nil { - return fmt.Errorf("failed to retrieve light block from primary to verify against: %w", err) + return ErrGetBlock{Err: err} } if !bytes.Equal(l.Hash(), newHeader.Hash()) { - return fmt.Errorf("light block header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) + return ErrLightHeaderHashMismatch{Existing: l.Hash(), New: newHeader.Hash()} } return c.verifyLightBlock(ctx, l, now) @@ -574,7 +569,7 @@ func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.Ligh firstBlockHeight, err := c.FirstTrustedHeight() if err != nil { - return fmt.Errorf("can't get first light block height: %w", err) + return ErrGetFirstBlockHeight{Err: err} } switch { @@ -587,7 +582,7 @@ func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.Ligh var firstBlock *types.LightBlock firstBlock, err = c.trustedStore.LightBlock(firstBlockHeight) if err != nil { - return fmt.Errorf("can't get first light block: %w", err) + return ErrGetFirstBlock{Err: err} } err = c.backwards(ctx, firstBlock.Header, newLightBlock.Header) @@ -596,7 +591,7 @@ func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.Ligh var closestBlock *types.LightBlock closestBlock, err = c.trustedStore.LightBlockBefore(newLightBlock.Height) if err != nil { - return fmt.Errorf("can't get signed header before height %d: %w", newLightBlock.Height, err) + return ErrGetSignedHeaderBeforeHeight{Height: newLightBlock.Height, Err: err} } err = verifyFunc(ctx, closestBlock, newLightBlock, now) } @@ -609,13 +604,13 @@ func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.Ligh return c.updateTrustedLightBlock(newLightBlock) } -// see VerifyHeader +// see VerifyHeader. func (c *Client) verifySequential( ctx context.Context, trustedBlock *types.LightBlock, newLightBlock *types.LightBlock, - now time.Time) error { - + now time.Time, +) error { var ( verifiedBlock = trustedBlock interimBlock *types.LightBlock @@ -708,8 +703,8 @@ func (c *Client) verifySkipping( source provider.Provider, trustedBlock *types.LightBlock, newLightBlock *types.LightBlock, - now time.Time) ([]*types.LightBlock, error) { - + now time.Time, +) ([]*types.LightBlock, error) { var ( blockCache = []*types.LightBlock{newLightBlock} depth = 0 @@ -773,13 +768,13 @@ func (c *Client) verifySkipping( } // verifySkippingAgainstPrimary does verifySkipping plus it compares new header with -// witnesses and replaces primary if it sends the light client an invalid header +// witnesses and replaces primary if it sends the light client an invalid header. 
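Several of the new errors in `VerifyLightBlockAtHeight` and `VerifyHeader` are plain sentinels (`ErrNilHeader`, `ErrNegativeOrZeroHeight`, `ErrNoHeadersExist`, `ErrNegativeHeight`), so `errors.Is` applies directly. A short, hypothetical caller sketch:

```go
err := c.VerifyHeader(ctx, newHeader, time.Now())
switch {
case err == nil:
	// header verified and stored
case errors.Is(err, light.ErrNilHeader), errors.Is(err, light.ErrNegativeOrZeroHeight):
	// caller bug: nothing to retry
default:
	// network or verification failure; possibly retryable
}
```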
func (c *Client) verifySkippingAgainstPrimary( ctx context.Context, trustedBlock *types.LightBlock, newLightBlock *types.LightBlock, - now time.Time) error { - + now time.Time, +) error { trace, err := c.verifySkipping(ctx, c.primary, trustedBlock, newLightBlock, now) switch errors.Unwrap(err).(type) { @@ -883,10 +878,10 @@ func (c *Client) cleanupAfter(height int64) error { for { h, err := c.trustedStore.LightBlockBefore(prevHeight) - if err == store.ErrLightBlockNotFound || (h != nil && h.Height <= height) { + if errors.Is(err, store.ErrLightBlockNotFound) || (h != nil && h.Height <= height) { break } else if err != nil { - return fmt.Errorf("failed to get header before %d: %w", prevHeight, err) + return ErrGetHeaderBeforeHeight{Height: prevHeight, Err: err} } err = c.trustedStore.DeleteLightBlock(h.Height) @@ -899,24 +894,19 @@ func (c *Client) cleanupAfter(height int64) error { } c.latestTrustedBlock = nil - err := c.restoreTrustedLightBlock() - if err != nil { - return err - } - - return nil + return c.restoreTrustedLightBlock() } func (c *Client) updateTrustedLightBlock(l *types.LightBlock) error { c.logger.Debug("updating trusted light block", "light_block", l) if err := c.trustedStore.SaveLightBlock(l); err != nil { - return fmt.Errorf("failed to save trusted header: %w", err) + return ErrSaveTrustedHeader{Err: err} } if c.pruningSize > 0 { if err := c.trustedStore.Prune(c.pruningSize); err != nil { - return fmt.Errorf("prune: %w", err) + return ErrPrune{Err: err} } } @@ -933,8 +923,8 @@ func (c *Client) updateTrustedLightBlock(l *types.LightBlock) error { func (c *Client) backwards( ctx context.Context, trustedHeader *types.Header, - newHeader *types.Header) error { - + newHeader *types.Header, +) error { var ( verifiedHeader = trustedHeader interimHeader *types.Header @@ -943,7 +933,7 @@ func (c *Client) backwards( for verifiedHeader.Height > newHeader.Height { interimBlock, err := c.lightBlockFromPrimary(ctx, verifiedHeader.Height-1) if err != nil { - return fmt.Errorf("failed to obtain the header at height #%d: %w", verifiedHeader.Height-1, err) + return ErrGetHeaderAtHeight{Height: verifiedHeader.Height - 1, Err: err} } interimHeader = interimBlock.Header c.logger.Debug("Verify newHeader against verifiedHeader", @@ -1015,7 +1005,7 @@ func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*type } } -// NOTE: requires a providerMutex lock +// NOTE: requires a providerMutex lock. func (c *Client) removeWitnesses(indexes []int) error { // check that we will still have witnesses remaining if len(c.witnesses) <= len(indexes) { @@ -1126,9 +1116,9 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) return nil, lastError } -// compareFirstHeaderWithWitnesses compares h with all witnesses. If any +// compareFirstLightBlockWithWitnesses compares light block l with all witnesses. If any // witness reports a different header than h, the function returns an error. 
-func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.SignedHeader) error { +func (c *Client) compareFirstLightBlockWithWitnesses(ctx context.Context, l *types.LightBlock) error { compareCtx, cancel := context.WithCancel(ctx) defer cancel() @@ -1141,7 +1131,7 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S errc := make(chan error, len(c.witnesses)) for i, witness := range c.witnesses { - go c.compareNewHeaderWithWitness(compareCtx, errc, h, witness, i) + go c.compareNewLightBlockWithWitness(compareCtx, errc, l, witness, i) } witnessesToRemove := make([]int, 0, len(c.witnesses)) @@ -1153,31 +1143,36 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S switch e := err.(type) { case nil: continue - case errConflictingHeaders: - c.logger.Error(fmt.Sprintf(`Witness #%d has a different header. Please check primary is correct -and remove witness. Otherwise, use the different primary`, e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex]) + case ErrConflictingHeaders: + c.logger.Error("Witness reports a conflicting header. "+ + "Please check if the primary is correct or use a different witness.", + "witness", c.witnesses[e.WitnessIndex], "err", err) return err case errBadWitness: // If witness sent us an invalid header, then remove it - c.logger.Info("witness sent an invalid light block, removing...", + c.logger.Info("Witness sent an invalid light block, removing...", "witness", c.witnesses[e.WitnessIndex], "err", err) witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) + case ErrProposerPrioritiesDiverge: + c.logger.Error("Witness reports conflicting proposer priorities. "+ + "Please check if the primary is correct or use a different witness.", + "witness", c.witnesses[e.WitnessIndex], "err", err) + return err default: // benign errors can be ignored with the exception of context errors if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return err } // the witness either didn't respond or didn't have the block. We ignore it. - c.logger.Info("error comparing first header with witness. You may want to consider removing the witness", + c.logger.Info("Error comparing first header with witness. You may want to consider removing the witness", "err", err) } - } // remove witnesses that have misbehaved if err := c.removeWitnesses(witnessesToRemove); err != nil { - c.logger.Error("failed to remove witnesses", "err", err, "witnessesToRemove", witnessesToRemove) + c.logger.Error("Failed to remove witnesses", "err", err, "witnessesToRemove", witnessesToRemove) } return nil diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index e9d11c952f3..fb206110d29 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -6,7 +6,6 @@ import ( "time" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/light" "github.com/cometbft/cometbft/light/provider" @@ -22,7 +21,7 @@ import ( // // Remember that none of these benchmarks account for network latency. 
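`compareFirstLightBlockWithWitnesses` above fans out one goroutine per witness and drains a buffered error channel, so no sender can block and all classification happens on the receiving side. The shape of that pattern, reduced to its essentials (names are illustrative):

```go
errc := make(chan error, len(witnesses)) // buffered: a sender never blocks
for i, w := range witnesses {
	go compare(ctx, errc, block, w, i) // each goroutine sends exactly one result
}
for range witnesses { // receive exactly one result per witness
	err := <-errc
	switch err.(type) {
	case nil:
		// this witness matched
	case ErrConflictingHeaders:
		// possible attack: stop and investigate the divergence
	default:
		// benign (witness unavailable, missing block): log and move on
	}
}
```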
var ( - benchmarkFullNode = mockp.New(genMockNode(chainID, 1000, 100, 1, bTime)) + benchmarkFullNode = mockp.New(genMockNode(1000, 100, 1, bTime)) genesisBlock, _ = benchmarkFullNode.LightBlock(context.Background(), 1) ) diff --git a/light/client_test.go b/light/client_test.go index 6e975212d62..cfa2d8c7f54 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "errors" "sync" "testing" "time" @@ -11,7 +10,6 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/light" @@ -32,11 +30,13 @@ var ( bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) - // 3/3 signed - h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + // 3/3 signed. + vals2 = vals.CopyIncrementProposerPriority(1) + h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals2, vals2, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) - // 3/3 signed - h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, + // 3/3 signed. + vals3 = vals2.CopyIncrementProposerPriority(1) + h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals3, vals3, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) trustPeriod = 4 * time.Hour trustOptions = light.TrustOptions{ @@ -46,9 +46,9 @@ var ( } valSet = map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, - 3: vals, - 4: vals, + 2: vals2, + 3: vals3, + 4: vals.CopyIncrementProposerPriority(1), } headerSet = map[int64]*types.SignedHeader{ 1: h1, @@ -58,27 +58,27 @@ var ( 3: h3, } l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} - l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} + l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals2} fullNode = mockp.New( chainID, headerSet, valSet, ) deadNode = mockp.NewDeadMock(chainID) - largeFullNode = mockp.New(genMockNode(chainID, 10, 3, 0, bTime)) + largeFullNode = mockp.New(genMockNode(10, 3, 0, bTime)) ) func TestValidateTrustOptions(t *testing.T) { testCases := []struct { - err bool - to light.TrustOptions + expErr error + to light.TrustOptions }{ { - false, + nil, trustOptions, }, { - true, + light.ErrNegativeOrZeroPeriod, light.TrustOptions{ Period: -1 * time.Hour, Height: 1, @@ -86,7 +86,7 @@ func TestValidateTrustOptions(t *testing.T) { }, }, { - true, + light.ErrNegativeOrZeroHeight, light.TrustOptions{ Period: 1 * time.Hour, Height: 0, @@ -94,7 +94,7 @@ func TestValidateTrustOptions(t *testing.T) { }, }, { - true, + light.ErrInvalidHashSize{32, 14}, light.TrustOptions{ Period: 1 * time.Hour, Height: 1, @@ -105,10 +105,11 @@ func TestValidateTrustOptions(t *testing.T) { for _, tc := range testCases { err := tc.to.ValidateBasic() - if tc.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) + switch { + case tc.expErr != nil && assert.Error(t, err): //nolint:testifylint // require.Error doesn't work with the logic here + assert.Equal(t, tc.expErr, err) + default: + require.NoError(t, err) } } } @@ -216,7 +217,6 @@ func TestClient_SequentialVerification(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, 
func(t *testing.T) { c, err := light.NewClient( ctx, @@ -246,9 +246,9 @@ func TestClient_SequentialVerification(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) if tc.verifyErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } @@ -341,7 +341,6 @@ func TestClient_SkippingVerification(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { c, err := light.NewClient( ctx, @@ -370,18 +369,18 @@ func TestClient_SkippingVerification(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) if tc.verifyErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } } // start from a large light block to make sure that the pivot height doesn't select a height outside -// the appropriate range +// the appropriate range. func TestClientLargeBisectionVerification(t *testing.T) { - veryLargeFullNode := mockp.New(genMockNode(chainID, 100, 3, 0, bTime)) + veryLargeFullNode := mockp.New(genMockNode(100, 3, 0, bTime)) trustedLightBlock, err := veryLargeFullNode.LightBlock(ctx, 5) require.NoError(t, err) c, err := light.NewClient( @@ -399,7 +398,7 @@ func TestClientLargeBisectionVerification(t *testing.T) { ) require.NoError(t, err) h, err := c.Update(ctx, bTime.Add(100*time.Minute)) - assert.NoError(t, err) + require.NoError(t, err) h2, err := veryLargeFullNode.LightBlock(ctx, 100) require.NoError(t, err) assert.Equal(t, h, h2) @@ -430,7 +429,7 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { // verify using bisection the light block between the two trusted light blocks _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) - assert.NoError(t, err) + require.NoError(t, err) } func TestClient_Cleanup(t *testing.T) { @@ -452,11 +451,11 @@ func TestClient_Cleanup(t *testing.T) { // Check no light blocks exist after Cleanup. l, err := c.TrustedLightBlock(1) - assert.Error(t, err) - assert.Nil(t, l) + require.Error(t, err) + require.Nil(t, l) } -// trustedHeader.Height == options.Height +// trustedHeader.Height == options.Height. func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { @@ -476,7 +475,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { require.NoError(t, err) l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, l) assert.Equal(t, l.Hash(), h1.Hash()) assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) @@ -517,15 +516,15 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { require.NoError(t, err) l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) if assert.NotNil(t, l) { assert.Equal(t, l.Hash(), header1.Hash()) - assert.NoError(t, l.ValidateBasic(chainID)) + require.NoError(t, l.ValidateBasic(chainID)) } } } -// trustedHeader.Height < options.Height +// trustedHeader.Height < options.Height. func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { @@ -550,10 +549,10 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { // Check we still have the 1st header (+header+). 
l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, l) assert.Equal(t, l.Hash(), h1.Hash()) - assert.NoError(t, l.ValidateBasic(chainID)) + require.NoError(t, l.ValidateBasic(chainID)) } // 2. options.Hash != trustedHeader.Hash @@ -596,12 +595,12 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { // Check we no longer have the invalid 1st header (+header+). l, err := c.TrustedLightBlock(1) - assert.Error(t, err) - assert.Nil(t, l) + require.Error(t, err) + require.Nil(t, l) } } -// trustedHeader.Height > options.Height +// trustedHeader.Height > options.Height. func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { @@ -626,19 +625,19 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { // Check we still have the 1st light block. l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, l) assert.Equal(t, l.Hash(), h1.Hash()) - assert.NoError(t, l.ValidateBasic(chainID)) + require.NoError(t, l.ValidateBasic(chainID)) // Check we no longer have 2nd light block. l, err = c.TrustedLightBlock(2) - assert.Error(t, err) - assert.Nil(t, l) + require.Error(t, err) + require.Nil(t, l) l, err = c.TrustedLightBlock(3) - assert.Error(t, err) - assert.Nil(t, l) + require.Error(t, err) + require.Nil(t, l) } // 2. options.Hash != trustedHeader.Hash @@ -685,15 +684,15 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { // Check we have swapped invalid 1st light block (+lightblock+) with correct one (+lightblock2+). l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, l) assert.Equal(t, l.Hash(), header1.Hash()) - assert.NoError(t, l.ValidateBasic(chainID)) + require.NoError(t, l.ValidateBasic(chainID)) // Check we no longer have invalid 2nd light block (+lightblock2+). 
l, err = c.TrustedLightBlock(2) - assert.Error(t, err) - assert.Nil(t, l) + require.Error(t, err) + require.Nil(t, l) } } @@ -711,10 +710,10 @@ func TestClient_Update(t *testing.T) { // should result in downloading & verifying header #3 l, err := c.Update(ctx, bTime.Add(2*time.Hour)) - assert.NoError(t, err) + require.NoError(t, err) if assert.NotNil(t, l) { assert.EqualValues(t, 3, l.Height) - assert.NoError(t, l.ValidateBasic(chainID)) + require.NoError(t, l.ValidateBasic(chainID)) } } @@ -745,13 +744,13 @@ func TestClient_Concurrency(t *testing.T) { assert.Equal(t, chainID, c.ChainID()) _, err := c.LastTrustedHeight() - assert.NoError(t, err) + require.NoError(t, err) _, err = c.FirstTrustedHeight() - assert.NoError(t, err) + require.NoError(t, err) l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, l) }() } @@ -776,7 +775,7 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { require.NoError(t, err) assert.NotEqual(t, c.Primary(), deadNode) - assert.Equal(t, 2, len(c.Witnesses())) + assert.Len(t, c.Witnesses(), 2) } func TestClient_BackwardsVerification(t *testing.T) { @@ -806,12 +805,12 @@ func TestClient_BackwardsVerification(t *testing.T) { // 2) untrusted header is expired but trusted header is not => expect no error h, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(8*time.Minute)) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, h) // 3) already stored headers should return the header without error h, err = c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, h) // 4a) First verify latest header @@ -820,16 +819,15 @@ func TestClient_BackwardsVerification(t *testing.T) { // 4b) Verify backwards using bisection => expect no error _, err = c.VerifyLightBlockAtHeight(ctx, 7, bTime.Add(9*time.Minute)) - assert.NoError(t, err) + require.NoError(t, err) // shouldn't have verified this header in the process _, err = c.TrustedLightBlock(8) - assert.Error(t, err) + require.Error(t, err) // 5) Try bisection method, but closest header (at 7) has expired // so expect error _, err = c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute)) - assert.Error(t, err) - + require.Error(t, err) } { testCases := []struct { @@ -880,7 +878,7 @@ func TestClient_BackwardsVerification(t *testing.T) { require.NoError(t, err, idx) _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) - assert.Error(t, err, idx) + require.Error(t, err, idx) } } } @@ -903,23 +901,44 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { // 2) Check light block exists (deadNode is being used to ensure we're not getting // it from primary) h, err := c.TrustedLightBlock(1) - assert.NoError(t, err) + require.NoError(t, err) assert.EqualValues(t, l1.Height, h.Height) } +func TestClient_NewClientFromEmptyTrustedStore(t *testing.T) { + // empty DB + db := dbs.New(dbm.NewMemDB(), chainID) + + c, err := light.NewClientFromTrustedStore( + chainID, + trustPeriod, + fullNode, + []provider.Provider{fullNode}, + db, + ) + + if err == nil { + assert.NotPanics(t, func() { + _, _ = c.VerifyLightBlockAtHeight(ctx, 2, bTime) + }) + } else { + require.ErrorIs(t, err, light.ErrEmptyTrustedStore) + } +} + func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { // different headers hash then primary plus less than 1/3 signed (no fork) badProvider1 := mockp.New( chainID, map[int64]*types.SignedHeader{ 1: h1, - 2: 
keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals2, vals2, hash("app_hash2"), hash("cons_hash"), hash("results_hash"), len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), }, map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, + 2: vals2, }, ) // header is empty @@ -931,7 +950,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { }, map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, + 2: vals2, }, ) @@ -954,14 +973,14 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { // witness behaves incorrectly -> removed from list, no error l, err := c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) - assert.NoError(t, err) + require.NoError(t, err) assert.EqualValues(t, 1, len(c.Witnesses())) // light block should still be verified assert.EqualValues(t, 2, l.Height) // remaining witnesses don't have light block -> error _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, light.ErrFailedHeaderCrossReferencing, err) } // witness does not have a light block -> left in the list @@ -998,11 +1017,11 @@ func TestClient_TrustedValidatorSet(t *testing.T) { light.Logger(log.TestingLogger()), ) require.NoError(t, err) - assert.Equal(t, 2, len(c.Witnesses())) + assert.Len(t, c.Witnesses(), 2) _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second)) - assert.NoError(t, err) - assert.Equal(t, 1, len(c.Witnesses())) + require.NoError(t, err) + assert.Len(t, c.Witnesses(), 1) } func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { @@ -1025,7 +1044,7 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { require.Equal(t, int64(3), h.Height) _, err = c.TrustedLightBlock(1) - assert.Error(t, err) + require.Error(t, err) } func TestClientEnsureValidHeadersAndValSets(t *testing.T) { @@ -1092,15 +1111,15 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) if tc.err { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } } } func TestClientHandlesContexts(t *testing.T) { - p := mockp.New(genMockNode(chainID, 100, 10, 1, bTime)) + p := mockp.New(genMockNode(100, 10, 1, bTime)) genBlock, err := p.LightBlock(ctx, 1) require.NoError(t, err) @@ -1121,7 +1140,7 @@ func TestClientHandlesContexts(t *testing.T) { ) require.Error(t, ctxTimeOut.Err()) require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) + require.ErrorIs(t, err, context.DeadlineExceeded) // instantiate the client for real c, err := light.NewClient( @@ -1144,7 +1163,7 @@ func TestClientHandlesContexts(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute)) require.Error(t, ctxTimeOutBlock.Err()) require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) + require.ErrorIs(t, err, context.DeadlineExceeded) // verify a block with a cancel ctxCancel, cancel := context.WithCancel(ctx) @@ -1153,5 +1172,58 @@ func TestClientHandlesContexts(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute)) require.Error(t, ctxCancel.Err()) require.Error(t, err) - require.True(t, errors.Is(err, context.Canceled)) + require.ErrorIs(t, 
err, context.Canceled) +} + +// TestClientErrorsDifferentProposerPriorities tests the case where the witness +// sends us a light block with a validator set with different proposer priorities. +func TestClientErrorsDifferentProposerPriorities(t *testing.T) { + primary := mockp.New( + chainID, + map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals2, + }, + ) + witness := mockp.New( + chainID, + map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + }, + ) + + // Proposer priorities in vals and vals2 are different. + // This is because vals2 = vals.CopyIncrementProposerPriority(1) + require.Equal(t, vals.Hash(), vals2.Hash()) + require.NotEqual(t, vals.ProposerPriorityHash(), vals2.ProposerPriorityHash()) + + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + fullNode, + []provider.Provider{primary, witness}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + ) + // witness should have behaved properly -> no error + require.NoError(t, err) + assert.EqualValues(t, 2, len(c.Witnesses())) + + // witness behaves incorrectly, but we can't prove who's guilty -> error + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) + require.Error(t, err) + + // witness left in the list + assert.EqualValues(t, 2, len(c.Witnesses())) } diff --git a/light/detector.go b/light/detector.go index 228dec61a6c..130ebf2f7b8 100644 --- a/light/detector.go +++ b/light/detector.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "time" "github.com/cometbft/cometbft/light/provider" @@ -27,11 +26,12 @@ import ( // trusted and saves it to the trusted store. func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error { if primaryTrace == nil || len(primaryTrace) < 2 { - return errors.New("nil or single block primary trace") + return ErrNilOrSinglePrimaryTrace } var ( headerMatched bool - lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader + lastVerifiedBlock = primaryTrace[len(primaryTrace)-1] + lastVerifiedHeader = lastVerifiedBlock.SignedHeader witnessesToRemove = make([]int, 0) ) c.logger.Debug("Running detector against trace", "finalizeBlockHeight", lastVerifiedHeader.Height, @@ -48,7 +48,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig // and compare it with the header from the primary errc := make(chan error, len(c.witnesses)) for i, witness := range c.witnesses { - go c.compareNewHeaderWithWitness(ctx, errc, lastVerifiedHeader, witness, i) + go c.compareNewLightBlockWithWitness(ctx, errc, lastVerifiedBlock, witness, i) } // handle errors from the header comparisons as they come in @@ -58,7 +58,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig switch e := err.(type) { case nil: // at least one header matched headerMatched = true - case errConflictingHeaders: + case ErrConflictingHeaders: // We have conflicting headers. This could possibly imply an attack on the light client. // First we need to verify the witness's header using the same skipping verification and then we // need to find the point that the headers diverge and examine this for any evidence of an attack. 
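The new test leans on a property worth spelling out: `CopyIncrementProposerPriority` changes only the proposer-rotation state, which the canonical validator-set hash does not commit to. Only `ProposerPriorityHash` observes the difference. A sketch:

```go
vals2 := vals.CopyIncrementProposerPriority(1)

// Same canonical hash: public keys and voting powers are unchanged...
fmt.Println(bytes.Equal(vals.Hash(), vals2.Hash())) // true

// ...but the rotated priorities show up in ProposerPriorityHash.
fmt.Println(bytes.Equal(vals.ProposerPriorityHash(), vals2.ProposerPriorityHash())) // false
```

So two providers can serve headers with identical hashes while still disagreeing on who proposes next; hence the dedicated `ErrProposerPrioritiesDiverge` path in both the client and the detector rather than relying on the header comparison alone.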
@@ -79,6 +79,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig c.logger.Info("witness returned an error during header comparison, removing...", "witness", c.witnesses[e.WitnessIndex], "err", err) witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) + case ErrProposerPrioritiesDiverge: + c.logger.Info("witness reported validator set with different proposer priorities", + "witness", c.witnesses[e.WitnessIndex], "err", err) + return e default: // Benign errors which can be ignored unless there was a context // canceled @@ -104,23 +108,24 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig return ErrFailedHeaderCrossReferencing } -// compareNewHeaderWithWitness takes the verified header from the primary and compares it with a +// compareNewLightBlockWithWitness takes the verified header from the primary and compares it with a // header from a specified witness. The function can return one of three errors: // -// 1: errConflictingHeaders -> there may have been an attack on this light client +// 1: ErrConflictingHeaders -> there may have been an attack on this light client // 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one // // Note: In the case of an invalid header we remove the witness // // 3: nil -> the hashes of the two headers match -func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, - witness provider.Provider, witnessIndex int) { +func (c *Client) compareNewLightBlockWithWitness(ctx context.Context, errc chan error, l *types.LightBlock, + witness provider.Provider, witnessIndex int, +) { + h := l.SignedHeader lightBlock, err := witness.LightBlock(ctx, h.Height) switch err { // no error means we move on to checking the hash of the two headers case nil: - break // the witness hasn't been helpful in comparing headers, we mark the response and continue // comparing with the rest of the witnesses @@ -150,7 +155,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro // witness' last header is below the primary's header. We check the times to see if the blocks // have conflicting times if !lightBlock.Time.Before(h.Time) { - errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} return } @@ -175,7 +180,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro // the witness still doesn't have a block at the height of the primary. // Check if there is a conflicting time if !lightBlock.Time.Before(h.Time) { - errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} return } @@ -197,7 +202,13 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro } if !bytes.Equal(h.Hash(), lightBlock.Hash()) { - errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + } + + // ProposerPriorityHash is not part of the header hash, so we need to check it separately. 
+ wanted, got := l.ValidatorSet.ProposerPriorityHash(), lightBlock.ValidatorSet.ProposerPriorityHash() + if !bytes.Equal(wanted, got) { + errc <- ErrProposerPrioritiesDiverge{WitnessHash: got, WitnessIndex: witnessIndex, PrimaryHash: wanted} } c.logger.Debug("Matching header received by witness", "height", h.Height, "witness", witnessIndex) @@ -213,7 +224,7 @@ func (c *Client) sendEvidence(ctx context.Context, ev *types.LightClientAttackEv } // handleConflictingHeaders handles the primary style of attack, which is where a primary and witness have -// two headers of the same height but with different hashes +// two headers of the same height but with different hashes. func (c *Client) handleConflictingHeaders( ctx context.Context, primaryTrace []*types.LightBlock, @@ -238,14 +249,14 @@ func (c *Client) handleConflictingHeaders( // and generate evidence against the primary that we can send to the witness commonBlock, trustedBlock := witnessTrace[0], witnessTrace[len(witnessTrace)-1] evidenceAgainstPrimary := newLightClientAttackEvidence(primaryBlock, trustedBlock, commonBlock) - c.logger.Error("ATTEMPTED ATTACK DETECTED. Sending evidence againt primary by witness", "ev", evidenceAgainstPrimary, + c.logger.Error("ATTEMPTED ATTACK DETECTED. Sending evidence against primary by witness", "ev", evidenceAgainstPrimary, "primary", c.primary, "witness", supportingWitness) c.sendEvidence(ctx, evidenceAgainstPrimary, supportingWitness) if primaryBlock.Commit.Round != witnessTrace[len(witnessTrace)-1].Commit.Round { c.logger.Info("The light client has detected, and prevented, an attempted amnesia attack." + " We think this attack is pretty unlikely, so if you see it, that's interesting to us." + - " Can you let us know by opening an issue through https://github.com/tendermint/tendermint/issues/new?") + " Can you let us know by opening an issue through https://github.com/cometbft/cometbft/issues/new?") } // This may not be valid because the witness itself is at fault. So now we reverse it, examining the @@ -274,7 +285,7 @@ func (c *Client) handleConflictingHeaders( } // examineConflictingHeaderAgainstTrace takes a trace from one provider and a divergent header that -// it has received from another and preforms verifySkipping at the heights of each of the intermediate +// it has received from another and performs verifySkipping at the heights of each of the intermediate // headers in the trace until it reaches the divergentHeader. 1 of 2 things can happen. // // 1. The light client verifies a header that is different to the intermediate header in the trace. 
This @@ -293,7 +304,6 @@ func (c *Client) examineConflictingHeaderAgainstTrace( targetBlock *types.LightBlock, source provider.Provider, now time.Time, ) ([]*types.LightBlock, *types.LightBlock, error) { - var ( previouslyVerifiedBlock, sourceBlock *types.LightBlock sourceTrace []*types.LightBlock @@ -301,8 +311,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( ) if targetBlock.Height < trace[0].Height { - return nil, nil, fmt.Errorf("target block has a height lower than the trusted height (%d < %d)", - targetBlock.Height, trace[0].Height) + return nil, nil, ErrTargetBlockHeightLessThanTrusted{Target: targetBlock.Height, Trusted: trace[0].Height} } for idx, traceBlock := range trace { @@ -314,8 +323,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( // the end of the trace has a lesser time than the target block then all blocks in the trace should have a // lesser time if traceBlock.Time.After(targetBlock.Time) { - return nil, nil, - errors.New("sanity check failed: expected traceblock to have a lesser time than the target block") + return nil, nil, ErrInvalidBlockTime } // before sending back the divergent block and trace we need to ensure we have verified @@ -323,7 +331,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( if previouslyVerifiedBlock.Height != targetBlock.Height { sourceTrace, err = c.verifySkipping(ctx, source, previouslyVerifiedBlock, targetBlock, now) if err != nil { - return nil, nil, fmt.Errorf("verifySkipping of conflicting header failed: %w", err) + return nil, nil, ErrVerifySkipping{Err: err} } } return sourceTrace, traceBlock, nil @@ -335,7 +343,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( } else { sourceBlock, err = source.LightBlock(ctx, traceBlock.Height) if err != nil { - return nil, nil, fmt.Errorf("failed to examine trace: %w", err) + return nil, nil, ErrExamineTrace{Err: err} } } @@ -343,8 +351,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( // else we cannot continue with verification. if idx == 0 { if shash, thash := sourceBlock.Hash(), traceBlock.Hash(); !bytes.Equal(shash, thash) { - return nil, nil, fmt.Errorf("trusted block is different to the source's first block (%X = %X)", - thash, shash) + return nil, nil, ErrBlockHashMismatch{TraceBlockHash: thash, SourceBlockHash: shash} } previouslyVerifiedBlock = sourceBlock continue @@ -354,7 +361,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( // intermediate height sourceTrace, err = c.verifySkipping(ctx, source, previouslyVerifiedBlock, sourceBlock, now) if err != nil { - return nil, nil, fmt.Errorf("verifySkipping of conflicting header failed: %w", err) + return nil, nil, ErrVerifySkipping{Err: err} } // check if the headers verified by the source has diverged from the trace if shash, thash := sourceBlock.Hash(), traceBlock.Hash(); !bytes.Equal(shash, thash) { @@ -370,13 +377,12 @@ func (c *Client) examineConflictingHeaderAgainstTrace( // prerequisites to this function were not met. Namely that either trace[len(trace)-1].Height < targetBlock.Height // or that trace[i].Hash() != targetBlock.Hash() return nil, nil, errNoDivergence - } // getTargetBlockOrLatest gets the latest height, if it is greater than the target height then it queries // the target height else it returns the latest. returns true if it successfully managed to acquire the target // height. 
-func (c *Client) getTargetBlockOrLatest( +func (*Client) getTargetBlockOrLatest( ctx context.Context, height int64, witness provider.Provider, diff --git a/light/detector_test.go b/light/detector_test.go index d8afacefd89..bc31d181ddc 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/light" "github.com/cometbft/cometbft/light/provider" @@ -27,7 +26,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) ) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime) + witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(latestHeight, valSize, 2, bTime) witness := mockp.New(chainID, witnessHeaders, witnessValidators) forgedKeys := chainKeys[divergenceHeight-1].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain) forgedVals := forgedKeys.ToValidators(2, 0) @@ -62,7 +61,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { // Check verification returns an error. _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, light.ErrLightClientAttack, err) } @@ -107,7 +106,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) ) // validators don't change in this network (however we still use a map just for convenience) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime) + witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(latestHeight+2, valSize, 2, bTime) witness := mockp.New(chainID, witnessHeaders, witnessValidators) for height := int64(1); height <= latestHeight; height++ { @@ -145,7 +144,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { // Check verification returns an error. _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, light.ErrLightClientAttack, err) } @@ -184,7 +183,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { primaryValidators = make(map[int64]*types.ValidatorSet, forgedHeight) ) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime) + witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(latestHeight, valSize, 2, bTime) // primary has the exact same headers except it forges one extra header in the future using keys from 2/5ths of // the validators @@ -259,7 +258,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { // Now assert that verification returns an error. We craft the light clients time to be a little ahead of the chain // to allow a window for the attack to manifest itself. 
_, err = c.Update(ctx, bTime.Add(time.Duration(forgedHeight)*time.Minute)) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, light.ErrLightClientAttack, err) } @@ -276,7 +275,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { // We attempt the same call but now the supporting witness has a block which should // immediately conflict in time with the primary _, err = c.VerifyLightBlockAtHeight(ctx, forgedHeight, bTime.Add(time.Duration(forgedHeight)*time.Minute)) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, light.ErrLightClientAttack, err) } assert.True(t, witness.HasEvidence(evAgainstPrimary)) @@ -301,17 +300,17 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { require.NoError(t, err) _, err = c.Update(ctx, bTime.Add(time.Duration(forgedHeight)*time.Minute)) - assert.NoError(t, err) + require.NoError(t, err) } // 1. Different nodes therefore a divergent header is produced. // => light client returns an error upon creation because primary and witness // have a different view. func TestClientDivergentTraces1(t *testing.T) { - primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + primary := mockp.New(genMockNode(10, 5, 2, bTime)) firstBlock, err := primary.LightBlock(ctx, 1) require.NoError(t, err) - witness := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + witness := mockp.New(genMockNode(10, 5, 2, bTime)) _, err = light.NewClient( ctx, @@ -332,9 +331,9 @@ func TestClientDivergentTraces1(t *testing.T) { } // 2. Two out of three nodes don't respond but the third has a header that matches -// => verification should be successful and all the witnesses should remain +// => verification should be successful and all the witnesses should remain. func TestClientDivergentTraces2(t *testing.T) { - primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + primary := mockp.New(genMockNode(10, 5, 2, bTime)) firstBlock, err := primary.LightBlock(ctx, 1) require.NoError(t, err) c, err := light.NewClient( @@ -354,20 +353,20 @@ func TestClientDivergentTraces2(t *testing.T) { require.NoError(t, err) _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - assert.NoError(t, err) - assert.Equal(t, 3, len(c.Witnesses())) + require.NoError(t, err) + assert.Len(t, c.Witnesses(), 3) } // 3. witness has the same first header, but different second header -// => creation should succeed, but the verification should fail +// => creation should succeed, but the verification should fail. func TestClientDivergentTraces3(t *testing.T) { - _, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime) + _, primaryHeaders, primaryVals := genMockNode(10, 5, 2, bTime) primary := mockp.New(chainID, primaryHeaders, primaryVals) firstBlock, err := primary.LightBlock(ctx, 1) require.NoError(t, err) - _, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime) + _, mockHeaders, mockVals := genMockNode(10, 5, 2, bTime) mockHeaders[1] = primaryHeaders[1] mockVals[1] = primaryVals[1] witness := mockp.New(chainID, mockHeaders, mockVals) @@ -389,20 +388,20 @@ func TestClientDivergentTraces3(t *testing.T) { require.NoError(t, err) _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - assert.Error(t, err) - assert.Equal(t, 1, len(c.Witnesses())) + require.Error(t, err) + assert.Len(t, c.Witnesses(), 1) } // 4. 
Witness has a divergent header but can not produce a valid trace to back it up. -// It should be ignored +// It should be ignored. func TestClientDivergentTraces4(t *testing.T) { - _, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime) + _, primaryHeaders, primaryVals := genMockNode(10, 5, 2, bTime) primary := mockp.New(chainID, primaryHeaders, primaryVals) firstBlock, err := primary.LightBlock(ctx, 1) require.NoError(t, err) - _, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime) + _, mockHeaders, mockVals := genMockNode(10, 5, 2, bTime) witness := primary.Copy(chainID) witness.AddLightBlock(&types.LightBlock{ SignedHeader: mockHeaders[10], @@ -425,6 +424,6 @@ func TestClientDivergentTraces4(t *testing.T) { require.NoError(t, err) _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - assert.Error(t, err) - assert.Equal(t, 1, len(c.Witnesses())) + require.Error(t, err) + assert.Len(t, c.Witnesses(), 1) } diff --git a/light/doc.go b/light/doc.go index 3cc2741bf05..805372432ce 100644 --- a/light/doc.go +++ b/light/doc.go @@ -63,7 +63,7 @@ This package provides three major things: Example usage: - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + db, err := dbm.NewPebbleDB("light-client-db", dbDir) if err != nil { // handle error } @@ -121,7 +121,7 @@ See https://docs.cometbft.com/main/core/light-client.html for usage example. Or see -https://github.com/cometbft/cometbft/tree/main/spec/consensus/light-client +https://github.com/cometbft/cometbft/blob/main/spec/light-client/README.md for the full spec */ package light diff --git a/light/errors.go b/light/errors.go index bc6357def66..4fe67d3a6f6 100644 --- a/light/errors.go +++ b/light/errors.go @@ -5,9 +5,42 @@ import ( "fmt" "time" + cmtbytes "github.com/cometbft/cometbft/libs/bytes" + cmtmath "github.com/cometbft/cometbft/libs/math" + "github.com/cometbft/cometbft/light/provider" "github.com/cometbft/cometbft/types" ) +var ( + + // ErrFailedHeaderCrossReferencing is returned when the detector was not able to cross reference the header + // with any of the connected witnesses. + ErrFailedHeaderCrossReferencing = errors.New("all witnesses have either not responded, don't have the " + + "blocks or sent invalid blocks. You should look to change your witnesses " + + "or review the light client's logs for more information") + // ErrLightClientAttack is returned when the light client has detected an attempt + // to verify a false header and has sent the evidence to either a witness or primary. + ErrLightClientAttack = errors.New(`attempted attack detected. +Light client received valid conflicting header from witness. +Unable to verify header. Evidence has been sent to both providers. +Check logs for full evidence and trace`) + + // ErrNoWitnesses means that there are not enough witnesses connected to + // continue running the light client. + ErrNoWitnesses = errors.New("no witnesses connected. 
please reset light client") + ErrNilOrSinglePrimaryTrace = errors.New("nil or single block primary trace") + ErrHeaderHeightAdjacent = errors.New("headers must be non adjacent in height") + ErrHeaderHeightNotAdjacent = errors.New("headers must be adjacent in height") + ErrNegativeOrZeroPeriod = errors.New("negative or zero period") + ErrNegativeHeight = errors.New("negative height") + ErrNegativeOrZeroHeight = errors.New("negative or zero height") + ErrInvalidBlockTime = errors.New("expected traceblock to have a lesser time than the target block") + ErrRemoveStoredBlocksRefused = errors.New("refused to remove the stored light blocks despite hashes mismatch") + ErrNoHeadersExist = errors.New("no headers exist") + ErrNilHeader = errors.New("nil header") + ErrEmptyTrustedStore = errors.New("trusted store is empty") +) + // ErrOldHeaderExpired means the old (trusted) header has expired according to // the given trustingPeriod and current time. If so, the light client must be // reset subjectively. @@ -20,6 +53,134 @@ func (e ErrOldHeaderExpired) Error() string { return fmt.Sprintf("old header has expired at %v (now: %v)", e.At, e.Now) } +type ErrTargetBlockHeightLessThanTrusted struct { + Target int64 + Trusted int64 +} + +func (e ErrTargetBlockHeightLessThanTrusted) Error() string { + return fmt.Sprintf("target block has a height lower than the trusted height (%d < %d)", e.Target, e.Trusted) +} + +type ErrHeaderHeightNotMonotonic struct { + GotHeight int64 + OldHeight int64 +} + +func (e ErrHeaderHeightNotMonotonic) Error() string { + return fmt.Sprintf("expected new header height %d to be greater than one of old header %d", e.GotHeight, e.OldHeight) +} + +type ErrHeaderTimeNotMonotonic struct { + GotTime time.Time + OldTime time.Time +} + +func (e ErrHeaderTimeNotMonotonic) Error() string { + return fmt.Sprintf("expected new header time %v to be after old header time %v", e.GotTime, e.OldTime) +} + +type ErrHeaderTimeExceedMaxClockDrift struct { + Ti time.Time + Now time.Time + Drift time.Duration +} + +func (e ErrHeaderTimeExceedMaxClockDrift) Error() string { + return fmt.Sprintf("new header has a time from the future %v (now: %v; max clock drift: %v)", e.Ti, e.Now, e.Drift) +} + +type ErrUnverifiedHeight struct { + Height int64 +} + +func (e ErrUnverifiedHeight) Error() string { + return fmt.Sprintf("unverified header/valset requested (latest: %d)", e.Height) +} + +type ErrInvalidTrustLevel struct { + Level cmtmath.Fraction +} + +func (e ErrInvalidTrustLevel) Error() string { + return fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", e.Level) +} + +type ErrValidatorsMismatch struct { + HeaderHash cmtbytes.HexBytes + ValidatorsHash cmtbytes.HexBytes + Height int64 +} + +func (e ErrValidatorsMismatch) Error() string { + return fmt.Sprintf("expected new header validators (%X) to match those that were supplied (%X) at height %d", e.HeaderHash, e.ValidatorsHash, e.Height) +} + +type ErrValidatorHashMismatch struct { + TrustedHash cmtbytes.HexBytes + ValidatorHash cmtbytes.HexBytes +} + +func (e ErrValidatorHashMismatch) Error() string { + return fmt.Sprintf("expected old header next validators (%X) to match those from new header (%X)", e.TrustedHash, e.ValidatorHash) +} + +type ErrBlockHashMismatch struct { + TraceBlockHash cmtbytes.HexBytes + SourceBlockHash cmtbytes.HexBytes +} + +func (e ErrBlockHashMismatch) Error() string { + return fmt.Sprintf("trusted block is different to the source's first block (%X = %X)", e.TraceBlockHash, e.SourceBlockHash) +} + +type 
ErrHeaderHashMismatch struct { + Expected cmtbytes.HexBytes + Actual cmtbytes.HexBytes +} + +func (e ErrHeaderHashMismatch) Error() string { + return fmt.Sprintf("expected header's hash %X, but got %X", e.Expected, e.Actual) +} + +type ErrExistingHeaderHashMismatch struct { + Existing cmtbytes.HexBytes + New cmtbytes.HexBytes +} + +func (e ErrExistingHeaderHashMismatch) Error() string { + return fmt.Sprintf("existing trusted header %X does not match newHeader %X", e.Existing, e.New) +} + +type ErrLightHeaderHashMismatch struct { + Existing cmtbytes.HexBytes + New cmtbytes.HexBytes +} + +func (e ErrLightHeaderHashMismatch) Error() string { + return fmt.Sprintf("light block header %X does not match newHeader %X", e.Existing, e.New) +} + +type ErrInvalidHashSize struct { + Expected int + Actual int +} + +func (e ErrInvalidHashSize) Error() string { + return fmt.Sprintf("expected hash size to be %d bytes, got %d bytes", e.Expected, e.Actual) +} + +type ErrUnexpectedChainID struct { + Index int + Witness provider.Provider + Actual string + Expected string +} + +func (e ErrUnexpectedChainID) Error() string { + return fmt.Sprintf("witness #%d: %v is on another chain %s, expected %s", e.Index, e.Witness, e.Actual, e.Expected) +} + // ErrNewValSetCantBeTrusted means the new validator set cannot be trusted // because < 1/3rd (+trustLevel+) of the old validator set has signed. type ErrNewValSetCantBeTrusted struct { @@ -27,7 +188,7 @@ type ErrNewValSetCantBeTrusted struct { } func (e ErrNewValSetCantBeTrusted) Error() string { - return fmt.Sprintf("cant trust new val set: %v", e.Reason) + return fmt.Sprintf("can't trust new val set: %v", e.Reason) } // ErrInvalidHeader means the header either failed the basic validation or @@ -40,11 +201,229 @@ func (e ErrInvalidHeader) Error() string { return fmt.Sprintf("invalid header: %v", e.Reason) } -// ErrFailedHeaderCrossReferencing is returned when the detector was not able to cross reference the header -// with any of the connected witnesses. -var ErrFailedHeaderCrossReferencing = errors.New("all witnesses have either not responded, don't have the " + - " blocks or sent invalid blocks. 
You should look to change your witnesses" + - " or review the light client's logs for more information") +func (e ErrInvalidHeader) Unwrap() error { + return e.Reason +} + +type ErrVerifySkipping struct { + Err error +} + +func (e ErrVerifySkipping) Error() string { + return fmt.Sprintf("verifySkipping of conflicting header failed: %v", e.Err) +} + +func (e ErrVerifySkipping) Unwrap() error { + return e.Err +} + +type ErrExamineTrace struct { + Err error +} + +func (e ErrExamineTrace) Error() string { + return fmt.Sprintf("failed to examine trace: %v", e.Err) +} + +func (e ErrExamineTrace) Unwrap() error { + return e.Err +} + +type ErrHeaderValidateBasic struct { + Err error +} + +func (e ErrHeaderValidateBasic) Error() string { + return fmt.Sprintf("untrustedHeader.ValidateBasic failed: %v", e.Err) +} + +func (e ErrHeaderValidateBasic) Unwrap() error { + return e.Err +} + +type ErrInvalidTrustOptions struct { + Err error +} + +func (e ErrInvalidTrustOptions) Error() string { + return fmt.Sprintf("invalid TrustOptions: %v", e.Err) +} + +func (e ErrInvalidTrustOptions) Unwrap() error { + return e.Err +} + +type ErrGetTrustedBlock struct { + Err error +} + +func (e ErrGetTrustedBlock) Error() string { + return fmt.Sprintf("can't get last trusted light block: %v", e.Err) +} + +func (e ErrGetTrustedBlock) Unwrap() error { + return e.Err +} + +type ErrGetTrustedBlockHeight struct { + Err error +} + +func (e ErrGetTrustedBlockHeight) Error() string { + return fmt.Sprintf("can't get last trusted light block height: %v", e.Err) +} + +func (e ErrGetTrustedBlockHeight) Unwrap() error { + return e.Err +} + +type ErrCleanup struct { + Err error +} + +func (e ErrCleanup) Error() string { + return fmt.Sprintf("failed to cleanup: %v", e.Err) +} + +func (e ErrCleanup) Unwrap() error { + return e.Err +} + +type ErrGetBlock struct { + Err error +} + +func (e ErrGetBlock) Error() string { + return fmt.Sprintf("failed to retrieve light block from primary to verify against: %v", e.Err) +} + +func (e ErrGetBlock) Unwrap() error { + return e.Err +} + +type ErrGetFirstBlock struct { + Err error +} + +func (e ErrGetFirstBlock) Error() string { + return fmt.Sprintf("can't get first light block: %v", e.Err) +} + +func (e ErrGetFirstBlock) Unwrap() error { + return e.Err +} + +type ErrGetFirstBlockHeight struct { + Err error +} + +func (e ErrGetFirstBlockHeight) Error() string { + return fmt.Sprintf("can't get first light block height: %v", e.Err) +} + +func (e ErrGetFirstBlockHeight) Unwrap() error { + return e.Err +} + +type ErrInvalidCommit struct { + Err error +} + +func (e ErrInvalidCommit) Error() string { + return fmt.Sprintf("invalid commit: %v", e.Err) +} + +func (e ErrInvalidCommit) Unwrap() error { + return e.Err +} + +type ErrGetLastTrustedHeight struct { + Err error +} + +func (e ErrGetLastTrustedHeight) Error() string { + return fmt.Sprintf("can't get last trusted height: %v", e.Err) +} + +func (e ErrGetLastTrustedHeight) Unwrap() error { + return e.Err +} + +type ErrPrune struct { + Err error +} + +func (e ErrPrune) Error() string { + return fmt.Sprintf("prune: %v", e.Err) +} + +func (e ErrPrune) Unwrap() error { + return e.Err +} + +type ErrSaveTrustedHeader struct { + Err error +} + +func (e ErrSaveTrustedHeader) Error() string { + return fmt.Sprintf("failed to save trusted header: %v", e.Err) +} + +func (e ErrSaveTrustedHeader) Unwrap() error { + return e.Err +} + +type ErrCleanupAfter struct { + Height int64 + Err error +} + +func (e ErrCleanupAfter) Error() string { + return fmt.Sprintf("cleanup 
after height %d failed: %v", e.Height, e.Err) +} + +func (e ErrCleanupAfter) Unwrap() error { + return e.Err +} + +type ErrGetSignedHeaderBeforeHeight struct { + Height int64 + Err error +} + +func (e ErrGetSignedHeaderBeforeHeight) Error() string { + return fmt.Sprintf("can't get signed header before height %d: %v", e.Height, e.Err) +} + +func (e ErrGetSignedHeaderBeforeHeight) Unwrap() error { + return e.Err +} + +type ErrGetHeaderBeforeHeight struct { + Height int64 + Err error +} + +func (e ErrGetHeaderBeforeHeight) Error() string { + return fmt.Sprintf("failed to get header before %d: %v", e.Height, e.Err) +} + +func (e ErrGetHeaderBeforeHeight) Unwrap() error { + return e.Err +} + +type ErrGetHeaderAtHeight struct { + Height int64 + Err error +} + +func (e ErrGetHeaderAtHeight) Error() string { + return fmt.Sprintf("failed to obtain the header at height #%d: %v", e.Height, e.Err) +} + +func (e ErrGetHeaderAtHeight) Unwrap() error { + return e.Err +} // ErrVerificationFailed means either sequential or skipping verification has // failed to verify from header #1 to header #2 due to some reason. @@ -63,32 +442,36 @@ func (e ErrVerificationFailed) Error() string { return fmt.Sprintf("verify from #%d to #%d failed: %v", e.From, e.To, e.Reason) } -// ErrLightClientAttack is returned when the light client has detected an attempt -// to verify a false header and has sent the evidence to either a witness or primary. -var ErrLightClientAttack = errors.New(`attempted attack detected. - Light client received valid conflicting header from witness. - Unable to verify header. Evidence has been sent to both providers. - Check logs for full evidence and trace`, -) - -// ErrNoWitnesses means that there are not enough witnesses connected to -// continue running the light client. -var ErrNoWitnesses = errors.New("no witnesses connected. please reset light client") - -// ----------------------------- INTERNAL ERRORS --------------------------------- - // ErrConflictingHeaders is thrown when two conflicting headers are discovered. -type errConflictingHeaders struct { +type ErrConflictingHeaders struct { Block *types.LightBlock WitnessIndex int } -func (e errConflictingHeaders) Error() string { +func (e ErrConflictingHeaders) Error() string { return fmt.Sprintf( "header hash (%X) from witness (%d) does not match primary", e.Block.Hash(), e.WitnessIndex) } +// ErrProposerPrioritiesDiverge is thrown when two conflicting headers are +// discovered, but the error is non-attributable compared to ErrConflictingHeaders. +// The difference is in validator set proposer priorities, which may change +// with every round of consensus. +type ErrProposerPrioritiesDiverge struct { + WitnessHash []byte + WitnessIndex int + PrimaryHash []byte +} + +func (e ErrProposerPrioritiesDiverge) Error() string { + return fmt.Sprintf( + "validator set's proposer priority hashes do not match: witness[%d]=%X, primary=%X", + e.WitnessIndex, e.WitnessHash, e.PrimaryHash) +} + +// ----------------------------- INTERNAL ERRORS --------------------------------- + // errBadWitness is returned when the witness either does not respond or // responds with an invalid header.
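Because every new wrapper type above implements `Unwrap`, callers keep `errors.Is`/`errors.As` semantics rather than matching on error strings. A small illustration (the wrapped `context.Canceled` is just an example cause, not something this code necessarily produces):

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/cometbft/cometbft/light"
)

func demoUnwrapChain() {
	// Simulate a skipping-verification failure whose root cause was a
	// canceled context.
	err := light.ErrVerifySkipping{Err: context.Canceled}

	// The cause survives wrapping...
	fmt.Println(errors.Is(err, context.Canceled)) // true

	// ...and the wrapper itself is still matchable.
	var vs light.ErrVerifySkipping
	fmt.Println(errors.As(err, &vs)) // true
}
```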
type errBadWitness struct { @@ -100,6 +483,10 @@ func (e errBadWitness) Error() string { return fmt.Sprintf("Witness %d returned error: %s", e.WitnessIndex, e.Reason.Error()) } +func (e errBadWitness) Unwrap() error { + return e.Reason +} + var errNoDivergence = errors.New( "sanity check failed: no divergence between the original trace and the provider's new trace", ) diff --git a/light/example_test.go b/light/example_test.go index 2bbbb4a0aab..6e27b8b9870 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -9,7 +9,6 @@ import ( "time" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/light" @@ -17,6 +16,7 @@ import ( httpp "github.com/cometbft/cometbft/light/provider/http" dbs "github.com/cometbft/cometbft/light/store/db" rpctest "github.com/cometbft/cometbft/rpc/test" + cmttime "github.com/cometbft/cometbft/types/time" ) // Automatically getting new headers and verifying them. @@ -42,7 +42,7 @@ func ExampleClient_Update() { stdlog.Fatal(err) } - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + db, err := dbm.NewPebbleDB("light-client-db", dbDir) if err != nil { stdlog.Fatal(err) } @@ -71,7 +71,7 @@ func ExampleClient_Update() { time.Sleep(2 * time.Second) - h, err := c.Update(context.Background(), time.Now()) + h, err := c.Update(context.Background(), cmttime.Now()) if err != nil { stdlog.Fatal(err) } @@ -107,7 +107,7 @@ func ExampleClient_VerifyLightBlockAtHeight() { stdlog.Fatal(err) } - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + db, err := dbm.NewPebbleDB("light-client-db", dbDir) if err != nil { stdlog.Fatal(err) } @@ -134,7 +134,7 @@ func ExampleClient_VerifyLightBlockAtHeight() { } }() - _, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now()) + _, err = c.VerifyLightBlockAtHeight(context.Background(), 3, cmttime.Now()) if err != nil { stdlog.Fatal(err) } diff --git a/light/helpers_test.go b/light/helpers_test.go index e88335c4cf4..ce06eeb6d89 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -3,11 +3,10 @@ package light_test import ( "time" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/tmhash" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" @@ -99,8 +98,8 @@ func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, } func makeVote(header *types.Header, valset *types.ValidatorSet, - key crypto.PrivKey, blockID types.BlockID) *types.Vote { - + key crypto.PrivKey, blockID types.BlockID, +) *types.Vote { addr := key.PubKey().Address() idx, _ := valset.GetByAddress(addr) vote := &types.Vote{ @@ -109,7 +108,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, Height: header.Height, Round: 1, Timestamp: cmttime.Now(), - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: blockID, } @@ -133,8 +132,8 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, } func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { - + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, 
+) *types.Header { return &types.Header{ Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 0}, ChainID: chainID, @@ -154,8 +153,8 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { - + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, +) *types.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) return &types.SignedHeader{ Header: header, @@ -166,8 +165,8 @@ func (pkz privKeys) GenSignedHeaderLastBlockID calls genHeader and signHeader and combines them into a SignedHeader. func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs types.Txs, valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, - lastBlockID types.BlockID) *types.SignedHeader { - + lastBlockID types.BlockID, +) *types.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) header.LastBlockID = lastBlockID return &types.SignedHeader{ @@ -185,16 +184,16 @@ func (pkz privKeys) ChangeKeys(delta int) privKeys { // blockSize) and with variation in validator sets. BlockIntervals are in per minute. // NOTE: Expected to have a large validator set size ~ 100 validators. func genMockNodeWithKeys( - chainID string, blockSize int64, valSize int, valVariation float32, bTime time.Time) ( map[int64]*types.SignedHeader, map[int64]*types.ValidatorSet, - map[int64]privKeys) { - + map[int64]privKeys, +) { var ( + chainID = "test-chain" headers = make(map[int64]*types.SignedHeader, blockSize) valset = make(map[int64]*types.ValidatorSet, blockSize+1) keymap = make(map[int64]privKeys, blockSize+1) @@ -239,15 +238,16 @@ func genMockNodeWithKeys( } func genMockNode( - chainID string, blockSize int64, valSize int, valVariation float32, bTime time.Time) ( string, map[int64]*types.SignedHeader, - map[int64]*types.ValidatorSet) { - headers, valset, _ := genMockNodeWithKeys(chainID, blockSize, valSize, valVariation, bTime) + map[int64]*types.ValidatorSet, +) { + chainID := "test-chain" + headers, valset, _ := genMockNodeWithKeys(blockSize, valSize, valVariation, bTime) return chainID, headers, valset } diff --git a/light/provider/errors.go b/light/provider/errors.go index 398647b3e17..b3ef342008f 100644 --- a/light/provider/errors.go +++ b/light/provider/errors.go @@ -7,14 +7,14 @@ import ( var ( // ErrHeightTooHigh is returned when the height is higher than the last - // block that the provider has. The light client will not remove the provider + // block that the provider has. The light client will not remove the provider. ErrHeightTooHigh = errors.New("height requested is too high") // ErrLightBlockNotFound is returned when a provider can't find the // requested header (i.e. it has been pruned). - // The light client will not remove the provider + // The light client will not remove the provider. ErrLightBlockNotFound = errors.New("light block not found") // ErrNoResponse is returned if the provider doesn't respond to the - // request in a gieven time + // request in a given time.
ErrNoResponse = errors.New("client failed to respond") ) @@ -25,5 +25,17 @@ type ErrBadLightBlock struct { } func (e ErrBadLightBlock) Error() string { - return fmt.Sprintf("client provided bad signed header: %s", e.Reason.Error()) + return "client provided bad signed header: " + e.Reason.Error() +} + +func (e ErrBadLightBlock) Unwrap() error { + return e.Reason +} + +type ErrNegativeHeight struct { + Height int64 +} + +func (e ErrNegativeHeight) Error() string { + return fmt.Sprintf("expected height >= 0, got height %d", e.Height) } diff --git a/light/provider/http/http.go b/light/provider/http/http.go index ca3ad52039e..0478ea01938 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -164,7 +164,6 @@ OUTER_LOOP: default: return nil, err } - } } @@ -212,7 +211,7 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe func validateHeight(height int64) (*int64, error) { if height < 0 { - return nil, fmt.Errorf("expected height >= 0, got height %d", height) + return nil, provider.ErrNegativeHeight{Height: height} } h := &height @@ -223,7 +222,7 @@ func validateHeight(height int64) (*int64, error) { } // exponential backoff (with jitter) -// 0.5s -> 2s -> 4.5s -> 8s -> 12.5 with 1s variation +// 0.5s -> 2s -> 4.5s -> 8s -> 12.5s with 1s variation. func backoffTimeout(attempt uint16) time.Duration { //nolint:gosec // G404: Use of weak random number generator return time.Duration(500*attempt*attempt)*time.Millisecond + time.Duration(rand.Intn(1000))*time.Millisecond diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index ef0e588e865..f6b4565d9a0 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -22,15 +22,15 @@ import ( func TestNewProvider(t *testing.T) { c, err := lighthttp.New("chain-test", "192.168.0.1:26657") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://192.168.0.1:26657}") + require.Equal(t, "http{http://192.168.0.1:26657}", fmt.Sprintf("%s", c)) c, err = lighthttp.New("chain-test", "http://153.200.0.1:26657") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1:26657}") + require.Equal(t, "http{http://153.200.0.1:26657}", fmt.Sprintf("%s", c)) c, err = lighthttp.New("chain-test", "153.200.0.1") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}") + require.Equal(t, "http{http://153.200.0.1}", fmt.Sprintf("%s", c)) } func TestProvider(t *testing.T) { @@ -47,7 +47,7 @@ func TestProvider(t *testing.T) { chainID := genDoc.ChainID c, err := rpchttp.New(rpcAddr + path) - require.Nil(t, err) + require.NoError(t, err) p := lighthttp.NewWithClient(chainID, c) require.NoError(t, err) @@ -61,11 +61,10 @@ func TestProvider(t *testing.T) { lb, err := p.LightBlock(context.Background(), 0) require.NoError(t, err) require.NotNil(t, lb) - assert.True(t, lb.Height < 1000) - assert.True(t, lb.Height >= 10) + assert.GreaterOrEqual(t, lb.Height, int64(10)) // let's check this is valid somehow - assert.Nil(t, lb.ValidateBasic(chainID)) + require.NoError(t, lb.ValidateBasic(chainID)) // historical queries now work :) lb, err = p.LightBlock(context.Background(), 0) @@ -80,7 +79,7 @@ func TestProvider(t *testing.T) { lb, err = p.LightBlock(context.Background(), 0) require.NoError(t, err) require.NotNil(t, lb) - lb, err = p.LightBlock(context.Background(), lb.Height+1000) + lb, err = p.LightBlock(context.Background(), lb.Height+100000) require.Error(t, err) require.Nil(t, lb)
assert.Equal(t, provider.ErrHeightTooHigh, err) diff --git a/light/provider/mock/deadmock.go b/light/provider/mock/deadmock.go index 789cc255fd9..1d12838026f 100644 --- a/light/provider/mock/deadmock.go +++ b/light/provider/mock/deadmock.go @@ -18,12 +18,12 @@ func NewDeadMock(chainID string) provider.Provider { func (p *deadMock) ChainID() string { return p.chainID } -func (p *deadMock) String() string { return "deadMock" } +func (*deadMock) String() string { return "deadMock" } -func (p *deadMock) LightBlock(context.Context, int64) (*types.LightBlock, error) { +func (*deadMock) LightBlock(context.Context, int64) (*types.LightBlock, error) { return nil, provider.ErrNoResponse } -func (p *deadMock) ReportEvidence(context.Context, types.Evidence) error { +func (*deadMock) ReportEvidence(context.Context, types.Evidence) error { return provider.ErrNoResponse } diff --git a/light/provider/provider.go b/light/provider/provider.go index 333d8c1e891..7a20e26ede5 100644 --- a/light/provider/provider.go +++ b/light/provider/provider.go @@ -25,5 +25,5 @@ type Provider interface { LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) // ReportEvidence reports an evidence of misbehavior. - ReportEvidence(context.Context, types.Evidence) error + ReportEvidence(ctx context.Context, ev types.Evidence) error } diff --git a/light/proxy/errors.go b/light/proxy/errors.go new file mode 100644 index 00000000000..0c92dd6faf6 --- /dev/null +++ b/light/proxy/errors.go @@ -0,0 +1,28 @@ +package proxy + +import "fmt" + +type ErrCreateHTTPClient struct { + Addr string + Err error +} + +func (e ErrCreateHTTPClient) Error() string { + return fmt.Sprintf("failed to create http client for %s: %v", e.Addr, e.Err) +} + +func (e ErrCreateHTTPClient) Unwrap() error { + return e.Err +} + +type ErrStartHTTPClient struct { + Err error +} + +func (e ErrStartHTTPClient) Error() string { + return fmt.Sprintf("can't start client: %v", e.Err) +} + +func (e ErrStartHTTPClient) Unwrap() error { + return e.Err +} diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 0380b2d14dd..f21bea6a2c2 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -2,7 +2,6 @@ package proxy import ( "context" - "fmt" "net" "net/http" @@ -34,7 +33,7 @@ func NewProxy( ) (*Proxy, error) { rpcClient, err := rpchttp.NewWithTimeout(providerAddr, uint(config.WriteTimeout.Seconds())) if err != nil { - return nil, fmt.Errorf("failed to create http client for %s: %w", providerAddr, err) + return nil, ErrCreateHTTPClient{Addr: providerAddr, Err: err} } return &Proxy{ @@ -109,7 +108,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { // 3) Start a client. 
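The new `light/proxy` error types wrap their causes as well, so a caller of `NewProxy` can still reach the underlying RPC failure. A hypothetical helper under that assumption (the function name and messages are illustrative, not part of this diff):

```go
package example

import (
	"errors"
	"fmt"

	"github.com/cometbft/cometbft/light/proxy"
)

// describeProxyErr maps the exported proxy error types back to a
// human-readable cause.
func describeProxyErr(err error) string {
	var createErr proxy.ErrCreateHTTPClient
	if errors.As(err, &createErr) {
		return fmt.Sprintf("could not build an RPC client for %s: %v", createErr.Addr, createErr.Err)
	}
	var startErr proxy.ErrStartHTTPClient
	if errors.As(err, &startErr) {
		return fmt.Sprintf("proxy's light client failed to start: %v", startErr.Err)
	}
	return err.Error()
}
```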
if !p.Client.IsRunning() { if err := p.Client.Start(); err != nil { - return nil, mux, fmt.Errorf("can't start client: %w", err) + return nil, mux, ErrStartHTTPClient{Err: err} } } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 62e20712706..1b38606d0ed 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -37,6 +37,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { "dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""), "consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""), "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")), + "unconfirmed_tx": rpcserver.NewRPCFunc(makeUnconfirmedTxFunc(c), "hash"), "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"), "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""), @@ -73,7 +74,7 @@ func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { type rpcNetInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultNetInfo, error) func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc { - return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultNetInfo, error) { + return func(ctx *rpctypes.Context, _, _ int64) (*ctypes.ResultNetInfo, error) { return c.NetInfo(ctx.Context()) } } @@ -190,7 +191,7 @@ func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { return func( ctx *rpctypes.Context, query string, - prove bool, + _ bool, page, perPage *int, orderBy string, ) (*ctypes.ResultBlockSearch, error) { @@ -231,6 +232,14 @@ func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { } } +type rpcUnconfirmedTxFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) + +func makeUnconfirmedTxFunc(c *lrpc.Client) rpcUnconfirmedTxFunc { + return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return c.UnconfirmedTx(ctx.Context(), hash) + } +} + type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc { diff --git a/light/rpc/client.go b/light/rpc/client.go index ec8d5a426ae..d7950d32d10 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -3,7 +3,6 @@ package rpc import ( "bytes" "context" - "errors" "fmt" "regexp" "time" @@ -11,17 +10,16 @@ import ( "github.com/cometbft/cometbft/crypto/merkle" cmtbytes "github.com/cometbft/cometbft/libs/bytes" cmtmath "github.com/cometbft/cometbft/libs/math" - service "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/libs/service" rpcclient "github.com/cometbft/cometbft/rpc/client" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" cmterrors "github.com/cometbft/cometbft/types/errors" + cmttime "github.com/cometbft/cometbft/types/time" ) -var errNegOrZeroHeight = errors.New("negative or zero height") - // KeyPathFunc builds a merkle path out of the given path and key. type KeyPathFunc func(path string, key []byte) (merkle.KeyPath, error) @@ -67,7 +65,7 @@ func KeyPathFn(fn KeyPathFunc) Option { // DefaultMerkleKeyPathFn creates a function used to generate merkle key paths // from a path string and a key. This is the default used by the cosmos SDK. 
-// This merkle key paths are required when verifying /abci_query calls +// These merkle key paths are required when verifying /abci_query calls. func DefaultMerkleKeyPathFn() KeyPathFunc { // regexp for extracting store name from /abci_query path storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`) @@ -75,7 +73,7 @@ func DefaultMerkleKeyPathFn() KeyPathFunc { return func(path string, key []byte) (merkle.KeyPath, error) { matches := storeNameRegexp.FindStringSubmatch(path) if len(matches) != 2 { - return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp) + return nil, ErrMissingStoreName{Path: path, Rex: storeNameRegexp} } storeName := matches[1] @@ -130,8 +128,8 @@ func (c *Client) ABCIQuery(ctx context.Context, path string, data cmtbytes.HexBy // ABCIQueryWithOptions returns an error if opts.Prove is false. func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data cmtbytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - + opts rpcclient.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { // always request the proof opts.Prove = true @@ -143,16 +141,16 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data cmt // Validate the response. if resp.IsErr() { - return nil, fmt.Errorf("err response code: %v", resp.Code) + return nil, ErrResponseCode{Code: resp.Code} } if len(resp.Key) == 0 { return nil, cmterrors.ErrRequiredField{Field: "key"} } if resp.ProofOps == nil || len(resp.ProofOps.Ops) == 0 { - return nil, errors.New("no proof ops") + return nil, ErrNoProofOps } if resp.Height <= 0 { - return nil, errNegOrZeroHeight + return nil, ErrNegOrZeroHeight } // Update the light client if we're behind. @@ -167,23 +165,23 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data cmt if resp.Value != nil { // 1) build a Merkle key path from path and resp.Key if c.keyPathFn == nil { - return nil, errors.New("please configure Client with KeyPathFn option") + return nil, ErrNilKeyPathFn } kp, err := c.keyPathFn(path, resp.Key) if err != nil { - return nil, fmt.Errorf("can't build merkle key path: %w", err) + return nil, ErrBuildMerkleKeyPath{Err: err} } // 2) verify value err = c.prt.VerifyValue(resp.ProofOps, l.AppHash, kp.String(), resp.Value) if err != nil { - return nil, fmt.Errorf("verify value proof: %w", err) + return nil, ErrVerifyValueProof{Err: err} } } else { // OR validate the absence proof against the trusted header. err = c.prt.VerifyAbsence(resp.ProofOps, l.AppHash, string(resp.Key)) if err != nil { - return nil, fmt.Errorf("verify absence proof: %w", err) + return nil, ErrVerifyAbsenceProof{Err: err} } } @@ -202,6 +200,10 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.Resu return c.next.BroadcastTxSync(ctx, tx) } +func (c *Client) UnconfirmedTx(ctx context.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return c.next.UnconfirmedTx(ctx, hash) +} + func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return c.next.UnconfirmedTxs(ctx, limit) } @@ -237,7 +239,7 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re return nil, err } if res.BlockHeight <= 0 { - return nil, errNegOrZeroHeight + return nil, ErrNegOrZeroHeight } // Update the light client if we're behind. @@ -248,8 +250,7 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re // Verify hash.
if cH, tH := res.ConsensusParams.Hash(), l.ConsensusHash; !bytes.Equal(cH, tH) { - return nil, fmt.Errorf("params hash %X does not match trusted hash %X", - cH, tH) + return nil, ErrParamHashMismatch{ConsensusParamsHash: cH, ConsensusHash: tH} } return res, nil @@ -270,10 +271,10 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) // Validate res. for i, meta := range res.BlockMetas { if meta == nil { - return nil, fmt.Errorf("nil block meta %d", i) + return nil, ErrNilBlockMeta{Index: i} } if err := meta.ValidateBasic(); err != nil { - return nil, fmt.Errorf("invalid block meta %d: %w", i, err) + return nil, ErrInvalidBlockMeta{I: i, Err: err} } } @@ -289,11 +290,10 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) for _, meta := range res.BlockMetas { h, err := c.lc.TrustedLightBlock(meta.Header.Height) if err != nil { - return nil, fmt.Errorf("trusted header %d: %w", meta.Header.Height, err) + return nil, ErrTrustedHeader{Height: meta.Header.Height, Err: err} } if bmH, tH := meta.Header.Hash(), h.Hash(); !bytes.Equal(bmH, tH) { - return nil, fmt.Errorf("block meta header %X does not match with trusted header %X", - bmH, tH) + return nil, ErrBlockMetaHeaderMismatch{BlockMetaHeader: bmH, TrustedHeader: tH} } } @@ -323,8 +323,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, return nil, err } if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) { - return nil, fmt.Errorf("blockID %X does not match with block %X", - bmH, bH) + return nil, ErrBlockIDMismatch{BlockID: bmH, Block: bH} } // Update the light client if we're behind. @@ -335,8 +334,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, // Verify block. if bH, tH := res.Block.Hash(), l.Hash(); !bytes.Equal(bH, tH) { - return nil, fmt.Errorf("block header %X does not match with trusted header %X", - bH, tH) + return nil, ErrBlockHeaderMismatch{BlockHeader: bH, TrustedHeader: tH} } return res, nil @@ -357,8 +355,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl return nil, err } if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) { - return nil, fmt.Errorf("blockID %X does not match with block %X", - bmH, bH) + return nil, ErrBlockIDMismatch{BlockID: bmH, Block: bH} } // Update the light client if we're behind. @@ -369,8 +366,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl // Verify block. if bH, tH := res.Block.Hash(), l.Hash(); !bytes.Equal(bH, tH) { - return nil, fmt.Errorf("block header %X does not match with trusted header %X", - bH, tH) + return nil, ErrBlockHeaderMismatch{BlockHeader: bH, TrustedHeader: tH} } return res, nil @@ -378,13 +374,13 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl // BlockResults returns the block results for the given height. If no height is // provided, the results of the block preceding the latest are returned. -// NOTE: Light client only verifies the tx results +// NOTE: Light client only verifies the tx results. func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { var h int64 if height == nil { res, err := c.next.Status(ctx) if err != nil { - return nil, fmt.Errorf("can't get latest height: %w", err) + return nil, ErrGetLatestHeight{Err: err} } // Can't return the latest block results here because we won't be able to // prove them. 
Return the results for the previous block instead. @@ -400,7 +396,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul // Validate res. if res.Height <= 0 { - return nil, errNegOrZeroHeight + return nil, ErrNegOrZeroHeight } // Update the light client if we're behind. @@ -411,18 +407,17 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul } // Build a Merkle tree out of the above 3 binary slices. - rH := state.TxResultsHash(res.TxsResults) + rH := state.TxResultsHash(res.TxResults) // Verify block results. if !bytes.Equal(rH, trustedBlock.LastResultsHash) { - return nil, fmt.Errorf("last results %X does not match with trusted last results %X", - rH, trustedBlock.LastResultsHash) + return nil, ErrLastResultMismatch{ResultHash: rH, LastResultHash: trustedBlock.LastResultsHash} } return res, nil } -// Header fetches and verifies the header directly via the light client +// Header fetches and verifies the header directly via the light client. func (c *Client) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { lb, err := c.updateLightClientIfNeededTo(ctx, height) if err != nil { @@ -449,8 +444,7 @@ func (c *Client) HeaderByHash(ctx context.Context, hash cmtbytes.HexBytes) (*cty } if !bytes.Equal(lb.Header.Hash(), res.Header.Hash()) { - return nil, fmt.Errorf("primary header hash does not match trusted header hash. (%X != %X)", - lb.Header.Hash(), res.Header.Hash()) + return nil, ErrPrimaryHeaderMismatch{PrimaryHeaderHash: lb.Header.Hash(), TrustedHeaderHash: res.Header.Hash()} } return res, nil @@ -480,7 +474,7 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul // Validate res. if res.Height <= 0 { - return nil, errNegOrZeroHeight + return nil, ErrNegOrZeroHeight } // Update the light client if we're behind. @@ -518,7 +512,6 @@ func (c *Client) Validators( height *int64, pagePtr, perPagePtr *int, ) (*ctypes.ResultValidators, error) { - // Update the light client if we're behind and retrieve the light block at the // requested height or at the latest height if no height is provided. l, err := c.updateLightClientIfNeededTo(ctx, height) @@ -540,7 +533,8 @@ func (c *Client) Validators( BlockHeight: l.Height, Validators: v, Count: len(v), - Total: totalCount}, nil + Total: totalCount, + }, nil } func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { @@ -548,7 +542,8 @@ func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*cty } func (c *Client) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int, +) (out <-chan ctypes.ResultEvent, err error) { return c.next.Subscribe(ctx, subscriber, query, outCapacity...) 
} @@ -566,12 +561,12 @@ func (c *Client) updateLightClientIfNeededTo(ctx context.Context, height *int64) err error ) if height == nil { - l, err = c.lc.Update(ctx, time.Now()) + l, err = c.lc.Update(ctx, cmttime.Now()) } else { - l, err = c.lc.VerifyLightBlockAtHeight(ctx, *height, time.Now()) + l, err = c.lc.VerifyLightBlockAtHeight(ctx, *height, cmttime.Now()) } if err != nil { - return nil, fmt.Errorf("failed to update light client to %d: %w", *height, err) + return nil, ErrUpdateClient{Height: *height, Err: err} } return l, nil } @@ -582,7 +577,7 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { // SubscribeWS subscribes for events using the given query and remote address as // a subscriber, but does not verify responses (UNSAFE)! -// TODO: verify data +// TODO: verify data. func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) if err != nil { @@ -629,9 +624,9 @@ func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscr return &ctypes.ResultUnsubscribe{}, nil } -// XXX: Copied from rpc/core/env.go +// XXX: Copied from rpc/core/env.go. const ( - // see README + // see README. defaultPerPage = 30 maxPerPage = 100 ) @@ -651,7 +646,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { } page := *pagePtr if page <= 0 || page > pages { - return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) + return 1, ErrPageRange{Pages: pages, Page: page} } return page, nil diff --git a/light/rpc/errors.go b/light/rpc/errors.go new file mode 100644 index 00000000000..bfd1989a805 --- /dev/null +++ b/light/rpc/errors.go @@ -0,0 +1,190 @@ +package rpc + +import ( + "errors" + "fmt" + "regexp" + + cmtbytes "github.com/cometbft/cometbft/libs/bytes" +) + +var ( + ErrNegOrZeroHeight = errors.New("negative or zero height") + ErrNoProofOps = errors.New("no proof ops") + ErrNilKeyPathFn = errors.New("please configure Client with KeyPathFn option") +) + +type ErrMissingStoreName struct { + Path string + Rex *regexp.Regexp +} + +func (e ErrMissingStoreName) Error() string { + return fmt.Sprintf("can't find store name in %s using %s", e.Path, e.Rex) +} + +type ErrResponseCode struct { + Code uint32 +} + +func (e ErrResponseCode) Error() string { + return fmt.Sprintf("err response code: %v", e.Code) +} + +type ErrPageRange struct { + Pages int + Page int +} + +func (e ErrPageRange) Error() string { + return fmt.Sprintf("page should be within [1, %d] range, given %d", e.Pages, e.Page) +} + +type ErrNilBlockMeta struct { + Index int +} + +func (e ErrNilBlockMeta) Error() string { + return fmt.Sprintf("nil block meta %d", e.Index) +} + +type ErrParamHashMismatch struct { + ConsensusParamsHash []byte + ConsensusHash cmtbytes.HexBytes +} + +func (e ErrParamHashMismatch) Error() string { + return fmt.Sprintf("params hash %X does not match trusted hash %X", e.ConsensusParamsHash, e.ConsensusHash) +} + +type ErrLastResultMismatch struct { + ResultHash []byte + LastResultHash cmtbytes.HexBytes +} + +func (e ErrLastResultMismatch) Error() string { + return fmt.Sprintf("last results %X does not match with trusted last results %X", e.ResultHash, e.LastResultHash) +} + +type ErrPrimaryHeaderMismatch struct { + PrimaryHeaderHash cmtbytes.HexBytes + TrustedHeaderHash cmtbytes.HexBytes +} + +func (e ErrPrimaryHeaderMismatch) Error() string { + return fmt.Sprintf("primary header hash does not match trusted header hash. 
(%X != %X)", e.PrimaryHeaderHash, e.TrustedHeaderHash) +} + +type ErrBlockHeaderMismatch struct { + BlockHeader cmtbytes.HexBytes + TrustedHeader cmtbytes.HexBytes +} + +func (e ErrBlockHeaderMismatch) Error() string { + return fmt.Sprintf("block header %X does not match with trusted header %X", e.BlockHeader, e.TrustedHeader) +} + +type ErrBlockMetaHeaderMismatch struct { + BlockMetaHeader cmtbytes.HexBytes + TrustedHeader cmtbytes.HexBytes +} + +func (e ErrBlockMetaHeaderMismatch) Error() string { + return fmt.Sprintf("block meta header %X does not match with trusted header %X", e.BlockMetaHeader, e.TrustedHeader) +} + +type ErrBlockIDMismatch struct { + BlockID cmtbytes.HexBytes + Block cmtbytes.HexBytes +} + +func (e ErrBlockIDMismatch) Error() string { + return fmt.Sprintf("blockID %X does not match with block %X", e.BlockID, e.Block) +} + +type ErrBuildMerkleKeyPath struct { + Err error +} + +func (e ErrBuildMerkleKeyPath) Error() string { + return fmt.Sprintf("can't build merkle key path: %v", e.Err) +} + +func (e ErrBuildMerkleKeyPath) Unwrap() error { + return e.Err +} + +type ErrVerifyValueProof struct { + Err error +} + +func (e ErrVerifyValueProof) Error() string { + return fmt.Sprintf("verify value proof: %v", e.Err) +} + +func (e ErrVerifyValueProof) Unwrap() error { + return e.Err +} + +type ErrVerifyAbsenceProof struct { + Err error +} + +func (e ErrVerifyAbsenceProof) Error() string { + return fmt.Sprintf("verify absence proof: %v", e.Err) +} + +func (e ErrVerifyAbsenceProof) Unwrap() error { + return e.Err +} + +type ErrGetLatestHeight struct { + Err error +} + +func (e ErrGetLatestHeight) Error() string { + return fmt.Sprintf("can't get latest height: %v", e.Err) +} + +func (e ErrGetLatestHeight) Unwrap() error { + return e.Err +} + +type ErrInvalidBlockMeta struct { + I int + Err error +} + +func (e ErrInvalidBlockMeta) Error() string { + return fmt.Sprintf("invalid block meta %d: %v", e.I, e.Err) +} + +func (e ErrInvalidBlockMeta) Unwrap() error { + return e.Err +} + +type ErrTrustedHeader struct { + Height int64 + Err error +} + +func (e ErrTrustedHeader) Error() string { + return fmt.Sprintf("trusted header %d: %v", e.Height, e.Err) +} + +func (e ErrTrustedHeader) Unwrap() error { + return e.Err +} + +type ErrUpdateClient struct { + Height int64 + Err error +} + +func (e ErrUpdateClient) Error() string { + return fmt.Sprintf("failed to update light client to %d: %v", e.Height, e.Err) +} + +func (e ErrUpdateClient) Unwrap() error { + return e.Err +} diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index 4fe3e65131e..dc3792941c2 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -21,6 +21,10 @@ type LightClient struct { func (_m *LightClient) ChainID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -35,6 +39,10 @@ func (_m *LightClient) ChainID() string { func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for TrustedLightBlock") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(int64) (*types.LightBlock, error)); ok { @@ -61,6 +69,10 @@ func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) { ret := _m.Called(ctx, now) + if 
len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, time.Time) (*types.LightBlock, error)); ok { @@ -87,6 +99,10 @@ func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightB func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { ret := _m.Called(ctx, height, now) + if len(ret) == 0 { + panic("no return value specified for VerifyLightBlockAtHeight") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) (*types.LightBlock, error)); ok { diff --git a/light/setup.go b/light/setup.go index 66ae15d6aa8..7482c850676 100644 --- a/light/setup.go +++ b/light/setup.go @@ -22,8 +22,8 @@ func NewHTTPClient( primaryAddress string, witnessesAddresses []string, trustedStore store.Store, - options ...Option) (*Client, error) { - + options ...Option, +) (*Client, error) { providers, err := providersFromAddresses(append(witnessesAddresses, primaryAddress), chainID) if err != nil { return nil, err @@ -51,8 +51,8 @@ func NewHTTPClientFromTrustedStore( primaryAddress string, witnessesAddresses []string, trustedStore store.Store, - options ...Option) (*Client, error) { - + options ...Option, +) (*Client, error) { providers, err := providersFromAddresses(append(witnessesAddresses, primaryAddress), chainID) if err != nil { return nil, err diff --git a/light/store/db/db.go b/light/store/db/db.go index 93f272304a5..24104162a14 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -2,39 +2,85 @@ package db import ( "encoding/binary" - "fmt" - "regexp" - "strconv" dbm "github.com/cometbft/cometbft-db" - cmterrors "github.com/cometbft/cometbft/types/errors" - + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/light/store" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" ) -var sizeKey = []byte("size") - type dbs struct { db dbm.DB prefix string mtx cmtsync.RWMutex size uint16 + + dbKeyLayout LightStoreKeyLayout +} + +func isEmpty(db dbm.DB) bool { + iter, err := db.Iterator(nil, nil) + if err != nil { + panic(err) + } + + defer iter.Close() + for ; iter.Valid(); iter.Next() { + return false + } + return true +} + +func setDBKeyLayout(db dbm.DB, lightStore *dbs, dbKeyLayoutVersion string) { + if !isEmpty(db) { + var version []byte + var err error + if version, err = lightStore.db.Get([]byte("version")); err != nil { + // WARN: This is because currently cometBFT DB does not return an error if the key does not exist + // If this behavior changes we need to account for that. + panic(err) + } + if len(version) != 0 { + dbKeyLayoutVersion = string(version) + } + } + + switch dbKeyLayoutVersion { + case "v1", "": + lightStore.dbKeyLayout = &v1LegacyLayout{} + dbKeyLayoutVersion = "v1" + case "v2": + lightStore.dbKeyLayout = &v2Layout{} + default: + panic("unknown key layout version") + } + + if err := lightStore.db.SetSync([]byte("version"), []byte(dbKeyLayoutVersion)); err != nil { + panic(err) + } } // New returns a Store that wraps any DB (with an optional prefix in case you // want to use one DB with many light clients). 
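A minimal usage sketch of the two constructors that follow (editor's example; the `lightdb` alias and the prefix strings are illustrative, everything else is introduced by this diff):

import (
	dbm "github.com/cometbft/cometbft-db"
	lightdb "github.com/cometbft/cometbft/light/store/db"
)

func openStores() {
	// New keeps whatever layout version is already persisted under the
	// "version" key, defaulting to the legacy v1 layout on an empty DB.
	legacy := lightdb.New(dbm.NewMemDB(), "wallet-1")

	// NewWithDBVersion lets a fresh store opt in to the v2 (orderedcode) layout.
	v2 := lightdb.NewWithDBVersion(dbm.NewMemDB(), "wallet-2", "v2")

	_, _ = legacy, v2
}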
func New(db dbm.DB, prefix string) store.Store { + return NewWithDBVersion(db, prefix, "") +} + +func NewWithDBVersion(db dbm.DB, prefix string, dbKeyVersion string) store.Store { + dbStore := &dbs{db: db, prefix: prefix} + + setDBKeyLayout(db, dbStore, dbKeyVersion) + size := uint16(0) - bz, err := db.Get(sizeKey) + bz, err := db.Get(dbStore.dbKeyLayout.SizeKey(prefix)) if err == nil && len(bz) > 0 { size = unmarshalSize(bz) } - - return &dbs{db: db, prefix: prefix, size: size} + dbStore.size = size + return dbStore } // SaveLightBlock persists LightBlock to the db. @@ -52,7 +98,7 @@ func (s *dbs) SaveLightBlock(lb *types.LightBlock) error { lbBz, err := lbpb.Marshal() if err != nil { - return fmt.Errorf("marshaling LightBlock: %w", err) + return store.ErrMarshalBlock{Err: err} } s.mtx.Lock() @@ -61,20 +107,20 @@ func (s *dbs) SaveLightBlock(lb *types.LightBlock) error { b := s.db.NewBatch() defer b.Close() if err = b.Set(s.lbKey(lb.Height), lbBz); err != nil { - return err + return store.ErrStore{Err: err} } - if err = b.Set(sizeKey, marshalSize(s.size+1)); err != nil { - return err + if err = b.Set(s.dbKeyLayout.SizeKey(s.prefix), marshalSize(s.size+1)); err != nil { + return store.ErrStore{Err: err} } if err = b.WriteSync(); err != nil { - return err + return store.ErrStore{Err: err} } s.size++ return nil } -// DeleteLightBlockAndValidatorSet deletes the LightBlock from +// DeleteLightBlock deletes the LightBlock from // the db. // // Safe for concurrent use by multiple goroutines. @@ -89,13 +135,13 @@ func (s *dbs) DeleteLightBlock(height int64) error { b := s.db.NewBatch() defer b.Close() if err := b.Delete(s.lbKey(height)); err != nil { - return err + return store.ErrStore{Err: err} } - if err := b.Set(sizeKey, marshalSize(s.size-1)); err != nil { - return err + if err := b.Set(s.dbKeyLayout.SizeKey(s.prefix), marshalSize(s.size-1)); err != nil { + return store.ErrStore{Err: err} } if err := b.WriteSync(); err != nil { - return err + return store.ErrStore{Err: err} } s.size-- @@ -121,12 +167,12 @@ func (s *dbs) LightBlock(height int64) (*types.LightBlock, error) { var lbpb cmtproto.LightBlock err = lbpb.Unmarshal(bz) if err != nil { - return nil, fmt.Errorf("unmarshal error: %w", err) + return nil, store.ErrUnmarshal{Err: err} } lightBlock, err := types.LightBlockFromProto(&lbpb) if err != nil { - return nil, fmt.Errorf("proto conversion error: %w", err) + return nil, store.ErrProtoConversion{Err: err} } return lightBlock, err @@ -135,7 +181,7 @@ func (s *dbs) LightBlock(height int64) (*types.LightBlock, error) { // LastLightBlockHeight returns the last LightBlock height stored. // // Safe for concurrent use by multiple goroutines. -func (s *dbs) LastLightBlockHeight() (int64, error) { +func (s *dbs) LastLightBlockHeight() (height int64, err error) { itr, err := s.db.ReverseIterator( s.lbKey(1), append(s.lbKey(1<<63-1), byte(0x00)), @@ -147,20 +193,24 @@ func (s *dbs) LastLightBlockHeight() (int64, error) { for itr.Valid() { key := itr.Key() - _, height, ok := parseLbKey(key) - if ok { + height, err = s.dbKeyLayout.ParseLBKey(key, s.prefix) + if err == nil { return height, nil } itr.Next() } - return -1, itr.Error() + if itr.Error() != nil { + err = itr.Error() + } + + return -1, err } // FirstLightBlockHeight returns the first LightBlock height stored. // // Safe for concurrent use by multiple goroutines. 
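// Editor's note: the reverse iteration above, and the forward iteration below,
// rely on the v1 key encoding (v1LegacyLayout, defined later in this diff)
// zero-padding heights to 20 digits, so lexicographic key order matches
// numeric height order, e.g.
//
//	fmt.Sprintf("lb/%s/%020d", "wallet-1", 42) == "lb/wallet-1/00000000000000000042"
//
// The v2 layout obtains the same ordering from orderedcode instead of padding.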
-func (s *dbs) FirstLightBlockHeight() (int64, error) { +func (s *dbs) FirstLightBlockHeight() (height int64, err error) { itr, err := s.db.Iterator( s.lbKey(1), append(s.lbKey(1<<63-1), byte(0x00)), @@ -172,14 +222,16 @@ func (s *dbs) FirstLightBlockHeight() (int64, error) { for itr.Valid() { key := itr.Key() - _, height, ok := parseLbKey(key) - if ok { + height, err = s.dbKeyLayout.ParseLBKey(key, s.prefix) + if err == nil { return height, nil } itr.Next() } - - return -1, itr.Error() + if itr.Error() != nil { + err = itr.Error() + } + return -1, err } // LightBlockBefore iterates over light blocks until it finds a block before @@ -202,14 +254,14 @@ func (s *dbs) LightBlockBefore(height int64) (*types.LightBlock, error) { for itr.Valid() { key := itr.Key() - _, existingHeight, ok := parseLbKey(key) - if ok { + existingHeight, err := s.dbKeyLayout.ParseLBKey(key, s.prefix) + if err == nil { return s.LightBlock(existingHeight) } itr.Next() } if err = itr.Error(); err != nil { - return nil, err + return nil, store.ErrStore{Err: err} } return nil, store.ErrLightBlockNotFound @@ -236,7 +288,7 @@ func (s *dbs) Prune(size uint16) error { append(s.lbKey(1<<63-1), byte(0x00)), ) if err != nil { - return err + return store.ErrStore{Err: err} } defer itr.Close() @@ -246,10 +298,10 @@ func (s *dbs) Prune(size uint16) error { pruned := 0 for itr.Valid() && numToPrune > 0 { key := itr.Key() - _, height, ok := parseLbKey(key) - if ok { + height, err := s.dbKeyLayout.ParseLBKey(key, s.prefix) + if err == nil { if err = b.Delete(s.lbKey(height)); err != nil { - return err + return store.ErrStore{Err: err} } } itr.Next() @@ -257,12 +309,12 @@ func (s *dbs) Prune(size uint16) error { pruned++ } if err = itr.Error(); err != nil { - return err + return store.ErrStore{Err: err} } err = b.WriteSync() if err != nil { - return err + return store.ErrStore{Err: err} } // 3) Update size. @@ -271,8 +323,8 @@ func (s *dbs) Prune(size uint16) error { s.size -= uint16(pruned) - if wErr := s.db.SetSync(sizeKey, marshalSize(s.size)); wErr != nil { - return fmt.Errorf("failed to persist size: %w", wErr) + if wErr := s.db.SetSync(s.dbKeyLayout.SizeKey(s.prefix), marshalSize(s.size)); wErr != nil { + return store.ErrStore{Err: wErr} } return nil @@ -288,33 +340,7 @@ func (s *dbs) Size() uint16 { } func (s *dbs) lbKey(height int64) []byte { - return []byte(fmt.Sprintf("lb/%s/%020d", s.prefix, height)) -} - -var keyPattern = regexp.MustCompile(`^(lb)/([^/]*)/([0-9]+)$`) - -func parseKey(key []byte) (part string, prefix string, height int64, ok bool) { - submatch := keyPattern.FindSubmatch(key) - if submatch == nil { - return "", "", 0, false - } - part = string(submatch[1]) - prefix = string(submatch[2]) - height, err := strconv.ParseInt(string(submatch[3]), 10, 64) - if err != nil { - return "", "", 0, false - } - ok = true // good! 
- return -} - -func parseLbKey(key []byte) (prefix string, height int64, ok bool) { - var part string - part, prefix, height, ok = parseKey(key) - if part != "lb" { - return "", 0, false - } - return + return s.dbKeyLayout.LBKey(height, s.prefix) } func marshalSize(size uint16) []byte { diff --git a/light/store/db/db_key_layout.go b/light/store/db/db_key_layout.go new file mode 100644 index 00000000000..03fdc128bd3 --- /dev/null +++ b/light/store/db/db_key_layout.go @@ -0,0 +1,127 @@ +package db + +import ( + "errors" + "fmt" + "regexp" + "strconv" + + "github.com/google/orderedcode" +) + +type LightStoreKeyLayout interface { + // Implementations of ParseLBKey should create a copy of the key parameter, + // rather than modify it in place. + ParseLBKey(key []byte, storePrefix string) (height int64, err error) + LBKey(height int64, prefix string) []byte + SizeKey(prefix string) []byte +} + +type v1LegacyLayout struct{} + +// LBKey implements LightStoreKeyLayout. +func (v1LegacyLayout) LBKey(height int64, prefix string) []byte { + const ( + fixedPrefix = "lb/" + fixedPrefixLen = len(fixedPrefix) + ) + key := make([]byte, 0, fixedPrefixLen+len(prefix)+1+20) + + key = append(key, fixedPrefix...) + key = append(key, prefix...) + key = append(key, '/') + + var ( + heightStr = strconv.FormatInt(height, 10) + padding = 20 - len(heightStr) + ) + for i := 0; i < padding; i++ { + key = append(key, '0') + } + key = append(key, heightStr...) + + return key +} + +// ParseLBKey implements LightStoreKeyLayout. +func (v1LegacyLayout) ParseLBKey(key []byte, _ string) (height int64, err error) { + var part string + part, _, height, err = parseKey(key) + if part != "lb" { + return 0, err + } + return height, nil +} + +// SizeKey implements LightStoreKeyLayout. +func (v1LegacyLayout) SizeKey(_ string) []byte { + return []byte("size") +} + +var _ LightStoreKeyLayout = v1LegacyLayout{} + +var keyPattern = regexp.MustCompile(`^(lb)/([^/]*)/([0-9]+)$`) + +func parseKey(key []byte) (part string, prefix string, height int64, err error) { + submatch := keyPattern.FindSubmatch(key) + if submatch == nil { + return "", "", 0, errors.New("not a light block key") + } + part = string(submatch[1]) + prefix = string(submatch[2]) + height, err = strconv.ParseInt(string(submatch[3]), 10, 64) + if err != nil { + return "", "", 0, err + } + return part, prefix, height, nil +} + +const ( + // prefixes must be unique across all db's. + prefixLightBlock = int64(11) + prefixSize = int64(12) +) + +type v2Layout struct{} + +// LBKey implements LightStoreKeyLayout. +func (v2Layout) LBKey(height int64, prefix string) []byte { + key, err := orderedcode.Append(nil, prefix, prefixLightBlock, height) + if err != nil { + panic(err) + } + return key +} + +// ParseLBKey implements LightStoreKeyLayout. +func (v2Layout) ParseLBKey(key []byte, storePrefix string) (height int64, err error) { + var ( + dbPrefix string + lightBlockPrefix int64 + ) + remaining, err := orderedcode.Parse(string(key), &dbPrefix, &lightBlockPrefix, &height) + if err != nil { + err = fmt.Errorf("failed to parse light block key: %w", err) + } + if len(remaining) != 0 { + err = fmt.Errorf("expected no remainder when parsing light block key but got: %s", remaining) + } + if lightBlockPrefix != prefixLightBlock { + err = fmt.Errorf("expected light block prefix but got: %d", lightBlockPrefix) + } + if dbPrefix != storePrefix { + err = fmt.Errorf("parsed key has a different prefix. 
Expected: %s, got: %s", storePrefix, dbPrefix) + } + return height, err +} + +// SizeKey implements LightStoreKeyLayout. +func (v2Layout) SizeKey(prefix string) []byte { + key, err := orderedcode.Append(nil, prefix, prefixSize) + if err != nil { + panic(err) + } + return key +} + +var _ LightStoreKeyLayout = v2Layout{} diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index a2caffa81f4..f2a19f4a42e 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -1,23 +1,138 @@ package db import ( + "bytes" + "fmt" "sync" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) +func TestV1LBKey(t *testing.T) { + const prefix = "v1" + + sprintf := func(h int64) []byte { + return []byte(fmt.Sprintf("lb/%s/%020d", prefix, h)) + } + + cases := []struct { + height int64 + wantKey []byte + }{ + {1, sprintf(1)}, + {12, sprintf(12)}, + {123, sprintf(123)}, + {1234, sprintf(1234)}, + {12345, sprintf(12345)}, + {123456, sprintf(123456)}, + {1234567, sprintf(1234567)}, + {12345678, sprintf(12345678)}, + {123456789, sprintf(123456789)}, + {1234567890, sprintf(1234567890)}, + {12345678901, sprintf(12345678901)}, + {123456789012, sprintf(123456789012)}, + {1234567890123, sprintf(1234567890123)}, + {12345678901234, sprintf(12345678901234)}, + {123456789012345, sprintf(123456789012345)}, + {1234567890123456, sprintf(1234567890123456)}, + {12345678901234567, sprintf(12345678901234567)}, + {123456789012345678, sprintf(123456789012345678)}, + {1234567890123456789, sprintf(1234567890123456789)}, + } + + for i, tc := range cases { + gotKey := v1LegacyLayout{}.LBKey(tc.height, prefix) + if !bytes.Equal(gotKey, tc.wantKey) { + t.Errorf("test case %d: want %s, got %s", i, tc.wantKey, gotKey) + } + } +} + +func TestDBKeyLayoutVersioning(t *testing.T) { + prefix := "TestDBKeyLayoutVersioning" + db := dbm.NewMemDB() + dbStore := New(db, prefix) + + // Empty store + height, err := dbStore.LastLightBlockHeight() + require.NoError(t, err) + assert.EqualValues(t, -1, height) + + lb := randLightBlock(int64(1)) + // 1 key + err = dbStore.SaveLightBlock(lb) + require.NoError(t, err) + + lbKey := v1LegacyLayout{}.LBKey(int64(1), prefix) + + lbRetrieved, err := db.Get(lbKey) + require.NoError(t, err) + + var lbpb cmtproto.LightBlock + err = lbpb.Unmarshal(lbRetrieved) + require.NoError(t, err) + + lightBlock, err := types.LightBlockFromProto(&lbpb) + require.NoError(t, err) + + require.Equal(t, lightBlock.AppHash, lb.AppHash) + require.Equal(t, lightBlock.ConsensusHash, lb.ConsensusHash) + + lbKeyV2 := v2Layout{}.LBKey(1, prefix) + + lbv2, err := db.Get(lbKeyV2) + require.NoError(t, err) + require.Equal(t, len(lbv2), 0) + + // test on v2 + + prefix = "TestDBKeyLayoutVersioningV2" + db2 := dbm.NewMemDB() + dbStore2 := NewWithDBVersion(db2, prefix, "v2") + + // Empty store + height, err = dbStore2.LastLightBlockHeight() + require.NoError(t, err) + assert.EqualValues(t, -1, height) + + // 1 key + err = 
dbStore2.SaveLightBlock(lb) + require.NoError(t, err) + + lbKey = v1LegacyLayout{}.LBKey(int64(1), prefix) + // No block is found if we look for a key built with the v1 layout + lbRetrieved, err = db2.Get(lbKey) + require.NoError(t, err) + require.Equal(t, len(lbRetrieved), 0) + + // A key built with the v2 layout should find the light block + lbKeyV2 = v2Layout{}.LBKey(1, prefix) + lbv2, err = db2.Get(lbKeyV2) + require.NoError(t, err) + + // Unmarshal the light block bytes + err = lbpb.Unmarshal(lbv2) + require.NoError(t, err) + + lightBlock, err = types.LightBlockFromProto(&lbpb) + require.NoError(t, err) + + require.Equal(t, lightBlock.AppHash, lb.AppHash) + require.Equal(t, lightBlock.ConsensusHash, lb.ConsensusHash) +} + func TestLast_FirstLightBlockHeight(t *testing.T) { dbStore := New(dbm.NewMemDB(), "TestLast_FirstLightBlockHeight") @@ -43,8 +158,8 @@ func TestLast_FirstLightBlockHeight(t *testing.T) { assert.EqualValues(t, 1, height) } -func Test_SaveLightBlock(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_SaveLightBlockAndValidatorSet") +func Test_SaveLightBlockCustomConfig(t *testing.T) { + dbStore := NewWithDBVersion(dbm.NewMemDB(), "Test_SaveLightBlockAndValidatorSet", "v2") // Empty store h, err := dbStore.LightBlock(1) @@ -177,7 +292,7 @@ func randLightBlock(height int64) *types.LightBlock { Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 0}, ChainID: cmtrand.Str(12), Height: height, - Time: time.Now(), + Time: cmttime.Now(), LastBlockID: types.BlockID{}, LastCommitHash: crypto.CRandBytes(tmhash.Size), DataHash: crypto.CRandBytes(tmhash.Size), diff --git a/light/store/errors.go b/light/store/errors.go index 099b5964d36..d5ec086b971 100644 --- a/light/store/errors.go +++ b/light/store/errors.go @@ -1,9 +1,58 @@ package store -import "errors" - -var ( - // ErrLightBlockNotFound is returned when a store does not have the - // requested header. - ErrLightBlockNotFound = errors.New("light block not found") +import ( + "errors" + "fmt" ) + +// ErrLightBlockNotFound is returned when a store does not have the +// requested header. +var ErrLightBlockNotFound = errors.New("light block not found") + +type ErrMarshalBlock struct { + Err error +} + +func (e ErrMarshalBlock) Error() string { + return fmt.Sprintf("marshaling LightBlock: %v", e.Err) +} + +func (e ErrMarshalBlock) Unwrap() error { + return e.Err +} + +type ErrUnmarshal struct { + Err error +} + +func (e ErrUnmarshal) Error() string { + return fmt.Sprintf("unmarshal error: %v", e.Err) +} + +func (e ErrUnmarshal) Unwrap() error { + return e.Err +} + +type ErrProtoConversion struct { + Err error +} + +func (e ErrProtoConversion) Error() string { + return fmt.Sprintf("proto conversion error: %v", e.Err) +} + +func (e ErrProtoConversion) Unwrap() error { + return e.Err +} + +type ErrStore struct { + Err error +} + +func (e ErrStore) Error() string { + return e.Err.Error() +} + +func (e ErrStore) Unwrap() error { + return e.Err +} diff --git a/light/trust_options.go b/light/trust_options.go index b2fd1d48dc2..85aef328ae5 100644 --- a/light/trust_options.go +++ b/light/trust_options.go @@ -1,8 +1,6 @@ package light import ( - "errors" - "fmt" "time" "github.com/cometbft/cometbft/crypto/tmhash" @@ -38,16 +36,13 @@ type TrustOptions struct { // ValidateBasic performs basic validation.
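Editor's sketch of how a caller might branch on the new typed errors in light/store (hypothetical caller; `s` is a light/store.Store and `height` an int64; requires the standard "errors" package):

lb, err := s.LightBlock(height)
if errors.Is(err, store.ErrLightBlockNotFound) {
	// No block stored at this height; the sentinel still matches via errors.Is.
}
var uErr store.ErrUnmarshal
if errors.As(err, &uErr) {
	// Corrupted entry; the root cause is available as uErr.Unwrap().
}
_ = lb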
func (opts TrustOptions) ValidateBasic() error { if opts.Period <= 0 { - return errors.New("negative or zero period") + return ErrNegativeOrZeroPeriod } if opts.Height <= 0 { - return errors.New("zero or negative height") + return ErrNegativeOrZeroHeight } if len(opts.Hash) != tmhash.Size { - return fmt.Errorf("expected hash size to be %d bytes, got %d bytes", - tmhash.Size, - len(opts.Hash), - ) + return ErrInvalidHashSize{Expected: tmhash.Size, Actual: len(opts.Hash)} } return nil } diff --git a/light/verifier.go b/light/verifier.go index 8905db56b05..ca3048353fb 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -10,11 +10,9 @@ import ( "github.com/cometbft/cometbft/types" ) -var ( - // DefaultTrustLevel - new header can be trusted if at least one correct - // validator signed it. - DefaultTrustLevel = cmtmath.Fraction{Numerator: 1, Denominator: 3} -) +// DefaultTrustLevel - new header can be trusted if at least one correct +// validator signed it. +var DefaultTrustLevel = cmtmath.Fraction{Numerator: 1, Denominator: 3} // VerifyNonAdjacent verifies non-adjacent untrustedHeader against // trustedHeader. It ensures that: @@ -37,10 +35,10 @@ func VerifyNonAdjacent( trustingPeriod time.Duration, now time.Time, maxClockDrift time.Duration, - trustLevel cmtmath.Fraction) error { - + trustLevel cmtmath.Fraction, +) error { if untrustedHeader.Height == trustedHeader.Height+1 { - return errors.New("headers must be non adjacent in height") + return ErrHeaderHeightAdjacent } if HeaderExpired(trustedHeader, trustingPeriod, now) { @@ -54,8 +52,9 @@ func VerifyNonAdjacent( return ErrInvalidHeader{err} } + verifiedSignatureCache := types.NewSignatureCache() // Ensure that +`trustLevel` (default 1/3) or more of last trusted validators signed correctly. - err := trustedVals.VerifyCommitLightTrusting(trustedHeader.ChainID, untrustedHeader.Commit, trustLevel) + err := trustedVals.VerifyCommitLightTrustingWithCache(trustedHeader.ChainID, untrustedHeader.Commit, trustLevel, verifiedSignatureCache) if err != nil { switch e := err.(type) { case types.ErrNotEnoughVotingPowerSigned: @@ -70,8 +69,8 @@ func VerifyNonAdjacent( // NOTE: this should always be the last check because untrustedVals can be // intentionally made very large to DOS the light client. not the case for // VerifyAdjacent, where validator set is known in advance. 
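// Editor's note (sketch; types.SignatureCache is assumed to memoize already
// verified (validator, signature) pairs): both commit checks in this function
// run over the same commit, so the cache created above lets validators that
// appear in both the trusted and the untrusted set be verified only once:
//
//	cache := types.NewSignatureCache()
//	_ = trustedVals.VerifyCommitLightTrustingWithCache(chainID, commit, trustLevel, cache) // +1/3 of trusted set
//	_ = untrustedVals.VerifyCommitLightWithCache(chainID, blockID, height, commit, cache)  // +2/3 of new set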
- if err := untrustedVals.VerifyCommitLight(trustedHeader.ChainID, untrustedHeader.Commit.BlockID, - untrustedHeader.Height, untrustedHeader.Commit); err != nil { + if err := untrustedVals.VerifyCommitLightWithCache(trustedHeader.ChainID, untrustedHeader.Commit.BlockID, + untrustedHeader.Height, untrustedHeader.Commit, verifiedSignatureCache); err != nil { return ErrInvalidHeader{err} } @@ -96,10 +95,10 @@ func VerifyAdjacent( untrustedVals *types.ValidatorSet, // height=X+1 trustingPeriod time.Duration, now time.Time, - maxClockDrift time.Duration) error { - + maxClockDrift time.Duration, +) error { if untrustedHeader.Height != trustedHeader.Height+1 { - return errors.New("headers must be adjacent in height") + return ErrHeaderHeightNotAdjacent } if HeaderExpired(trustedHeader, trustingPeriod, now) { @@ -115,11 +114,7 @@ func VerifyAdjacent( // Check the validator hashes are the same if !bytes.Equal(untrustedHeader.ValidatorsHash, trustedHeader.NextValidatorsHash) { - err := fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", - trustedHeader.NextValidatorsHash, - untrustedHeader.ValidatorsHash, - ) - return err + return ErrValidatorHashMismatch{TrustedHash: trustedHeader.NextValidatorsHash, ValidatorHash: untrustedHeader.ValidatorsHash} } // Ensure that +2/3 of new validators signed correctly. @@ -140,8 +135,8 @@ func Verify( trustingPeriod time.Duration, now time.Time, maxClockDrift time.Duration, - trustLevel cmtmath.Fraction) error { - + trustLevel cmtmath.Fraction, +) error { if untrustedHeader.Height != trustedHeader.Height+1 { return VerifyNonAdjacent(trustedHeader, trustedVals, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift, trustLevel) @@ -155,37 +150,26 @@ func verifyNewHeaderAndVals( untrustedVals *types.ValidatorSet, trustedHeader *types.SignedHeader, now time.Time, - maxClockDrift time.Duration) error { - + maxClockDrift time.Duration, +) error { if err := untrustedHeader.ValidateBasic(trustedHeader.ChainID); err != nil { - return fmt.Errorf("untrustedHeader.ValidateBasic failed: %w", err) + return ErrHeaderValidateBasic{Err: err} } if untrustedHeader.Height <= trustedHeader.Height { - return fmt.Errorf("expected new header height %d to be greater than one of old header %d", - untrustedHeader.Height, - trustedHeader.Height) + return ErrHeaderHeightNotMonotonic{GotHeight: untrustedHeader.Height, OldHeight: trustedHeader.Height} } if !untrustedHeader.Time.After(trustedHeader.Time) { - return fmt.Errorf("expected new header time %v to be after old header time %v", - untrustedHeader.Time, - trustedHeader.Time) + return ErrHeaderTimeNotMonotonic{GotTime: untrustedHeader.Time, OldTime: trustedHeader.Time} } if !untrustedHeader.Time.Before(now.Add(maxClockDrift)) { - return fmt.Errorf("new header has a time from the future %v (now: %v; max clock drift: %v)", - untrustedHeader.Time, - now, - maxClockDrift) + return ErrHeaderTimeExceedMaxClockDrift{Ti: untrustedHeader.Time, Now: now, Drift: maxClockDrift} } if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { - return fmt.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d", - untrustedHeader.ValidatorsHash, - untrustedVals.Hash(), - untrustedHeader.Height, - ) + return ErrValidatorsMismatch{HeaderHash: untrustedHeader.ValidatorsHash, ValidatorsHash: untrustedVals.Hash(), Height: untrustedHeader.Height} } return nil @@ -198,7 +182,7 @@ func ValidateTrustLevel(lvl cmtmath.Fraction) error { if lvl.Numerator*3 < 
lvl.Denominator || // < 1/3 lvl.Numerator > lvl.Denominator || // > 1 lvl.Denominator == 0 { - return fmt.Errorf("trustLevel must be within [1/3, 1], given %v", lvl) + return ErrInvalidTrustLevel{Level: lvl} } return nil } @@ -231,14 +215,16 @@ func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { return ErrInvalidHeader{ fmt.Errorf("expected older header time %v to be before new header time %v", untrustedHeader.Time, - trustedHeader.Time)} + trustedHeader.Time), + } } if !bytes.Equal(untrustedHeader.Hash(), trustedHeader.LastBlockID.Hash) { return ErrInvalidHeader{ fmt.Errorf("older header hash %X does not match trusted header's last block %X", untrustedHeader.Hash(), - trustedHeader.LastBlockID.Hash)} + trustedHeader.LastBlockID.Hash), + } } return nil diff --git a/light/verifier_test.go b/light/verifier_test.go index 5758bc46d94..40714a2b7cc 100644 --- a/light/verifier_test.go +++ b/light/verifier_test.go @@ -6,10 +6,12 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cmtmath "github.com/cometbft/cometbft/libs/math" "github.com/cometbft/cometbft/light" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" ) const ( @@ -46,8 +48,8 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - nil, - "headers must be adjacent in height", + light.ErrHeaderHeightNotAdjacent, + "", }, // different chainID -> error 1: { @@ -56,8 +58,8 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - nil, - "header belongs to another chain", + light.ErrInvalidHeader{light.ErrHeaderValidateBasic{fmt.Errorf("header belongs to another chain %q, not %q", "different-chainID", chainID)}}, + "", }, // new header's time is before old header's time -> error 2: { @@ -66,8 +68,8 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - nil, - "to be after old header time", + light.ErrInvalidHeader{light.ErrHeaderTimeNotMonotonic{bTime.Add(-1 * time.Hour), bTime}}, + "", }, // new header's time is from the future -> error 3: { @@ -76,8 +78,8 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - nil, - "new header has a time from the future", + light.ErrInvalidHeader{light.ErrHeaderTimeExceedMaxClockDrift{bTime.Add(3 * time.Hour), bTime.Add(2 * time.Hour), 10 * time.Second}}, + "", }, // new header's time is from the future, but it's acceptable (< maxClockDrift) -> no error 4: { @@ -127,8 +129,9 @@ func TestVerifyAdjacentHeaders(t *testing.T) { keys.ToValidators(10, 1), 3 * time.Hour, bTime.Add(2 * time.Hour), - nil, - "to match those from new header", + light.ErrValidatorHashMismatch{header.NextValidatorsHash, keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)).ValidatorsHash}, + "", }, // vals are inconsistent with newHeader -> error 9: { @@ -137,8 +140,10 @@ func TestVerifyAdjacentHeaders(t *testing.T) { keys.ToValidators(10, 1), 3 * time.Hour, bTime.Add(2 * time.Hour), - nil, - "to match those that were supplied", + light.ErrInvalidHeader{light.ErrValidatorsMismatch{keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)).ValidatorsHash, keys.ToValidators(10, 1).Hash(), keys.GenSignedHeader(chainID, nextHeight, 
bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)).Height}}, + "", }, // old header has expired -> error 10: { @@ -147,26 +152,24 @@ func TestVerifyAdjacentHeaders(t *testing.T) { keys.ToValidators(10, 1), 1 * time.Hour, bTime.Add(1 * time.Hour), - nil, - "old header has expired", + light.ErrOldHeaderExpired{bTime.Add(1 * time.Hour), bTime.Add(1 * time.Hour)}, + "", }, } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { err := light.VerifyAdjacent(header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift) switch { - case tc.expErr != nil && assert.Error(t, err): + case tc.expErr != nil && assert.Error(t, err): //nolint:testifylint // require.Error doesn't work with the logic here assert.Equal(t, tc.expErr, err) case tc.expErrText != "": assert.Contains(t, err.Error(), tc.expErrText) default: - assert.NoError(t, err) + require.NoError(t, err) } }) } - } func TestVerifyNonAdjacentHeaders(t *testing.T) { @@ -267,19 +270,18 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { err := light.VerifyNonAdjacent(header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift, light.DefaultTrustLevel) switch { - case tc.expErr != nil && assert.Error(t, err): + case tc.expErr != nil && assert.Error(t, err): //nolint:testifylint // require.Error doesn't work with the logic here assert.Equal(t, tc.expErr, err) case tc.expErrText != "": assert.Contains(t, err.Error(), tc.expErrText) default: - assert.NoError(t, err) + require.NoError(t, err) } }) } @@ -298,11 +300,14 @@ func TestVerifyReturnsErrorIfTrustLevelIsInvalid(t *testing.T) { bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) + trustingPeriod = 2 * time.Hour + now = cmttime.Now() ) - err := light.Verify(header, vals, header, vals, 2*time.Hour, time.Now(), maxClockDrift, + err := light.Verify(header, vals, header, vals, trustingPeriod, now, maxClockDrift, cmtmath.Fraction{Numerator: 2, Denominator: 1}) - assert.Error(t, err) + expectedErr := light.ErrOldHeaderExpired{At: bTime.Add(trustingPeriod), Now: now} + require.EqualError(t, err, expectedErr.Error()) } func TestValidateTrustLevel(t *testing.T) { @@ -327,9 +332,9 @@ func TestValidateTrustLevel(t *testing.T) { for _, tc := range testCases { err := light.ValidateTrustLevel(tc.lvl) if !tc.valid { - assert.Error(t, err) + require.EqualError(t, err, light.ErrInvalidTrustLevel{Level: tc.lvl}.Error()) } else { - assert.NoError(t, err) + require.NoError(t, err) } } } diff --git a/mempool/bench_test.go b/mempool/bench_test.go index 190ed49f92f..e63f7bb90f2 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -1,19 +1,15 @@ package mempool import ( - "fmt" - "testing" - + "context" + "strconv" + "sync" "sync/atomic" + "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/abci/example/kvstore" - abciserver "github.com/cometbft/cometbft/abci/server" - "github.com/cometbft/cometbft/internal/test" - "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/proxy" ) @@ -23,18 +19,12 @@ func BenchmarkReap(b *testing.B) { mp, cleanup := newMempoolWithApp(cc) defer cleanup() - 
mp.config.Size = 100_000_000 // so that the nmempool never saturates + mp.config.Size = 100_000_000 // so that the mempool never saturates + addTxs(b, mp, 0, 10000) - size := 10000 - for i := 0; i < size; i++ { - tx := kvstore.NewTxFromID(i) - if _, err := mp.CheckTx(tx); err != nil { - b.Fatal(err) - } - } b.ResetTimer() for i := 0; i < b.N; i++ { - mp.ReapMaxBytesMaxGas(100000000, 10000000) + mp.ReapMaxBytesMaxGas(100_000_000, -1) } } @@ -45,16 +35,16 @@ func BenchmarkCheckTx(b *testing.B) { defer cleanup() mp.config.Size = 100_000_000 - b.ResetTimer() + for i := 0; i < b.N; i++ { b.StopTimer() tx := kvstore.NewTxFromID(i) b.StartTimer() - if _, err := mp.CheckTx(tx); err != nil { - b.Fatal(err) - } + rr, err := mp.CheckTx(tx, "") + require.NoError(b, err, i) + rr.Wait() } } @@ -65,7 +55,6 @@ func BenchmarkParallelCheckTx(b *testing.B) { defer cleanup() mp.config.Size = 100_000_000 - var txcnt uint64 next := func() uint64 { return atomic.AddUint64(&txcnt, 1) @@ -75,9 +64,9 @@ func BenchmarkParallelCheckTx(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { tx := kvstore.NewTxFromID(int(next())) - if _, err := mp.CheckTx(tx); err != nil { - b.Fatal(err) - } + rr, err := mp.CheckTx(tx, "") + require.NoError(b, err, tx) + rr.Wait() } }) } @@ -91,57 +80,148 @@ func BenchmarkCheckDuplicateTx(b *testing.B) { mp.config.Size = 2 tx := kvstore.NewTxFromID(1) - if _, err := mp.CheckTx(tx); err != nil { + if _, err := mp.CheckTx(tx, ""); err != nil { b.Fatal(err) } - e := mp.FlushAppConn() - require.True(b, e == nil) + err := mp.FlushAppConn() + require.NoError(b, err) b.ResetTimer() for i := 0; i < b.N; i++ { - if _, err := mp.CheckTx(tx); err == nil { - b.Fatal("tx should be duplicate") - } + _, err := mp.CheckTx(tx, "") + require.ErrorAs(b, err, &ErrTxInCache, "tx should be duplicate") } - } -func BenchmarkUpdateRemoteClient(b *testing.B) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) +func BenchmarkUpdate(b *testing.B) { app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() - // Start server - server := abciserver.NewSocketServer(sockPath, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - b.Fatalf("Error starting socket server: %v", err.Error()) + numTxs := 1000 + b.ResetTimer() + for i := 1; i <= b.N; i++ { + b.StopTimer() + txs := addTxs(b, mp, i*numTxs, numTxs) + require.Equal(b, numTxs, len(txs)) + require.Equal(b, numTxs, mp.Size()) + b.StartTimer() + + doUpdate(b, mp, int64(i), txs) + require.Zero(b, mp.Size()) } +} - b.Cleanup(func() { - if err := server.Stop(); err != nil { - b.Error(err) - } - }) - cfg := test.ResetTestRoot("mempool_test") - mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg) +func BenchmarkUpdateAndRecheck(b *testing.B) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) defer cleanup() + numTxs := 1000 b.ResetTimer() for i := 1; i <= b.N; i++ { + b.StopTimer() + mp.Flush() + txs := addTxs(b, mp, 0, numTxs) + require.Equal(b, numTxs, len(txs)) + require.Equal(b, numTxs, mp.Size()) + b.StartTimer() + + // Update a part of txs and recheck the rest. 
+ doUpdate(b, mp, int64(i), txs[:numTxs/2]) + } +} +func BenchmarkUpdateRemoteClient(b *testing.B) { + mp, cleanup := newMempoolWithAsyncConnection(b) + defer cleanup() + + b.ResetTimer() + for i := 1; i <= b.N; i++ { + b.StopTimer() tx := kvstore.NewTxFromID(i) + _, err := mp.CheckTx(tx, "") + require.NoError(b, err) + err = mp.FlushAppConn() + require.NoError(b, err) + require.Equal(b, 1, mp.Size()) + b.StartTimer() - _, e := mp.CheckTx(tx) - require.True(b, e == nil) + txs := mp.ReapMaxTxs(mp.Size()) + doUpdate(b, mp, int64(i), txs) + } +} - e = mp.FlushAppConn() - require.True(b, e == nil) +// Benchmarks the time it takes a blocking iterator to access all transactions +// in the mempool. +func BenchmarkBlockingIterator(b *testing.B) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() - require.True(b, mp.Size() == 1) + const numTxs = 1000 + txs := addTxs(b, mp, 0, numTxs) + require.Equal(b, numTxs, len(txs)) + require.Equal(b, numTxs, mp.Size()) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + iter := NewBlockingIterator(context.TODO(), mp, b.Name()) + b.StartTimer() - var txs = mp.ReapMaxTxs(mp.Size()) - doCommit(b, mp, app, txs, int64(i)) - assert.True(b, true) + // Iterate until all txs in the mempool are accessed. + for c := 0; c < numTxs; c++ { + if entry := <-iter.WaitNextCh(); entry == nil { + continue + } + } } +} +// Benchmarks the time it takes multiple concurrent blocking iterators to access +// all transactions in the mempool. +func BenchmarkConcurrentBlockingIterators(b *testing.B) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + const numTxs = 1000 + const numIterators = 10 + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + // Flush mempool and add a bunch of txs. + mp.Flush() + txs := addTxs(b, mp, 0, numTxs) + require.Equal(b, numTxs, len(txs)) + require.Equal(b, numTxs, mp.Size()) + // Create concurrent iterators. + iters := make([]Iterator, numIterators) + for j := 0; j < numIterators; j++ { + iters[j] = NewBlockingIterator(context.TODO(), mp, strconv.Itoa(j)) + } + wg := sync.WaitGroup{} + wg.Add(numIterators) + b.StartTimer() + + for j := 0; j < numIterators; j++ { + go func(iter Iterator) { + defer wg.Done() + // Iterate until all txs in the mempool are accessed. + for c := 0; c < numTxs; c++ { + if entry := <-iter.WaitNextCh(); entry == nil { + continue + } + } + }(iters[j]) + } + + wg.Wait() + } } diff --git a/mempool/cache.go b/mempool/cache.go index 37977e8fc5e..9736361b3dc 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -53,11 +53,12 @@ func (c *LRUTxCache) GetList() *list.List { return c.list } +// Reset resets the cache to an empty state.
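// Editor's note on the body below: since Go 1.21 the clear builtin empties a
// map in place, keeping its already-allocated buckets for reuse; the previous
// code allocated a fresh map and left the old one to the garbage collector.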
func (c *LRUTxCache) Reset() { c.mtx.Lock() defer c.mtx.Unlock() - c.cacheMap = make(map[types.TxKey]*list.Element, c.size) + clear(c.cacheMap) c.list.Init() } diff --git a/mempool/cache_test.go b/mempool/cache_test.go index 573bebc487a..1d5f7c9e2fe 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -3,10 +3,9 @@ package mempool import ( "crypto/rand" "crypto/sha256" + "strconv" "testing" - "fmt" - "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/abci/example/kvstore" @@ -30,14 +29,14 @@ func TestCacheRemove(t *testing.T) { cache.Push(txBytes) // make sure its added to both the linked list and the map - require.Equal(t, i+1, len(cache.cacheMap)) + require.Len(t, cache.cacheMap, i+1) require.Equal(t, i+1, cache.list.Len()) } for i := 0; i < numTxs; i++ { cache.Remove(txs[i]) // make sure its removed from both the map and the linked list - require.Equal(t, numTxs-(i+1), len(cache.cacheMap)) + require.Len(t, cache.cacheMap, numTxs-(i+1)) require.Equal(t, numTxs-(i+1), cache.list.Len()) } } @@ -64,23 +63,23 @@ func TestCacheAfterUpdate(t *testing.T) { } for tcIndex, tc := range tests { for i := 0; i < tc.numTxsToCreate; i++ { - tx := kvstore.NewTx(fmt.Sprintf("%d", i), "value") - reqRes, err := mp.CheckTx(tx) + tx := kvstore.NewTx(strconv.Itoa(i), "value") + reqRes, err := mp.CheckTx(tx, "") require.NoError(t, err) require.False(t, reqRes.Response.GetCheckTx().IsErr()) } updateTxs := []types.Tx{} for _, v := range tc.updateIndices { - tx := kvstore.NewTx(fmt.Sprintf("%d", v), "value") + tx := kvstore.NewTx(strconv.Itoa(v), "value") updateTxs = append(updateTxs, tx) } err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) require.NoError(t, err) for _, v := range tc.reAddIndices { - tx := kvstore.NewTx(fmt.Sprintf("%d", v), "value") - reqRes, err := mp.CheckTx(tx) + tx := kvstore.NewTx(strconv.Itoa(v), "value") + reqRes, err := mp.CheckTx(tx, "") if err == nil { require.False(t, reqRes.Response.GetCheckTx().IsErr()) } @@ -94,7 +93,7 @@ func TestCacheAfterUpdate(t *testing.T) { "cache larger than expected on testcase %d", tcIndex) nodeVal := node.Value.(types.TxKey) - expTx := kvstore.NewTx(fmt.Sprintf("%d", tc.txsInCache[len(tc.txsInCache)-counter-1]), "value") + expTx := kvstore.NewTx(strconv.Itoa(tc.txsInCache[len(tc.txsInCache)-counter-1]), "value") expectedBz := sha256.Sum256(expTx) // Reference for reading the errors: // >>> sha256('\x00').hexdigest() @@ -108,7 +107,7 @@ func TestCacheAfterUpdate(t *testing.T) { counter++ node = node.Next() } - require.Equal(t, len(tc.txsInCache), counter, + require.Len(t, tc.txsInCache, counter, "cache smaller than expected on testcase %d", tcIndex) mp.Flush() } diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index fd08e7dd991..a07df9eac87 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -3,18 +3,27 @@ package mempool import ( "bytes" "context" - "sync" + "fmt" + "slices" "sync/atomic" + "time" abcicli "github.com/cometbft/cometbft/abci/client" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/libs/clist" + "github.com/cometbft/cometbft/internal/clist" "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" cmtsync "github.com/cometbft/cometbft/libs/sync" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/proxy" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" +) + +const ( + 
noSender = nodekey.ID("") + defaultLane = "default" ) // CListMempool is an ordered in-memory pool for transactions before they are @@ -23,17 +32,12 @@ import ( // mempool uses a concurrent list structure for storing transactions that can // be efficiently accessed by multiple concurrent readers. type CListMempool struct { - // Atomic integers - height int64 // the last block Update()'d to - txsBytes int64 // total size of mempool, in bytes + height atomic.Int64 // the last block Update()'d to // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool + notifiedTxsAvailable atomic.Bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - - // Function set by the reactor to be called when a transaction is removed - // from the mempool. - removeTxOnReactorCb func(txKey types.TxKey) + onNewTx func(types.Tx) config *config.MempoolConfig @@ -45,17 +49,25 @@ type CListMempool struct { proxyAppConn proxy.AppConnMempool - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated in - // serial (ie. by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here + // Keeps track of the rechecking process. + recheck *recheck + + // Data in the following variables must be kept in sync and updated atomically. + txsMtx cmtsync.RWMutex + lanes map[LaneID]*clist.CList // each lane is a linked-list of (valid) txs + txsMap map[types.TxKey]*clist.CElement // for quick access to the mempool entry of a given tx + laneBytes map[LaneID]int64 // number of bytes per lane (for metrics) + txsBytes int64 // total size of mempool, in bytes + numTxs int64 // total number of txs in the mempool - // Concurrent linked-list of valid txs. - // `txsMap`: txKey -> CElement is for quick access to txs. - // Transactions in both `txs` and `txsMap` must to be kept in sync. - txs *clist.CList - txsMap sync.Map + addTxChMtx cmtsync.RWMutex // Protects the fields below + addTxCh chan struct{} // Blocks until the next TX is added + addTxSeq int64 // Helps detect if new TXs have been added to a given lane + addTxLaneSeqs map[LaneID]int64 // Sequence of the last TX added to a given lane + + // Immutable fields, only set during initialization. + defaultLane LaneID + sortedLanes []lane // lanes sorted by priority, in descending order // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -70,24 +82,67 @@ var _ Mempool = &CListMempool{} // CListMempoolOption sets an optional parameter on the mempool. type CListMempoolOption func(*CListMempool) +// A lane is defined by its ID and priority. +// A laneID is a string uniquely identifying a lane. +// Multiple lanes can have the same priority. +type LaneID string + +// The priority of a lane. +type LanePriority uint32 + +// Lane corresponds to a transaction class as defined by the application. +// A lane is identified by a string name and has a priority level. +// Different lanes can have the same priority. +type lane struct { + id LaneID + priority LanePriority +} + // NewCListMempool returns a new mempool with the given configuration and // connection to an application.
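The constructor below sorts `sortedLanes` by descending priority with `slices.SortStableFunc`; assuming Go 1.21+'s `cmp` package, the hand-written comparator is equivalent to this one-liner (editor's sketch):

slices.SortStableFunc(mp.sortedLanes, func(i, j lane) int {
	return cmp.Compare(j.priority, i.priority) // arguments swapped for descending order
})

The stable variant keeps lanes of equal priority in their insertion order.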
func NewCListMempool( cfg *config.MempoolConfig, proxyAppConn proxy.AppConnMempool, + lanesInfo *LanesInfo, height int64, options ...CListMempoolOption, ) *CListMempool { mp := &CListMempool{ config: cfg, proxyAppConn: proxyAppConn, - txs: clist.New(), - height: height, - recheckCursor: nil, - recheckEnd: nil, + txsMap: make(map[types.TxKey]*clist.CElement), + laneBytes: make(map[LaneID]int64), logger: log.NewNopLogger(), metrics: NopMetrics(), + addTxCh: make(chan struct{}), + addTxLaneSeqs: make(map[LaneID]int64), } + mp.height.Store(height) + + // Initialize lanes + if lanesInfo == nil || len(lanesInfo.lanes) == 0 { + // The only lane will be "default" with priority 1. + lanesInfo = &LanesInfo{lanes: map[LaneID]LanePriority{defaultLane: 1}, defaultLane: defaultLane} + } + numLanes := len(lanesInfo.lanes) + mp.lanes = make(map[LaneID]*clist.CList, numLanes) + mp.defaultLane = lanesInfo.defaultLane + mp.sortedLanes = make([]lane, 0, numLanes) + for id, priority := range lanesInfo.lanes { + mp.lanes[id] = clist.New() + mp.sortedLanes = append(mp.sortedLanes, lane{id: id, priority: priority}) + } + slices.SortStableFunc(mp.sortedLanes, func(i, j lane) int { + if i.priority > j.priority { + return -1 + } + if i.priority < j.priority { + return 1 + } + return 0 + }) + + mp.recheck = newRecheck(mp) if cfg.CacheSize > 0 { mp.cache = NewLRUTxCache(cfg.CacheSize) @@ -95,8 +150,6 @@ func NewCListMempool( mp.cache = NopTxCache{} } - proxyAppConn.SetResponseCallback(mp.globalCb) - for _, option := range options { option(mp) } @@ -104,18 +157,6 @@ func NewCListMempool( return mp } -func (mem *CListMempool) getCElement(txKey types.TxKey) (*clist.CElement, bool) { - if e, ok := mem.txsMap.Load(txKey); ok { - return e.(*clist.CElement), true - } - return nil, false -} - -func (mem *CListMempool) InMempool(txKey types.TxKey) bool { - _, ok := mem.getCElement(txKey) - return ok -} - func (mem *CListMempool) addToCache(tx types.Tx) bool { return mem.cache.Push(tx) } @@ -133,34 +174,45 @@ func (mem *CListMempool) tryRemoveFromCache(tx types.Tx) { } } -func (mem *CListMempool) removeAllTxs() { - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) +func (mem *CListMempool) removeAllTxs(lane LaneID) { + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + + for e := mem.lanes[lane].Front(); e != nil; e = e.Next() { + mem.lanes[lane].Remove(e) e.DetachPrev() } - - mem.txsMap.Range(func(key, _ interface{}) bool { - mem.txsMap.Delete(key) - mem.invokeRemoveTxOnReactor(key.(types.TxKey)) - return true - }) + mem.txsMap = make(map[types.TxKey]*clist.CElement) + delete(mem.laneBytes, lane) + mem.txsBytes = 0 } -// NOTE: not thread safe - should only be called once, on startup -func (mem *CListMempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan struct{}, 1) -} +// addSender adds a peer ID to the list of senders on the entry corresponding to +// tx, identified by its key. +func (mem *CListMempool) addSender(txKey types.TxKey, sender nodekey.ID) error { + if sender == noSender { + return nil + } -func (mem *CListMempool) SetTxRemovedCallback(cb func(txKey types.TxKey)) { - mem.removeTxOnReactorCb = cb -} + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() -func (mem *CListMempool) invokeRemoveTxOnReactor(txKey types.TxKey) { - // Note that the callback is nil in the unit tests, where there are no - // reactors. 
- if mem.removeTxOnReactorCb != nil { - mem.removeTxOnReactorCb(txKey) + elem, ok := mem.txsMap[txKey] + if !ok { + return ErrTxNotFound } + + memTx := elem.Value.(*mempoolTx) + if found := memTx.addSender(sender); found { + // It should not be possible to receive twice a tx from the same sender. + return ErrTxAlreadyReceivedFromSender + } + return nil +} + +// NOTE: not thread safe - should only be called once, on startup. +func (mem *CListMempool) EnableTxsAvailable() { + mem.txsAvailable = make(chan struct{}, 1) } // SetLogger sets the Logger. @@ -187,6 +239,12 @@ func WithMetrics(metrics *Metrics) CListMempoolOption { return func(mem *CListMempool) { mem.metrics = metrics } } +// WithNewTxCallback sets a callback function to be executed when a new transaction is added to the mempool. +// The callback function will receive the newly added transaction as a parameter. +func WithNewTxCallback(cb func(types.Tx)) CListMempoolOption { + return func(mem *CListMempool) { mem.onNewTx = cb } +} + // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) Lock() { mem.updateMtx.Lock() @@ -197,14 +255,45 @@ func (mem *CListMempool) Unlock() { mem.updateMtx.Unlock() } +// Safe for concurrent use by multiple goroutines. +func (mem *CListMempool) PreUpdate() { + if mem.recheck.setRecheckFull() { + mem.logger.Debug("The state of recheckFull has flipped") + } +} + +// Size returns the total number of transactions in the mempool (that is, all lanes). // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) Size() int { - return mem.txs.Len() + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + return int(mem.numTxs) } // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) SizeBytes() int64 { - return atomic.LoadInt64(&mem.txsBytes) + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + return mem.txsBytes +} + +// LaneSizes returns, the number of transactions in the given lane and the total +// number of bytes used by all transactions in the lane. +// +// Safe for concurrent use by multiple goroutines. +func (mem *CListMempool) LaneSizes(lane LaneID) (numTxs int, bytes int64) { + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + bytes = mem.laneBytes[lane] + + txs, ok := mem.lanes[lane] + if !ok { + panic(ErrLaneNotFound{laneID: lane}) + } + return txs.Len(), bytes } // Lock() must be help by the caller during execution. @@ -219,36 +308,29 @@ func (mem *CListMempool) FlushAppConn() error { // XXX: Unsafe! Calling Flush may leave mempool in inconsistent state. func (mem *CListMempool) Flush() { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() + mem.updateMtx.Lock() + defer mem.updateMtx.Unlock() - _ = atomic.SwapInt64(&mem.txsBytes, 0) + mem.txsBytes = 0 + mem.numTxs = 0 mem.cache.Reset() - mem.removeAllTxs() + for lane := range mem.lanes { + mem.removeAllTxs(lane) + } } -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -// FIXME: leaking implementation details! -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} +func (mem *CListMempool) Contains(txKey types.TxKey) bool { + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -// -// Safe for concurrent use by multiple goroutines. 
-func (mem *CListMempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() + _, ok := mem.txsMap[txKey] + return ok } // It blocks if we're waiting on Update() or Reap(). // Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) CheckTx(tx types.Tx) (*abcicli.ReqRes, error) { +func (mem *CListMempool) CheckTx(tx types.Tx, sender nodekey.ID) (*abcicli.ReqRes, error) { mem.updateMtx.RLock() // use defer to unlock mutex because application (*local client*) might panic defer mem.updateMtx.RUnlock() @@ -256,6 +338,7 @@ func (mem *CListMempool) CheckTx(tx types.Tx) (*abcicli.ReqRes, error) { txSize := len(tx) if err := mem.isFull(txSize); err != nil { + mem.metrics.RejectedTxs.Add(1) return nil, err } @@ -279,85 +362,204 @@ func (mem *CListMempool) CheckTx(tx types.Tx) (*abcicli.ReqRes, error) { if added := mem.addToCache(tx); !added { mem.metrics.AlreadyReceivedTxs.Add(1) + // Record a new sender for a tx we've already seen. + // Note it's possible a tx is still in the cache but no longer in the mempool + // (eg. after committing a block, txs are removed from mempool but not cache), + // so we only record the sender for txs still in the mempool. + if err := mem.addSender(tx.Key(), sender); err != nil { + mem.logger.Error("Could not add sender to tx", "tx", log.NewLazyHash(tx), "sender", sender, "err", err) + } // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, // but they can spam the same tx with little cost to them atm. return nil, ErrTxInCache } - reqRes, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.RequestCheckTx{Tx: tx}) + reqRes, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.CheckTxRequest{ + Tx: tx, + Type: abci.CHECK_TX_TYPE_CHECK, + }) if err != nil { - mem.logger.Error("RequestCheckTx", "err", err) - return nil, ErrCheckTxAsync{Err: err} + panic(fmt.Errorf("CheckTx request for tx %s failed: %w", tx.Hash(), err)) } + reqRes.SetCallback(mem.handleCheckTxResponse(tx, sender)) return reqRes, nil } -// Global callback that will be called after every ABCI response. -func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { - switch res.Value.(type) { - case *abci.Response_CheckTx: - switch req.GetCheckTx().GetType() { - case abci.CheckTxType_New: - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil before resCbFirstTime") +// handleCheckTxResponse handles CheckTx responses for transactions validated for the first time. +// +// - sender optionally holds the ID of the peer that sent the transaction, if any. +func (mem *CListMempool) handleCheckTxResponse(tx types.Tx, sender nodekey.ID) func(res *abci.Response) error { + return func(r *abci.Response) error { + res := r.GetCheckTx() + if res == nil { + panic(fmt.Sprintf("unexpected response value %v not of type CheckTx", r)) + } + + // Check that rechecking txs is not in process. + if !mem.recheck.done() { + panic(fmt.Sprint("rechecking has not finished; cannot check new tx ", tx.Hash())) + } + + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, res) + } + + // If tx is invalid, remove it from the cache. 
+ if res.Code != abci.CodeTypeOK || postCheckErr != nil { + mem.tryRemoveFromCache(tx) + mem.logger.Debug( + "Rejected invalid transaction", + "tx", log.NewLazyHash(tx), + "res", res, + "err", postCheckErr, + ) + mem.metrics.FailedTxs.Add(1) + + if postCheckErr != nil { + return postCheckErr } - mem.resCbFirstTime(req.GetCheckTx().Tx, res) + return ErrInvalidTx + } - case abci.CheckTxType_Recheck: - if mem.recheckCursor == nil { - return + // If the app returned a non-empty lane, use it; otherwise use the default lane. + lane := mem.defaultLane + if res.LaneId != "" { + lane = LaneID(res.LaneId) + if _, ok := mem.lanes[lane]; !ok { + panic(ErrLaneNotFound{laneID: lane}) + } } - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) + } - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) + if err := mem.isLaneFull(len(tx), lane); err != nil { + mem.forceRemoveFromCache(tx) // lane might have space later + // use debug level to avoid spamming logs when traffic is high + mem.logger.Debug(err.Error()) + mem.metrics.RejectedTxs.Add(1) + return err + } + + // Check that tx is not already in the mempool. This can happen when the + // cache overflows. See https://github.com/cometbft/cometbft/pull/890. + txKey := tx.Key() + if mem.Contains(txKey) { + mem.metrics.RejectedTxs.Add(1) + if err := mem.addSender(txKey, sender); err != nil { + mem.logger.Error("Could not add sender to tx", "tx", tx.Hash(), "sender", sender, "err", err) + } + mem.logger.Debug("Reject tx", "tx", log.NewLazyHash(tx), "height", mem.height.Load(), "err", ErrTxInMempool) + return ErrTxInMempool + } + + // Add tx to mempool and notify that new txs are available. + mem.addTx(tx, res.GasWanted, sender, lane) + mem.notifyTxsAvailable() - default: - // ignore other messages + if mem.onNewTx != nil { + mem.onNewTx(tx) + } + + mem.updateSizeMetrics(lane) + + return nil } } // Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *CListMempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(memTx.tx.Key(), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) +// - handleCheckTxResponse (lock not held) if tx is valid +func (mem *CListMempool) addTx(tx types.Tx, gasWanted int64, sender nodekey.ID, lane LaneID) { + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + + // Get lane's clist. + txs, ok := mem.lanes[lane] + if !ok { + panic(ErrLaneNotFound{laneID: lane}) + } + + // Increase sequence number. + mem.addTxChMtx.Lock() + defer mem.addTxChMtx.Unlock() + mem.addTxSeq++ + mem.addTxLaneSeqs[lane] = mem.addTxSeq + + // Add new transaction. + memTx := &mempoolTx{ + tx: tx, + height: mem.height.Load(), + gasWanted: gasWanted, + lane: lane, + seq: mem.addTxSeq, + } + _ = memTx.addSender(sender) + e := txs.PushBack(memTx) + + // Update auxiliary variables. + mem.txsMap[tx.Key()] = e + mem.txsBytes += int64(len(tx)) + mem.numTxs++ + mem.laneBytes[lane] += int64(len(tx)) + + // Notify iterators there's a new transaction. + close(mem.addTxCh) + mem.addTxCh = make(chan struct{}) + + // Update metrics. + mem.metrics.TxSizeBytes.Observe(float64(len(tx))) + + mem.logger.Debug( + "Added transaction", + "tx", log.NewLazyHash(tx), + "lane", lane, + "height", mem.height.Load(), + "total", mem.numTxs, + ) } // RemoveTxByKey removes a transaction from the mempool by its TxKey index.
// RemoveTxByKey removes a transaction from the mempool by its TxKey index. // Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated +// - Update (updateMtx held) if tx was committed +// - handleRecheckTxResponse (updateMtx not held) if tx was invalidated func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { - // The transaction should be removed from the reactor, even if it cannot be - // found in the mempool. - mem.invokeRemoveTxOnReactor(txKey) - if elem, ok := mem.getCElement(txKey); ok { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(txKey) - tx := elem.Value.(*mempoolTx).tx - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - return nil + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + + elem, ok := mem.txsMap[txKey] + if !ok { + return ErrTxNotFound } - return ErrTxNotFound -} -func (mem *CListMempool) isFull(txSize int) error { - var ( - memSize = mem.Size() - txsBytes = mem.SizeBytes() + memTx := elem.Value.(*mempoolTx) + + label := string(memTx.lane) + // Observe the tx's lifespan in milliseconds, matching the histogram buckets. + mem.metrics.TxLifeSpan.With("lane", label).Observe(float64(time.Since(memTx.timestamp).Milliseconds())) + + // Remove tx from lane. + mem.lanes[memTx.lane].Remove(elem) + elem.DetachPrev() + + // Update auxiliary variables. + delete(mem.txsMap, txKey) + mem.txsBytes -= int64(len(memTx.tx)) + mem.numTxs-- + mem.laneBytes[memTx.lane] -= int64(len(memTx.tx)) + + mem.logger.Debug( + "Removed transaction", + "tx", log.NewLazyHash(memTx.tx), + "lane", memTx.lane, + "height", mem.height.Load(), + "total", mem.numTxs, ) + return nil +} - if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { +func (mem *CListMempool) isFull(txSize int) error { + memSize := mem.Size() + txsBytes := mem.SizeBytes() + if memSize >= mem.config.Size || uint64(txSize)+uint64(txsBytes) > uint64(mem.config.MaxTxsBytes) { return ErrMempoolIsFull{ NumTxs: memSize, MaxTxs: mem.config.Size, @@ -366,141 +568,89 @@ func (mem *CListMempool) isFull(txSize int) error { } } + if mem.recheck.consideredFull() { + return ErrRecheckFull + } + return nil } -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. -func (mem *CListMempool) resCbFirstTime( - tx []byte, - res *abci.Response, -) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - txKey := types.Tx(tx).Key() - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Check mempool isn't full again to reduce the chance of exceeding the - // limits. - if err := mem.isFull(len(tx)); err != nil { - mem.forceRemoveFromCache(tx) // mempool might have space later - mem.logger.Error(err.Error()) - return - } + +func (mem *CListMempool) isLaneFull(txSize int, lane LaneID) error { + laneTxs, laneBytes := mem.LaneSizes(lane) - // Check transaction not already in the mempool - if mem.InMempool(txKey) { - mem.logger.Debug( - "transaction already there, not adding it again", - "tx", types.Tx(tx).Hash(), - "res", r, - "height", mem.height, - "total", mem.Size(), - ) - return - } + // The mempool is partitioned evenly across all lanes.
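// For example (illustrative numbers only): with config.Size = 5000,
// config.MaxTxsBytes = 1 GiB and 4 lanes, each lane may hold at most
// 5000/4 = 1250 transactions and 1 GiB/4 = 256 MiB of transaction data.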
+ laneTxsCapacity := mem.config.Size / len(mem.sortedLanes) + laneBytesCapacity := mem.config.MaxTxsBytes / int64(len(mem.sortedLanes)) - mem.addTx(&mempoolTx{ - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - }) - mem.logger.Debug( - "added valid transaction", - "tx", types.Tx(tx).Hash(), - "res", r, - "height", mem.height, - "total", mem.Size(), - ) - mem.notifyTxsAvailable() - } else { - mem.tryRemoveFromCache(tx) - mem.logger.Debug( - "rejected invalid transaction", - "tx", types.Tx(tx).Hash(), - "res", r, - "err", postCheckErr, - ) - mem.metrics.FailedTxs.Add(1) + if laneTxs > laneTxsCapacity || int64(txSize)+laneBytes > laneBytesCapacity { + return ErrLaneIsFull{ + Lane: lane, + NumTxs: laneTxs, + MaxTxs: laneTxsCapacity, + Bytes: laneBytes, + MaxBytes: laneBytesCapacity, } + } - default: - // ignore other messages + if mem.recheck.consideredFull() { + return ErrRecheckFull } -} -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. -func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - - // Search through the remaining list of tx to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, memTx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. - break - } + return nil +} - mem.logger.Error( - "re-CheckTx transaction mismatch", - "got", types.Tx(tx), - "expected", memTx.tx, - ) +// handleRecheckTxResponse handles CheckTx responses for transactions in the mempool that need to be +// revalidated after a mempool update. +func (mem *CListMempool) handleRecheckTxResponse(tx types.Tx) func(res *abci.Response) error { + return func(r *abci.Response) error { + res := r.GetCheckTx() + if res == nil { + panic(fmt.Sprintf("unexpected response value %v not of type CheckTx", r)) + } - if mem.recheckCursor == mem.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - mem.recheckCursor = nil - return - } + // Check whether the rechecking process has finished. + if mem.recheck.done() { + mem.logger.Error("Failed to recheck tx", "tx", log.NewLazyHash(tx), "err", ErrLateRecheckResponse) + return ErrLateRecheckResponse + } + mem.metrics.RecheckTimes.Add(1) - mem.recheckCursor = mem.recheckCursor.Next() - memTx = mem.recheckCursor.Value.(*mempoolTx) + // Check whether tx is still in the list of transactions that can be rechecked. + if !mem.recheck.findNextEntryMatching(&tx) { + // Reached the end of the list and didn't find a matching tx; rechecking has finished. + return nil } var postCheckErr error if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) + postCheckErr = mem.postCheck(tx, res) } - if (r.CheckTx.Code != abci.CodeTypeOK) || postCheckErr != nil { + // If tx is invalid, remove it from the mempool and the cache. + if (res.Code != abci.CodeTypeOK) || postCheckErr != nil { // Tx became invalidated due to newly committed block. 
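// (For instance, in a nonce-based app a tx that was valid when it entered
// the mempool becomes invalid once another tx from the same account with the
// same nonce is committed. Illustrative scenario.)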
- mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) - if err := mem.RemoveTxByKey(memTx.tx.Key()); err != nil { + mem.logger.Debug("Tx is no longer valid", "tx", log.NewLazyHash(tx), "res", res, "postCheckErr", postCheckErr) + if err := mem.RemoveTxByKey(tx.Key()); err != nil { mem.logger.Debug("Transaction could not be removed from mempool", "err", err) + return err } - mem.tryRemoveFromCache(tx) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! - mem.logger.Debug("done rechecking txs") - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() + // update metrics + mem.metrics.EvictedTxs.Add(1) + if elem, ok := mem.txsMap[tx.Key()]; ok { + mem.updateSizeMetrics(elem.Value.(*mempoolTx).lane) + } else { + mem.logger.Error("Cannot update metrics", "err", ErrTxNotFound) } + + mem.tryRemoveFromCache(tx) + if postCheckErr != nil { + return postCheckErr + } + return ErrInvalidTx } - default: - // ignore other messages + + return nil } } @@ -513,9 +663,8 @@ func (mem *CListMempool) notifyTxsAvailable() { if mem.Size() == 0 { panic("notified txs available but mempool is empty!") } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { + if mem.txsAvailable != nil && mem.notifiedTxsAvailable.CompareAndSwap(false, true) { // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true select { case mem.txsAvailable <- struct{}{}: default: @@ -535,14 +684,17 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { // TODO: we will get a performance boost if we have a good estimate of avg // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, cmtmath.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - - txs = append(txs, memTx.tx) + // txs := make([]types.Tx, 0, cmtmath.MinInt(mem.Size(), max/mem.avgTxSize)) + txs := make([]types.Tx, 0, mem.Size()) + iter := NewNonBlockingIterator(mem) + for { + memTx := iter.Next() + if memTx == nil { + break + } + txs = append(txs, memTx.Tx()) - dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) + dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.Tx()}) // Check total size requirement if maxBytes > -1 && runningSize+dataSize > maxBytes { @@ -555,7 +707,7 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { // If maxGas is negative, skip this check. // Since newTotalGas < masGas, which // must be non-negative, it follows that this won't overflow. 
- newTotalGas := totalGas + memTx.gasWanted + newTotalGas := totalGas + memTx.GasWanted() if maxGas > -1 && newTotalGas > maxGas { return txs[:len(txs)-1] } @@ -570,19 +722,34 @@ func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { defer mem.updateMtx.RUnlock() if max < 0 { - max = mem.txs.Len() + max = mem.Size() } - txs := make([]types.Tx, 0, cmtmath.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) + txs := make([]types.Tx, 0, cmtmath.MinInt(mem.Size(), max)) + iter := NewNonBlockingIterator(mem) + for len(txs) <= max { + memTx := iter.Next() + if memTx == nil { + break + } + txs = append(txs, memTx.Tx()) } return txs } +// GetTxByHash returns the types.Tx with the given hash if found in the mempool, otherwise returns nil. +func (mem *CListMempool) GetTxByHash(hash []byte) types.Tx { + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + if elem, ok := mem.txsMap[types.TxKey(hash)]; ok { + return elem.Value.(*mempoolTx).tx + } + return nil +} + // Lock() must be help by the caller during execution. -// TODO: this function always returns nil; remove the return value +// TODO: this function always returns nil; remove the return value. func (mem *CListMempool) Update( height int64, txs types.Txs, @@ -590,9 +757,11 @@ func (mem *CListMempool) Update( preCheck PreCheckFunc, postCheck PostCheckFunc, ) error { + mem.logger.Debug("Update", "height", height, "len(txs)", len(txs)) + // Set height - mem.height = height - mem.notifiedTxsAvailable = false + mem.height.Store(height) + mem.notifiedTxsAvailable.Store(false) if preCheck != nil { mem.preCheck = preCheck @@ -621,55 +790,189 @@ func (mem *CListMempool) Update( // https://github.com/tendermint/tendermint/issues/3322. if err := mem.RemoveTxByKey(tx.Key()); err != nil { mem.logger.Debug("Committed transaction not in local mempool (not an error)", - "key", tx.Key(), + "tx", log.NewLazyHash(tx), "error", err.Error()) } } - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. + // Recheck txs left in the mempool to remove them if they became invalid in the new state. + if mem.config.Recheck { + mem.recheckTxs() + } + + // Notify if there are still txs left in the mempool. if mem.Size() > 0 { - if mem.config.Recheck { - mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height) - mem.recheckTxs() - // At this point, mem.txs are being rechecked. - // mem.recheckCursor re-scans mem.txs and possibly removes some txs. - // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. - } else { - mem.notifyTxsAvailable() - } + mem.notifyTxsAvailable() } // Update metrics - mem.metrics.Size.Set(float64(mem.Size())) - mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) + for lane := range mem.lanes { + mem.updateSizeMetrics(lane) + } return nil } +// updateSizeMetrics updates the size-related metrics of a given lane. +func (mem *CListMempool) updateSizeMetrics(laneID LaneID) { + laneTxs, laneBytes := mem.LaneSizes(laneID) + label := string(laneID) + mem.metrics.LaneSize.With("lane", label).Set(float64(laneTxs)) + mem.metrics.LaneBytes.With("lane", label).Set(float64(laneBytes)) + mem.metrics.Size.Set(float64(mem.Size())) + mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) +} + +// recheckTxs sends all transactions in the mempool to the app for re-validation. When the function +// returns, all recheck responses from the app have been processed. 
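//
// Sketch of the flow implemented below: for every tx in the mempool an
// asynchronous CheckTx request of type CHECK_TX_TYPE_RECHECK is sent to the
// app, and handleRecheckTxResponse removes any tx the app now rejects; the
// function then flushes the app connection and waits at most
// config.RecheckTimeout for the responses before declaring rechecking done.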
func (mem *CListMempool) recheckTxs() { - if mem.Size() == 0 { - panic("recheckTxs is called, but the mempool is empty") + mem.logger.Debug("Recheck txs", "height", mem.height.Load(), "num-txs", mem.Size()) + + if mem.Size() <= 0 { + return } - mem.recheckCursor = mem.txs.Front() - mem.recheckEnd = mem.txs.Back() + defer func(start time.Time) { + mem.metrics.RecheckDurationSeconds.Set(cmttime.Since(start).Seconds()) + }(cmttime.Now()) + + mem.recheck.init() + + iter := NewNonBlockingIterator(mem) + for { + memTx := iter.Next() + if memTx == nil { + break + } + + // NOTE: handleCheckTxResponse may be called concurrently, but CheckTx cannot be executed concurrently + // because this function has the lock (via Update and Lock). + mem.recheck.numPendingTxs.Add(1) - // Push txs to proxyAppConn - // NOTE: globalCb may be called concurrently. - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - _, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.RequestCheckTx{ - Tx: memTx.tx, - Type: abci.CheckTxType_Recheck, + // Send CheckTx request to the app to re-validate transaction. + resReq, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.CheckTxRequest{ + Tx: memTx.Tx(), + Type: abci.CHECK_TX_TYPE_RECHECK, }) if err != nil { - mem.logger.Error("recheckTx", err, "err") - return + panic(fmt.Errorf("(re-)CheckTx request for tx %s failed: %w", memTx.Tx().Hash(), err)) } + resReq.SetCallback(mem.handleRecheckTxResponse(memTx.Tx())) + } + + // Flush any pending asynchronous recheck requests to process. + mem.proxyAppConn.Flush(context.TODO()) + + // Give some time to finish processing the responses; then finish the rechecking process, even + // if not all txs were rechecked. + select { + case <-time.After(mem.config.RecheckTimeout): + mem.recheck.setDone() + mem.logger.Error("Timed out waiting for recheck responses") + case <-mem.recheck.doneRechecking(): + } + + if n := mem.recheck.numPendingTxs.Load(); n > 0 { + mem.logger.Error("Not all txs were rechecked", "not-rechecked", n) } - // In = len(iter.sortedLanes) { + return nil + } + lane = iter.advanceIndexes() + continue + } + // Skip over-consumed lane on current round. + if int(lane.priority) < iter.round { + numEmptyLanes = 0 + lane = iter.advanceIndexes() + continue + } + break + } + elem := iter.cursors[lane.id] + if elem == nil { + panic(fmt.Errorf("Iterator picked a nil entry on lane %s", lane.id)) + } + iter.cursors[lane.id] = iter.cursors[lane.id].Next() + _ = iter.advanceIndexes() + return elem.Value.(*mempoolTx) +} + +// BlockingIterator implements a blocking version of the WRR iterator, +// meaning that when no transaction is available, it will wait until a new one +// is added to the mempool. +// Unlike `NonBlockingIterator`, this iterator is expected to work with an evolving mempool. +type BlockingIterator struct { + IWRRIterator + ctx context.Context + mp *CListMempool + name string // for debugging +} + +func NewBlockingIterator(ctx context.Context, mem *CListMempool, name string) Iterator { + iter := IWRRIterator{ + sortedLanes: mem.sortedLanes, + cursors: make(map[LaneID]*clist.CElement, len(mem.sortedLanes)), + round: 1, + } + return &BlockingIterator{ + IWRRIterator: iter, + ctx: ctx, + mp: mem, + name: name, + } +} + +// WaitNextCh returns a channel to wait for the next available entry. The channel will be explicitly +// closed when the entry gets removed before it is added to the channel, or when reaching the end of +// the list. 
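//
// A typical consumer loop looks like this (hypothetical sketch; iter and
// process are assumed):
//
//	for {
//		entry := <-iter.WaitNextCh()
//		if entry == nil {
//			continue // channel closed without an entry; try again
//		}
//		process(entry.Tx())
//	}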
+// +// Unsafe for concurrent use by multiple goroutines. +func (iter *BlockingIterator) WaitNextCh() <-chan Entry { + ch := make(chan Entry) + go func() { + var lane lane + for { + l, addTxCh := iter.pickLane() + if addTxCh == nil { + lane = l + break + } + // There are no transactions to take from any lane. Wait until at + // least one is added to the mempool and try again. + select { + case <-addTxCh: + case <-iter.ctx.Done(): + close(ch) + return + } + } + if elem := iter.next(lane.id); elem != nil { + ch <- elem.Value.(Entry) + } + // Unblock receiver in case no entry was sent (it will receive nil). + close(ch) + }() + return ch +} + +// pickLane returns a _valid_ lane on which to iterate, according to the WRR +// algorithm. A lane is valid if it is not empty and it is not over-consumed, +// meaning that the number of accessed entries in the lane has not yet reached +// its priority value in the current WRR iteration. It returns a channel to wait +// for new transactions if all lanes are empty or don't have transactions that +// have not yet been accessed. +func (iter *BlockingIterator) pickLane() (lane, chan struct{}) { + iter.mp.addTxChMtx.RLock() + defer iter.mp.addTxChMtx.RUnlock() + + // Start from the last accessed lane. + currLane := iter.sortedLanes[iter.laneIndex] + + // Loop until finding a valid lane. If the current lane is not valid, + // continue with the next lower-priority lane, in a round robin fashion. + numEmptyLanes := 0 + for { + laneID := currLane.id + // Skip empty lanes or lanes with their cursor pointing at their last entry. + if iter.mp.lanes[laneID].Len() == 0 || + (iter.cursors[laneID] != nil && + iter.cursors[laneID].Value.(*mempoolTx).seq == iter.mp.addTxLaneSeqs[laneID]) { + numEmptyLanes++ + if numEmptyLanes >= len(iter.sortedLanes) { + // There are no lanes with non-accessed entries. Wait until a + // new tx is added. + return lane{}, iter.mp.addTxCh + } + currLane = iter.advanceIndexes() + continue + } + + // Skip over-consumed lanes. + if int(currLane.priority) < iter.round { + numEmptyLanes = 0 + currLane = iter.advanceIndexes() + continue + } + + _ = iter.advanceIndexes() + return currLane, nil + } +} + +// In classical WRR, the iterator cycles over the lanes. When a lane is selected, Next returns an +// entry from the selected lane. On subsequent calls, Next will return the next entries from the +// same lane until `lane` entries are accessed or the lane is empty, where `lane` is the priority. +// The next time, Next will select the successive lane with lower priority. +// next returns the next entry from the given lane and updates WRR variables. +func (iter *BlockingIterator) next(laneID LaneID) *clist.CElement { + // Load the last accessed entry in the lane and set the next one. + var next *clist.CElement + + if cursor := iter.cursors[laneID]; cursor != nil { + // If the current entry is the last one or was removed, Next will return nil. + // Note we don't need to wait until the next entry is available (with <-cursor.NextWaitChan()). + next = cursor.Next() + } else { + // We are at the beginning of the iteration or the saved entry got removed. Pick the first + // entry in the lane if it's available (don't wait for it); if not, Front will return nil. + next = iter.mp.lanes[laneID].Front() + } + + // Update auxiliary variables. + if next != nil { + // Save entry. + iter.cursors[laneID] = next + } else { + // The entry got removed or it was the last one in the lane. 
+ // At the moment this should not happen - the loop in PickLane will loop forever until there + // is data in at least one lane + delete(iter.cursors, laneID) + } + + return next +} diff --git a/mempool/iterators_test.go b/mempool/iterators_test.go new file mode 100644 index 00000000000..1ebc847fa43 --- /dev/null +++ b/mempool/iterators_test.go @@ -0,0 +1,511 @@ +package mempool + +import ( + "context" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abciclimocks "github.com/cometbft/cometbft/abci/client/mocks" + "github.com/cometbft/cometbft/abci/example/kvstore" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/internal/test" + "github.com/cometbft/cometbft/proxy" + "github.com/cometbft/cometbft/types" +) + +func TestIteratorNonBlocking(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + // Add all txs with id up to n. + n := 100 + for i := 0; i < n; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, noSender) + require.NoError(t, err) + rr.Wait() + } + require.Equal(t, n, mp.Size()) + + iter := NewNonBlockingIterator(mp) + expectedOrder := []int{ + // round counter 1: + 0, // lane 7 + 1, // lane 3 + 3, // lane 1 + // round counter 2: + 11, // lane 7 + 2, // lane 3 + // round counter 3: + 22, // lane 7 + 4, // lane 3 + // round counter 4 - 7: + 33, 44, 55, 66, // lane 7 + // round counter 1: + 77, // lane 7 + 5, // lane 3 + 6, // lane 1 + // round counter 2: + 88, // lane 7 + 7, // lane 3 + // round counter 3: + 99, // lane 7 + 8, // lane 3 + // round counter 4- 7 have nothing + // round counter 1: + 10, // lane 3 + 9, // lane 1 + // round counter 2: + 13, // lane 3 + // round counter 3: + 14, // lane 3 + } + var next Entry + counter := 0 + + // Check that txs are picked by the iterator in the expected order. + for _, id := range expectedOrder { + next = iter.Next() + require.NotNil(t, next) + require.Equal(t, types.Tx(kvstore.NewTxFromID(id)), next.Tx(), "id=%v", id) + counter++ + } + + // Check that the rest of the entries are also consumed. + for { + if next = iter.Next(); next == nil { + break + } + counter++ + } + require.Equal(t, n, counter) +} + +func TestIteratorNonBlockingOneLane(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + // Add all txs with id up to n to one lane. + n := 100 + for i := 0; i < n; i++ { + if i%11 != 0 { + continue + } + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, noSender) + require.NoError(t, err) + rr.Wait() + } + require.Equal(t, 10, mp.Size()) + + iter := NewNonBlockingIterator(mp) + expectedOrder := []int{0, 11, 22, 33, 44, 55, 66, 77, 88, 99} + + var next Entry + counter := 0 + + // Check that txs are picked by the iterator in the expected order. + for _, id := range expectedOrder { + next = iter.Next() + require.NotNil(t, next) + require.Equal(t, types.Tx(kvstore.NewTxFromID(id)), next.Tx(), "id=%v", id) + counter++ + } + + next = iter.Next() + require.Nil(t, next) +} + +// We have two iterators fetching transactions that +// then get removed. 
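// The two iterators traverse the same lanes independently, so the same entry
// may be delivered to both; the second Update then fails to find the tx
// (logged at debug level) but the counter still advances, which is why the
// final count can exceed numTxs.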
+func TestIteratorRace(t *testing.T) { + mockClient := new(abciclimocks.Client) + mockClient.On("Start").Return(nil) + mockClient.On("SetLogger", mock.Anything) + mockClient.On("Error").Return(nil).Times(100) + + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{LanePriorities: map[string]uint32{"1": 1, "2": 2, "3": 3}, DefaultLane: "1"}, nil) + + mp, cleanup := newMempoolWithAppMock(mockClient) + defer cleanup() + + // Disable rechecking to make sure the recheck logic is not interfering. + mp.config.Recheck = false + + const numLanes = 3 + const numTxs = 100 + + var wg sync.WaitGroup + wg.Add(2) + + var counter atomic.Int64 + go func() { + waitForNumTxsInMempool(numTxs, mp) + + go func() { + defer wg.Done() + + for counter.Load() < int64(numTxs) { + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + entry := <-iter.WaitNextCh() + if entry == nil { + continue + } + tx := entry.Tx() + err := mp.Update(1, []types.Tx{tx}, abciResponses(1, 0), nil, nil) + require.NoError(t, err, tx) + counter.Add(1) + } + }() + + go func() { + defer wg.Done() + + for counter.Load() < int64(numTxs) { + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + entry := <-iter.WaitNextCh() + if entry == nil { + continue + } + tx := entry.Tx() + err := mp.Update(1, []types.Tx{tx}, abciResponses(1, 0), nil, nil) + require.NoError(t, err, tx) + counter.Add(1) + } + }() + }() + + // Submit txs from a separate goroutine so that we don't have to sleep while + // waiting for all txs to get into the mempool: the goroutines above simply + // loop until the expected number of txs has been processed, without + // arbitrary timeouts. + go func() { + for i := 1; i <= int(numTxs); i++ { + tx := kvstore.NewTxFromID(i) + + currLane := (i % numLanes) + 1 + reqRes := newReqResWithLanes(tx, abci.CodeTypeOK, abci.CHECK_TX_TYPE_CHECK, strconv.Itoa(currLane)) + require.NotNil(t, reqRes) + + mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil).Once() + _, err := mp.CheckTx(tx, "") + require.NoError(t, err, err) + reqRes.InvokeCallback() + } + }() + + wg.Wait() + + require.Equal(t, counter.Load(), int64(numTxs+1)) +} + +func TestIteratorEmptyLanes(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + + cfg := test.ResetTestRoot("mempool_empty_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + go func() { + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + require.Zero(t, mp.Size()) + entry := <-iter.WaitNextCh() + require.NotNil(t, entry) + require.EqualValues(t, entry.Tx(), kvstore.NewTxFromID(1)) + }() + time.Sleep(100 * time.Millisecond) + + tx := kvstore.NewTxFromID(1) + res := abci.ToCheckTxResponse(&abci.CheckTxResponse{Code: abci.CodeTypeOK}) + err := mp.handleCheckTxResponse(tx, "")(res) + require.NoError(t, err) + require.Equal(t, 1, mp.Size(), "pool size mismatch") +} + +func TestBlockingIteratorsConsumeAllTxs(t *testing.T) { + const numTxs = 1000 + const numIterators = 50 + + tests := map[string]struct { + app *kvstore.Application + }{ + "lanes": { + app: kvstore.NewInMemoryApplication(), + }, + "no_lanes": { + app: kvstore.NewInMemoryApplicationWithoutLanes(), + }, + } + + for test, config := range tests { + cc := proxy.NewLocalClientCreator(config.app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + wg := &sync.WaitGroup{} + wg.Add(numIterators) + + // Start concurrent iterators.
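// Each goroutine gets its own BlockingIterator; iterators are independent,
// so every one of them is expected to observe all numTxs transactions.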
+ for i := 0; i < numIterators; i++ { + go func(j int) { + defer wg.Done() + + // Iterate until all txs added to the mempool are accessed. + iter := NewBlockingIterator(context.Background(), mp, strconv.Itoa(j)) + counter := 0 + nilCounter := 0 + for counter < numTxs { + entry := <-iter.WaitNextCh() + if entry == nil { + nilCounter++ + continue + } + if test == "no_lanes" { + // Entries are accessed sequentially when there is only one lane. + expectedTx := kvstore.NewTxFromID(counter) + require.EqualValues(t, expectedTx, entry.Tx(), "i=%d, c=%d, tx=%v", i, counter, entry.Tx()) + } + counter++ + } + require.Equal(t, numTxs, counter) + assert.Zero(t, nilCounter, "got nil entries") + t.Logf("%s: iterator %d finished (nils=%d)\n", test, j, nilCounter) + }(i) + } + + // Add transactions with sequential ids. + _ = addTxs(t, mp, 0, numTxs) + require.Equal(t, numTxs, mp.Size()) + + // Wait for all iterators to complete. + waitTimeout(wg, 5*time.Second, func() {}, func() { + t.Fatalf("Timed out waiting for all iterators to finish") + }) + } +} + +// Confirms that the transactions are returned in the same order. +// Note that for the cases with equal priorities the actual order +// will depend on the way we iterate over the map of lanes. +// With only two lanes of the same priority the order was predictable +// and matches the given order. In case these tests start to fail +// first thing to confirm is the order of lanes in mp.SortedLanes. +func TestIteratorExactOrder(t *testing.T) { + tests := map[string]struct { + lanePriorities map[string]uint32 + expectedTxIDs []int + expectedTxIDsAlternate []int + }{ + "unique_priority_lanes": { + lanePriorities: map[string]uint32{"1": 1, "2": 2, "3": 3}, + expectedTxIDs: []int{2, 1, 3, 5, 4, 8, 11, 7, 6, 10, 9}, + }, + "same_priority_lanes": { + lanePriorities: map[string]uint32{"1": 1, "2": 2, "3": 2}, + expectedTxIDs: []int{1, 2, 3, 4, 5, 7, 8, 6, 10, 11, 9}, + expectedTxIDsAlternate: []int{2, 1, 3, 5, 4, 8, 7, 6, 11, 10, 9}, + }, + "one_lane": { + lanePriorities: map[string]uint32{"1": 1}, + expectedTxIDs: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, + }, + } + + for n, l := range tests { + mockClient := new(abciclimocks.Client) + mockClient.On("Start").Return(nil) + mockClient.On("SetLogger", mock.Anything) + mockClient.On("Error").Return(nil).Times(100) + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{LanePriorities: l.lanePriorities, DefaultLane: "1"}, nil) + mp, cleanup := newMempoolWithAppMock(mockClient) + defer cleanup() + + // Disable rechecking to make sure the recheck logic is not interfering. + mp.config.Recheck = false + + numLanes := len(l.lanePriorities) + const numTxs = 11 + + // Transactions are ordered into lanes by their IDs. 
This is the order in + // which they should appear following WRR. + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + waitForNumTxsInMempool(numTxs, mp) + t.Log("Mempool full, starting to pick up transactions", mp.Size()) + alternate := false + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + for i := 0; i < numTxs; i++ { + entry := <-iter.WaitNextCh() + if entry == nil { + continue + } + // When lanes have the same priority, their order in the map of lanes + // is arbitrary, so we need to check which ordering applies. + if n == "same_priority_lanes" { + if mp.sortedLanes[1].id != "3" { + alternate = true + } + } + if alternate { + require.EqualValues(t, entry.Tx(), kvstore.NewTxFromID(l.expectedTxIDsAlternate[i]), n) + } else { + require.EqualValues(t, entry.Tx(), kvstore.NewTxFromID(l.expectedTxIDs[i]), n) + } + } + }() + + // Submit txs from a separate goroutine so that we don't have to sleep while + // waiting for all txs to get into the mempool: the goroutine above simply + // loops until the expected number of txs has been processed, without + // arbitrary timeouts. + go func() { + for i := 1; i <= numTxs; i++ { + tx := kvstore.NewTxFromID(i) + + currLane := (i % numLanes) + 1 + reqRes := newReqResWithLanes(tx, abci.CodeTypeOK, abci.CHECK_TX_TYPE_CHECK, strconv.Itoa(currLane)) + require.NotNil(t, reqRes) + + mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil).Once() + _, err := mp.CheckTx(tx, "") + require.NoError(t, err, err) + reqRes.InvokeCallback() + } + }() + + wg.Wait() + + // Confirm also that the non-blocking iterator works with lanes of the same priority. + iterNonBlocking := NewNonBlockingIterator(mp) + reapedTx := mp.ReapMaxTxs(numTxs) + alternate := false + for i := 0; i < numTxs; i++ { + tx := iterNonBlocking.Next().Tx() + if n == "same_priority_lanes" { + if mp.sortedLanes[1].id != "3" { + alternate = true + } + } + if !alternate { + require.Equal(t, []byte(tx), kvstore.NewTxFromID(l.expectedTxIDs[i]), n) + require.Equal(t, []byte(reapedTx[i]), kvstore.NewTxFromID(l.expectedTxIDs[i]), n) + } else { + require.Equal(t, []byte(tx), kvstore.NewTxFromID(l.expectedTxIDsAlternate[i]), n) + require.Equal(t, []byte(reapedTx[i]), kvstore.NewTxFromID(l.expectedTxIDsAlternate[i]), n) + } + } + } } + +// This only tests that all transactions were submitted. +func TestIteratorCountOnly(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + var wg sync.WaitGroup + wg.Add(1) + + const n = numTxs + + // Spawn a goroutine that iterates on the list until counting n entries. + counter := 0 + go func() { + defer wg.Done() + + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + for counter < n { + entry := <-iter.WaitNextCh() + if entry == nil { + continue + } + counter++ + } + }() + + // Add n transactions with sequential ids.
+ for i := 0; i < n; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, "") + require.NoError(t, err) + rr.Wait() + } + + wg.Wait() + require.Equal(t, n, counter) +} + +func TestReapMatchesGossipOrder(t *testing.T) { + const n = 100 + + tests := map[string]struct { + app *kvstore.Application + }{ + "test_lanes": { + app: kvstore.NewInMemoryApplication(), + }, + "test_no_lanes": { + app: kvstore.NewInMemoryApplicationWithoutLanes(), + }, + } + + for test, config := range tests { + cc := proxy.NewLocalClientCreator(config.app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + // Add a bunch of txs. + for i := 1; i <= n; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, "") + require.NoError(t, err, err) + rr.Wait() + } + + require.Equal(t, n, mp.Size()) + + gossipIter := NewBlockingIterator(context.Background(), mp, t.Name()) + reapIter := NewNonBlockingIterator(mp) + + // Check that both iterators return the same entry as in the reaped txs. + txs := make([]types.Tx, n) + reapedTxs := mp.ReapMaxTxs(n) + for i, reapedTx := range reapedTxs { + entry := <-gossipIter.WaitNextCh() + // entry can be nil only when an entry is removed concurrently. + require.NotNil(t, entry) + gossipTx := entry.Tx() + + reapTx := reapIter.Next().Tx() + txs[i] = reapTx + require.EqualValues(t, reapTx, gossipTx) + require.EqualValues(t, reapTx, reapedTx) + if test == "test_no_lanes" { + require.EqualValues(t, reapTx, kvstore.NewTxFromID(i+1)) + } + } + require.EqualValues(t, txs, reapedTxs) + + err := mp.Update(1, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + require.Zero(t, mp.Size()) + } +} diff --git a/mempool/lanes_info.go b/mempool/lanes_info.go new file mode 100644 index 00000000000..653ba4bacb4 --- /dev/null +++ b/mempool/lanes_info.go @@ -0,0 +1,52 @@ +package mempool + +type LanesInfo struct { + lanes map[LaneID]LanePriority + defaultLane LaneID +} + +// BuildLanesInfo builds the information required to initialize +// lanes given the data queried from the app. +func BuildLanesInfo(laneMap map[string]uint32, defLane string) (*LanesInfo, error) { + info := LanesInfo{} + info.lanes = make(map[LaneID]LanePriority, len(laneMap)) + for l, p := range laneMap { + info.lanes[LaneID(l)] = LanePriority(p) + } + info.defaultLane = LaneID(defLane) + + if err := validate(info); err != nil { + return nil, err + } + + return &info, nil +} + +func validate(info LanesInfo) error { + // If no lanes are provided the default priority is 0 + if len(info.lanes) == 0 && info.defaultLane == "" { + return nil + } + + // Default lane is set but empty lane list + if len(info.lanes) == 0 && info.defaultLane != "" { + return ErrEmptyLanesDefaultLaneSet{ + Info: info, + } + } + + // Lane 0 is reserved for when there are no lanes or for invalid txs; it should not be used for the default lane. 
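// Illustrative outcomes of the checks in this function: lanes {"1":1,"2":2}
// with default lane "1" validates; no lanes with default lane "1" fails with
// ErrEmptyLanesDefaultLaneSet; lanes {"1":1} with an empty default lane fails
// with ErrBadDefaultLaneNonEmptyLaneList; lanes {"1":1} with default lane "9"
// fails with ErrDefaultLaneNotInList.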
+ if info.defaultLane == "" && len(info.lanes) != 0 { + return ErrBadDefaultLaneNonEmptyLaneList{ + Info: info, + } + } + + if _, ok := info.lanes[info.defaultLane]; !ok { + return ErrDefaultLaneNotInList{ + Info: info, + } + } + + return nil +} diff --git a/mempool/mempool.go b/mempool/mempool.go index 4b2f3ecc038..211db2efcb9 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -6,13 +6,14 @@ import ( abcicli "github.com/cometbft/cometbft/abci/client" abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/types" ) const ( MempoolChannel = byte(0x30) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind. PeerCatchupSleepIntervalMS = 100 ) @@ -25,7 +26,7 @@ const ( type Mempool interface { // CheckTx executes a new transaction against the application to determine // its validity and whether it should be added to the mempool. - CheckTx(tx types.Tx) (*abcicli.ReqRes, error) + CheckTx(tx types.Tx, sender nodekey.ID) (*abcicli.ReqRes, error) // RemoveTxByKey removes a transaction, identified by its key, // from the mempool. @@ -44,6 +45,10 @@ type Mempool interface { // (~ all available transactions). ReapMaxTxs(max int) types.Txs + // GetTxByHash returns the types.Tx with the given hash if found in the mempool, + // otherwise returns nil. + GetTxByHash(hash []byte) types.Tx + // Lock locks the mempool. The consensus must be able to hold lock to safely // update. Lock() @@ -51,6 +56,10 @@ type Mempool interface { // Unlock unlocks the mempool. Unlock() + // PreUpdate signals that a new update is coming, before acquiring the mempool lock. + // If the mempool is still rechecking at this point, it should be considered full. + PreUpdate() + // Update informs the mempool that the given txs were committed and can be // discarded. // @@ -75,6 +84,10 @@ type Mempool interface { // Flush removes all transactions from the mempool and caches. Flush() + // Contains returns true iff the transaction, identified by its key, is in + // the mempool. + Contains(txKey types.TxKey) bool + // TxsAvailable returns a channel which fires once for every height, and only // when transactions are available in the mempool. // @@ -86,10 +99,6 @@ type Mempool interface { // trigger once every height when transactions are available. EnableTxsAvailable() - // Set a callback function to be called when a transaction is removed from - // the mempool. - SetTxRemovedCallback(cb func(types.TxKey)) - // Size returns the number of transactions in the mempool. Size() int @@ -105,7 +114,7 @@ type PreCheckFunc func(types.Tx) error // PostCheckFunc is an optional filter executed after CheckTx and rejects // transaction if false is returned. An example would be to ensure a // transaction doesn't require more gas than available for the block. -type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +type PostCheckFunc func(types.Tx, *abci.CheckTxResponse) error // PreCheckMaxBytes checks that the size of the transaction is smaller or equal // to the expected maxBytes. @@ -124,7 +133,7 @@ func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { // PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed // maxGas. Returns nil if maxGas is -1. 
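//
// Hypothetical wiring (constructor arguments abbreviated; WithPostCheck is
// assumed from the mempool's functional options):
//
//	mp := NewCListMempool(cfg, appConn, lanesInfo, height,
//		WithPostCheck(PostCheckMaxGas(maxGas)))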
func PostCheckMaxGas(maxGas int64) PostCheckFunc { - return func(tx types.Tx, res *abci.ResponseCheckTx) error { + return func(_ types.Tx, res *abci.CheckTxResponse) error { if maxGas == -1 { return nil } @@ -143,3 +152,25 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { // TxKey is the fixed length array key used as an index. type TxKey [sha256.Size]byte + +// An entry in the mempool. +type Entry interface { + // Tx returns the transaction stored in the entry. + Tx() types.Tx + + // Height returns the height of the latest block at the moment the entry was created. + Height() int64 + + // GasWanted returns the amount of gas required by the transaction. + GasWanted() int64 + + // IsSender returns whether we received the transaction from the given peer ID. + IsSender(peerID nodekey.ID) bool +} + +// An iterator is used to iterate through the mempool entries. +// Multiple iterators should be allowed to run concurrently. +type Iterator interface { + // WaitNextCh returns a channel on which to wait for the next available entry. + WaitNextCh() <-chan Entry +} diff --git a/mempool/mempoolTx.go b/mempool/mempoolTx.go index eb5229fd68a..561bd8e7d9d 100644 --- a/mempool/mempoolTx.go +++ b/mempool/mempoolTx.go @@ -1,19 +1,52 @@ package mempool import ( + "sync" "sync/atomic" + "time" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/types" ) -// mempoolTx is an entry in the mempool +// mempoolTx is an entry in the mempool. type mempoolTx struct { height int64 // height that this tx had been validated in gasWanted int64 // amount of gas this tx states it will require tx types.Tx // validated by the application + lane LaneID + seq int64 + timestamp time.Time // time when entry was created + + // ids of peers who've sent us this tx (as a map for quick lookups). + // senders: PeerID -> struct{} + senders sync.Map +} + +func (memTx *mempoolTx) Tx() types.Tx { + return memTx.tx } -// Height returns the height for this transaction func (memTx *mempoolTx) Height() int64 { return atomic.LoadInt64(&memTx.height) } + +func (memTx *mempoolTx) GasWanted() int64 { + return memTx.gasWanted +} + +func (memTx *mempoolTx) IsSender(peerID nodekey.ID) bool { + _, ok := memTx.senders.Load(peerID) + return ok +} + +// Add the peer ID to the list of senders. Return true iff it exists already in the list. +func (memTx *mempoolTx) addSender(peerID nodekey.ID) bool { + if len(peerID) == 0 { + return false + } + if _, loaded := memTx.senders.LoadOrStore(peerID, struct{}{}); loaded { + return true + } + return false +} diff --git a/mempool/metrics.gen.go b/mempool/metrics.gen.go index deacb5edceb..fb7dfc46257 100644 --- a/mempool/metrics.gen.go +++ b/mempool/metrics.gen.go @@ -3,8 +3,8 @@ package mempool import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -18,14 +18,34 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Namespace: namespace, Subsystem: MetricsSubsystem, Name: "size", - Help: "Number of uncommitted transactions in the mempool.", + Help: "Number of uncommitted transactions in the mempool. 
Deprecated: this value can be obtained as the sum of LaneSize.", }, labels).With(labelsAndValues...), SizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "size_bytes", - Help: "Total size of the mempool in bytes.", + Help: "Total size of the mempool in bytes. Deprecated: this value can be obtained as the sum of LaneBytes.", }, labels).With(labelsAndValues...), + LaneSize: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "lane_size", + Help: "Number of uncommitted transactions per lane.", + }, append(labels, "lane")).With(labelsAndValues...), + LaneBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "lane_bytes", + Help: "Number of used bytes per lane.", + }, append(labels, "lane")).With(labelsAndValues...), + TxLifeSpan: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_life_span", + Help: "Duration in ms of a transaction in the mempool.", + + Buckets: []float64{50, 100, 200, 500, 1000}, + }, append(labels, "lane")).With(labelsAndValues...), TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -46,6 +66,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "rejected_txs", Help: "Number of rejected transactions.", }, labels).With(labelsAndValues...), + EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "evicted_txs", + Help: "Number of evicted transactions.", + }, labels).With(labelsAndValues...), RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -58,17 +84,35 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "already_received_txs", Help: "Number of duplicate transaction reception.", }, labels).With(labelsAndValues...), + ActiveOutboundConnections: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "active_outbound_connections", + Help: "Number of connections being actively used for gossiping transactions (experimental feature).", + }, labels).With(labelsAndValues...), + RecheckDurationSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "recheck_duration_seconds", + Help: "Cumulative time spent rechecking transactions", + }, labels).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - Size: discard.NewGauge(), - SizeBytes: discard.NewGauge(), - TxSizeBytes: discard.NewHistogram(), - FailedTxs: discard.NewCounter(), - RejectedTxs: discard.NewCounter(), - RecheckTimes: discard.NewCounter(), - AlreadyReceivedTxs: discard.NewCounter(), + Size: discard.NewGauge(), + SizeBytes: discard.NewGauge(), + LaneSize: discard.NewGauge(), + LaneBytes: discard.NewGauge(), + TxLifeSpan: discard.NewHistogram(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), + AlreadyReceivedTxs: discard.NewCounter(), + ActiveOutboundConnections: discard.NewGauge(), + RecheckDurationSeconds: discard.NewGauge(), } } diff --git a/mempool/metrics.go b/mempool/metrics.go index 
53ee7912375..ee0c8f7a60d 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -1,7 +1,7 @@ package mempool import ( - "github.com/go-kit/kit/metrics" + "github.com/cometbft/cometbft/libs/metrics" ) const ( @@ -16,28 +16,58 @@ const ( // see MetricsProvider for descriptions. type Metrics struct { // Number of uncommitted transactions in the mempool. + // + // Deprecated: this value can be obtained as the sum of LaneSize. Size metrics.Gauge // Total size of the mempool in bytes. + // + // Deprecated: this value can be obtained as the sum of LaneBytes. SizeBytes metrics.Gauge + // Number of uncommitted transactions per lane. + LaneSize metrics.Gauge `metrics_labels:"lane"` + + // Number of used bytes per lane. + LaneBytes metrics.Gauge `metrics_labels:"lane"` + + // TxLifeSpan measures the time each transaction has in the mempool, since + // the time it enters until it is removed. + // metrics:Duration in ms of a transaction in the mempool. + TxLifeSpan metrics.Histogram `metrics_bucketsizes:"50,100,200,500,1000" metrics_labels:"lane"` + // Histogram of transaction sizes in bytes. - TxSizeBytes metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"1,3,7"` + TxSizeBytes metrics.Histogram `metrics_bucketsizes:"1,3,7" metrics_buckettype:"exp"` - // Number of failed transactions. + // FailedTxs defines the number of failed transactions. These are + // transactions that failed to make it into the mempool because they were + // deemed invalid. + // metrics:Number of failed transactions. FailedTxs metrics.Counter // RejectedTxs defines the number of rejected transactions. These are - // transactions that passed CheckTx but failed to make it into the mempool - // due to resource limits, e.g. mempool is full and no lower priority - // transactions exist in the mempool. - //metrics:Number of rejected transactions. + // transactions that failed to make it into the mempool due to resource + // limits, e.g. mempool is full. + // metrics:Number of rejected transactions. RejectedTxs metrics.Counter + // EvictedTxs defines the number of evicted transactions. These are valid + // transactions that passed CheckTx and make it into the mempool but later + // became invalid. + // metrics:Number of evicted transactions. + EvictedTxs metrics.Counter + // Number of times transactions are rechecked in the mempool. RecheckTimes metrics.Counter // Number of times transactions were received more than once. - //metrics:Number of duplicate transaction reception. + // metrics:Number of duplicate transaction reception. AlreadyReceivedTxs metrics.Counter + + // Number of connections being actively used for gossiping transactions + // (experimental feature). 
+ ActiveOutboundConnections metrics.Gauge + + // Cumulative time spent rechecking transactions + RecheckDurationSeconds metrics.Gauge } diff --git a/mempool/mocks/mempool.go b/mempool/mocks/mempool.go index 3a01f10b910..12f5a8e6853 100644 --- a/mempool/mocks/mempool.go +++ b/mempool/mocks/mempool.go @@ -4,13 +4,15 @@ package mocks import ( abcicli "github.com/cometbft/cometbft/abci/client" - abcitypes "github.com/cometbft/cometbft/abci/types" - mempool "github.com/cometbft/cometbft/mempool" mock "github.com/stretchr/testify/mock" + nodekey "github.com/cometbft/cometbft/p2p/nodekey" + types "github.com/cometbft/cometbft/types" + + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // Mempool is an autogenerated mock type for the Mempool type @@ -18,25 +20,29 @@ type Mempool struct { mock.Mock } -// CheckTx provides a mock function with given fields: tx -func (_m *Mempool) CheckTx(tx types.Tx) (*abcicli.ReqRes, error) { - ret := _m.Called(tx) +// CheckTx provides a mock function with given fields: tx, sender +func (_m *Mempool) CheckTx(tx types.Tx, sender nodekey.ID) (*abcicli.ReqRes, error) { + ret := _m.Called(tx, sender) + + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } var r0 *abcicli.ReqRes var r1 error - if rf, ok := ret.Get(0).(func(types.Tx) (*abcicli.ReqRes, error)); ok { - return rf(tx) + if rf, ok := ret.Get(0).(func(types.Tx, nodekey.ID) (*abcicli.ReqRes, error)); ok { + return rf(tx, sender) } - if rf, ok := ret.Get(0).(func(types.Tx) *abcicli.ReqRes); ok { - r0 = rf(tx) + if rf, ok := ret.Get(0).(func(types.Tx, nodekey.ID) *abcicli.ReqRes); ok { + r0 = rf(tx, sender) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - if rf, ok := ret.Get(1).(func(types.Tx) error); ok { - r1 = rf(tx) + if rf, ok := ret.Get(1).(func(types.Tx, nodekey.ID) error); ok { + r1 = rf(tx, sender) } else { r1 = ret.Error(1) } @@ -44,6 +50,24 @@ func (_m *Mempool) CheckTx(tx types.Tx) (*abcicli.ReqRes, error) { return r0, r1 } +// Contains provides a mock function with given fields: txKey +func (_m *Mempool) Contains(txKey types.TxKey) bool { + ret := _m.Called(txKey) + + if len(ret) == 0 { + panic("no return value specified for Contains") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(types.TxKey) bool); ok { + r0 = rf(txKey) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // EnableTxsAvailable provides a mock function with given fields: func (_m *Mempool) EnableTxsAvailable() { _m.Called() @@ -58,6 +82,10 @@ func (_m *Mempool) Flush() { func (_m *Mempool) FlushAppConn() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for FlushAppConn") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -68,15 +96,44 @@ func (_m *Mempool) FlushAppConn() error { return r0 } +// GetTxByHash provides a mock function with given fields: hash +func (_m *Mempool) GetTxByHash(hash []byte) types.Tx { + ret := _m.Called(hash) + + if len(ret) == 0 { + panic("no return value specified for GetTxByHash") + } + + var r0 types.Tx + if rf, ok := ret.Get(0).(func([]byte) types.Tx); ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Tx) + } + } + + return r0 +} + // Lock provides a mock function with given fields: func (_m *Mempool) Lock() { _m.Called() } +// PreUpdate provides a mock function with given fields: +func (_m *Mempool) PreUpdate() { + _m.Called() +} + // ReapMaxBytesMaxGas provides a mock function with given fields: maxBytes, maxGas func (_m *Mempool) 
ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { ret := _m.Called(maxBytes, maxGas) + if len(ret) == 0 { + panic("no return value specified for ReapMaxBytesMaxGas") + } + var r0 types.Txs if rf, ok := ret.Get(0).(func(int64, int64) types.Txs); ok { r0 = rf(maxBytes, maxGas) @@ -93,6 +150,10 @@ func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { func (_m *Mempool) ReapMaxTxs(max int) types.Txs { ret := _m.Called(max) + if len(ret) == 0 { + panic("no return value specified for ReapMaxTxs") + } + var r0 types.Txs if rf, ok := ret.Get(0).(func(int) types.Txs); ok { r0 = rf(max) @@ -109,6 +170,10 @@ func (_m *Mempool) ReapMaxTxs(max int) types.Txs { func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { ret := _m.Called(txKey) + if len(ret) == 0 { + panic("no return value specified for RemoveTxByKey") + } + var r0 error if rf, ok := ret.Get(0).(func(types.TxKey) error); ok { r0 = rf(txKey) @@ -119,15 +184,14 @@ func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { return r0 } -// SetTxRemovedCallback provides a mock function with given fields: cb -func (_m *Mempool) SetTxRemovedCallback(cb func(types.TxKey)) { - _m.Called(cb) -} - // Size provides a mock function with given fields: func (_m *Mempool) Size() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -142,6 +206,10 @@ func (_m *Mempool) Size() int { func (_m *Mempool) SizeBytes() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SizeBytes") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -156,6 +224,10 @@ func (_m *Mempool) SizeBytes() int64 { func (_m *Mempool) TxsAvailable() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxsAvailable") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -174,11 +246,15 @@ func (_m *Mempool) Unlock() { } // Update provides a mock function with given fields: blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn -func (_m *Mempool) Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { +func (_m *Mempool) Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*v1.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { ret := _m.Called(blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 error - if rf, ok := ret.Get(0).(func(int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { + if rf, ok := ret.Get(0).(func(int64, types.Txs, []*v1.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { r0 = rf(blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn) } else { r0 = ret.Error(0) diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go new file mode 100644 index 00000000000..2d17a23ce58 --- /dev/null +++ b/mempool/nop_mempool.go @@ -0,0 +1,122 @@ +package mempool + +import ( + "errors" + + abcicli "github.com/cometbft/cometbft/abci/client" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/types" +) + +// 
NopMempool is a mempool that does nothing. +// +// The ABCI app is responsible for storing, disseminating, and proposing transactions. +// See [ADR-111](../docs/architecture/adr-111-nop-mempool.md). +type NopMempool struct{} + +// errNotAllowed indicates that the operation is not allowed with `nop` mempool. +var errNotAllowed = errors.New("not allowed with `nop` mempool") + +var _ Mempool = &NopMempool{} + +// CheckTx always returns an error. +func (*NopMempool) CheckTx(types.Tx, nodekey.ID) (*abcicli.ReqRes, error) { + return nil, errNotAllowed +} + +// RemoveTxByKey always returns an error. +func (*NopMempool) RemoveTxByKey(types.TxKey) error { return errNotAllowed } + +// ReapMaxBytesMaxGas always returns nil. +func (*NopMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return nil } + +// ReapMaxTxs always returns nil. +func (*NopMempool) ReapMaxTxs(int) types.Txs { return nil } + +// GetTxByHash always returns nil. +func (*NopMempool) GetTxByHash([]byte) types.Tx { return nil } + +// Lock does nothing. +func (*NopMempool) Lock() {} + +// Unlock does nothing. +func (*NopMempool) Unlock() {} + +func (*NopMempool) PreUpdate() {} + +// Update does nothing. +func (*NopMempool) Update( + int64, + types.Txs, + []*abci.ExecTxResult, + PreCheckFunc, + PostCheckFunc, +) error { + return nil +} + +// FlushAppConn does nothing. +func (*NopMempool) FlushAppConn() error { return nil } + +// Flush does nothing. +func (*NopMempool) Flush() {} + +// Contains always returns false. +func (*NopMempool) Contains(types.TxKey) bool { return false } + +// TxsAvailable always returns nil. +func (*NopMempool) TxsAvailable() <-chan struct{} { + return nil +} + +// EnableTxsAvailable does nothing. +func (*NopMempool) EnableTxsAvailable() {} + +// Size always returns 0. +func (*NopMempool) Size() int { return 0 } + +// SizeBytes always returns 0. +func (*NopMempool) SizeBytes() int64 { return 0 } + +// NopMempoolReactor is a mempool reactor that does nothing. +type NopMempoolReactor struct { + service.BaseService +} + +// NewNopMempoolReactor returns a new `nop` reactor. +// +// To be used only in RPC. +func NewNopMempoolReactor() *NopMempoolReactor { + return &NopMempoolReactor{*service.NewBaseService(nil, "NopMempoolReactor", nil)} +} + +var _ p2p.Reactor = &NopMempoolReactor{} + +// WaitSync always returns false. +func (*NopMempoolReactor) WaitSync() bool { return false } + +// StreamDescriptors always returns nil. +func (*NopMempoolReactor) StreamDescriptors() []p2p.StreamDescriptor { return nil } + +// AddPeer does nothing. +func (*NopMempoolReactor) AddPeer(p2p.Peer) {} + +// InitPeer always returns nil. +func (*NopMempoolReactor) InitPeer(p2p.Peer) p2p.Peer { return nil } + +// RemovePeer does nothing. +func (*NopMempoolReactor) RemovePeer(p2p.Peer, any) {} + +// Receive does nothing. +func (*NopMempoolReactor) Receive(p2p.Envelope) {} + +// TryAddTx does nothing. +func (*NopMempoolReactor) TryAddTx(_ types.Tx, _ p2p.Peer) (*abcicli.ReqRes, error) { + return nil, nil +} + +// SetSwitch does nothing. 
+func (*NopMempoolReactor) SetSwitch(*p2p.Switch) {} diff --git a/mempool/nop_mempool_test.go b/mempool/nop_mempool_test.go new file mode 100644 index 00000000000..17a62321832 --- /dev/null +++ b/mempool/nop_mempool_test.go @@ -0,0 +1,40 @@ +package mempool + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/types" +) + +var tx = types.Tx([]byte{0x01}) + +func TestNopMempool_Basic(t *testing.T) { + mem := &NopMempool{} + + assert.Equal(t, 0, mem.Size()) + assert.Equal(t, int64(0), mem.SizeBytes()) + + _, err := mem.CheckTx(tx, "") + assert.Equal(t, errNotAllowed, err) + + err = mem.RemoveTxByKey(tx.Key()) + assert.Equal(t, errNotAllowed, err) + + txs := mem.ReapMaxBytesMaxGas(0, 0) + assert.Nil(t, txs) + + txs = mem.ReapMaxTxs(0) + assert.Nil(t, txs) + + err = mem.FlushAppConn() + require.NoError(t, err) + + err = mem.Update(0, nil, nil, nil, nil) + require.NoError(t, err) + + txsAvailable := mem.TxsAvailable() + assert.Nil(t, txsAvailable) +} diff --git a/mempool/reactor.go b/mempool/reactor.go index cd5d0784ac9..234b1f347ee 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -1,18 +1,20 @@ package mempool import ( + "context" "errors" "fmt" "sync/atomic" "time" - abci "github.com/cometbft/cometbft/abci/types" + "golang.org/x/sync/semaphore" + + abcicli "github.com/cometbft/cometbft/abci/client" + protomem "github.com/cometbft/cometbft/api/cometbft/mempool/v1" cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/libs/clist" "github.com/cometbft/cometbft/libs/log" - cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" - protomem "github.com/cometbft/cometbft/proto/tendermint/mempool" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" "github.com/cometbft/cometbft/types" ) @@ -27,28 +29,28 @@ type Reactor struct { waitSync atomic.Bool waitSyncCh chan struct{} // for signaling when to start receiving and sending txs - // `txSenders` maps every received transaction to the set of peer IDs that - // have sent the transaction to this node. Sender IDs are used during - // transaction propagation to avoid sending a transaction to a peer that - // already has it. - txSenders map[types.TxKey]map[p2p.ID]bool - txSendersMtx cmtsync.Mutex + // Semaphores to keep track of how many connections to peers are active for broadcasting + // transactions. Each semaphore has a capacity that puts an upper bound on the number of + // connections for different groups of peers. + activePersistentPeersSemaphore *semaphore.Weighted + activeNonPersistentPeersSemaphore *semaphore.Weighted } // NewReactor returns a new Reactor with the given config and mempool. 
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool, waitSync bool) *Reactor { memR := &Reactor{ - config: config, - mempool: mempool, - waitSync: atomic.Bool{}, - txSenders: make(map[types.TxKey]map[p2p.ID]bool), + config: config, + mempool: mempool, + waitSync: atomic.Bool{}, } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) if waitSync { memR.waitSync.Store(true) memR.waitSyncCh = make(chan struct{}) } - memR.mempool.SetTxRemovedCallback(func(txKey types.TxKey) { memR.removeSenders(txKey) }) + memR.activePersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers)) + memR.activeNonPersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToNonPersistentPeers)) + return memR } @@ -69,9 +71,9 @@ func (memR *Reactor) OnStart() error { return nil } -// GetChannels implements Reactor by returning the list of channels for this +// StreamDescriptors implements Reactor by returning the list of channels for this // reactor. -func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { +func (memR *Reactor) StreamDescriptors() []p2p.StreamDescriptor { largestTx := make([]byte, memR.config.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -79,12 +81,12 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { }, } - return []*p2p.ChannelDescriptor{ - { + return []p2p.StreamDescriptor{ + &tcpconn.ChannelDescriptor{ ID: MempoolChannel, Priority: 5, RecvMessageCapacity: batchMsg.Size(), - MessageType: &protomem.Message{}, + MessageTypeI: &protomem.Message{}, }, } } @@ -92,8 +94,43 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor. // It starts a broadcast routine ensuring all txs are forwarded to the given peer. func (memR *Reactor) AddPeer(peer p2p.Peer) { - if memR.config.Broadcast { - go memR.broadcastTxRoutine(peer) + if memR.config.Broadcast && peer.HasChannel(MempoolChannel) { + go func() { + // Always forward transactions to unconditional peers. + if !memR.Switch.IsPeerUnconditional(peer.ID()) { + // Depending on the type of peer, we choose a semaphore to limit the gossiping peers. + var peerSemaphore *semaphore.Weighted + if peer.IsPersistent() && memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers > 0 { + peerSemaphore = memR.activePersistentPeersSemaphore + } else if !peer.IsPersistent() && memR.config.ExperimentalMaxGossipConnectionsToNonPersistentPeers > 0 { + peerSemaphore = memR.activeNonPersistentPeersSemaphore + } + + if peerSemaphore != nil { + for peer.IsRunning() { + // Block on the semaphore until a slot is available to start gossiping with this peer. + // Do not block indefinitely, in case the peer is disconnected before gossiping starts. + ctxTimeout, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + // Block sending transactions to peer until one of the connections becomes + // available in the semaphore. + err := peerSemaphore.Acquire(ctxTimeout, 1) + cancel() + + if err != nil { + continue + } + + // Release semaphore to allow other peers to start sending transactions.
+ defer peerSemaphore.Release(1) + break + } + } + } + + memR.mempool.metrics.ActiveOutboundConnections.Add(1) + defer memR.mempool.metrics.ActiveOutboundConnections.Add(-1) + memR.broadcastTxRoutine(peer) + }() } } @@ -110,33 +147,16 @@ func (memR *Reactor) Receive(e p2p.Envelope) { protoTxs := msg.GetTxs() if len(protoTxs) == 0 { - memR.Logger.Error("received empty txs from peer", "src", e.Src) + memR.Logger.Error("Received empty Txs message from peer", "src", e.Src) return } for _, txBytes := range protoTxs { - tx := types.Tx(txBytes) - reqRes, err := memR.mempool.CheckTx(tx) - if errors.Is(err, ErrTxInCache) { - memR.Logger.Debug("Tx already exists in cache", "tx", tx.String()) - } else if err != nil { - memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err) - } else { - // Record the sender only when the transaction is valid and, as - // a consequence, added to the mempool. Senders are stored until - // the transaction is removed from the mempool. Note that it's - // possible a tx is still in the cache but no longer in the - // mempool. For example, after committing a block, txs are - // removed from mempool but not the cache. - reqRes.SetCallback(func(res *abci.Response) { - if res.GetCheckTx().Code == abci.CodeTypeOK { - memR.addSender(tx.Key(), e.Src.ID()) - } - }) - } + _, _ = memR.TryAddTx(types.Tx(txBytes), e.Src) } + default: - memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Logger.Error("Unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) return } @@ -144,8 +164,33 @@ func (memR *Reactor) Receive(e p2p.Envelope) { // broadcasting happens from go routines per peer } +// TryAddTx attempts to add an incoming transaction to the mempool. +// When the sender is nil, it means the transaction comes from an RPC endpoint. +func (memR *Reactor) TryAddTx(tx types.Tx, sender p2p.Peer) (*abcicli.ReqRes, error) { + senderID := noSender + if sender != nil { + senderID = sender.ID() + } + + reqRes, err := memR.mempool.CheckTx(tx, senderID) + if err != nil { + switch { + case errors.Is(err, ErrTxInCache): + memR.Logger.Debug("Tx already exists in cache", "tx", log.NewLazySprintf("%X", tx.Hash()), "sender", senderID) + case errors.As(err, &ErrMempoolIsFull{}): + // using debug level to avoid flooding when traffic is high + memR.Logger.Debug(err.Error()) + default: + memR.Logger.Info("Could not check tx", "tx", log.NewLazySprintf("%X", tx.Hash()), "sender", senderID, "err", err) + } + return nil, err + } + + return reqRes, nil +} + func (memR *Reactor) EnableInOutTxs() { - memR.Logger.Info("enabling inbound and outbound transactions") + memR.Logger.Info("Enabling inbound and outbound transactions") if !memR.waitSync.CompareAndSwap(true, false) { return } @@ -167,8 +212,6 @@ type PeerState interface { // Send new mempool txs to peer. func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { - var next *clist.CElement - // If the node is catching up, don't start this routine immediately. 
if memR.WaitSync() { select { @@ -179,36 +222,26 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + select { + case <-peer.Quit(): + cancel() + case <-memR.Quit(): + cancel() + } + }() + + iter := NewBlockingIterator(ctx, memR.mempool, string(peer.ID())) for { // In case of both next.NextWaitChan() and peer.Quit() are variable at the same time if !memR.IsRunning() || !peer.IsRunning() { return } - // This happens because the CElement we were looking at got garbage - // collected (removed). That is, .NextWait() returned nil. Go ahead and - // start from the beginning. - if next == nil { - select { - case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available - if next = memR.mempool.TxsFront(); next == nil { - continue - } - case <-peer.Quit(): - return - case <-memR.Quit(): - return - } - } - // Make sure the peer is up to date. - peerState, ok := peer.Get(types.PeerStateKey).(PeerState) - if !ok { - // Peer does not have a state yet. We set it in the consensus reactor, but - // when we add peer in Switch, the order we call reactors#AddPeer is - // different every time due to us using a map. Sometimes other reactors - // will be initialized before the consensus reactor. We should wait a few - // milliseconds and retry. - time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) + entry := <-iter.WaitNextCh() + // If the entry we were looking at got garbage collected (removed), try again. + if entry == nil { continue } @@ -217,64 +250,70 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // reduces the mempool size and the recheck-tx rate of the receiving // node. See [RFC 103] for an analysis on this optimization. // - // [RFC 103]: https://github.com/cometbft/cometbft/pull/735 - memTx := next.Value.(*mempoolTx) - if peerState.GetHeight() < memTx.Height()-1 { - time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) - continue + // [RFC 103]: https://github.com/CometBFT/cometbft/blob/main/docs/references/rfc/rfc-103-incoming-txs-when-catching-up.md + for { + // Make sure the peer's state is up to date. The peer may not have a + // state yet. We set it in the consensus reactor, but when we add + // peer in Switch, the order we call reactors#AddPeer is different + // every time due to us using a map. Sometimes other reactors will + // be initialized before the consensus reactor. We should wait a few + // milliseconds and retry. + peerState, ok := peer.Get(types.PeerStateKey).(PeerState) + if ok && peerState.GetHeight()+1 >= entry.Height() { + break + } + select { + case <-time.After(PeerCatchupSleepIntervalMS * time.Millisecond): + case <-peer.Quit(): + return + case <-memR.Quit(): + return + } } // NOTE: Transaction batching was disabled due to // https://github.com/tendermint/tendermint/issues/5796 - if !memR.isSender(memTx.tx.Key(), peer.ID()) { - success := peer.Send(p2p.Envelope{ - ChannelID: MempoolChannel, - Message: &protomem.Txs{Txs: [][]byte{memTx.tx}}, - }) - if !success { - time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } + // We are paying the cost of computing the transaction hash in + // any case, even when logger level > debug. So we do it only once.
+ // See: https://github.com/cometbft/cometbft/issues/4167 + txHash := entry.Tx().Hash() - select { - case <-next.NextWaitChan(): - // see the start of the for loop for nil check - next = next.Next() - case <-peer.Quit(): - return - case <-memR.Quit(): - return + // Do not send this transaction if we receive it from peer. + if entry.IsSender(peer.ID()) { + memR.Logger.Debug("Skipping transaction, peer is sender", + "tx", log.NewLazySprintf("%X", txHash), "peer", peer.ID()) + continue } - } -} - -func (memR *Reactor) isSender(txKey types.TxKey, peerID p2p.ID) bool { - memR.txSendersMtx.Lock() - defer memR.txSendersMtx.Unlock() - sendersSet, ok := memR.txSenders[txKey] - return ok && sendersSet[peerID] -} + for { + // The entry may have been removed from the mempool since it was + // chosen at the beginning of the loop. Skip it if that's the case. + if !memR.mempool.Contains(entry.Tx().Key()) { + break + } -func (memR *Reactor) addSender(txKey types.TxKey, senderID p2p.ID) bool { - memR.txSendersMtx.Lock() - defer memR.txSendersMtx.Unlock() + memR.Logger.Debug("Sending transaction to peer", + "tx", log.NewLazySprintf("%X", txHash), "peer", peer.ID()) - if sendersSet, ok := memR.txSenders[txKey]; ok { - sendersSet[senderID] = true - return false - } - memR.txSenders[txKey] = map[p2p.ID]bool{senderID: true} - return true -} + success := peer.Send(p2p.Envelope{ + ChannelID: MempoolChannel, + Message: &protomem.Txs{Txs: [][]byte{entry.Tx()}}, + }) + if success { + break + } -func (memR *Reactor) removeSenders(txKey types.TxKey) { - memR.txSendersMtx.Lock() - defer memR.txSendersMtx.Unlock() + memR.Logger.Debug("Failed sending transaction to peer", + "tx", log.NewLazySprintf("%X", txHash), "peer", peer.ID()) - if memR.txSenders != nil { - delete(memR.txSenders, txKey) + select { + case <-time.After(PeerCatchupSleepIntervalMS * time.Millisecond): + case <-peer.Quit(): + return + case <-memR.Quit(): + return + } + } } } diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 29a9815279c..a667b5dc28b 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -8,16 +8,16 @@ import ( "time" "github.com/fortytw2/leaktest" - "github.com/go-kit/log/term" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" + memproto "github.com/cometbft/cometbft/api/cometbft/mempool/v1" cfg "github.com/cometbft/cometbft/config" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" - memproto "github.com/cometbft/cometbft/proto/tendermint/mempool" "github.com/cometbft/cometbft/proxy" "github.com/cometbft/cometbft/types" ) @@ -43,23 +43,23 @@ func TestReactorBroadcastTxsMessage(t *testing.T) { // asserted in waitForTxsOnReactors (due to transactions gossiping). If we // replace Connect2Switches (full mesh) with a func, which connects first // reactor to others and nothing else, this test should also pass with >2 reactors. 
- const N = 2 - reactors, _ := makeAndConnectReactors(config, N) + const n = 2 + reactors, _ := makeAndConnectReactors(config, n, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) } } }() for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { peer.Set(types.PeerStateKey, peerState{1}) } } - txs := checkTxs(t, reactors[0].mempool, numTxs) - waitForReactors(t, txs, reactors, checkTxsInOrder) + txs := addRandomTxs(t, reactors[0].mempool, numTxs) + waitForReactors(t, txs, reactors, checkTxsInMempool) } // regression test for https://github.com/tendermint/tendermint/issues/5408 @@ -67,17 +67,17 @@ func TestReactorConcurrency(t *testing.T) { config := cfg.TestConfig() config.Mempool.Size = 5000 config.Mempool.CacheSize = 5000 - const N = 2 - reactors, _ := makeAndConnectReactors(config, N) + const n = 2 + reactors, _ := makeAndConnectReactors(config, n, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) } } }() for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { peer.Set(types.PeerStateKey, peerState{1}) } } @@ -90,27 +90,29 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. update the whole mempool - txs := checkTxs(t, reactors[0].mempool, numTxs) + txs := addRandomTxs(t, reactors[0].mempool, numTxs) go func() { defer wg.Done() + reactors[0].mempool.PreUpdate() reactors[0].mempool.Lock() defer reactors[0].mempool.Unlock() err := reactors[0].mempool.Update(1, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil) - assert.NoError(t, err) + require.NoError(t, err) }() // 1. submit a bunch of txs // 2. update none - _ = checkTxs(t, reactors[1].mempool, numTxs) + _ = addRandomTxs(t, reactors[1].mempool, numTxs) go func() { defer wg.Done() + reactors[1].mempool.PreUpdate() reactors[1].mempool.Lock() defer reactors[1].mempool.Unlock() err := reactors[1].mempool.Update(1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil) - assert.NoError(t, err) + require.NoError(t, err) }() // 1. flush the mempool @@ -124,17 +126,17 @@ func TestReactorConcurrency(t *testing.T) { // ensure peer gets no txs. 
func TestReactorNoBroadcastToSender(t *testing.T) { config := cfg.TestConfig() - const N = 2 - reactors, _ := makeAndConnectReactors(config, N) + const n = 2 + reactors, _ := makeAndConnectReactorsNoLanes(config, n, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) } } }() for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { peer.Set(types.PeerStateKey, peerState{1}) } } @@ -142,32 +144,126 @@ func TestReactorNoBroadcastToSender(t *testing.T) { // create random transactions txs := NewRandomTxs(numTxs, 20) - // the second peer sends all the transactions to the first peer + // This subset should be broadcast + var txsToBroadcast types.Txs + const minToBroadcast = numTxs / 10 + + // The second peer sends some transactions to the first peer secondNodeID := reactors[1].Switch.NodeInfo().ID() - for _, tx := range txs { - reactors[0].addSender(tx.Key(), secondNodeID) - _, err := reactors[0].mempool.CheckTx(tx) - require.NoError(t, err) + secondNode := reactors[0].Switch.Peers().Get(secondNodeID) + for i, tx := range txs { + shouldBroadcast := cmtrand.Bool() || // random choice + // Force shouldBroadcast == true to ensure that + // len(txsToBroadcast) >= minToBroadcast + (len(txsToBroadcast) < minToBroadcast && + len(txs)-i <= minToBroadcast) + + t.Log(i, "adding", tx, "shouldBroadcast", shouldBroadcast) + + if !shouldBroadcast { + // From the second peer => should not be broadcast + _, err := reactors[0].TryAddTx(tx, secondNode) + require.NoError(t, err) + } else { + // Emulate a tx received via RPC => should broadcast + _, err := reactors[0].TryAddTx(tx, nil) + require.NoError(t, err) + txsToBroadcast = append(txsToBroadcast, tx) + } } - // the second peer should not receive any transaction - ensureNoTxs(t, reactors[1], 100*time.Millisecond) + t.Log("Added", len(txs), "transactions, only", len(txsToBroadcast), + "should be sent to the peer") + + // The second peer should receive only txsToBroadcast transactions + waitForReactors(t, txsToBroadcast, reactors[1:], checkTxsInOrder) } -func TestReactor_MaxTxBytes(t *testing.T) { +// Test that a lagging peer does not receive txs. +func TestMempoolReactorSendLaggingPeer(t *testing.T) { config := cfg.TestConfig() + const n = 2 + reactors, _ := makeAndConnectReactors(config, n, nil) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + require.NoError(t, err) + } + } + }() + + // First reactor is at height 10 and knows that its peer is lagging at height 1. + reactors[0].mempool.height.Store(10) + peerID := reactors[1].Switch.NodeInfo().ID() + reactors[0].Switch.Peers().Get(peerID).Set(types.PeerStateKey, peerState{1}) - const N = 2 - reactors, _ := makeAndConnectReactors(config, N) + // Add a bunch of txs to the first reactor. The second reactor should not receive any tx. + txs1 := addTxs(t, reactors[0].mempool, 0, numTxs) + ensureNoTxs(t, reactors[1], 5*PeerCatchupSleepIntervalMS*time.Millisecond) + + // Now we know that the second reactor has advanced to height 9, so it should receive all txs. + reactors[0].Switch.Peers().Get(peerID).Set(types.PeerStateKey, peerState{9}) + waitForReactors(t, txs1, reactors, checkTxsInMempool) + + // Add a bunch of txs to first reactor. The second reactor should receive them all. 
+ txs2 := addTxs(t, reactors[0].mempool, numTxs, numTxs) + waitForReactors(t, append(txs1, txs2...), reactors, checkTxsInMempool) +} + +// Test the scenario where a tx selected for being sent to a peer is removed +// from the mempool before it is actually sent. +func TestMempoolReactorSendRemovedTx(t *testing.T) { + config := cfg.TestConfig() + const n = 2 + reactors, _ := makeAndConnectReactors(config, n, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) + } + } + }() + + // First reactor is at height 10 and knows that its peer is lagging at height 1. + // We do this to hold sending transactions, giving us time to remove some of them. + reactors[0].mempool.height.Store(10) + peerID := reactors[1].Switch.NodeInfo().ID() + reactors[0].Switch.Peers().Get(peerID).Set(types.PeerStateKey, peerState{1}) + + // Add a bunch of txs to the first reactor. The second reactor should not receive any tx. + txs := addRandomTxs(t, reactors[0].mempool, 20) + ensureNoTxs(t, reactors[1], 5*PeerCatchupSleepIntervalMS*time.Millisecond) + + // Remove some txs from the mempool of the first reactor. + txsToRemove := txs[:10] + txsLeft := txs[10:] + reactors[0].mempool.PreUpdate() + reactors[0].mempool.Lock() + err := reactors[0].mempool.Update(10, txsToRemove, abciResponses(len(txsToRemove), abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + reactors[0].mempool.Unlock() + require.Equal(t, len(txsLeft), reactors[0].mempool.Size()) + + // Now we know that the second reactor is not lagging, so it should receive + // all txs except those that were removed. + reactors[0].Switch.Peers().Get(peerID).Set(types.PeerStateKey, peerState{9}) + waitForReactors(t, txsLeft, reactors, checkTxsInMempool) +} + +func TestMempoolReactorMaxTxBytes(t *testing.T) { + config := cfg.TestConfig() + + const n = 2 + reactors, _ := makeAndConnectReactors(config, n, mempoolLogger("info")) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + require.NoError(t, err) } } }() for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { peer.Set(types.PeerStateKey, peerState{1}) } } @@ -175,7 +271,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { // Broadcast a tx, which has the max size // => ensure it's received by the second reactor. 
tx1 := kvstore.NewRandomTx(config.Mempool.MaxTxBytes) - reqRes, err := reactors[0].mempool.CheckTx(tx1) + reqRes, err := reactors[0].TryAddTx(tx1, nil) require.NoError(t, err) require.False(t, reqRes.Response.GetCheckTx().IsErr()) waitForReactors(t, []types.Tx{tx1}, reactors, checkTxsInOrder) @@ -186,7 +282,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { // Broadcast a tx, which is beyond the max size // => ensure it's not sent tx2 := kvstore.NewRandomTx(config.Mempool.MaxTxBytes + 1) - reqRes, err = reactors[0].mempool.CheckTx(tx2) + reqRes, err = reactors[0].TryAddTx(tx2, nil) require.Error(t, err) require.Nil(t, reqRes) } @@ -197,19 +293,19 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { } config := cfg.TestConfig() - const N = 2 - reactors, _ := makeAndConnectReactors(config, N) + const n = 2 + reactors, _ := makeAndConnectReactors(config, n, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) } } }() // stop peer sw := reactors[1].Switch - sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason")) + sw.StopPeerForError(sw.Peers().Copy()[0], errors.New("some reason")) // check that we are not leaking any go-routines // i.e. broadcastTxRoutine finishes when peer is stopped @@ -222,12 +318,12 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) { } config := cfg.TestConfig() - const N = 2 - _, switches := makeAndConnectReactors(config, N) + const n = 2 + _, switches := makeAndConnectReactors(config, n, nil) // stop reactors for _, s := range switches { - assert.NoError(t, s.Stop()) + require.NoError(t, s.Stop()) } // check that we are not leaking any go-routines @@ -235,161 +331,250 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) { leaktest.CheckTimeout(t, 10*time.Second)() } -func TestReactorTxSendersLocal(t *testing.T) { +// Finding a solution for guaranteeing FIFO ordering is not easy; it would +// require changes at the p2p level. The order of messages is just best-effort, +// but this is not documented anywhere. If this is well understood and +// documented, we don't need this test. Until then, let's keep the test. +func TestMempoolFIFOWithParallelCheckTx(t *testing.T) { + t.Skip("FIFO is not supposed to be guaranteed and this is just used to evidence one of the cases where it does not happen. 
Hence we skip this test.") + config := cfg.TestConfig() - const N = 1 - reactors, _ := makeAndConnectReactors(config, N) + reactors, _ := makeAndConnectReactors(config, 4, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) } } }() - reactor := reactors[0] - - tx1 := kvstore.NewTxFromID(1) - tx2 := kvstore.NewTxFromID(2) - require.False(t, reactor.isSender(types.Tx(tx1).Key(), "peer1")) - - reactor.addSender(types.Tx(tx1).Key(), "peer1") - reactor.addSender(types.Tx(tx1).Key(), "peer2") - reactor.addSender(types.Tx(tx2).Key(), "peer1") - require.True(t, reactor.isSender(types.Tx(tx1).Key(), "peer1")) - require.True(t, reactor.isSender(types.Tx(tx1).Key(), "peer2")) - require.True(t, reactor.isSender(types.Tx(tx2).Key(), "peer1")) - - reactor.removeSenders(types.Tx(tx1).Key()) - require.False(t, reactor.isSender(types.Tx(tx1).Key(), "peer1")) - require.False(t, reactor.isSender(types.Tx(tx1).Key(), "peer2")) - require.True(t, reactor.isSender(types.Tx(tx2).Key(), "peer1")) + for _, r := range reactors { + for _, peer := range r.Switch.Peers().Copy() { + peer.Set(types.PeerStateKey, peerState{1}) + } + } + + // Deliver the same sequence of transactions from multiple sources, in parallel. + txs := newUniqueTxs(200) + for i := 0; i < 3; i++ { + go func() { + for _, tx := range txs { + _, _ = reactors[0].TryAddTx(tx, nil) + } + }() + } + + // Confirm that FIFO order was respected. + checkTxsInOrder(t, txs, reactors[0], 0) } -// Test that: -// - If a transaction came from a peer AND if the transaction is added to the -// mempool, it must have a non-empty list of senders in the reactor. -// - If a transaction is removed from the mempool, it must also be removed from -// the list of senders in the reactor. -func TestReactorTxSendersMultiNode(t *testing.T) { +// Test the experimental feature that limits the number of outgoing connections for gossiping +// transactions (only non-persistent peers). +// Note: in this test we know which gossip connections are active or not because of how the p2p +// functions are currently implemented, which affects the order in which peers are added to the +// mempool reactor. +func TestMempoolReactorMaxActiveOutboundConnections(t *testing.T) { config := cfg.TestConfig() - config.Mempool.Size = 1000 - config.Mempool.CacheSize = 1000 - const N = 3 - reactors, _ := makeAndConnectReactors(config, N) + config.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1 + reactors, _ := makeAndConnectReactors(config, 4, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { - assert.NoError(t, err) + require.NoError(t, err) } } }() for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { peer.Set(types.PeerStateKey, peerState{1}) } } - firstReactor := reactors[0] - numTxs := config.Mempool.Size - txs := newUniqueTxs(numTxs) + // Add a bunch of transactions to the first reactor. + txs := newUniqueTxs(100) + tryAddTxs(t, reactors[0], txs) - // Initially, there are no transactions (and no senders). - for _, r := range reactors { - require.Zero(t, len(r.txSenders)) + // Wait for all txs to be in the mempool of the second reactor; the other reactors should not + // receive any tx. (The second reactor only sends transactions to the first reactor.)
+ checkTxsInMempool(t, txs, reactors[1], 0) + for _, r := range reactors[2:] { + require.Zero(t, r.mempool.Size()) } - // Add transactions to the first reactor. - callCheckTx(t, firstReactor.mempool, txs) + // Disconnect the second reactor from the first reactor. + firstPeer := reactors[0].Switch.Peers().Copy()[0] + reactors[0].Switch.StopPeerGracefully(firstPeer) - // Wait for all txs to be in the mempool of each reactor. - waitForReactors(t, txs, reactors, checkTxsInMempool) - for i, r := range reactors { - checkTxsInMempoolAndSenders(t, r, txs, i) + // Now the third reactor should start receiving transactions from the first reactor; the fourth + // reactor's mempool should still be empty. + checkTxsInMempool(t, txs, reactors[2], 0) + for _, r := range reactors[3:] { + require.Zero(t, r.mempool.Size()) } +} - // Split the transactions in three groups of different sizes. - splitIndex := numTxs / 6 - validTxs := txs[:splitIndex] // will be used to update the mempool, as valid txs - invalidTxs := txs[splitIndex : 3*splitIndex] // will be used to update the mempool, as invalid txs - ignoredTxs := txs[3*splitIndex:] // will remain in the mempool - - // Update the mempools with a list of valid and invalid transactions. - for i, r := range reactors { - updateMempool(t, r.mempool, validTxs, invalidTxs) - - // Txs included in a block should have been removed from the mempool and - // have no senders. - for _, tx := range append(validTxs, invalidTxs...) { - require.False(t, r.mempool.InMempool(tx.Key())) - _, hasSenders := r.txSenders[tx.Key()] - require.False(t, hasSenders) +// Test the experimental feature that limits the number of outgoing connections for gossiping +// transactions (only non-persistent peers). +// Given the disconnections, no transaction should be received in duplicate. +// Note: in this test we know which gossip connections are active or not because of how the p2p +// functions are currently implemented, which affects the order in which peers are added to the +// mempool reactor. +func TestMempoolReactorMaxActiveOutboundConnectionsNoDuplicate(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1 + reactors, _ := makeAndConnectReactors(config, 4, nil) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + require.NoError(t, err) + } + } + }() + for _, r := range reactors { + for _, peer := range r.Switch.Peers().Copy() { + peer.Set(types.PeerStateKey, peerState{1}) + } + } + + // Disconnect the second reactor from the third reactor. + pCon1_2 := reactors[1].Switch.Peers().Copy()[1] + reactors[1].Switch.StopPeerGracefully(pCon1_2) - // Ignored txs should still be in the mempool. - checkTxsInMempoolAndSenders(t, r, ignoredTxs, i) + // Add a bunch of transactions to the first reactor. + txs := newUniqueTxs(100) + tryAddTxs(t, reactors[0], txs) + + // Wait for all txs to be in the mempool of the second reactor; the other reactors should not + // receive any tx. (The second reactor only sends transactions to the first reactor.) + checkTxsInMempool(t, txs, reactors[1], 0) + for _, r := range reactors[2:] { + require.Zero(t, r.mempool.Size()) } - // The first reactor should not receive transactions from other peers. - require.Zero(t, len(firstReactor.txSenders)) + // Disconnect the second reactor from the first reactor.
+ pCon0_1 := reactors[0].Switch.Peers().Copy()[0] + reactors[0].Switch.StopPeerGracefully(pCon0_1) + + // Now the third reactor should start receiving transactions from the first reactor and + // the fourth reactor from the second. + checkTxsInMempool(t, txs, reactors[2], 0) + checkTxsInMempool(t, txs, reactors[3], 0) } -// Check that the mempool has exactly the given list of txs and, if it's not the -// first reactor (reactorIndex == 0), then each tx has a non-empty list of senders. -func checkTxsInMempoolAndSenders(t *testing.T, r *Reactor, txs types.Txs, reactorIndex int) { - r.txSendersMtx.Lock() - defer r.txSendersMtx.Unlock() - - require.Equal(t, len(txs), r.mempool.Size()) - if reactorIndex == 0 { - require.Zero(t, len(r.txSenders)) - } else { - require.Equal(t, len(txs), len(r.txSenders)) +// Test the experimental feature that limits the number of outgoing connections for gossiping +// transactions (only non-persistent peers) on a star-shaped network. +// The star center will need to deliver the transactions to each point. +// Note: in this test we know which gossip connections are active or not because of how the p2p +// functions are currently implemented, which affects the order in which peers are added to the +// mempool reactor. +func TestMempoolReactorMaxActiveOutboundConnectionsStar(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1 + reactors, _ := makeAndConnectReactorsStar(config, 0, 4, nil) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + require.NoError(t, err) + } + } + }() + for _, r := range reactors { + for _, peer := range r.Switch.Peers().Copy() { + peer.Set(types.PeerStateKey, peerState{1}) + } } + // Add a bunch of transactions to the first reactor. + txs := newUniqueTxs(5) + tryAddTxs(t, reactors[0], txs) - // Each transaction is in the mempool and, if it's not the first reactor, it - // has a non-empty list of senders. - for _, tx := range txs { - assert.True(t, r.mempool.InMempool(tx.Key())) - senders, hasSenders := r.txSenders[tx.Key()] - if reactorIndex == 0 { - require.False(t, hasSenders) - } else { - require.True(t, hasSenders && len(senders) > 0) - } + // Wait for all txs to be in the mempool of the second reactor; the other reactors should not + // receive any tx. (The second reactor only sends transactions to the first reactor.) + checkTxsInMempool(t, txs, reactors[0], 0) + checkTxsInMempool(t, txs, reactors[1], 0) + + for _, r := range reactors[2:] { + require.Zero(t, r.mempool.Size()) + } + + // Disconnect the second reactor from the first reactor. + firstPeer := reactors[0].Switch.Peers().Copy()[0] + reactors[0].Switch.StopPeerGracefully(firstPeer) + + // Now the third reactor should start receiving transactions from the first reactor; the fourth + // reactor's mempool should still be empty. + checkTxsInMempool(t, txs, reactors[0], 0) + checkTxsInMempool(t, txs, reactors[1], 0) + checkTxsInMempool(t, txs, reactors[2], 0) + for _, r := range reactors[3:] { + require.Zero(t, r.mempool.Size()) } } // mempoolLogger is a TestingLogger which uses a different // color for each validator ("validator" key must exist).
-func mempoolLogger() log.Logger { - return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "validator" { - return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} - } - } - return term.FgBgColor{} - }) +func mempoolLogger(level string) *log.Logger { + logger := log.TestingLogger() + + // Customize log level + option, err := log.AllowLevel(level) + if err != nil { + panic(err) + } + logger = log.NewFilter(logger, option) + + return &logger } -// connect N mempool reactors through N switches -func makeAndConnectReactors(config *cfg.Config, n int) ([]*Reactor, []*p2p.Switch) { +// makeReactors creates n mempool reactors. +func makeReactors(config *cfg.Config, n int, logger *log.Logger, lanesEnabled bool) []*Reactor { + if logger == nil { + logger = mempoolLogger("info") + } reactors := make([]*Reactor, n) - logger := mempoolLogger() for i := 0; i < n; i++ { - app := kvstore.NewInMemoryApplication() + var app *kvstore.Application + if lanesEnabled { + app = kvstore.NewInMemoryApplication() + } else { + app = kvstore.NewInMemoryApplicationWithoutLanes() + } cc := proxy.NewLocalClientCreator(app) mempool, cleanup := newMempoolWithApp(cc) defer cleanup() reactors[i] = NewReactor(config.Mempool, mempool, false) // so we dont start the consensus states - reactors[i].SetLogger(logger.With("validator", i)) + reactors[i].SetLogger((*logger).With("validator", i)) } + return reactors +} - switches := p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch { +// connectReactors connects the list of N reactors through N switches. +func connectReactors(config *cfg.Config, reactors []*Reactor, connect func([]*p2p.Switch, int, int)) []*p2p.Switch { + switches := p2p.MakeSwitches(config.P2P, len(reactors), func(i int, s *p2p.Switch) *p2p.Switch { s.AddReactor("MEMPOOL", reactors[i]) return s - }, p2p.Connect2Switches) + }) + for _, s := range switches { + s.SetLogger(log.NewNopLogger()) + } + return p2p.StartAndConnectSwitches(switches, connect) +} + +func makeAndConnectReactorsNoLanes(config *cfg.Config, n int, logger *log.Logger) ([]*Reactor, []*p2p.Switch) { + reactors := makeReactors(config, n, logger, false) + switches := connectReactors(config, reactors, p2p.Connect2Switches) + return reactors, switches +} + +func makeAndConnectReactors(config *cfg.Config, n int, logger *log.Logger) ([]*Reactor, []*p2p.Switch) { + reactors := makeReactors(config, n, logger, true) + switches := connectReactors(config, reactors, p2p.Connect2Switches) + return reactors, switches +} + +// connect N mempool reactors through N switches as a star centered in c. +func makeAndConnectReactorsStar(config *cfg.Config, c, n int, logger *log.Logger) ([]*Reactor, []*p2p.Switch) { + reactors := makeReactors(config, n, logger, true) + switches := connectReactors(config, reactors, p2p.ConnectStarSwitches(c)) return reactors, switches } @@ -404,6 +589,7 @@ func newUniqueTxs(n int) types.Txs { // Wait for all reactors to finish applying a testing function to a list of // transactions. func waitForReactors(t *testing.T, txs types.Txs, reactors []*Reactor, testFunc func(*testing.T, types.Txs, *Reactor, int)) { + t.Helper() wg := new(sync.WaitGroup) for i, reactor := range reactors { wg.Add(1) @@ -437,46 +623,46 @@ func waitForNumTxsInMempool(numTxs int, mempool Mempool) { // Wait until all txs are in the mempool and check that the number of txs in the // mempool is as expected. 
func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) { + t.Helper() waitForNumTxsInMempool(len(txs), reactor.mempool) reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) - require.Equal(t, len(txs), len(reapedTxs)) - require.Equal(t, len(txs), reactor.mempool.Size()) + require.Len(t, txs, len(reapedTxs)) + require.Len(t, txs, reactor.mempool.Size()) } // Wait until all txs are in the mempool and check that they are in the same // order as given. func checkTxsInOrder(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { + t.Helper() waitForNumTxsInMempool(len(txs), reactor.mempool) // Check that all transactions in the mempool are in the same order as txs. reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) + require.Equal(t, len(txs), len(reapedTxs)) for i, tx := range txs { assert.Equalf(t, tx, reapedTxs[i], "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) } } -func updateMempool(t *testing.T, mp Mempool, validTxs types.Txs, invalidTxs types.Txs) { - allTxs := append(validTxs, invalidTxs...) - - validTxResponses := abciResponses(len(validTxs), abci.CodeTypeOK) - invalidTxResponses := abciResponses(len(invalidTxs), 1) - allResponses := append(validTxResponses, invalidTxResponses...) - - mp.Lock() - err := mp.Update(1, allTxs, allResponses, nil, nil) - mp.Unlock() - - require.NoError(t, err) -} - -// ensure no txs on reactor after some timeout +// ensure no txs on reactor after some timeout. func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) { + t.Helper() time.Sleep(timeout) // wait for the txs in all mempools assert.Zero(t, reactor.mempool.Size()) } +// Try to add a list of transactions to the mempool of a given reactor. +func tryAddTxs(t *testing.T, reactor *Reactor, txs types.Txs) { + t.Helper() + for _, tx := range txs { + rr, err := reactor.TryAddTx(tx, nil) + require.Nil(t, err) + rr.Wait() + } +} + func TestMempoolVectors(t *testing.T) { testCases := []struct { testName string @@ -488,8 +674,6 @@ func TestMempoolVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - msg := memproto.Message{ Sum: &memproto.Message_Txs{ Txs: &memproto.Txs{Txs: [][]byte{tc.tx}}, diff --git a/mempool/types.go b/mempool/types.go new file mode 100644 index 00000000000..ee3f694b71a --- /dev/null +++ b/mempool/types.go @@ -0,0 +1,11 @@ +package mempool + +import ( + memprotos "github.com/cometbft/cometbft/api/cometbft/mempool/v1" + "github.com/cometbft/cometbft/types" +) + +var ( + _ types.Wrapper = &memprotos.Txs{} + _ types.Unwrapper = &memprotos.Message{} +) diff --git a/networks/local/Makefile b/networks/local/Makefile deleted file mode 100644 index c2d52334e96..00000000000 --- a/networks/local/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# Makefile for the "localnode" docker image. - -all: - docker build --tag cometbft/localnode localnode - -.PHONY: all - diff --git a/networks/local/README.md b/networks/local/README.md deleted file mode 100644 index ec6d857ac82..00000000000 --- a/networks/local/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Local Cluster with Docker Compose - -See the [docs](https://docs.cometbft.com/main/networks/docker-compose.html). 
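[Editor's aside] The new mempool/types.go above pins the proto messages to the wrapper interfaces with blank-identifier assignments (`var _ types.Wrapper = &memprotos.Txs{}`), the same trick nop_mempool.go uses with `var _ Mempool = &NopMempool{}`. A minimal, self-contained sketch of the pattern follows; `Wrapper` and `TxsMsg` here are hypothetical stand-ins, not the CometBFT API.

package main

// Compile-time interface assertion, in the style of mempool/types.go.
// Wrapper and TxsMsg are illustrative names, not real CometBFT types.

type Wrapper interface {
	Wrap() any
}

type TxsMsg struct {
	Txs [][]byte
}

// Wrap satisfies Wrapper by returning the message itself.
func (m *TxsMsg) Wrap() any { return m }

// The blank-identifier var costs nothing at runtime, but the build
// fails as soon as *TxsMsg stops implementing Wrapper, so interface
// drift is caught at compile time rather than by a test.
var _ Wrapper = &TxsMsg{}

func main() {}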
diff --git a/networks/local/localnode/Dockerfile deleted file mode 100644 index e1c3c452701..00000000000 --- a/networks/local/localnode/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM alpine:3.7 - -RUN apk update && \ - apk upgrade && \ - apk --no-cache add curl jq file - -VOLUME /cometbft -WORKDIR /cometbft -EXPOSE 26656 26657 -ENTRYPOINT ["/usr/bin/wrapper.sh"] -CMD ["node", "--proxy_app", "kvstore"] -STOPSIGNAL SIGTERM - -COPY wrapper.sh /usr/bin/wrapper.sh -COPY config-template.toml /etc/cometbft/config-template.toml diff --git a/networks/local/localnode/config-template.toml b/networks/local/localnode/config-template.toml deleted file mode 100644 index a90eb7bd5f0..00000000000 --- a/networks/local/localnode/config-template.toml +++ /dev/null @@ -1,2 +0,0 @@ -[rpc] -laddr = "tcp://0.0.0.0:26657" diff --git a/networks/local/localnode/wrapper.sh b/networks/local/localnode/wrapper.sh deleted file mode 100755 index 700d4ac8a07..00000000000 --- a/networks/local/localnode/wrapper.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env sh - -## -## Input parameters -## -BINARY=/cometbft/${BINARY:-cometbft} -ID=${ID:-0} -LOG=${LOG:-cometbft.log} - -## -## Assert linux binary -## -if ! [ -f "${BINARY}" ]; then - echo "The binary $(basename "${BINARY}") cannot be found. Please add the binary to the shared folder. Please use the BINARY environment variable if the name of the binary is not 'cometbft' E.g.: -e BINARY=my_test_binary" - - exit 1 -fi -BINARY_CHECK="$(file "$BINARY" | grep 'ELF 64-bit LSB executable, x86-64')" -if [ -z "${BINARY_CHECK}" ]; then - echo "Binary needs to be OS linux, ARCH amd64 (build with 'make build-linux')" - exit 1 -fi - -## -## Run binary with all parameters -## -export CMTHOME="/cometbft/node${ID}" - -if [ -d "`dirname ${CMTHOME}/${LOG}`" ]; then - "$BINARY" "$@" | tee "${CMTHOME}/${LOG}" -else - "$BINARY" "$@" -fi - -chmod 777 -R /cometbft - diff --git a/node/errors.go b/node/errors.go new file mode 100644 index 00000000000..eb650666b25 --- /dev/null +++ b/node/errors.go @@ -0,0 +1,278 @@ +package node + +import ( + "errors" + "fmt" +) + +var ( + // ErrNonEmptyBlockStore is returned when the blockstore is not empty and the node is trying to initialize non empty state. + ErrNonEmptyBlockStore = errors.New("blockstore not empty, trying to initialize non empty state") + // ErrNonEmptyState is returned when the state is not empty and the node is trying to initialize non empty state. + ErrNonEmptyState = errors.New("state not empty, trying to initialize non empty state") + // ErrSwitchStateSync is returned when the blocksync reactor does not support switching from state sync. + ErrSwitchStateSync = errors.New("this blocksync reactor does not support switching from state sync") + // ErrGenesisHashDecode is returned when the genesis hash provided by the operator cannot be decoded. + ErrGenesisHashDecode = errors.New("genesis hash provided by operator cannot be decoded") + // ErrPassedGenesisHashMismatch is returned when the genesis doc hash in the database does not match the passed --genesis_hash value. + ErrPassedGenesisHashMismatch = errors.New("genesis doc hash in db does not match passed --genesis_hash value") + // ErrLoadedGenesisDocHashMismatch is returned when the genesis doc hash in the database does not match the loaded genesis doc. + ErrLoadedGenesisDocHashMismatch = errors.New("genesis doc hash in db does not match loaded genesis doc") +) + +// ErrLightClientStateProvider is returned when the node fails to set up the light client state provider.
+type ErrLightClientStateProvider struct { + Err error +} + +func (e ErrLightClientStateProvider) Error() string { + return fmt.Sprintf("failed to set up light client state provider: %v", e.Err) +} + +func (e ErrLightClientStateProvider) Unwrap() error { + return e.Err +} + +// ErrMismatchAppHash is returned when the app hash returned by the light client does not match the provided appHash. +type ErrMismatchAppHash struct { + Expected, Actual []byte +} + +func (e ErrMismatchAppHash) Error() string { + return fmt.Sprintf("the app hash returned by the light client does not match the provided appHash, expected %X, got %X", e.Expected, e.Actual) +} + +// ErrSetSyncHeight is returned when the node fails to set the synced height. +type ErrSetSyncHeight struct { + Err error +} + +func (e ErrSetSyncHeight) Error() string { + return fmt.Sprintf("failed to set synced height: %v", e.Err) +} + +// ErrPrivValidatorSocketClient is returned when the node fails to create private validator socket client. +type ErrPrivValidatorSocketClient struct { + Err error +} + +func (e ErrPrivValidatorSocketClient) Error() string { + return fmt.Sprintf("error with private validator socket client: %v", e.Err) +} + +func (e ErrPrivValidatorSocketClient) Unwrap() error { + return e.Err +} + +// ErrGetPubKey is returned when the node fails to get the public key. +type ErrGetPubKey struct { + Err error +} + +func (e ErrGetPubKey) Error() string { + return fmt.Sprintf("can't get pubkey: %v", e.Err) +} + +func (e ErrGetPubKey) Unwrap() error { + return e.Err +} + +// ErrCreatePruner is returned when the node fails to create the pruner. +type ErrCreatePruner struct { + Err error +} + +func (e ErrCreatePruner) Error() string { + return fmt.Sprintf("failed to create pruner: %v", e.Err) +} + +func (e ErrCreatePruner) Unwrap() error { + return e.Err +} + +// ErrCreateBlockSyncReactor is returned when the node fails to create the blocksync reactor. +type ErrCreateBlockSyncReactor struct { + Err error +} + +func (e ErrCreateBlockSyncReactor) Error() string { + return fmt.Sprintf("could not create blocksync reactor: %v", e.Err) +} + +func (e ErrCreateBlockSyncReactor) Unwrap() error { + return e.Err +} + +// ErrAddPersistentPeers is returned when the node fails to add peers from the persistent_peers field. +type ErrAddPersistentPeers struct { + Err error +} + +func (e ErrAddPersistentPeers) Error() string { + return fmt.Sprintf("could not add peers from persistent_peers field: %v", e.Err) +} + +func (e ErrAddPersistentPeers) Unwrap() error { + return e.Err +} + +// ErrAddUnconditionalPeerIDs is returned when the node fails to add peer ids from the unconditional_peer_ids field. +type ErrAddUnconditionalPeerIDs struct { + Err error +} + +func (e ErrAddUnconditionalPeerIDs) Error() string { + return fmt.Sprintf("could not add peer ids from unconditional_peer_ids field: %v", e.Err) +} + +func (e ErrAddUnconditionalPeerIDs) Unwrap() error { + return e.Err +} + +// ErrCreateAddrBook is returned when the node fails to create the address book. +type ErrCreateAddrBook struct { + Err error +} + +func (e ErrCreateAddrBook) Error() string { + return fmt.Sprintf("could not create addrbook: %v", e.Err) +} + +func (e ErrCreateAddrBook) Unwrap() error { + return e.Err +} + +// ErrDialPeers is returned when the node fails to dial peers from the persistent_peers field. 
+type ErrDialPeers struct { + Err error +} + +func (e ErrDialPeers) Error() string { + return fmt.Sprintf("could not dial peers from persistent_peers field: %v", e.Err) +} + +func (e ErrDialPeers) Unwrap() error { + return e.Err +} + +// ErrStartStateSync is returned when the node fails to start state sync. +type ErrStartStateSync struct { + Err error +} + +func (e ErrStartStateSync) Error() string { + return fmt.Sprintf("failed to start state sync: %v", e.Err) +} + +func (e ErrStartStateSync) Unwrap() error { + return e.Err +} + +// ErrStartPruning is returned when the node fails to start background pruning routine. +type ErrStartPruning struct { + Err error +} + +func (e ErrStartPruning) Error() string { + return fmt.Sprintf("failed to start background pruning routine: %v", e.Err) +} + +func (e ErrStartPruning) Unwrap() error { + return e.Err +} + +// ErrLoadOrGenNodeKey is returned when the node fails to load or generate the node key. +type ErrLoadOrGenNodeKey struct { + Err error + NodeKeyFile string +} + +func (e ErrLoadOrGenNodeKey) Error() string { + return fmt.Sprintf("failed to load or gen node key %s: %v", e.NodeKeyFile, e.Err) +} + +func (e ErrLoadOrGenNodeKey) Unwrap() error { + return e.Err +} + +// ErrRetrieveGenesisDocHash is returned when the node fails to retrieve the genesis doc hash from the database. +type ErrRetrieveGenesisDocHash struct { + Err error +} + +func (e ErrRetrieveGenesisDocHash) Error() string { + return fmt.Sprintf("error retrieving genesis doc hash: %v", e.Err) +} + +func (e ErrRetrieveGenesisDocHash) Unwrap() error { + return e.Err +} + +// ErrGenesisDoc is returned when the node fails to load the genesis doc. +type ErrGenesisDoc struct { + Err error +} + +func (e ErrGenesisDoc) Error() string { + return fmt.Sprintf("error in genesis doc: %v", e.Err) +} + +func (e ErrGenesisDoc) Unwrap() error { + return e.Err +} + +// ErrSaveGenesisDocHash is returned when the node fails to save the genesis doc hash to the database. +type ErrSaveGenesisDocHash struct { + Err error +} + +func (e ErrSaveGenesisDocHash) Error() string { + return fmt.Sprintf("failed to save genesis doc hash to db: %v", e.Err) +} + +func (e ErrSaveGenesisDocHash) Unwrap() error { + return e.Err +} + +// ErrorReadingGenesisDoc is returned when the node fails to read the genesis doc file. +type ErrorReadingGenesisDoc struct { + Err error +} + +func (e ErrorReadingGenesisDoc) Error() string { + return fmt.Sprintf("could not read GenesisDoc file: %v", e.Err) +} + +func (e ErrorReadingGenesisDoc) Unwrap() error { + return e.Err +} + +// ErrorLoadOrGenNodeKey is returned when the node fails to load or generate node key. +type ErrorLoadOrGenNodeKey struct { + Err error + NodeKeyFile string +} + +func (e ErrorLoadOrGenNodeKey) Error() string { + return fmt.Sprintf("failed to load or generate node key %s: %v", e.NodeKeyFile, e.Err) +} + +func (e ErrorLoadOrGenNodeKey) Unwrap() error { + return e.Err +} + +// ErrorLoadOrGenFilePV is returned when the node fails to load or generate priv validator file. 
+type ErrorLoadOrGenFilePV struct { + Err error + KeyFile string + StateFile string +} + +func (e ErrorLoadOrGenFilePV) Error() string { + return fmt.Sprintf("failed to load or generate privval file; "+ + "key file %s, state file %s: %v", e.KeyFile, e.StateFile, e.Err) +} + +func (e ErrorLoadOrGenFilePV) Unwrap() error { + return e.Err +} diff --git a/node/node.go b/node/node.go index 96b87605445..2eb9fc846ea 100644 --- a/node/node.go +++ b/node/node.go @@ -3,29 +3,38 @@ package node import ( "bytes" "context" + "encoding/hex" "errors" "fmt" "net" "net/http" "os" + "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" - bc "github.com/cometbft/cometbft/blocksync" - cfg "github.com/cometbft/cometbft/config" - cs "github.com/cometbft/cometbft/consensus" - "github.com/cometbft/cometbft/evidence" - "github.com/cometbft/cometbft/light" + _ "net/http/pprof" //nolint: gosec + abcicli "github.com/cometbft/cometbft/abci/client" + cfg "github.com/cometbft/cometbft/config" + bc "github.com/cometbft/cometbft/internal/blocksync" + cs "github.com/cometbft/cometbft/internal/consensus" + "github.com/cometbft/cometbft/internal/evidence" + cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/light" mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/p2p/pex" + "github.com/cometbft/cometbft/p2p/transport/tcp" "github.com/cometbft/cometbft/proxy" rpccore "github.com/cometbft/cometbft/rpc/core" grpcserver "github.com/cometbft/cometbft/rpc/grpc/server" @@ -40,8 +49,6 @@ import ( "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" - - _ "net/http/pprof" //nolint: gosec ) // Node is the highest level interface to a full CometBFT node. @@ -51,15 +58,15 @@ type Node struct { // config config *cfg.Config - genesisDoc *types.GenesisDoc // initial validator set + genesisTime time.Time privValidator types.PrivValidator // local node's validator key // network - transport *p2p.MultiplexTransport + transport *tcp.MultiplexTransport sw *p2p.Switch // p2p connections addrBook pex.AddrBook // known peers - nodeInfo p2p.NodeInfo - nodeKey *p2p.NodeKey // our node privkey + nodeInfo ni.NodeInfo + nodeKey *nodekey.NodeKey // our node privkey isListening bool // services @@ -68,7 +75,7 @@ type Node struct { blockStore *store.BlockStore // store the blockchain to disk pruner *sm.Pruner bcReactor p2p.Reactor // for block-syncing - mempoolReactor *mempl.Reactor // for gossipping transactions + mempoolReactor mempoolReactor // for gossipping transactions mempool mempl.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots @@ -87,6 +94,17 @@ type Node struct { pprofSrv *http.Server } +type waitSyncP2PReactor interface { + p2p.Reactor + // required by RPC service + WaitSync() bool +} + +type mempoolReactor interface { + waitSyncP2PReactor + TryAddTx(tx types.Tx, sender p2p.Peer) (*abcicli.ReqRes, error) +} + // Option sets a parameter for the node. 
type Option func(*Node) @@ -115,16 +133,15 @@ func CustomReactors(reactors map[string]p2p.Reactor) Option { // NOTE: This is a bit messy now with the type casting but is // cleaned up in the following version when NodeInfo is changed from // and interface to a concrete type - if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok { - for _, chDesc := range reactor.GetChannels() { - if !ni.HasChannel(chDesc.ID) { - ni.Channels = append(ni.Channels, chDesc.ID) - n.transport.AddChannel(chDesc.ID) + if ni, ok := n.nodeInfo.(ni.Default); ok { + for _, chDesc := range reactor.StreamDescriptors() { + if !ni.HasChannel(chDesc.StreamID()) { + ni.Channels = append(ni.Channels, chDesc.StreamID()) } } n.nodeInfo = ni } else { - n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels can not be added.") + n.Logger.Error("Node info is not of type ni.Default. Custom reactor channels can not be added.") } } } @@ -144,8 +161,8 @@ func StateProvider(stateProvider statesync.StateProvider) Option { // store are empty at the time the function is called. // // If the block store is not empty, the function returns an error. -func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider cfg.DBProvider, height uint64, appHash []byte) (err error) { - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider cfg.DBProvider, genProvider GenesisDocProvider, height uint64, appHash []byte) (err error) { + logger := log.NewLogger(os.Stdout) if ctx == nil { ctx = context.Background() } @@ -158,7 +175,10 @@ func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider cfg.DBPr if dbProvider == nil { dbProvider = cfg.DefaultDBProvider } - blockStore, stateDB, err := initDBs(config, dbProvider) + blockStoreDB, stateDB, err := initDBs(config, dbProvider) + + blockStore := store.NewBlockStore(blockStoreDB, store.WithMetrics(store.NopMetrics()), store.WithCompaction(config.Storage.Compact, config.Storage.CompactionInterval), store.WithDBKeyLayout(config.Storage.ExperimentalKeyLayout)) + logger.Info("Blockstore version", "version", blockStore.GetVersion()) defer func() { if derr := blockStore.Close(); derr != nil { @@ -173,11 +193,13 @@ func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider cfg.DBPr } if !blockStore.IsEmpty() { - return fmt.Errorf("blockstore not empty, trying to initialize non empty state") + return ErrNonEmptyBlockStore } stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: config.Storage.DiscardABCIResponses, + Logger: logger, + DBKeyLayout: config.Storage.ExperimentalKeyLayout, }) defer func() { @@ -193,24 +215,26 @@ func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider cfg.DBPr } if !state.IsEmpty() { - return fmt.Errorf("state not empty, trying to initialize non empty state") + return ErrNonEmptyState } - genState, _, err := LoadStateFromDBOrGenesisDocProvider(stateDB, DefaultGenesisDocProviderFunc(config), config.Storage.GenesisHash) + // The state store will use the DBKeyLayout set in config or already existing in the DB. 
+	genState, _, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genProvider, "")
	if err != nil {
		return err
	}

-	stateProvider, err := statesync.NewLightClientStateProvider(
+	stateProvider, err := statesync.NewLightClientStateProviderWithDBKeyVersion(
		ctx,
		genState.ChainID, genState.Version, genState.InitialHeight,
		config.StateSync.RPCServers, light.TrustOptions{
			Period: config.StateSync.TrustPeriod,
			Height: config.StateSync.TrustHeight,
			Hash:   config.StateSync.TrustHashBytes(),
-		}, logger.With("module", "light"))
+		}, logger.With("module", "light"),
+		config.Storage.ExperimentalKeyLayout)
	if err != nil {
-		return fmt.Errorf("failed to set up light client state provider: %w", err)
+		return ErrLightClientStateProvider{Err: err}
	}

	state, err = stateProvider.State(ctx, height)
@@ -219,16 +243,14 @@
	if appHash == nil {
		logger.Info("warning: cannot verify appHash. Verification will happen when node boots up!")
-	} else {
-		if !bytes.Equal(appHash, state.AppHash) {
-			if err := blockStore.Close(); err != nil {
-				logger.Error("failed to close blockstore: %w", err)
-			}
-			if err := stateStore.Close(); err != nil {
-				logger.Error("failed to close statestore: %w", err)
-			}
-			return fmt.Errorf("the app hash returned by the light client does not match the provided appHash, expected %X, got %X", state.AppHash, appHash)
+	} else if !bytes.Equal(appHash, state.AppHash) {
+		if err := blockStore.Close(); err != nil {
+			logger.Error("failed to close blockstore", "err", err)
		}
+		if err := stateStore.Close(); err != nil {
+			logger.Error("failed to close statestore", "err", err)
+		}
+		return ErrMismatchAppHash{Expected: appHash, Actual: state.AppHash}
	}

	commit, err := stateProvider.Commit(ctx, height)
@@ -251,53 +273,93 @@
	// needs to manually delete the state and blockstores and rerun the bootstrapping process.
	err = stateStore.SetOfflineStateSyncHeight(state.LastBlockHeight)
	if err != nil {
-		return fmt.Errorf("failed to set synced height: %w", err)
+		return ErrSetSyncHeight{Err: err}
	}

	return err
}

-//------------------------------------------------------------------------------
+// ------------------------------------------------------------------------------

// NewNode returns a new, ready to go, CometBFT Node.
func NewNode(ctx context.Context,
	config *cfg.Config,
	privValidator types.PrivValidator,
-	nodeKey *p2p.NodeKey,
+	nodeKey *nodekey.NodeKey,
+	clientCreator proxy.ClientCreator,
+	genesisDocProvider GenesisDocProvider,
+	dbProvider cfg.DBProvider,
+	metricsProvider MetricsProvider,
+	logger log.Logger,
+	options ...Option,
+) (*Node, error) {
+	return NewNodeWithCliParams(ctx,
+		config,
+		privValidator,
+		nodeKey,
+		clientCreator,
+		genesisDocProvider,
+		dbProvider,
+		metricsProvider,
+		logger,
+		CliParams{},
+		options...)
+}

+// NewNodeWithCliParams returns a new, ready-to-go CometBFT node,
+// verifying the hash of the provided genesis file against
+// a hash supplied by the operator via the CLI.
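+//
+// Illustrative usage (a sketch; the surrounding values are assumed, and the
+// checksum may come from any trusted source):
+//
+//	sum := sha256.Sum256(genesisFileBytes)
+//	n, err := NewNodeWithCliParams(ctx, config, pv, nodeKey, clientCreator,
+//		genesisDocProvider, dbProvider, metricsProvider, logger,
+//		CliParams{GenesisHash: sum[:]})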
+
+func NewNodeWithCliParams(ctx context.Context,
+	config *cfg.Config,
+	privValidator types.PrivValidator,
+	nodeKey *nodekey.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider cfg.DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger,
+	cliParams CliParams,
	options ...Option,
) (*Node, error) {
-	blockStore, stateDB, err := initDBs(config, dbProvider)
+	blockStoreDB, stateDB, err := initDBs(config, dbProvider)
	if err != nil {
		return nil, err
	}

+	var genesisHash string
+	if len(cliParams.GenesisHash) != 0 {
+		genesisHash = hex.EncodeToString(cliParams.GenesisHash)
+	}
+	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider, genesisHash)
+	if err != nil {
+		return nil, err
+	}
+
+	csMetrics, p2pMetrics, memplMetrics, smMetrics, bstMetrics, abciMetrics, bsMetrics, ssMetrics := metricsProvider(genDoc.ChainID)

	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
+		Metrics:              smMetrics,
+		Compact:              config.Storage.Compact,
+		CompactionInterval:   config.Storage.CompactionInterval,
+		Logger:               logger,
+		DBKeyLayout:          config.Storage.ExperimentalKeyLayout,
	})

-	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider, config.Storage.GenesisHash)
-	if err != nil {
-		return nil, err
-	}
+	blockStore := store.NewBlockStore(blockStoreDB, store.WithMetrics(bstMetrics), store.WithCompaction(config.Storage.Compact, config.Storage.CompactionInterval), store.WithDBKeyLayout(config.Storage.ExperimentalKeyLayout))
+	logger.Info("Blockstore version", "version", blockStore.GetVersion())

	// The key will be deleted if it existed.
	// Not checking whether the key is there in case the genesis file was larger than
	// the max size of a value (in rocksDB for example), which would cause the check
	// to fail and prevent the node from booting.
-	logger.Info("WARNING: deleting genesis file from database if present, the database stores a hash of the original genesis file now")
+	logger.Warn("deleting genesis file from database if present, the database stores a hash of the original genesis file now")
	err = stateDB.Delete(genesisDocKey)
	if err != nil {
-		logger.Error("Failed to delete genesis doc from DB ", err)
+		logger.Error("Failed to delete genesis doc from DB", "err", err)
	}

-	csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics, bsMetrics, ssMetrics := metricsProvider(genDoc.ChainID)
-
	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, abciMetrics)
	if err != nil {
@@ -325,17 +387,18 @@ func NewNode(ctx context.Context,
		// FIXME: we should start services inside OnStart
		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
		if err != nil {
-			return nil, fmt.Errorf("error with private validator socket client: %w", err)
+			return nil, ErrPrivValidatorSocketClient{Err: err}
		}
	}

	pubKey, err := privValidator.GetPubKey()
	if err != nil {
-		return nil, fmt.Errorf("can't get pubkey: %w", err)
+		return nil, ErrGetPubKey{Err: err}
	}
+	localAddr := pubKey.Address()

	// Determine whether we should attempt state sync.
- stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + stateSync := config.StateSync.Enable && !state.Validators.ValidatorBlocksTheChain(localAddr) if stateSync && state.LastBlockHeight > 0 { logger.Info("Found local state with non-zero height, skipping state sync") stateSync = false @@ -344,8 +407,13 @@ func NewNode(ctx context.Context, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync CometBFT with the app. consensusLogger := logger.With("module", "consensus") + + appInfoResponse, err := proxyApp.Query().Info(ctx, proxy.InfoRequest) + if err != nil { + return nil, fmt.Errorf("error calling ABCI Info method: %v", err) + } if !stateSync { - if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, appInfoResponse, proxyApp, consensusLogger); err != nil { return nil, err } @@ -358,17 +426,13 @@ func NewNode(ctx context.Context, } } - // Determine whether we should do block sync. This must happen after the handshake, since the - // app may modify the validator set, specifying ourself as the only validator. - blockSync := !onlyValidatorIsUs(state, pubKey) - waitSync := stateSync || blockSync - logNodeStartupInfo(state, pubKey, logger, consensusLogger) - // Make MempoolReactor - mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, waitSync, memplMetrics, logger) + // Blocksync is always active, except if the local node blocks the chain + waitSync := !state.Validators.ValidatorBlocksTheChain(localAddr) + + mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, eventBus, waitSync, memplMetrics, logger, appInfoResponse) - // Make Evidence Reactor evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger) if err != nil { return nil, err @@ -384,7 +448,7 @@ func NewNode(ctx context.Context, logger.With("module", "state"), ) if err != nil { - return nil, fmt.Errorf("failed to create pruner: %w", err) + return nil, ErrCreatePruner{Err: err} } // make block executor for consensus and blocksync reactors to execute blocks @@ -406,13 +470,13 @@ func NewNode(ctx context.Context, panic(fmt.Sprintf("failed to retrieve statesynced height from store %s; expected state store height to be %v", err, state.LastBlockHeight)) } } - // Make BlocksyncReactor. Don't start block sync if we're doing a state sync first. - bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, bsMetrics, offlineStateSyncHeight) + // Don't start block sync if we're doing a state sync first, or we are blocking the chain. + blockSync := !stateSync && !state.Validators.ValidatorBlocksTheChain(localAddr) + bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync, localAddr, logger, bsMetrics, offlineStateSyncHeight) if err != nil { - return nil, fmt.Errorf("could not create blocksync reactor: %w", err) + return nil, ErrCreateBlockSyncReactor{Err: err} } - // Make ConsensusReactor consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, privValidator, csMetrics, waitSync, eventBus, consensusLogger, offlineStateSyncHeight, @@ -439,10 +503,8 @@ func NewNode(ctx context.Context, return nil, err } - // Setup Transport. 
- transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) + transport, peerFilters := createTransport(config, nodeKey, proxyApp) - // Setup Switch. p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, @@ -451,17 +513,17 @@ func NewNode(ctx context.Context, err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) + return nil, ErrAddPersistentPeers{Err: err} } err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + return nil, ErrAddUnconditionalPeerIDs{Err: err} } addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) + return nil, ErrCreateAddrBook{Err: err} } // Optionally, start the pex reactor @@ -470,7 +532,7 @@ func NewNode(ctx context.Context, // // We need to set Seeds and PersistentPeers on the switch, // since it needs to be able to use these (and their DNS names) - // even if the PEX is off. We can include the DNS name in the NetAddress, + // even if the PEX is off. We can include the DNS name in the na.NetAddr, // but it would still be nice to have a clear list of the current "PersistentPeers" // somewhere that we can return with net_info. // @@ -486,7 +548,7 @@ func NewNode(ctx context.Context, node := &Node{ config: config, - genesisDoc: genDoc, + genesisTime: genDoc.GenesisTime, privValidator: privValidator, transport: transport, @@ -526,7 +588,7 @@ func NewNode(ctx context.Context, // OnStart starts the Node. It implements service.Service. func (n *Node) OnStart() error { now := cmttime.Now() - genTime := n.genesisDoc.GenesisTime + genTime := n.genesisTime if genTime.After(now) { n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) time.Sleep(genTime.Sub(now)) @@ -547,13 +609,13 @@ func (n *Node) OnStart() error { if n.config.RPC.ListenAddress != "" { listeners, err := n.startRPC() if err != nil { - return err + return fmt.Errorf("starting RPC server: %w", err) } n.rpcListeners = listeners } // Start the transport. 
-	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
+	addr, err := na.NewFromString(na.IDAddrString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
@@ -572,25 +634,25 @@ func (n *Node) OnStart() error {
	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
-		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
+		return ErrDialPeers{Err: err}
	}

	// Run state sync
	if n.stateSync {
		bcR, ok := n.bcReactor.(blockSyncReactor)
		if !ok {
-			return fmt.Errorf("this blocksync reactor does not support switching from state sync")
+			return ErrSwitchStateSync
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.stateSyncProvider,
-			n.config.StateSync, n.stateStore, n.blockStore, n.stateSyncGenesis)
+			n.config.StateSync, n.stateStore, n.blockStore, n.stateSyncGenesis, n.config.Storage.ExperimentalKeyLayout)
		if err != nil {
-			return fmt.Errorf("failed to start state sync: %w", err)
+			return ErrStartStateSync{Err: err}
		}
	}

	// Start background pruning
	if err := n.pruner.Start(); err != nil {
-		return fmt.Errorf("failed to start background pruning routine: %w", err)
+		return ErrStartPruning{Err: err}
	}

	return nil
@@ -609,10 +671,11 @@ func (n *Node) OnStop() {
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
-	if err := n.indexerService.Stop(); err != nil {
-		n.Logger.Error("Error closing indexerService", "err", err)
+	if n.indexerService != nil {
+		if err := n.indexerService.Stop(); err != nil {
+			n.Logger.Error("Error closing indexerService", "err", err)
+		}
	}
-
	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
@@ -669,46 +732,78 @@
	}
}

-// ConfigureRPC makes sure RPC has all the objects it needs to operate.
+var (
+	// The following globals are only relevant to the `ConfigureRPC` method below.
+	// The '_' prefix is to signal to other parts of the code that these are global
+	// unexported variables.
+
+	// _once is a special object that executes a function only once. We use it to
+	// ensure that `ConfigureRPC` initializes an `Environment` object only once.
+	_once sync.Once
+
+	// _rpcEnv is the `Environment` object serving RPC APIs. We treat it as a
+	// singleton and create it exactly once. See the docs of `ConfigureRPC` below
+	// for more details.
+	_rpcEnv *rpccore.Environment
+)
+
+// ConfigureRPC initializes and returns an `Environment` object with all the data
+// it needs to serve the RPC APIs. The function ensures that the `Environment` is
+// created only once to prevent other parts of the code from creating duplicate
+// `Environment` instances by calling this function directly. This is important
+// because `Environment` stores a copy of the genesis in memory; therefore,
+// multiple independent `Environment` instances would each load the genesis into
+// memory.
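+//
+// Repeated calls return the same singleton (an illustrative sketch):
+//
+//	env1, _ := n.ConfigureRPC()
+//	env2, _ := n.ConfigureRPC() // _once has already fired; no re-initialization
+//	// env1 == env2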
func (n *Node) ConfigureRPC() (*rpccore.Environment, error) { - pubKey, err := n.privValidator.GetPubKey() - if pubKey == nil || err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) - } - rpcCoreEnv := rpccore.Environment{ - ProxyAppQuery: n.proxyApp.Query(), - ProxyAppMempool: n.proxyApp.Mempool(), - - StateStore: n.stateStore, - BlockStore: n.blockStore, - EvidencePool: n.evidencePool, - ConsensusState: n.consensusState, - P2PPeers: n.sw, - P2PTransport: n, - PubKey: pubKey, - - GenDoc: n.genesisDoc, - TxIndexer: n.txIndexer, - BlockIndexer: n.blockIndexer, - ConsensusReactor: n.consensusReactor, - MempoolReactor: n.mempoolReactor, - EventBus: n.eventBus, - Mempool: n.mempool, - - Logger: n.Logger.With("module", "rpc"), - - Config: *n.config.RPC, - } - if err := rpcCoreEnv.InitGenesisChunks(); err != nil { - return nil, err - } - return &rpcCoreEnv, nil + var errToReturn error + + _once.Do(func() { + pubKey, err := n.privValidator.GetPubKey() + if pubKey == nil || err != nil { + errToReturn = ErrGetPubKey{Err: err} + return + } + + _rpcEnv = &rpccore.Environment{ + ProxyAppQuery: n.proxyApp.Query(), + ProxyAppMempool: n.proxyApp.Mempool(), + + StateStore: n.stateStore, + BlockStore: n.blockStore, + EvidencePool: n.evidencePool, + ConsensusState: n.consensusState, + P2PPeers: n.sw, + P2PTransport: n, + PubKey: pubKey, + + TxIndexer: n.txIndexer, + BlockIndexer: n.blockIndexer, + ConsensusReactor: n.consensusReactor, + MempoolReactor: n.mempoolReactor, + EventBus: n.eventBus, + Mempool: n.mempool, + + Logger: n.Logger.With("module", "rpc"), + + Config: *n.config.RPC, + + GenesisFilePath: n.config.GenesisFile(), + } + + n.Logger.Info("Creating genesis file chunks if genesis file is too big...") + if err := _rpcEnv.InitGenesisChunks(); err != nil { + errToReturn = fmt.Errorf("configuring RPC API environment: %w", err) + return + } + }) + + return _rpcEnv, errToReturn } func (n *Node) startRPC() ([]net.Listener, error) { env, err := n.ConfigureRPC() if err != nil { - return nil, err + return nil, fmt.Errorf("configuring RPC server: %s", err) } listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") @@ -719,6 +814,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { } config := rpcserver.DefaultConfig() + config.MaxRequestBatchSize = n.config.RPC.MaxRequestBatchSize config.MaxBodyBytes = n.config.RPC.MaxBodyBytes config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes config.MaxOpenConnections = n.config.RPC.MaxOpenConnections @@ -730,8 +826,8 @@ func (n *Node) startRPC() ([]net.Listener, error) { } // we may expose the rpc over both a unix and tcp socket - listeners := make([]net.Listener, len(listenAddrs)) - for i, listenAddr := range listenAddrs { + listeners := make([]net.Listener, 0, len(listenAddrs)) + for _, listenAddr := range listenAddrs { mux := http.NewServeMux() rpcLogger := n.Logger.With("module", "rpc-server") wmLogger := rpcLogger.With("protocol", "websocket") @@ -768,31 +864,35 @@ func (n *Node) startRPC() ([]net.Listener, error) { } if n.config.RPC.IsTLSEnabled() { go func() { - if err := rpcserver.ServeTLS( + err := rpcserver.ServeTLSWithShutdown( listener, rootHandler, n.config.RPC.CertFile(), n.config.RPC.KeyFile(), rpcLogger, config, - ); err != nil { - n.Logger.Error("Error serving server with TLS", "err", err) + env.Cleanup, + ) + if err != nil { + n.Logger.Error("serving server with TLS", "err", err) } }() } else { go func() { - if err := rpcserver.Serve( + err := rpcserver.ServeWithShutdown( listener, rootHandler, rpcLogger, config, - ); 
err != nil {
+				env.Cleanup,
+			)
+			if err != nil {
				n.Logger.Error("Error serving server", "err", err)
			}
			}()
		}
-		listeners[i] = listener
+		listeners = append(listeners, listener)
	}

	if n.config.GRPC.ListenAddress != "" {
@@ -864,7 +964,7 @@ func (n *Node) startPrometheusServer() *http.Server {
	return srv
}

-// starts a ppro
+// startPprofServer starts a pprof server.
func (n *Node) startPprofServer() *http.Server {
	srv := &http.Server{
		Addr: n.config.RPC.PprofListenAddress,
@@ -926,9 +1026,31 @@ func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

-// GenesisDoc returns the Node's GenesisDoc.
-func (n *Node) GenesisDoc() *types.GenesisDoc {
-	return n.genesisDoc
+// GenesisDoc returns a GenesisDoc object after reading the genesis file from disk.
+// The function does not check the genesis's validity since it was already
+// checked at startup, and we work under the assumption that correct nodes (i.e.,
+// non-Byzantine) are not compromised. Therefore, their file system can be
+// trusted while the node is running.
+// Note that the genesis file can be large (hundreds of MBs, even GBs); therefore,
+// we recommend that the caller does not keep the GenesisDoc returned by this
+// function in memory longer than necessary.
+func (n *Node) GenesisDoc() (*types.GenesisDoc, error) {
+	gDocPath := n.config.GenesisFile()
+
+	gDocJSON, err := os.ReadFile(gDocPath)
+	if err != nil {
+		return nil, fmt.Errorf("unavailable genesis file at %s: %w", gDocPath, err)
+	}
+
+	var gDoc types.GenesisDoc
+
+	err = cmtjson.Unmarshal(gDocJSON, &gDoc)
+	if err != nil {
+		formatStr := "invalid JSON format for genesis file at %s: %w"
+		return nil, fmt.Errorf(formatStr, gDocPath, err)
+	}
+
+	return &gDoc, nil
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
@@ -941,7 +1063,7 @@ func (n *Node) Config() *cfg.Config {
	return n.config
}

-//------------------------------------------------------------------------------
+// ------------------------------------------------------------------------------

func (n *Node) Listeners() []string {
	return []string{
@@ -954,31 +1076,31 @@ func (n *Node) IsListening() bool {
}

// NodeInfo returns the Node's Info from the Switch.
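//
// Callers that need the concrete node info type can assert to ni.Default, as
// the tests in this changeset do (an illustrative sketch):
//
//	if def, ok := n.NodeInfo().(ni.Default); ok {
//		_ = def.Channels // stream IDs advertised by this node
//	}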
-func (n *Node) NodeInfo() p2p.NodeInfo { +func (n *Node) NodeInfo() ni.NodeInfo { return n.nodeInfo } func makeNodeInfo( config *cfg.Config, - nodeKey *p2p.NodeKey, + nodeKey *nodekey.NodeKey, txIndexer txindex.TxIndexer, genDoc *types.GenesisDoc, state sm.State, -) (p2p.DefaultNodeInfo, error) { +) (ni.Default, error) { txIndexerStatus := "on" if _, ok := txIndexer.(*null.TxIndex); ok { txIndexerStatus = "off" } - nodeInfo := p2p.DefaultNodeInfo{ - ProtocolVersion: p2p.NewProtocolVersion( + nodeInfo := ni.Default{ + ProtocolVersion: ni.NewProtocolVersion( version.P2PProtocol, // global state.Version.Consensus.Block, state.Version.Consensus.App, ), DefaultNodeID: nodeKey.ID(), Network: genDoc.ChainID, - Version: version.TMCoreSemVer, + Version: version.CMTSemVer, Channels: []byte{ bc.BlocksyncChannel, cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, @@ -987,7 +1109,7 @@ func makeNodeInfo( statesync.SnapshotChannel, statesync.ChunkChannel, }, Moniker: config.Moniker, - Other: p2p.DefaultNodeInfoOther{ + Other: ni.DefaultOther{ TxIndex: txIndexerStatus, RPCAddress: config.RPC.ListenAddress, }, diff --git a/node/node_test.go b/node/node_test.go index e690f46031c..3406fad4e4c 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3,12 +3,13 @@ package node import ( "bytes" "context" - "encoding/hex" - + "encoding/json" + "errors" "fmt" "net" "net/http" "os" + "reflect" "syscall" "testing" "time" @@ -17,21 +18,24 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/abci/example/kvstore" cfg "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/tmhash" - "github.com/cometbft/cometbft/evidence" + "github.com/cometbft/cometbft/internal/evidence" + kt "github.com/cometbft/cometbft/internal/keytypes" + cmtos "github.com/cometbft/cometbft/internal/os" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" - cmtrand "github.com/cometbft/cometbft/libs/rand" mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/p2p/conn" p2pmock "github.com/cometbft/cometbft/p2p/mock" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" "github.com/cometbft/cometbft/privval" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" @@ -45,7 +49,7 @@ func TestNodeStartStop(t *testing.T) { defer os.RemoveAll(config.RootDir) // create & start node - n, err := DefaultNewNode(config, log.TestingLogger()) + n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil) require.NoError(t, err) err = n.Start() require.NoError(t, err) @@ -108,12 +112,12 @@ func TestCompanionInitialHeightSetup(t *testing.T) { config.Storage.Pruning.DataCompanion.Enabled = true config.Storage.Pruning.DataCompanion.InitialBlockRetainHeight = 1 // create & start node - n, err := DefaultNewNode(config, log.TestingLogger()) + n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil) require.NoError(t, err) companionRetainHeight, err := n.stateStore.GetCompanionBlockRetainHeight() require.NoError(t, err) - require.Equal(t, companionRetainHeight, int64(1)) + require.Equal(t, int64(1), 
companionRetainHeight)
}

func TestNodeDelayedStart(t *testing.T) {
@@ -122,16 +126,17 @@ func TestNodeDelayedStart(t *testing.T) {
	now := cmttime.Now()

	// create & start node
-	n, err := DefaultNewNode(config, log.TestingLogger())
-	n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)
+	n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil)
	require.NoError(t, err)
+	n.genesisTime = now.Add(2 * time.Second)

	err = n.Start()
	require.NoError(t, err)
	defer n.Stop() //nolint:errcheck // ignore for tests

	startTime := cmttime.Now()
-	assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime))
+	assert.True(t, startTime.After(n.genesisTime))
}

func TestNodeSetAppVersion(t *testing.T) {
@@ -139,7 +144,7 @@
	defer os.RemoveAll(config.RootDir)

	// create & start node
-	n, err := DefaultNewNode(config, log.TestingLogger())
+	n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil)
	require.NoError(t, err)

	// default config uses the kvstore app
@@ -151,7 +156,7 @@
	assert.Equal(t, state.Version.Consensus.App, appVersion)

	// check version is set in node info
-	assert.Equal(t, n.nodeInfo.(p2p.DefaultNodeInfo).ProtocolVersion.App, appVersion)
+	assert.Equal(t, n.nodeInfo.(ni.Default).ProtocolVersion.App, appVersion)
}

func TestPprofServer(t *testing.T) {
@@ -161,18 +166,18 @@

	// should not work yet
	_, err := http.Get("http://" + config.RPC.PprofListenAddress) //nolint: bodyclose
-	assert.Error(t, err)
+	require.Error(t, err)

-	n, err := DefaultNewNode(config, log.TestingLogger())
-	assert.NoError(t, err)
-	assert.NoError(t, n.Start())
+	n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil)
+	require.NoError(t, err)
+	require.NoError(t, n.Start())
	defer func() {
		require.NoError(t, n.Stop())
	}()
	assert.NotNil(t, n.pprofSrv)

	resp, err := http.Get("http://" + config.RPC.PprofListenAddress + "/debug/pprof")
-	assert.NoError(t, err)
+	require.NoError(t, err)
	defer resp.Body.Close()
	assert.Equal(t, 200, resp.StatusCode)
}
@@ -205,12 +210,12 @@ func TestNodeSetPrivValTCP(t *testing.T) {
	}()
	defer signerServer.Stop() //nolint:errcheck // ignore for tests

-	n, err := DefaultNewNode(config, log.TestingLogger())
+	n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil)
	require.NoError(t, err)
	assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
}

-// address without a protocol must result in error
+// address without a protocol must result in error.
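+// For example, "127.0.0.1:26658" instead of "tcp://127.0.0.1:26658".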
func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { addrNoPrefix := testFreeAddr(t) @@ -218,8 +223,8 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { defer os.RemoveAll(config.RootDir) config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix - _, err := DefaultNewNode(config, log.TestingLogger()) - assert.Error(t, err) + _, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil) + require.ErrorAs(t, err, &ErrPrivValidatorSocketClient{}) } func TestNodeSetPrivValIPC(t *testing.T) { @@ -249,13 +254,30 @@ func TestNodeSetPrivValIPC(t *testing.T) { }() defer pvsc.Stop() //nolint:errcheck // ignore for tests - n, err := DefaultNewNode(config, log.TestingLogger()) + n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, nil) require.NoError(t, err) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } +func TestNodeSetFilePrivVal(t *testing.T) { + for _, keyType := range kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + config := test.ResetTestRootWithChainIDNoOverwritePrivval("node_priv_val_file_test_"+keyType, "test_chain_"+keyType) + defer os.RemoveAll(config.RootDir) + + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + n, err := DefaultNewNode(config, log.TestingLogger(), CliParams{}, keyGenF) + require.NoError(t, err) + assert.IsType(t, &privval.FilePV{}, n.PrivValidator()) + }) + } +} + // testFreeAddr claims a free port so we don't block on listener being ready. func testFreeAddr(t *testing.T) string { + t.Helper() ln, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) defer ln.Close() @@ -271,10 +293,11 @@ func TestCreateProposalBlock(t *testing.T) { config := test.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() - require.Nil(t, err) + require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() @@ -294,9 +317,14 @@ func TestCreateProposalBlock(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool + resp, err := app.Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + require.NoError(t, err) memplMetrics := mempl.NopMetrics() mempool := mempl.NewCListMempool(config.Mempool, proxyApp.Mempool(), + lanesInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -313,7 +341,7 @@ func TestCreateProposalBlock(t *testing.T) { // than can fit in a block var currentBytes int64 for currentBytes <= maxEvidenceBytes { - ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, cmttime.Now(), privVals[0], "test-chain") require.NoError(t, err) currentBytes += int64(len(ev.Bytes())) evidencePool.ReportConflictingVotes(ev.VoteA, ev.VoteB) @@ -329,8 +357,8 @@ func TestCreateProposalBlock(t *testing.T) { txLength := 100 for i := 0; i <= int(maxBytes)/txLength; i++ { tx := cmtrand.Bytes(txLength) - _, err := mempool.CheckTx(tx) - assert.NoError(t, err) + _, err := mempool.CheckTx(tx, "") + require.NoError(t, err) } blockExec := sm.NewBlockExecutor( @@ -366,7 +394,7 @@ func 
TestCreateProposalBlock(t *testing.T) {
	assert.EqualValues(t, partSetFromHeader.ByteSize(), partSet.ByteSize())

	err = blockExec.ValidateBlock(state, block)
-	assert.NoError(t, err)
+	require.NoError(t, err)
}

func TestMaxProposalBlockSize(t *testing.T) {
@@ -375,10 +403,11 @@
	config := test.ResetTestRoot("node_create_proposal")
	defer os.RemoveAll(config.RootDir)

-	cc := proxy.NewLocalClientCreator(kvstore.NewInMemoryApplication())
+	app := kvstore.NewInMemoryApplication()
+	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
	err := proxyApp.Start()
-	require.Nil(t, err)
+	require.NoError(t, err)
	defer proxyApp.Stop() //nolint:errcheck // ignore for tests

	logger := log.TestingLogger()
@@ -394,9 +423,14 @@
	proposerAddr, _ := state.Validators.GetByIndex(0)

	// Make Mempool
+	resp, err := app.Info(context.Background(), proxy.InfoRequest)
+	require.NoError(t, err)
+	lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane)
+	require.NoError(t, err)
	memplMetrics := mempl.NopMetrics()
	mempool := mempl.NewCListMempool(config.Mempool,
		proxyApp.Mempool(),
+		lanesInfo,
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithPreCheck(sm.TxPreCheck(state)),
@@ -407,8 +441,8 @@
	// fill the mempool with one tx just below the maximum size
	txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))
	tx := cmtrand.Bytes(txLength - 4) // to account for the varint
-	_, err = mempool.CheckTx(tx)
-	assert.NoError(t, err)
+	_, err = mempool.CheckTx(tx, "")
+	require.NoError(t, err)

	blockExec := sm.NewBlockExecutor(
		stateStore,
@@ -444,8 +478,8 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
	defer os.RemoveAll(config.RootDir)

	cr := p2pmock.NewReactor()
-	cr.Channels = []*conn.ChannelDescriptor{
-		{
+	cr.Channels = []p2p.StreamDescriptor{
+		&conn.ChannelDescriptor{
			ID:                byte(0x31),
			Priority:          5,
			SendQueueCapacity: 100,
@@ -454,12 +488,14 @@
	}
	customBlocksyncReactor := p2pmock.NewReactor()

-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+	nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile())
	require.NoError(t, err)
+	pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil)
+	require.NoError(t, err)

	n, err := NewNode(context.Background(),
		config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
@@ -480,18 +516,18 @@
	assert.True(t, customBlocksyncReactor.IsRunning())
	assert.Equal(t, customBlocksyncReactor, n.Switch().Reactor("BLOCKSYNC"))

-	channels := n.NodeInfo().(p2p.DefaultNodeInfo).Channels
+	channels := n.NodeInfo().(ni.Default).Channels
	assert.Contains(t, channels, mempl.MempoolChannel)
-	assert.Contains(t, channels, cr.Channels[0].ID)
+	assert.Contains(t, channels, cr.Channels[0].StreamID())
}

// Simple test to confirm that an existing genesis file will be deleted from the DB
-// TODO Confirm that the deletion of a very big file does not crash the machine
+// TODO Confirm that the deletion of a very big file does not crash the machine.
func TestNodeNewNodeDeleteGenesisFileFromDB(t *testing.T) {
	config := test.ResetTestRoot("node_new_node_delete_genesis_from_db")
	defer os.RemoveAll(config.RootDir)
-	// Use goleveldb so we can reuse the same db for the second NewNode()
-	config.DBBackend = string(dbm.GoLevelDBBackend)
+	// Use pebbledb so we can reuse the same db for the second NewNode()
+	config.DBBackend = string(dbm.PebbleDBBackend)
	// Ensure the genesis doc hash is saved to db
	stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
	require.NoError(t, err)
@@ -504,13 +540,16 @@
	require.Equal(t, genDocFromDB, []byte("genFile"))

	stateDB.Close()
-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+
+	nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile())
	require.NoError(t, err)
+	pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil)
+	require.NoError(t, err)

	n, err := NewNode(
		context.Background(),
		config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
@@ -520,9 +559,6 @@
	)
	require.NoError(t, err)

-	_, err = stateDB.Get(genesisDocKey)
-	require.Error(t, err)
-
	// Start and stop to close the db for later reading
	err = n.Start()
	require.NoError(t, err)
@@ -540,20 +576,23 @@
	err = stateDB.Close()
	require.NoError(t, err)
}
+
func TestNodeNewNodeGenesisHashMismatch(t *testing.T) {
	config := test.ResetTestRoot("node_new_node_genesis_hash")
	defer os.RemoveAll(config.RootDir)

-	// Use goleveldb so we can reuse the same db for the second NewNode()
-	config.DBBackend = string(dbm.GoLevelDBBackend)
+	// Use pebbledb so we can reuse the same db for the second NewNode()
+	config.DBBackend = string(dbm.PebbleDBBackend)

-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+	nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile())
	require.NoError(t, err)
+	pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil)
+	require.NoError(t, err)

	n, err := NewNode(
		context.Background(),
		config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
@@ -592,10 +631,12 @@
	err = genesisDoc.SaveAs(config.GenesisFile())
	require.NoError(t, err)

+	pv, err = privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil)
+	require.NoError(t, err)
	_, err = NewNode(
		context.Background(),
		config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
@@ -603,37 +644,39 @@
		DefaultMetricsProvider(config.Instrumentation),
		log.TestingLogger(),
	)
-	require.Error(t, err, "NewNode should error when genesisDoc is changed")
-	require.Equal(t, "genesis doc hash in db does not match loaded genesis doc", err.Error())
+	require.ErrorIs(t, err, ErrLoadedGenesisDocHashMismatch, "NewNode should error when genesisDoc is changed")
}

func TestNodeGenesisHashFlagMatch(t *testing.T) {
	config := test.ResetTestRoot("node_new_node_genesis_hash_flag_match")
	defer
os.RemoveAll(config.RootDir)
-	config.DBBackend = string(dbm.GoLevelDBBackend)
-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+	config.DBBackend = string(dbm.PebbleDBBackend)
+	nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile())
	require.NoError(t, err)

	// Get correct hash of correct genesis file
	jsonBlob, err := os.ReadFile(config.GenesisFile())
	require.NoError(t, err)
+	// Set the cli params variable to the correct hash
	incomingChecksum := tmhash.Sum(jsonBlob)
-	// Set genesis flag value to incorrect hash
-	config.Storage.GenesisHash = hex.EncodeToString(incomingChecksum)

-	_, err = NewNode(
+	cliParams := CliParams{GenesisHash: incomingChecksum}
+	pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil)
+	require.NoError(t, err)
+
+	_, err = NewNodeWithCliParams(
		context.Background(),
		config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		cfg.DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		log.TestingLogger(),
+		cliParams,
	)
	require.NoError(t, err)
-
}

func TestNodeGenesisHashFlagMismatch(t *testing.T) {
@@ -641,9 +684,9 @@
	defer os.RemoveAll(config.RootDir)

-	// Use goleveldb so we can reuse the same db for the second NewNode()
-	config.DBBackend = string(dbm.GoLevelDBBackend)
+	// Use pebbledb so we can reuse the same db for the second NewNode()
+	config.DBBackend = string(dbm.PebbleDBBackend)

-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+	nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile())
	require.NoError(t, err)

	// Generate hash of wrong file
@@ -652,20 +695,23 @@
	flagHash := tmhash.Sum(f)

	// Set genesis flag value to incorrect hash
-	config.Storage.GenesisHash = hex.EncodeToString(flagHash)
+	cliParams := CliParams{GenesisHash: flagHash}

-	_, err = NewNode(
+	pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil)
+	require.NoError(t, err)
+	_, err = NewNodeWithCliParams(
		context.Background(),
		config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		cfg.DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		log.TestingLogger(),
+		cliParams,
	)
-	require.Error(t, err)
+	require.ErrorIs(t, err, ErrPassedGenesisHashMismatch, "NewNode should error when genesis flag value is incorrectly set")

	f, err = os.ReadFile(config.GenesisFile())
	require.NoError(t, err)
@@ -676,6 +722,148 @@
	require.False(t, genHashMismatch)
}

+func TestLoadStateFromDBOrGenesisDocProviderWithConfig(t *testing.T) {
+	config := test.ResetTestRoot(t.Name())
+	config.DBBackend = string(dbm.PebbleDBBackend)
+
+	_, stateDB, err := initDBs(config, cfg.DefaultDBProvider)
+	require.NoErrorf(t, err, "state DB setup: %s", err)
+
+	genDocProviderFunc := func(sha256Checksum []byte) GenesisDocProvider {
+		return func() (ChecksummedGenesisDoc, error) {
+			genDocJSON, err := os.ReadFile(config.GenesisFile())
+			if err != nil {
+				formatStr := "reading genesis file: %s"
+				return ChecksummedGenesisDoc{}, fmt.Errorf(formatStr, err)
+			}
+
+			genDoc, err := types.GenesisDocFromJSON(genDocJSON)
+			if err != nil {
+				formatStr := "parsing genesis file: %s"
+				return
ChecksummedGenesisDoc{}, fmt.Errorf(formatStr, err) + } + + checksummedGenesisDoc := ChecksummedGenesisDoc{ + GenesisDoc: genDoc, + Sha256Checksum: sha256Checksum, + } + + return checksummedGenesisDoc, nil + } + } + + t.Run("NilGenesisChecksum", func(t *testing.T) { + genDocProvider := genDocProviderFunc(nil) + + _, _, err = LoadStateFromDBOrGenesisDocProviderWithConfig( + stateDB, + genDocProvider, + "", + nil, + ) + + wantErr := "invalid genesis doc SHA256 checksum: expected 64 characters, but have 0" + assert.EqualError(t, err, wantErr) + }) + + t.Run("ShorterGenesisChecksum", func(t *testing.T) { + genDocProvider := genDocProviderFunc([]byte("shorter")) + + _, _, err = LoadStateFromDBOrGenesisDocProviderWithConfig( + stateDB, + genDocProvider, + "", + nil, + ) + + wantErr := "invalid genesis doc SHA256 checksum: expected 64 characters, but have 14" + assert.EqualError(t, err, wantErr) + }) +} + +func TestGenesisDoc(t *testing.T) { + var ( + config = test.ResetTestRoot(t.Name()) + n = &Node{config: config} + ) + + // In the following tests we always overwrite the genesis file with a dummy. + // We can do so because the method under test's sole responsibility is + // retrieving and returning the GenesisDoc from disk. Therefore, we test only + // whether the retrieval process goes as expected; we don't check if the + // GenesisDoc is valid. + + t.Run("NoError", func(t *testing.T) { + // A trivial, incomplete genesis to test correct behavior. + gDocStr := `{ +"genesis_time": "2018-10-10T08:20:13.695936996Z", +"chain_id": "test-chain", +"initial_height": "1", +"app_hash": "" +}` + + err := os.WriteFile(config.GenesisFile(), []byte(gDocStr), 0o644) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + wantgDoc := &types.GenesisDoc{ + GenesisTime: time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC), + ChainID: "test-chain", + InitialHeight: 1, + AppHash: []byte{}, + } + + gDoc, err := n.GenesisDoc() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !reflect.DeepEqual(gDoc, wantgDoc) { + t.Errorf("\nwant: %#v\ngot: %#v\n", wantgDoc, gDoc) + } + }) + + t.Run("ErrGenesisFilePath", func(t *testing.T) { + n.config.Genesis = "foo.json" + _, err := n.GenesisDoc() + if err == nil { + t.Fatal("expected error but got none") + } + if !errors.Is(err, os.ErrNotExist) { + t.Errorf("expected os.ErrNotExist, got %s", err) + } + }) + + t.Run("ErrGenesisUnmarshal", func(t *testing.T) { + // A trivial, incomplete genesis where initial_height is set to an invalid + // value. + // We don't need anything more complex to test this error. + gDocStr := `{ +"genesis_time": "2018-10-10T08:20:13.695936996Z", +"chain_id": "test-chain", +"initial_height": "hello world", +"app_hash": "" +}` + + // note: Recall that in the previous test we set the path n.config.Genesis to + // foo.json. Therefore, config.GenesisFile() returns the path to foo.json. 
+		err := os.WriteFile(config.GenesisFile(), []byte(gDocStr), 0o644)
+		if err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+
+		_, err = n.GenesisDoc()
+		if err == nil {
+			t.Fatal("expected error but got none")
+		}
+
+		var errUnmarshal *json.SyntaxError
+		if !errors.As(err, &errUnmarshal) {
+			t.Errorf("expected json.SyntaxError, got %s", err)
+		}
+	})
+}
+
func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
	privVals := make([]types.PrivValidator, nVals)
	vals := make([]types.GenesisValidator, nVals)
diff --git a/node/setup.go b/node/setup.go
index 864ca283d30..147befe015b 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -10,35 +10,37 @@ import (
	"strings"
	"time"

-	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
+	_ "net/http/pprof" //nolint: gosec,gci // securely exposed on separate, optional port

-	dbm "github.com/cometbft/cometbft-db"
+	_ "github.com/lib/pq" //nolint: gci // provide the psql db driver.

+	dbm "github.com/cometbft/cometbft-db"
	abci "github.com/cometbft/cometbft/abci/types"
-	"github.com/cometbft/cometbft/blocksync"
	cfg "github.com/cometbft/cometbft/config"
-	cs "github.com/cometbft/cometbft/consensus"
	"github.com/cometbft/cometbft/crypto"
	"github.com/cometbft/cometbft/crypto/tmhash"
-	"github.com/cometbft/cometbft/evidence"
-	"github.com/cometbft/cometbft/statesync"
-
+	"github.com/cometbft/cometbft/internal/blocksync"
+	cs "github.com/cometbft/cometbft/internal/consensus"
+	"github.com/cometbft/cometbft/internal/evidence"
	"github.com/cometbft/cometbft/libs/log"
	"github.com/cometbft/cometbft/light"
	mempl "github.com/cometbft/cometbft/mempool"
	"github.com/cometbft/cometbft/p2p"
+	na "github.com/cometbft/cometbft/p2p/netaddr"
+	ni "github.com/cometbft/cometbft/p2p/nodeinfo"
+	"github.com/cometbft/cometbft/p2p/nodekey"
	"github.com/cometbft/cometbft/p2p/pex"
+	"github.com/cometbft/cometbft/p2p/transport/tcp"
	"github.com/cometbft/cometbft/privval"
	"github.com/cometbft/cometbft/proxy"
	sm "github.com/cometbft/cometbft/state"
	"github.com/cometbft/cometbft/state/indexer"
	"github.com/cometbft/cometbft/state/indexer/block"
	"github.com/cometbft/cometbft/state/txindex"
+	"github.com/cometbft/cometbft/statesync"
	"github.com/cometbft/cometbft/store"
	"github.com/cometbft/cometbft/types"
	"github.com/cometbft/cometbft/version"
-
-	_ "github.com/lib/pq" // provide the psql db driver
)

const readHeaderTimeout = 10 * time.Second

@@ -50,9 +52,25 @@ type ChecksummedGenesisDoc struct {
	Sha256Checksum []byte
}

+// CliParams stores parameters passed via the CLI that are needed to start the node.
+// These parameters should not be stored or persisted in the config file.
+// The struct can be further extended to include additional flags without
+// further API-breaking changes.
+type CliParams struct {
+	// SHA-256 hash of the genesis file provided via the command line.
+	// This hash is compared against the computed hash of the
+	// actual genesis file or the hash stored in the database.
+	// If there is a mismatch between the hash provided via the CLI and the
+	// hash of the genesis file or the hash in the DB, the node will not boot.
+	GenesisHash []byte
+}
+
// GenesisDocProvider returns a GenesisDoc together with its SHA256 checksum.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
+// It is the responsibility of the GenesisDocProvider to ensure that the SHA256
+// checksum correctly matches the GenesisDoc, that is:
+// sha256(GenesisDoc) == Sha256Checksum.
type GenesisDocProvider func() (ChecksummedGenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
@@ -64,7 +82,7 @@ func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
		// https://github.com/cometbft/cometbft/issues/1302
		jsonBlob, err := os.ReadFile(config.GenesisFile())
		if err != nil {
-			return ChecksummedGenesisDoc{}, fmt.Errorf("couldn't read GenesisDoc file: %w", err)
+			return ChecksummedGenesisDoc{}, ErrorReadingGenesisDoc{Err: err}
		}
		incomingChecksum := tmhash.Sum(jsonBlob)
		genDoc, err := types.GenesisDocFromJSON(jsonBlob)
@@ -76,68 +94,86 @@
}

-// Provider takes a config and a logger and returns a ready to go Node.
-type Provider func(*cfg.Config, log.Logger) (*Node, error)
+// Provider takes a config, a logger, CLI parameters, and an optional private
+// key generator, and returns a ready to go Node.
+type Provider func(*cfg.Config, log.Logger, CliParams, func() (crypto.PrivKey, error)) (*Node, error)

// DefaultNewNode returns a CometBFT node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
-// It implements NodeProvider.
-func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
-	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+// It implements Provider.
+func DefaultNewNode(
+	config *cfg.Config,
+	logger log.Logger,
+	cliParams CliParams,
+	keyGenF func() (crypto.PrivKey, error),
+) (*Node, error) {
+	nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile())
+	if err != nil {
+		return nil, ErrorLoadOrGenNodeKey{Err: err, NodeKeyFile: config.NodeKeyFile()}
+	}
+
+	pv, err := privval.LoadOrGenFilePV(
+		config.PrivValidatorKeyFile(),
+		config.PrivValidatorStateFile(),
+		keyGenF,
+	)
	if err != nil {
-		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
+		return nil, ErrorLoadOrGenFilePV{
+			Err:       err,
+			KeyFile:   config.PrivValidatorKeyFile(),
+			StateFile: config.PrivValidatorStateFile(),
+		}
	}

-	return NewNode(context.Background(), config,
-		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
+	return NewNodeWithCliParams(context.Background(), config,
+		pv,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		cfg.DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
+		cliParams,
	)
}

-// MetricsProvider returns a consensus, p2p and mempool Metrics.
-type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics)
+// MetricsProvider returns consensus, p2p, mempool, state, store, proxy,
+// blocksync, and statesync Metrics.
+type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *store.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics)

-// DefaultMetricsProvider returns Metrics build using Prometheus client library
+// DefaultMetricsProvider returns Metrics built using the Prometheus client library
// if Prometheus is enabled. Otherwise, it returns no-op Metrics.
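//
// A custom provider only needs to satisfy the MetricsProvider signature; for
// instance, this all-no-op sketch (illustrative, mirroring the defaults below):
//
//	var _ MetricsProvider = func(string) (*cs.Metrics, *p2p.Metrics,
//		*mempl.Metrics, *sm.Metrics, *store.Metrics, *proxy.Metrics,
//		*blocksync.Metrics, *statesync.Metrics) {
//		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(),
//			sm.NopMetrics(), store.NopMetrics(), proxy.NopMetrics(),
//			blocksync.NopMetrics(), statesync.NopMetrics()
//	}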
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics) { + return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *store.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics) { if config.Prometheus { return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), sm.PrometheusMetrics(config.Namespace, "chain_id", chainID), + store.PrometheusMetrics(config.Namespace, "chain_id", chainID), proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID), blocksync.PrometheusMetrics(config.Namespace, "chain_id", chainID), statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID) } - return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics(), blocksync.NopMetrics(), statesync.NopMetrics() + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), store.NopMetrics(), proxy.NopMetrics(), blocksync.NopMetrics(), statesync.NopMetrics() } } type blockSyncReactor interface { - SwitchToBlockSync(sm.State) error + SwitchToBlockSync(state sm.State) error } -//------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------ -func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { - var blockStoreDB dbm.DB - blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config}) +func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (bsDB dbm.DB, stateDB dbm.DB, err error) { + bsDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config}) if err != nil { - return + return nil, nil, err } - blockStore = store.NewBlockStore(blockStoreDB) stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config}) if err != nil { - return + return nil, nil, err } - return + return bsDB, stateDB, nil } func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) { @@ -169,16 +205,20 @@ func createAndStartIndexerService( txIndexer txindex.TxIndexer blockIndexer indexer.BlockIndexer ) - txIndexer, blockIndexer, err := block.IndexerFromConfig(config, dbProvider, chainID) + + txIndexer, blockIndexer, allIndexersDisabled, err := block.IndexerFromConfig(config, dbProvider, chainID) if err != nil { return nil, nil, nil, err } + if allIndexersDisabled { + return nil, txIndexer, blockIndexer, nil + } txIndexer.SetLogger(logger.With("module", "txindex")) blockIndexer.SetLogger(logger.With("module", "txindex")) + indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) indexerService.SetLogger(logger.With("module", "txindex")) - if err := indexerService.Start(); err != nil { return nil, nil, nil, err } @@ -193,13 +233,14 @@ func doHandshake( blockStore sm.BlockStore, genDoc *types.GenesisDoc, eventBus types.BlockEventPublisher, + appInfoResponse *abci.InfoResponse, proxyApp proxy.AppConns, consensusLogger log.Logger, ) error { handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(ctx, proxyApp); err != nil { + if err := 
handshaker.Handshake(ctx, appInfoResponse, proxyApp); err != nil {
		return fmt.Errorf("error during handshake: %v", err)
	}
	return nil
@@ -208,11 +249,11 @@ func doHandshake(
func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
	// Log the version info.
	logger.Info("Version info",
-		"tendermint_version", version.TMCoreSemVer,
+		"tendermint_version", version.CMTSemVer,
		"abci", version.ABCISemVer,
		"block", version.BlockProtocol,
		"p2p", version.P2PProtocol,
-		"commit_hash", version.TMGitCommitHash,
+		"commit_hash", version.CMTGitCommitHash,
	)

	// If the state and software differ in block version, at least log it.
@@ -232,45 +273,64 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL
	}
}

-func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
-	if state.Validators.Size() > 1 {
-		return false
-	}
-	addr, _ := state.Validators.GetByIndex(0)
-	return bytes.Equal(pubKey.Address(), addr)
-}
-
+// createMempoolAndMempoolReactor creates a mempool and a mempool reactor based on the config.
func createMempoolAndMempoolReactor(
	config *cfg.Config,
	proxyApp proxy.AppConns,
	state sm.State,
+	eventBus *types.EventBus,
	waitSync bool,
	memplMetrics *mempl.Metrics,
	logger log.Logger,
-) (mempl.Mempool, *mempl.Reactor) {
-	logger = logger.With("module", "mempool")
-	mp := mempl.NewCListMempool(
-		config.Mempool,
-		proxyApp.Mempool(),
-		state.LastBlockHeight,
-		mempl.WithMetrics(memplMetrics),
-		mempl.WithPreCheck(sm.TxPreCheck(state)),
-		mempl.WithPostCheck(sm.TxPostCheck(state)),
-	)
+	appInfoResponse *abci.InfoResponse,
+) (mempl.Mempool, mempoolReactor) {
+	switch config.Mempool.Type {
+	// allow empty string for backward compatibility
+	case cfg.MempoolTypeFlood, "":
+		lanesInfo, err := mempl.BuildLanesInfo(appInfoResponse.LanePriorities, appInfoResponse.DefaultLane)
+		if err != nil {
+			panic(fmt.Sprintf("could not get lanes info from app: %s", err))
+		}

-	mp.SetLogger(logger)
+		logger = logger.With("module", "mempool")
+		options := []mempl.CListMempoolOption{
+			mempl.WithMetrics(memplMetrics),
+			mempl.WithPreCheck(sm.TxPreCheck(state)),
+			mempl.WithPostCheck(sm.TxPostCheck(state)),
+		}
+		if config.Mempool.ExperimentalPublishEventPendingTx {
+			options = append(options, mempl.WithNewTxCallback(func(tx types.Tx) {
+				_ = eventBus.PublishEventPendingTx(types.EventDataPendingTx{
+					Tx: tx,
+				})
+			}))
+		}
+		mp := mempl.NewCListMempool(
+			config.Mempool,
+			proxyApp.Mempool(),
+			lanesInfo,
+			state.LastBlockHeight,
+			options...,
+		)
+		mp.SetLogger(logger)
+		reactor := mempl.NewReactor(
+			config.Mempool,
+			mp,
+			waitSync,
+		)
+		if config.Consensus.WaitForTxs() {
+			mp.EnableTxsAvailable()
+		}
+		reactor.SetLogger(logger)

-	reactor := mempl.NewReactor(
-		config.Mempool,
-		mp,
-		waitSync,
-	)
-	if config.Consensus.WaitForTxs() {
-		mp.EnableTxsAvailable()
+		return mp, reactor
+	case cfg.MempoolTypeNop:
+		// Strictly speaking, there's no need to have a `mempl.NopMempoolReactor`, but
+		// adding it leads to cleaner code.
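+		// Note that the nop mempool accepts no transactions, so the
+		// application must make transactions available to the proposer
+		// through some other channel.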
+ return &mempl.NopMempool{}, mempl.NewNopMempoolReactor() + default: + panic(fmt.Sprintf("unknown mempool type: %q", config.Mempool.Type)) } - reactor.SetLogger(logger) - - return mp, reactor } func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider, @@ -281,7 +341,7 @@ func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider, return nil, nil, err } evidenceLogger := logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore, evidence.WithDBKeyLayout(config.Storage.ExperimentalKeyLayout)) if err != nil { return nil, nil, err } @@ -295,13 +355,14 @@ func createBlocksyncReactor(config *cfg.Config, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, blockSync bool, + localAddr crypto.Address, logger log.Logger, metrics *blocksync.Metrics, offlineStateSyncHeight int64, ) (bcReactor p2p.Reactor, err error) { switch config.BlockSync.Version { case "v0": - bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, metrics, offlineStateSyncHeight) + bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, localAddr, metrics, offlineStateSyncHeight) case "v1", "v2": return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version) default: @@ -349,22 +410,21 @@ func createConsensusReactor(config *cfg.Config, func createTransport( config *cfg.Config, - nodeInfo p2p.NodeInfo, - nodeKey *p2p.NodeKey, + nodeKey *nodekey.NodeKey, proxyApp proxy.AppConns, ) ( - *p2p.MultiplexTransport, + *tcp.MultiplexTransport, []p2p.PeerFilterFunc, ) { var ( mConnConfig = p2p.MConnConfig(config.P2P) - transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) - connFilters = []p2p.ConnFilterFunc{} + transport = tcp.NewMultiplexTransport(*nodeKey, mConnConfig) + connFilters = []tcp.ConnFilterFunc{} peerFilters = []p2p.PeerFilterFunc{} ) if !config.P2P.AllowDuplicateIP { - connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) + connFilters = append(connFilters, tcp.ConnDuplicateIPFilter()) } // Filter peers by addr or pubkey with an ABCI query. @@ -373,9 +433,9 @@ func createTransport( connFilters = append( connFilters, // ABCI query for address filtering. - func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().Query(context.TODO(), &abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), + func(_ tcp.ConnSet, c net.Conn, _ []net.IP) error { + res, err := proxyApp.Query().Query(context.TODO(), &abci.QueryRequest{ + Path: "/p2p/filter/addr/" + c.RemoteAddr().String(), }) if err != nil { return err @@ -392,7 +452,7 @@ func createTransport( peerFilters, // ABCI query for ID filtering. func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().Query(context.TODO(), &abci.RequestQuery{ + res, err := proxyApp.Query().Query(context.TODO(), &abci.QueryRequest{ Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), }) if err != nil { @@ -407,11 +467,11 @@ func createTransport( ) } - p2p.MultiplexTransportConnFilters(connFilters...)(transport) + tcp.MultiplexTransportConnFilters(connFilters...)(transport) // Limit the number of incoming connections. 
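For intuition, here is a self-contained sketch of the inbound-connection cap computed just below: the configured maximum plus one slot per unconditional peer ID. splitAndTrimEmpty is reimplemented here from its observed usage and stands in for the node package's unexported helper.

package main

import (
	"fmt"
	"strings"
)

// splitAndTrimEmpty splits s by sep, trims cutset from every piece,
// and drops empty results (a stand-in for the helper used above).
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}
	out := []string{}
	for _, part := range strings.Split(s, sep) {
		if trimmed := strings.Trim(part, cutset); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}

func main() {
	maxNumInboundPeers := 40              // stand-in for config.P2P.MaxNumInboundPeers
	unconditionalIDs := "id1, id2 ,,id3"  // stand-in for config.P2P.UnconditionalPeerIDs
	// Unconditional peers get dedicated slots on top of the configured cap.
	max := maxNumInboundPeers + len(splitAndTrimEmpty(unconditionalIDs, ",", " "))
	fmt.Println(max) // 43
}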
max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - p2p.MultiplexTransportMaxIncomingConnections(max)(transport) + tcp.MultiplexTransportMaxIncomingConnections(max)(transport) return transport, peerFilters } @@ -425,8 +485,8 @@ func createSwitch(config *cfg.Config, stateSyncReactor *statesync.Reactor, consensusReactor *cs.Reactor, evidenceReactor *evidence.Reactor, - nodeInfo p2p.NodeInfo, - nodeKey *p2p.NodeKey, + nodeInfo ni.NodeInfo, + nodeKey *nodekey.NodeKey, p2pLogger log.Logger, ) *p2p.Switch { sw := p2p.NewSwitch( @@ -436,7 +496,9 @@ func createSwitch(config *cfg.Config, p2p.SwitchPeerFilters(peerFilters...), ) sw.SetLogger(p2pLogger) - sw.AddReactor("MEMPOOL", mempoolReactor) + if config.Mempool.Type != cfg.MempoolTypeNop { + sw.AddReactor("MEMPOOL", mempoolReactor) + } sw.AddReactor("BLOCKSYNC", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) @@ -450,21 +512,21 @@ func createSwitch(config *cfg.Config, } func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey *p2p.NodeKey, + p2pLogger log.Logger, nodeKey *nodekey.NodeKey, ) (pex.AddrBook, error) { addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) // Add ourselves to addrbook to prevent dialing ourselves if config.P2P.ExternalAddress != "" { - addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) + addr, err := na.NewFromString(na.IDAddrString(nodeKey.ID(), config.P2P.ExternalAddress)) if err != nil { return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) } addrBook.AddOurAddress(addr) } if config.P2P.ListenAddress != "" { - addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) + addr, err := na.NewFromString(na.IDAddrString(nodeKey.ID(), config.P2P.ListenAddress)) if err != nil { return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) } @@ -506,6 +568,7 @@ func startStateSync( stateStore sm.Store, blockStore *store.BlockStore, state sm.State, + dbKeyLayoutVersion string, ) error { ssR.Logger.Info("Starting state sync") @@ -513,25 +576,32 @@ func startStateSync( var err error ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - stateProvider, err = statesync.NewLightClientStateProvider( + stateProvider, err = statesync.NewLightClientStateProviderWithDBKeyVersion( ctx, state.ChainID, state.Version, state.InitialHeight, config.RPCServers, light.TrustOptions{ Period: config.TrustPeriod, Height: config.TrustHeight, Hash: config.TrustHashBytes(), - }, ssR.Logger.With("module", "light")) + }, ssR.Logger.With("module", "light"), + dbKeyLayoutVersion) if err != nil { return fmt.Errorf("failed to set up light client state provider: %w", err) } } go func() { - state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) + state, commit, err := ssR.Sync(stateProvider, config.MaxDiscoveryTime) if err != nil { ssR.Logger.Error("State sync failed", "err", err) + err = bcR.SwitchToBlockSync(state) + if err != nil { + ssR.Logger.Error("Failed to switch to block sync", "err", err) + return + } return } + err = stateStore.Bootstrap(state) if err != nil { ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) @@ -542,7 +612,6 @@ func startStateSync( ssR.Logger.Error("Failed to store last seen commit", "err", err) return } - err = 
bcR.SwitchToBlockSync(state) if err != nil { ssR.Logger.Error("Failed to switch to block sync", "err", err) @@ -552,23 +621,23 @@ func startStateSync( return nil } -//------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------ -var genesisDocKey = []byte("genesisDoc") -var genesisDocHashKey = []byte("genesisDocHash") +var ( + genesisDocKey = []byte("genesisDoc") + genesisDocHashKey = []byte("genesisDocHash") +) -// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the -// database, or creates one using the given genesisDocProvider. On success this also -// returns the genesis doc loaded through the given provider. -func LoadStateFromDBOrGenesisDocProvider( +func LoadStateFromDBOrGenesisDocProviderWithConfig( stateDB dbm.DB, genesisDocProvider GenesisDocProvider, operatorGenesisHashHex string, + config *cfg.Config, ) (sm.State, *types.GenesisDoc, error) { // Get genesis doc hash genDocHash, err := stateDB.Get(genesisDocHashKey) if err != nil { - return sm.State{}, nil, fmt.Errorf("error retrieving genesis doc hash: %w", err) + return sm.State{}, nil, ErrRetrieveGenesisDocHash{Err: err} } csGenDoc, err := genesisDocProvider() if err != nil { @@ -576,34 +645,46 @@ func LoadStateFromDBOrGenesisDocProvider( } if err = csGenDoc.GenesisDoc.ValidateAndComplete(); err != nil { - return sm.State{}, nil, fmt.Errorf("error in genesis doc: %w", err) + return sm.State{}, nil, ErrGenesisDoc{Err: err} + } + + checkSumStr := hex.EncodeToString(csGenDoc.Sha256Checksum) + if err := tmhash.ValidateSHA256(checkSumStr); err != nil { + const formatStr = "invalid genesis doc SHA256 checksum: %s" + return sm.State{}, nil, fmt.Errorf(formatStr, err) } // Validate that existing or recently saved genesis file hash matches optional --genesis_hash passed by operator if operatorGenesisHashHex != "" { decodedOperatorGenesisHash, err := hex.DecodeString(operatorGenesisHashHex) if err != nil { - return sm.State{}, nil, fmt.Errorf("genesis hash provided by operator cannot be decoded") + return sm.State{}, nil, ErrGenesisHashDecode } if !bytes.Equal(csGenDoc.Sha256Checksum, decodedOperatorGenesisHash) { - return sm.State{}, nil, fmt.Errorf("genesis doc hash in db does not match passed --genesis_hash value") + return sm.State{}, nil, ErrPassedGenesisHashMismatch } } if len(genDocHash) == 0 { // Save the genDoc hash in the store if it doesn't already exist for future verification if err = stateDB.SetSync(genesisDocHashKey, csGenDoc.Sha256Checksum); err != nil { - return sm.State{}, nil, fmt.Errorf("failed to save genesis doc hash to db: %w", err) + return sm.State{}, nil, ErrSaveGenesisDocHash{Err: err} } } else { if !bytes.Equal(genDocHash, csGenDoc.Sha256Checksum) { - return sm.State{}, nil, fmt.Errorf("genesis doc hash in db does not match loaded genesis doc") + return sm.State{}, nil, ErrLoadedGenesisDocHashMismatch } } + dbKeyLayoutVersion := "" + if config != nil { + dbKeyLayoutVersion = config.Storage.ExperimentalKeyLayout + } stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, + DBKeyLayout: dbKeyLayoutVersion, }) + state, err := stateStore.LoadFromDBOrGenesisDoc(csGenDoc.GenesisDoc) if err != nil { return sm.State{}, nil, err @@ -611,6 +692,20 @@ func LoadStateFromDBOrGenesisDocProvider( return state, csGenDoc.GenesisDoc, nil } +// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the +// database, or creates one using the given genesisDocProvider. 
On success this also +// returns the genesis doc loaded through the given provider. +// +// Note that if you don't have a version of the key layout set in your DB already, +// and no config is passed, it will default to v1. +func LoadStateFromDBOrGenesisDocProvider( + stateDB dbm.DB, + genesisDocProvider GenesisDocProvider, + operatorGenesisHashHex string, +) (sm.State, *types.GenesisDoc, error) { + return LoadStateFromDBOrGenesisDocProviderWithConfig(stateDB, genesisDocProvider, operatorGenesisHashHex, nil) +} + func createAndStartPrivValidatorSocketClient( listenAddr, chainID string, diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index bfac2340847..932c4d071d8 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -2,11 +2,10 @@ package p2p import ( "github.com/cometbft/cometbft/libs/service" - "github.com/cometbft/cometbft/p2p/conn" ) // Reactor is responsible for handling incoming messages on one or more -// Channel. Switch calls GetChannels when reactor is added to it. When a new +// Channel. Switch calls StreamDescriptors when the reactor is added to it. When a new // peer joins our node, InitPeer and AddPeer are called. RemovePeer is called // when the peer is stopped. Receive is called when a message is received on a // channel associated with this reactor. @@ -16,11 +15,11 @@ type Reactor interface { service.Service // Start, Stop // SetSwitch allows setting a switch. - SetSwitch(*Switch) + SetSwitch(sw *Switch) - // GetChannels returns the list of MConnection.ChannelDescriptor. Make sure + // StreamDescriptors returns the list of stream descriptors. Make sure // that each ID is unique across all the reactors added to the switch. - GetChannels() []*conn.ChannelDescriptor + StreamDescriptors() []StreamDescriptor // InitPeer is called by the switch before the peer is started. Use it to // initialize data for the peer (e.g. peer state). @@ -36,14 +35,14 @@ type Reactor interface { // RemovePeer is called by the switch when the peer is stopped (due to error // or other reason).
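To make the reworked Reactor surface concrete, here is a dependency-free sketch of the embedding pattern it implies: a concrete reactor overrides only what it needs and inherits no-op defaults, mirroring BaseReactor below. All types here are local stand-ins, not the real p2p package.

package main

import "fmt"

// Local stand-ins for p2p.StreamDescriptor and p2p.Envelope, for illustration only.
type StreamDescriptor struct{ ID byte }
type Envelope struct{ ChannelID byte }

// BaseReactor supplies no-op defaults, mirroring p2p.BaseReactor.
type BaseReactor struct{}

func (*BaseReactor) StreamDescriptors() []StreamDescriptor { return nil }
func (*BaseReactor) Receive(Envelope)                      {}

// EchoReactor overrides only StreamDescriptors and Receive.
type EchoReactor struct{ BaseReactor }

func (*EchoReactor) StreamDescriptors() []StreamDescriptor {
	return []StreamDescriptor{{ID: 0x42}} // IDs must be unique across reactors
}

func (*EchoReactor) Receive(e Envelope) { fmt.Printf("got message on 0x%x\n", e.ChannelID) }

func main() {
	var r interface {
		StreamDescriptors() []StreamDescriptor
		Receive(Envelope)
	} = &EchoReactor{}
	fmt.Println(len(r.StreamDescriptors())) // 1
	r.Receive(Envelope{ChannelID: 0x42})
}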
- RemovePeer(peer Peer, reason interface{}) + RemovePeer(peer Peer, reason any) // Receive is called by the switch when an envelope is received from any connected // peer on any of the channels registered by the reactor - Receive(Envelope) + Receive(e Envelope) } -//-------------------------------------- +// -------------------------------------- type BaseReactor struct { service.BaseService // Provides Start, Stop, .Quit @@ -60,8 +59,8 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor { func (br *BaseReactor) SetSwitch(sw *Switch) { br.Switch = sw } -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(Peer) {} -func (*BaseReactor) RemovePeer(Peer, interface{}) {} -func (*BaseReactor) Receive(Envelope) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } +func (*BaseReactor) StreamDescriptors() []StreamDescriptor { return nil } +func (*BaseReactor) AddPeer(Peer) {} +func (*BaseReactor) RemovePeer(Peer, any) {} +func (*BaseReactor) Receive(Envelope) {} +func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/p2p/conn/conn_go110.go b/p2p/conn/conn_go110.go deleted file mode 100644 index 459c3169b14..00000000000 --- a/p2p/conn/conn_go110.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.10 -// +build go1.10 - -package conn - -// Go1.10 has a proper net.Conn implementation that -// has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 -// lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 - -import "net" - -func NetPipe() (net.Conn, net.Conn) { - return net.Pipe() -} diff --git a/p2p/conn/conn_notgo110.go b/p2p/conn/conn_notgo110.go deleted file mode 100644 index 37de8afcc12..00000000000 --- a/p2p/conn/conn_notgo110.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !go1.10 -// +build !go1.10 - -package conn - -import ( - "net" - "time" -) - -// Only Go1.10 has a proper net.Conn implementation that -// has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 -// lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 -// so for go versions < Go1.10 use our custom net.Conn creator -// that doesn't return an `Unimplemented error` for net.Conn. -// Before https://github.com/cometbft/cometbft/commit/49faa79bdce5663894b3febbf4955fb1d172df04 -// we hadn't cared about errors from SetDeadline so swallow them up anyways. -type pipe struct { - net.Conn -} - -func (p *pipe) SetDeadline(t time.Time) error { - return nil -} - -func NetPipe() (net.Conn, net.Conn) { - p1, p2 := net.Pipe() - return &pipe{p1}, &pipe{p2} -} - -var _ net.Conn = (*pipe)(nil) diff --git a/p2p/errors.go b/p2p/errors.go index 4fc915292fb..fb1f438ee9b 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -3,103 +3,15 @@ package p2p import ( "fmt" "net" -) - -// ErrFilterTimeout indicates that a filter operation timed out. -type ErrFilterTimeout struct{} - -func (e ErrFilterTimeout) Error() string { - return "filter timed out" -} - -// ErrRejected indicates that a Peer was rejected carrying additional -// information as to the reason. -type ErrRejected struct { - addr NetAddress - conn net.Conn - err error - id ID - isAuthFailure bool - isDuplicate bool - isFiltered bool - isIncompatible bool - isNodeInfoInvalid bool - isSelf bool -} - -// Addr returns the NetAddress for the rejected Peer. 
-func (e ErrRejected) Addr() NetAddress { - return e.addr -} - -func (e ErrRejected) Error() string { - if e.isAuthFailure { - return fmt.Sprintf("auth failure: %s", e.err) - } - - if e.isDuplicate { - if e.conn != nil { - return fmt.Sprintf( - "duplicate CONN<%s>", - e.conn.RemoteAddr().String(), - ) - } - if e.id != "" { - return fmt.Sprintf("duplicate ID<%v>", e.id) - } - } - - if e.isFiltered { - if e.conn != nil { - return fmt.Sprintf( - "filtered CONN<%s>: %s", - e.conn.RemoteAddr().String(), - e.err, - ) - } - - if e.id != "" { - return fmt.Sprintf("filtered ID<%v>: %s", e.id, e.err) - } - } - - if e.isIncompatible { - return fmt.Sprintf("incompatible: %s", e.err) - } - - if e.isNodeInfoInvalid { - return fmt.Sprintf("invalid NodeInfo: %s", e.err) - } - - if e.isSelf { - return fmt.Sprintf("self ID<%v>", e.id) - } - - return fmt.Sprintf("%s", e.err) -} -// IsAuthFailure when Peer authentication was unsuccessful. -func (e ErrRejected) IsAuthFailure() bool { return e.isAuthFailure } - -// IsDuplicate when Peer ID or IP are present already. -func (e ErrRejected) IsDuplicate() bool { return e.isDuplicate } - -// IsFiltered when Peer ID or IP was filtered. -func (e ErrRejected) IsFiltered() bool { return e.isFiltered } - -// IsIncompatible when Peer NodeInfo is not compatible with our own. -func (e ErrRejected) IsIncompatible() bool { return e.isIncompatible } - -// IsNodeInfoInvalid when the sent NodeInfo is not valid. -func (e ErrRejected) IsNodeInfoInvalid() bool { return e.isNodeInfoInvalid } - -// IsSelf when Peer is our own node. -func (e ErrRejected) IsSelf() bool { return e.isSelf } + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" +) // ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known // ID. type ErrSwitchDuplicatePeerID struct { - ID ID + ID nodekey.ID } func (e ErrSwitchDuplicatePeerID) Error() string { @@ -118,7 +30,7 @@ func (e ErrSwitchDuplicatePeerIP) Error() string { // ErrSwitchConnectToSelf to be raised when trying to connect to itself. type ErrSwitchConnectToSelf struct { - Addr *NetAddress + Addr *na.NetAddr } func (e ErrSwitchConnectToSelf) Error() string { @@ -126,8 +38,8 @@ func (e ErrSwitchConnectToSelf) Error() string { } type ErrSwitchAuthenticationFailure struct { - Dialed *NetAddress - Got ID + Dialed *na.NetAddr + Got nodekey.ID } func (e ErrSwitchAuthenticationFailure) Error() string { @@ -138,54 +50,34 @@ func (e ErrSwitchAuthenticationFailure) Error() string { ) } -// ErrTransportClosed is raised when the Transport has been closed. -type ErrTransportClosed struct{} - -func (e ErrTransportClosed) Error() string { - return "transport has been closed" -} - // ErrPeerRemoval is raised when attempting to remove a peer results in an error. type ErrPeerRemoval struct{} -func (e ErrPeerRemoval) Error() string { +func (ErrPeerRemoval) Error() string { return "peer removal failed" } -//------------------------------------------------------------------- +// ------------------------------------------------------------------- -type ErrNetAddressNoID struct { - Addr string -} - -func (e ErrNetAddressNoID) Error() string { - return fmt.Sprintf("address (%s) does not contain ID", e.Addr) -} - -type ErrNetAddressInvalid struct { +// ErrCurrentlyDialingOrExistingAddress indicates that we're currently +// dialing this address or it belongs to an existing peer. 
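The typed errors in this file pair naturally with errors.As once wrapped; a minimal sketch (ErrDuplicatePeer is invented for illustration, echoing the shape of the switch errors above):

package main

import (
	"errors"
	"fmt"
)

// ErrDuplicatePeer mimics the shape of the typed switch errors above
// (illustrative only; not the real p2p types).
type ErrDuplicatePeer struct{ ID string }

func (e ErrDuplicatePeer) Error() string {
	return fmt.Sprintf("duplicate peer ID %s", e.ID)
}

func addPeer(id string) error {
	return fmt.Errorf("switch: %w", ErrDuplicatePeer{ID: id})
}

func main() {
	err := addPeer("deadbeef")
	var dup ErrDuplicatePeer
	// errors.As sees through the %w wrapping to the typed value.
	if errors.As(err, &dup) {
		fmt.Println("rejected duplicate:", dup.ID)
	}
}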
+type ErrCurrentlyDialingOrExistingAddress struct { Addr string - Err error -} - -func (e ErrNetAddressInvalid) Error() string { - return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err) } -type ErrNetAddressLookup struct { - Addr string - Err error +func (e ErrCurrentlyDialingOrExistingAddress) Error() string { + return fmt.Sprintf("connection with %s has been established or dialed", e.Addr) } -func (e ErrNetAddressLookup) Error() string { - return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err) +type ErrStart struct { + Service any + Err error } -// ErrCurrentlyDialingOrExistingAddress indicates that we're currently -// dialing this address or it belongs to an existing peer. -type ErrCurrentlyDialingOrExistingAddress struct { - Addr string +func (e ErrStart) Error() string { + return fmt.Sprintf("failed to start %v: %v", e.Service, e.Err) } -func (e ErrCurrentlyDialingOrExistingAddress) Error() string { - return fmt.Sprintf("connection with %s has been established or dialed", e.Addr) +func (e ErrStart) Unwrap() error { + return e.Err } diff --git a/p2p/handshake.go b/p2p/handshake.go new file mode 100644 index 00000000000..3467e1ce32d --- /dev/null +++ b/p2p/handshake.go @@ -0,0 +1,192 @@ +package p2p + +import ( + "fmt" + "net" + "time" + + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + "github.com/cometbft/cometbft/libs/protoio" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" +) + +// ErrRejected indicates that a Peer was rejected carrying additional +// information as to the reason. +type ErrRejected struct { + addr na.NetAddr + conn net.Conn + err error + id nodekey.ID + isAuthFailure bool + isDuplicate bool + isFiltered bool + isIncompatible bool + isNodeInfoInvalid bool + isSelf bool +} + +// Addr returns the network address for the rejected Peer. +func (e ErrRejected) Addr() na.NetAddr { + return e.addr +} + +func (e ErrRejected) Error() string { + if e.isAuthFailure { + return fmt.Sprintf("auth failure: %s", e.err) + } + + if e.isDuplicate { + if e.conn != nil { + return fmt.Sprintf( + "duplicate CONN<%s>", + e.conn.RemoteAddr().String(), + ) + } + if e.id != "" { + return fmt.Sprintf("duplicate ID<%v>", e.id) + } + } + + if e.isFiltered { + if e.conn != nil { + return fmt.Sprintf( + "filtered CONN<%s>: %s", + e.conn.RemoteAddr().String(), + e.err, + ) + } + + if e.id != "" { + return fmt.Sprintf("filtered ID<%v>: %s", e.id, e.err) + } + } + + if e.isIncompatible { + return fmt.Sprintf("incompatible: %s", e.err) + } + + if e.isNodeInfoInvalid { + return fmt.Sprintf("invalid NodeInfo: %s", e.err) + } + + if e.isSelf { + return fmt.Sprintf("self ID<%v>", e.id) + } + + return e.err.Error() +} + +// IsAuthFailure when Peer authentication was unsuccessful. +func (e ErrRejected) IsAuthFailure() bool { return e.isAuthFailure } + +// IsDuplicate when Peer ID or IP are present already. +func (e ErrRejected) IsDuplicate() bool { return e.isDuplicate } + +// IsFiltered when Peer ID or IP was filtered. +func (e ErrRejected) IsFiltered() bool { return e.isFiltered } + +// IsIncompatible when Peer NodeInfo is not compatible with our own. +func (e ErrRejected) IsIncompatible() bool { return e.isIncompatible } + +// IsNodeInfoInvalid when the sent NodeInfo is not valid. +func (e ErrRejected) IsNodeInfoInvalid() bool { return e.isNodeInfoInvalid } + +// IsSelf when Peer is our own node. 
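For intuition about exchangeNodeInfo further down, here is a simplified version of the same pattern: set a deadline, write our info and read theirs in two goroutines, and drain one buffered error channel. JSON stands in for the length-delimited protobuf framing.

package main

import (
	"encoding/json"
	"fmt"
	"net"
	"time"
)

type info struct{ ID, Network string }

// exchange writes ours and reads theirs concurrently, mirroring the
// two-goroutine/error-channel structure of exchangeNodeInfo.
func exchange(c net.Conn, ours info, timeout time.Duration) (info, error) {
	if err := c.SetDeadline(time.Now().Add(timeout)); err != nil {
		return info{}, err
	}
	var (
		errc   = make(chan error, 2)
		theirs info
	)
	go func() { errc <- json.NewEncoder(c).Encode(ours) }()
	go func() { errc <- json.NewDecoder(c).Decode(&theirs) }()
	for i := 0; i < cap(errc); i++ {
		if err := <-errc; err != nil {
			return info{}, err
		}
	}
	return theirs, c.SetDeadline(time.Time{})
}

func main() {
	a, b := net.Pipe()
	go func() {
		if _, err := exchange(a, info{ID: "a", Network: "test"}, time.Second); err != nil {
			panic(err)
		}
	}()
	theirs, err := exchange(b, info{ID: "b", Network: "test"}, time.Second)
	fmt.Println(theirs, err) // {a test} <nil>
}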
+func (e ErrRejected) IsSelf() bool { return e.isSelf } + +// Do a handshake and verify the node info. +func handshake(ourNodeInfo ni.NodeInfo, c net.Conn, handshakeTimeout time.Duration) (ni.NodeInfo, error) { + nodeInfo, err := exchangeNodeInfo(ourNodeInfo, c, handshakeTimeout) + if err != nil { + return nil, ErrRejected{ + conn: c, + err: fmt.Errorf("handshake failed: %w", err), + isAuthFailure: true, + } + } + + if err := nodeInfo.Validate(); err != nil { + return nil, ErrRejected{ + conn: c, + err: err, + isNodeInfoInvalid: true, + } + } + + // TODO + // Ensure connection key matches self reported key. + // + // Transport ensures that connID == addr.ID. + // Assert that addr.ID == nodeInfo.ID. + // if remoteNodeID != nodeInfo.ID() { + // return nil, ErrRejected{ + // conn: c, + // id: remoteNodeID, + // err: fmt.Errorf( + // "addr.ID (%v) NodeInfo.ID (%v) mismatch", + // remoteNodeID, + // nodeInfo.ID(), + // ), + // isAuthFailure: true, + // } + // } + + // Reject self. + if ourNodeInfo.ID() == nodeInfo.ID() { + return nil, ErrRejected{ + addr: *na.New(nodeInfo.ID(), c.RemoteAddr()), + conn: c, + id: nodeInfo.ID(), + isSelf: true, + } + } + + if err := ourNodeInfo.CompatibleWith(nodeInfo); err != nil { + return nil, ErrRejected{ + conn: c, + err: err, + id: nodeInfo.ID(), + isIncompatible: true, + } + } + + return nodeInfo, nil +} + +func exchangeNodeInfo(ourNodeInfo ni.NodeInfo, c net.Conn, timeout time.Duration) (peerNodeInfo ni.NodeInfo, err error) { + if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { + return nil, err + } + + var ( + errc = make(chan error, 2) + pbpeerNodeInfo tmp2p.DefaultNodeInfo + ) + + go func(errc chan<- error, c net.Conn) { + ourNodeInfoProto := ourNodeInfo.(ni.Default).ToProto() + _, err := protoio.NewDelimitedWriter(c).WriteMsg(ourNodeInfoProto) + errc <- err + }(errc, c) + go func(errc chan<- error, c net.Conn) { + protoReader := protoio.NewDelimitedReader(c, ni.MaxSize()) + _, err := protoReader.ReadMsg(&pbpeerNodeInfo) + errc <- err + }(errc, c) + + for i := 0; i < cap(errc); i++ { + err := <-errc + if err != nil { + return nil, err + } + } + + peerNodeInfo, err = ni.DefaultFromToProto(&pbpeerNodeInfo) + if err != nil { + return nil, err + } + + return peerNodeInfo, c.SetDeadline(time.Time{}) +} diff --git a/p2p/handshake_test.go b/p2p/handshake_test.go new file mode 100644 index 00000000000..71d8a594148 --- /dev/null +++ b/p2p/handshake_test.go @@ -0,0 +1,134 @@ +package p2p + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/p2p/nodekey" +) + +func TestHandshake(t *testing.T) { + c1, c2 := net.Pipe() + + go func() { + var ( + pk = ed25519.GenPrivKey() + nodeInfo = testNodeInfo(nodekey.PubKeyToID(pk.PubKey()), "c2") + ) + _, err := handshake(nodeInfo, c1, 20*time.Millisecond) + if err != nil { + panic("handshake failed: " + err.Error()) + } + }() + + var ( + pk = ed25519.GenPrivKey() + nodeInfo = testNodeInfo(nodekey.PubKeyToID(pk.PubKey()), "c1") + ) + + _, err := handshake(nodeInfo, c2, 20*time.Millisecond) + require.NoError(t, err) +} + +func TestHandshake_InvalidNodeInfo(t *testing.T) { + c1, c2 := net.Pipe() + + go func() { + var ( + pk = ed25519.GenPrivKey() + nodeInfo = testNodeInfo(nodekey.PubKeyToID(pk.PubKey()), "c2") + ) + + // modify nodeInfo to be invalid + nodeInfo.Other.TxIndex = "invalid" + + _, err := handshake(nodeInfo, c1, 20*time.Millisecond) + if err != nil { + panic("handshake failed: " + err.Error()) + } 
+ }() + + var ( + pk = ed25519.GenPrivKey() + nodeInfo = testNodeInfo(nodekey.PubKeyToID(pk.PubKey()), "c1") + ) + + _, err := handshake(nodeInfo, c2, 20*time.Millisecond) + require.Error(t, err) + + if e, ok := err.(ErrRejected); ok { + if !e.IsNodeInfoInvalid() { + t.Errorf("expected NodeInfo to be invalid, got %v", err) + } + } else { + t.Errorf("expected ErrRejected, got %v", err) + } +} + +func TestTransportMultiplexRejectSelf(t *testing.T) { + c1, c2 := net.Pipe() + + var ( + pk1 = ed25519.GenPrivKey() + nodeInfo1 = testNodeInfo(nodekey.PubKeyToID(pk1.PubKey()), "c1") + ) + + go func() { + nodeInfo2 := testNodeInfo(nodeInfo1.ID(), "c2") + + _, err := handshake(nodeInfo2, c1, 20*time.Millisecond) + if err == nil { + panic("expected handshake to fail") + } + }() + + _, err := handshake(nodeInfo1, c2, 20*time.Millisecond) + require.Error(t, err) + + if e, ok := err.(ErrRejected); ok { + if !e.IsSelf() { + t.Errorf("expected to reject self, got: %v", e) + } + } else { + t.Errorf("expected ErrRejected, got %v", err) + } +} + +func TestHandshake_Incompatible(t *testing.T) { + c1, c2 := net.Pipe() + + go func() { + var ( + pk = ed25519.GenPrivKey() + nodeInfo = testNodeInfo(nodekey.PubKeyToID(pk.PubKey()), "c2") + ) + + // modify nodeInfo to be incompatible + nodeInfo.Network = "other" + + _, err := handshake(nodeInfo, c1, 20*time.Millisecond) + if err == nil { + panic("expected handshake to fail") + } + }() + + var ( + pk = ed25519.GenPrivKey() + nodeInfo = testNodeInfo(nodekey.PubKeyToID(pk.PubKey()), "c1") + ) + + _, err := handshake(nodeInfo, c2, 20*time.Millisecond) + require.Error(t, err) + + if e, ok := err.(ErrRejected); ok { + if !e.IsIncompatible() { + t.Errorf("expected to reject incompatible, got %v", e) + } + } else { + t.Errorf("expected ErrRejected, got %v", err) + } +} diff --git a/p2p/fuzz.go b/p2p/internal/fuzz/fuzz.go similarity index 75% rename from p2p/fuzz.go rename to p2p/internal/fuzz/fuzz.go index 23ad8c5907f..81cf634ab0d 100644 --- a/p2p/fuzz.go +++ b/p2p/internal/fuzz/fuzz.go @@ -1,11 +1,11 @@ -package p2p +package fuzz import ( "net" "time" "github.com/cometbft/cometbft/config" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtsync "github.com/cometbft/cometbft/libs/sync" ) @@ -21,31 +21,9 @@ type FuzzedConnection struct { config *config.FuzzConnConfig } -// FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately. -func FuzzConn(conn net.Conn) net.Conn { - return FuzzConnFromConfig(conn, config.DefaultFuzzConnConfig()) -} - -// FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing -// starts immediately. -func FuzzConnFromConfig(conn net.Conn, config *config.FuzzConnConfig) net.Conn { - return &FuzzedConnection{ - conn: conn, - start: make(<-chan time.Time), - active: true, - config: config, - } -} - -// FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the -// duration elapses. -func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn { - return FuzzConnAfterFromConfig(conn, d, config.DefaultFuzzConnConfig()) -} - -// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config. +// ConnAfterFromConfig creates a new FuzzedConnection from a config. // Fuzzing starts when the duration elapses.
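The FuzzedConnection being moved here is essentially a net.Conn wrapper that intercepts I/O; a stripped-down illustration with a fixed write delay (the delay-only behavior is invented for the sketch):

package main

import (
	"fmt"
	"net"
	"time"
)

// delayedConn embeds a net.Conn and sleeps before each write,
// a minimal cousin of what a fuzzed connection does.
type delayedConn struct {
	net.Conn
	delay time.Duration
}

func (d *delayedConn) Write(p []byte) (int, error) {
	time.Sleep(d.delay)
	return d.Conn.Write(p)
}

func main() {
	a, b := net.Pipe()
	go func() {
		dc := &delayedConn{Conn: a, delay: 10 * time.Millisecond}
		if _, err := dc.Write([]byte("hi")); err != nil {
			panic(err)
		}
	}()
	buf := make([]byte, 2)
	start := time.Now()
	if _, err := b.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%s after %v\n", buf, time.Since(start).Round(time.Millisecond))
}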
-func FuzzConnAfterFromConfig( +func ConnAfterFromConfig( conn net.Conn, d time.Duration, config *config.FuzzConnConfig, @@ -107,7 +85,7 @@ func (fc *FuzzedConnection) randomDuration() time.Duration { } // implements the fuzz (delay, kill conn) -// and returns whether or not the read/write should be ignored +// and returns whether or not the read/write should be ignored. func (fc *FuzzedConnection) fuzz() bool { if !fc.shouldFuzz() { return false diff --git a/p2p/metrics.gen.go b/p2p/metrics.gen.go index e452f16535e..a0e0b99cff4 100644 --- a/p2p/metrics.gen.go +++ b/p2p/metrics.gen.go @@ -3,8 +3,8 @@ package p2p import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -20,30 +20,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "peers", Help: "Number of peers.", }, labels).With(labelsAndValues...), - PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_receive_bytes_total", - Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), - PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "peer_send_bytes_total", - Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "peer_pending_send_bytes", Help: "Pending bytes to be sent to a given peer.", }, append(labels, "peer_id")).With(labelsAndValues...), - NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "num_txs", - Help: "Number of transactions submitted by each peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), MessageReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -56,17 +38,28 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "message_send_bytes_total", Help: "Number of bytes of each message type sent.", }, append(labels, "message_type")).With(labelsAndValues...), + RecvRateLimiterDelay: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "recv_rate_limiter_delay", + Help: "Time in seconds spent sleeping by the receive rate limiter", + }, append(labels, "peer_id")).With(labelsAndValues...), + SendRateLimiterDelay: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "send_rate_limiter_delay", + Help: "Time in seconds spent sleeping by the send rate limiter", + }, append(labels, "peer_id")).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ Peers: discard.NewGauge(), - PeerReceiveBytesTotal: discard.NewCounter(), - PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), - NumTxs: discard.NewGauge(), MessageReceiveBytesTotal: discard.NewCounter(), MessageSendBytesTotal: discard.NewCounter(), + RecvRateLimiterDelay: discard.NewCounter(), + SendRateLimiterDelay: 
discard.NewCounter(), } } diff --git a/p2p/metrics.go b/p2p/metrics.go index 808142e9afc..a1ac00fa4cc 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -6,7 +6,7 @@ import ( "regexp" "sync" - "github.com/go-kit/kit/metrics" + "github.com/cometbft/cometbft/libs/metrics" ) const ( @@ -15,12 +15,10 @@ const ( MetricsSubsystem = "p2p" ) -var ( - // valueToLabelRegexp is used to find the golang package name and type name - // so that the name can be turned into a prometheus label where the characters - // in the label do not include prometheus special characters such as '*' and '.'. - valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) -) +// valueToLabelRegexp is used to find the golang package name and type name +// so that the name can be turned into a prometheus label where the characters +// in the label do not include prometheus special characters such as '*' and '.'. +var valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) //go:generate go run ../scripts/metricsgen -struct=Metrics @@ -28,51 +26,67 @@ var ( type Metrics struct { // Number of peers. Peers metrics.Gauge - // Number of bytes received from a given peer. - PeerReceiveBytesTotal metrics.Counter `metrics_labels:"peer_id,chID"` - // Number of bytes sent to a given peer. - PeerSendBytesTotal metrics.Counter `metrics_labels:"peer_id,chID"` // Pending bytes to be sent to a given peer. PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"` - // Number of transactions submitted by each peer. - NumTxs metrics.Gauge `metrics_labels:"peer_id"` // Number of bytes of each message type received. MessageReceiveBytesTotal metrics.Counter `metrics_labels:"message_type"` // Number of bytes of each message type sent. MessageSendBytesTotal metrics.Counter `metrics_labels:"message_type"` + // Time in seconds spent sleeping by the receive rate limiter + RecvRateLimiterDelay metrics.Counter `metrics_labels:"peer_id"` + // Time in seconds spent sleeping by the send rate limiter + SendRateLimiterDelay metrics.Counter `metrics_labels:"peer_id"` } -type metricsLabelCache struct { - mtx *sync.RWMutex - messageLabelNames map[reflect.Type]string +type peerPendingMetricsCache struct { + mtx sync.Mutex + perMessageCache map[reflect.Type]*peerPendingMetricsCacheEntry } -// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang -// type that is passed in. -// This method uses a map on the Metrics struct so that each label name only needs -// to be produced once to prevent expensive string operations. 
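To see what valueToLabelRegexp produces for the buildLabel helper below, a tiny runnable example:

package main

import (
	"fmt"
	"reflect"
	"regexp"
)

// Same expression as in p2p/metrics.go above.
var valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`)

type Envelope struct{}

// buildLabel mirrors the helper in the diff: "*main.Envelope" -> "main_Envelope".
func buildLabel(t reflect.Type) string {
	ss := valueToLabelRegexp.FindStringSubmatch(t.String())
	return fmt.Sprintf("%s_%s", ss[1], ss[2])
}

func main() {
	fmt.Println(buildLabel(reflect.TypeOf(&Envelope{}))) // main_Envelope
}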
-func (m *metricsLabelCache) ValueToMetricLabel(i interface{}) string { - t := reflect.TypeOf(i) - m.mtx.RLock() +type peerPendingMetricsCacheEntry struct { + label string + pendingSendBytes int + pendingRecvBytes int +} - if s, ok := m.messageLabelNames[t]; ok { - m.mtx.RUnlock() - return s +func newPeerPendingMetricsCache() *peerPendingMetricsCache { + return &peerPendingMetricsCache{ + perMessageCache: make(map[reflect.Type]*peerPendingMetricsCacheEntry), } - m.mtx.RUnlock() +} - s := t.String() - ss := valueToLabelRegexp.FindStringSubmatch(s) - l := fmt.Sprintf("%s_%s", ss[1], ss[2]) - m.mtx.Lock() - defer m.mtx.Unlock() - m.messageLabelNames[t] = l - return l +func (c *peerPendingMetricsCache) AddPendingSendBytes(msgType reflect.Type, addBytes int) { + c.mtx.Lock() + defer c.mtx.Unlock() + if entry, ok := c.perMessageCache[msgType]; ok { + entry.pendingSendBytes += addBytes + } else { + c.perMessageCache[msgType] = &peerPendingMetricsCacheEntry{ + label: buildLabel(msgType), + pendingSendBytes: addBytes, + } + } } -func newMetricsLabelCache() *metricsLabelCache { - return &metricsLabelCache{ - mtx: &sync.RWMutex{}, - messageLabelNames: map[reflect.Type]string{}, +func (c *peerPendingMetricsCache) AddPendingRecvBytes(msgType reflect.Type, addBytes int) { + c.mtx.Lock() + defer c.mtx.Unlock() + if entry, ok := c.perMessageCache[msgType]; ok { + entry.pendingRecvBytes += addBytes + } else { + c.perMessageCache[msgType] = &peerPendingMetricsCacheEntry{ + label: buildLabel(msgType), + pendingRecvBytes: addBytes, + } } } + +func buildLabel(msgType reflect.Type) string { + s := msgType.String() + ss := valueToLabelRegexp.FindStringSubmatch(s) + return fmt.Sprintf("%s_%s", ss[1], ss[2]) +} + +func getMsgType(i any) reflect.Type { + return reflect.TypeOf(i) +} diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index b4111004c81..eda6bc649ed 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -6,34 +6,41 @@ import ( "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/libs/service" "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/p2p/conn" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) type Peer struct { *service.BaseService ip net.IP - id p2p.ID - addr *p2p.NetAddress - kv map[string]interface{} + id nodekey.ID + addr *na.NetAddr + kv map[string]any Outbound, Persistent bool + server, client net.Conn } // NewPeer creates and starts a new mock peer. If the ip // is nil, random routable address is used. 
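The generated p2p/mocks further down follow testify's mock.Mock pattern, with expectations registered via On and dispatched through Called; here is a hand-rolled miniature of that shape, with Greeter invented for illustration:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Greeter is a hand-written mock in the same style mockery generates.
type Greeter struct{ mock.Mock }

func (m *Greeter) Greet(name string) string {
	ret := m.Called(name) // looks up the expectation registered with On
	return ret.String(0)
}

func main() {
	g := &Greeter{}
	g.On("Greet", "comet").Return("hello comet")
	fmt.Println(g.Greet("comet")) // hello comet
}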
func NewPeer(ip net.IP) *Peer { - var netAddr *p2p.NetAddress + var netAddr *na.NetAddr if ip == nil { - _, netAddr = p2p.CreateRoutableAddr() + _, netAddr = na.CreateRoutableAddr() } else { - netAddr = p2p.NewNetAddressIPPort(ip, 26656) + netAddr = na.NewFromIPPort(ip, 26656) } - nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey := nodekey.NodeKey{PrivKey: ed25519.GenPrivKey()} netAddr.ID = nodeKey.ID() + server, client := net.Pipe() mp := &Peer{ - ip: ip, - id: nodeKey.ID(), - addr: netAddr, - kv: make(map[string]interface{}), + ip: ip, + id: nodeKey.ID(), + addr: netAddr, + kv: make(map[string]any), + server: server, + client: client, } mp.BaseService = service.NewBaseService(nil, "MockPeer", mp) if err := mp.Start(); err != nil { @@ -42,32 +49,37 @@ func NewPeer(ip net.IP) *Peer { return mp } -func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(_ p2p.Envelope) bool { return true } -func (mp *Peer) Send(_ p2p.Envelope) bool { return true } -func (mp *Peer) NodeInfo() p2p.NodeInfo { - return p2p.DefaultNodeInfo{ +func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error +func (mp *Peer) OnStop() { + mp.server.Close() + mp.client.Close() +} +func (*Peer) HasChannel(_ byte) bool { return true } +func (*Peer) TrySend(_ p2p.Envelope) bool { return true } +func (*Peer) Send(_ p2p.Envelope) bool { return true } +func (mp *Peer) NodeInfo() ni.NodeInfo { + return ni.Default{ DefaultNodeID: mp.addr.ID, ListenAddr: mp.addr.DialString(), } } -func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp *Peer) ID() p2p.ID { return mp.id } -func (mp *Peer) IsOutbound() bool { return mp.Outbound } -func (mp *Peer) IsPersistent() bool { return mp.Persistent } -func (mp *Peer) Get(key string) interface{} { +func (*Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } +func (mp *Peer) ID() nodekey.ID { return mp.id } +func (mp *Peer) IsOutbound() bool { return mp.Outbound } +func (mp *Peer) IsPersistent() bool { return mp.Persistent } +func (mp *Peer) Get(key string) any { if value, ok := mp.kv[key]; ok { return value } return nil } -func (mp *Peer) Set(key string, value interface{}) { +func (mp *Peer) Set(key string, value any) { mp.kv[key] = value } -func (mp *Peer) RemoteIP() net.IP { return mp.ip } -func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } -func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *Peer) CloseConn() error { return nil } -func (mp *Peer) SetRemovalFailed() {} -func (mp *Peer) GetRemovalFailed() bool { return false } +func (mp *Peer) RemoteIP() net.IP { return mp.ip } +func (mp *Peer) SocketAddr() *na.NetAddr { return mp.addr } +func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } +func (mp *Peer) Conn() net.Conn { return mp.server } +func (*Peer) SetRemovalFailed() {} +func (*Peer) GetRemovalFailed() bool { return false } diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index 64d93a97358..04610b427e9 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -3,13 +3,12 @@ package mock import ( "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/p2p/conn" ) type Reactor struct { p2p.BaseReactor - Channels []*conn.ChannelDescriptor + Channels []p2p.StreamDescriptor } func NewReactor() *Reactor { @@ -19,7 +18,7 @@ func NewReactor() *Reactor { return r } -func (r *Reactor) GetChannels() 
[]*conn.ChannelDescriptor { return r.Channels } -func (r *Reactor) AddPeer(_ p2p.Peer) {} -func (r *Reactor) RemovePeer(_ p2p.Peer, _ interface{}) {} -func (r *Reactor) Receive(_ p2p.Envelope) {} +func (r *Reactor) StreamDescriptors() []p2p.StreamDescriptor { return r.Channels } +func (*Reactor) AddPeer(_ p2p.Peer) {} +func (*Reactor) RemovePeer(_ p2p.Peer, _ any) {} +func (*Reactor) Receive(_ p2p.Envelope) {} diff --git a/p2p/mock_transport.go b/p2p/mock_transport.go new file mode 100644 index 00000000000..8b059437a1b --- /dev/null +++ b/p2p/mock_transport.go @@ -0,0 +1,40 @@ +package p2p + +import ( + "net" + "time" + + na "github.com/cometbft/cometbft/p2p/netaddr" +) + +type mockTransport struct { + ln net.Listener + addr na.NetAddr +} + +func (t *mockTransport) Listen(addr na.NetAddr) error { + ln, err := net.Listen("tcp", addr.DialString()) + if err != nil { + return err + } + t.addr = addr + t.ln = ln + return nil +} + +func (t *mockTransport) NetAddr() na.NetAddr { + return t.addr +} + +func (t *mockTransport) Accept() (net.Conn, *na.NetAddr, error) { + c, err := t.ln.Accept() + return c, nil, err +} + +func (*mockTransport) Dial(addr na.NetAddr) (net.Conn, error) { + return addr.DialTimeout(time.Second) +} + +func (*mockTransport) Cleanup(net.Conn) error { + return nil +} diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index 0c562447a38..332d64028b0 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -4,12 +4,18 @@ package mocks import ( log "github.com/cometbft/cometbft/libs/log" - conn "github.com/cometbft/cometbft/p2p/conn" + conn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" mock "github.com/stretchr/testify/mock" net "net" + netaddr "github.com/cometbft/cometbft/p2p/netaddr" + + nodeinfo "github.com/cometbft/cometbft/p2p/nodeinfo" + + nodekey "github.com/cometbft/cometbft/p2p/nodekey" + p2p "github.com/cometbft/cometbft/p2p" ) @@ -18,15 +24,21 @@ type Peer struct { mock.Mock } -// CloseConn provides a mock function with given fields: -func (_m *Peer) CloseConn() error { +// Conn provides a mock function with given fields: +func (_m *Peer) Conn() net.Conn { ret := _m.Called() - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { + if len(ret) == 0 { + panic("no return value specified for Conn") + } + + var r0 net.Conn + if rf, ok := ret.Get(0).(func() net.Conn); ok { r0 = rf() } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Conn) + } } return r0 @@ -37,16 +49,20 @@ func (_m *Peer) FlushStop() { _m.Called() } -// Get provides a mock function with given fields: _a0 -func (_m *Peer) Get(_a0 string) interface{} { - ret := _m.Called(_a0) +// Get provides a mock function with given fields: key +func (_m *Peer) Get(key string) any { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Get") + } - var r0 interface{} - if rf, ok := ret.Get(0).(func(string) interface{}); ok { - r0 = rf(_a0) + var r0 any + if rf, ok := ret.Get(0).(func(string) any); ok { + r0 = rf(key) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) + r0 = ret.Get(0).(any) } } @@ -57,6 +73,10 @@ func (_m *Peer) Get(_a0 string) interface{} { func (_m *Peer) GetRemovalFailed() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetRemovalFailed") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -67,15 +87,37 @@ func (_m *Peer) GetRemovalFailed() bool { return r0 } +// HasChannel provides a mock function with given fields: chID +func (_m *Peer) HasChannel(chID 
byte) bool { + ret := _m.Called(chID) + + if len(ret) == 0 { + panic("no return value specified for HasChannel") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(byte) bool); ok { + r0 = rf(chID) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // ID provides a mock function with given fields: -func (_m *Peer) ID() p2p.ID { +func (_m *Peer) ID() nodekey.ID { ret := _m.Called() - var r0 p2p.ID - if rf, ok := ret.Get(0).(func() p2p.ID); ok { + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 nodekey.ID + if rf, ok := ret.Get(0).(func() nodekey.ID); ok { r0 = rf() } else { - r0 = ret.Get(0).(p2p.ID) + r0 = ret.Get(0).(nodekey.ID) } return r0 @@ -85,6 +127,10 @@ func (_m *Peer) ID() p2p.ID { func (_m *Peer) IsOutbound() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsOutbound") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -99,6 +145,10 @@ func (_m *Peer) IsOutbound() bool { func (_m *Peer) IsPersistent() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsPersistent") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -113,6 +163,10 @@ func (_m *Peer) IsPersistent() bool { func (_m *Peer) IsRunning() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsRunning") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -124,15 +178,19 @@ func (_m *Peer) IsRunning() bool { } // NodeInfo provides a mock function with given fields: -func (_m *Peer) NodeInfo() p2p.NodeInfo { +func (_m *Peer) NodeInfo() nodeinfo.NodeInfo { ret := _m.Called() - var r0 p2p.NodeInfo - if rf, ok := ret.Get(0).(func() p2p.NodeInfo); ok { + if len(ret) == 0 { + panic("no return value specified for NodeInfo") + } + + var r0 nodeinfo.NodeInfo + if rf, ok := ret.Get(0).(func() nodeinfo.NodeInfo); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.NodeInfo) + r0 = ret.Get(0).(nodeinfo.NodeInfo) } } @@ -143,6 +201,10 @@ func (_m *Peer) NodeInfo() p2p.NodeInfo { func (_m *Peer) OnReset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnReset") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -157,6 +219,10 @@ func (_m *Peer) OnReset() error { func (_m *Peer) OnStart() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnStart") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -176,6 +242,10 @@ func (_m *Peer) OnStop() { func (_m *Peer) Quit() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Quit") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -192,6 +262,10 @@ func (_m *Peer) Quit() <-chan struct{} { func (_m *Peer) RemoteAddr() net.Addr { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RemoteAddr") + } + var r0 net.Addr if rf, ok := ret.Get(0).(func() net.Addr); ok { r0 = rf() @@ -208,6 +282,10 @@ func (_m *Peer) RemoteAddr() net.Addr { func (_m *Peer) RemoteIP() net.IP { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RemoteIP") + } + var r0 net.IP if rf, ok := ret.Get(0).(func() net.IP); ok { r0 = rf() @@ -224,6 +302,10 @@ func (_m *Peer) RemoteIP() net.IP { func (_m *Peer) Reset() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Reset") + } + var r0 error if 
rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -234,13 +316,17 @@ func (_m *Peer) Reset() error { return r0 } -// Send provides a mock function with given fields: _a0 -func (_m *Peer) Send(_a0 p2p.Envelope) bool { - ret := _m.Called(_a0) +// Send provides a mock function with given fields: e +func (_m *Peer) Send(e p2p.Envelope) bool { + ret := _m.Called(e) + + if len(ret) == 0 { + panic("no return value specified for Send") + } var r0 bool if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { - r0 = rf(_a0) + r0 = rf(e) } else { r0 = ret.Get(0).(bool) } @@ -248,14 +334,14 @@ func (_m *Peer) Send(_a0 p2p.Envelope) bool { return r0 } -// Set provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Set(_a0 string, _a1 interface{}) { - _m.Called(_a0, _a1) +// Set provides a mock function with given fields: key, value +func (_m *Peer) Set(key string, value any) { + _m.Called(key, value) } -// SetLogger provides a mock function with given fields: _a0 -func (_m *Peer) SetLogger(_a0 log.Logger) { - _m.Called(_a0) +// SetLogger provides a mock function with given fields: l +func (_m *Peer) SetLogger(l log.Logger) { + _m.Called(l) } // SetRemovalFailed provides a mock function with given fields: @@ -264,15 +350,19 @@ func (_m *Peer) SetRemovalFailed() { } // SocketAddr provides a mock function with given fields: -func (_m *Peer) SocketAddr() *p2p.NetAddress { +func (_m *Peer) SocketAddr() *netaddr.NetAddr { ret := _m.Called() - var r0 *p2p.NetAddress - if rf, ok := ret.Get(0).(func() *p2p.NetAddress); ok { + if len(ret) == 0 { + panic("no return value specified for SocketAddr") + } + + var r0 *netaddr.NetAddr + if rf, ok := ret.Get(0).(func() *netaddr.NetAddr); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*p2p.NetAddress) + r0 = ret.Get(0).(*netaddr.NetAddr) } } @@ -283,6 +373,10 @@ func (_m *Peer) SocketAddr() *p2p.NetAddress { func (_m *Peer) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -297,6 +391,10 @@ func (_m *Peer) Start() error { func (_m *Peer) Status() conn.ConnectionStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 conn.ConnectionStatus if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { r0 = rf() @@ -311,6 +409,10 @@ func (_m *Peer) Status() conn.ConnectionStatus { func (_m *Peer) Stop() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Stop") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -325,6 +427,10 @@ func (_m *Peer) Stop() error { func (_m *Peer) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -335,13 +441,17 @@ func (_m *Peer) String() string { return r0 } -// TrySend provides a mock function with given fields: _a0 -func (_m *Peer) TrySend(_a0 p2p.Envelope) bool { - ret := _m.Called(_a0) +// TrySend provides a mock function with given fields: e +func (_m *Peer) TrySend(e p2p.Envelope) bool { + ret := _m.Called(e) + + if len(ret) == 0 { + panic("no return value specified for TrySend") + } var r0 bool if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { - r0 = rf(_a0) + r0 = rf(e) } else { r0 = ret.Get(0).(bool) } diff --git a/p2p/netaddr/errors.go b/p2p/netaddr/errors.go new file mode 100644 index 00000000000..ed0492f568e --- 
/dev/null +++ b/p2p/netaddr/errors.go @@ -0,0 +1,74 @@ +package netaddr + +import ( + "errors" + "fmt" + + "github.com/cometbft/cometbft/p2p/nodekey" +) + +var ( + ErrEmptyHost = errors.New("host is empty") + ErrNoIP = errors.New("no IP address found") + ErrInvalidIP = errors.New("invalid IP address") +) + +type ErrNoID struct { + Addr string +} + +func (e ErrNoID) Error() string { + return fmt.Sprintf("address (%s) does not contain ID", e.Addr) +} + +type ErrInvalid struct { + Addr string + Err error +} + +func (e ErrInvalid) Error() string { + return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err) +} + +func (e ErrInvalid) Unwrap() error { return e.Err } + +type ErrLookup struct { + Addr string + Err error +} + +func (e ErrLookup) Error() string { + return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err) +} + +func (e ErrLookup) Unwrap() error { return e.Err } + +type ErrInvalidPort struct { + Port uint32 +} + +func (e ErrInvalidPort) Error() string { + return fmt.Sprintf("invalid port: %d", e.Port) +} + +type ErrInvalidPeerID struct { + ID nodekey.ID + Source error +} + +func (e ErrInvalidPeerID) Error() string { + return fmt.Sprintf("invalid peer ID (%v): %v", e.ID, e.Source) +} + +func (e ErrInvalidPeerID) Unwrap() error { + return e.Source +} + +type ErrInvalidPeerIDLength struct { + Got int + Expected int +} + +func (e ErrInvalidPeerIDLength) Error() string { + return fmt.Sprintf("invalid peer ID length, got %d, expected %d", e.Got, e.Expected) +} diff --git a/p2p/netaddress.go b/p2p/netaddr/netaddr.go similarity index 52% rename from p2p/netaddress.go rename to p2p/netaddr/netaddr.go index 252178be3a3..aeb46a19ca5 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddr/netaddr.go @@ -2,11 +2,10 @@ // Originally Copyright (c) 2013-2014 Conformal Systems LLC. // https://github.com/conformal/btcd/blob/master/LICENSE -package p2p +package netaddr import ( "encoding/hex" - "errors" "flag" "fmt" "net" @@ -14,110 +13,113 @@ import ( "strings" "time" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + cmtrand "github.com/cometbft/cometbft/internal/rand" + "github.com/cometbft/cometbft/p2p/nodekey" ) -// EmptyNetAddress defines the string representation of an empty NetAddress -const EmptyNetAddress = "" +// Empty defines the string representation of an empty NetAddr. +const Empty = "" -// NetAddress defines information about a peer on the network +// NetAddr defines information about a peer on the network // including its ID, IP address, and port. -type NetAddress struct { - ID ID `json:"id"` - IP net.IP `json:"ip"` - Port uint16 `json:"port"` +type NetAddr struct { + ID nodekey.ID `json:"id"` + IP net.IP `json:"ip"` + Port uint16 `json:"port"` } -// IDAddressString returns id@hostPort. It strips the leading +// IDAddrString returns id@hostPort. It strips the leading // protocol from protocolHostPort if it exists. -func IDAddressString(id ID, protocolHostPort string) string { +func IDAddrString(id nodekey.ID, protocolHostPort string) string { hostPort := removeProtocolIfDefined(protocolHostPort) return fmt.Sprintf("%s@%s", id, hostPort) } -// NewNetAddress returns a new NetAddress using the provided TCP +// New returns a new address using the provided TCP // address. When testing, other net.Addr (except TCP) will result in // using 127.0.0.1:0. In a normal run, other net.Addr (except TCP) will // panic. Panics if ID is invalid. // TODO: socks proxies?
-func NewNetAddress(id ID, addr net.Addr) *NetAddress { +func New(id nodekey.ID, addr net.Addr) *NetAddr { tcpAddr, ok := addr.(*net.TCPAddr) if !ok { if flag.Lookup("test.v") == nil { // normal run panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) } // in testing - netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0) + netAddr := NewFromIPPort(net.ParseIP("127.0.0.1"), 0) netAddr.ID = id return netAddr } - if err := validateID(id); err != nil { + if err := ValidateID(id); err != nil { panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr)) } ip := tcpAddr.IP port := uint16(tcpAddr.Port) - na := NewNetAddressIPPort(ip, port) + na := NewFromIPPort(ip, port) na.ID = id return na } -// NewNetAddressString returns a new NetAddress using the provided address in +// NewFromString returns a new address using the provided address in // the form of "ID@IP:Port". // Also resolves the host if host is not an IP. -// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup) -func NewNetAddressString(addr string) (*NetAddress, error) { +// Errors are of type ErrXxx where Xxx is in (NoID, Invalid, Lookup). +func NewFromString(addr string) (*NetAddr, error) { addrWithoutProtocol := removeProtocolIfDefined(addr) spl := strings.Split(addrWithoutProtocol, "@") if len(spl) != 2 { - return nil, ErrNetAddressNoID{addr} + return nil, ErrInvalid{Addr: addr, Err: ErrNoID{addr}} } // get ID - if err := validateID(ID(spl[0])); err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} + if err := ValidateID(nodekey.ID(spl[0])); err != nil { + return nil, ErrInvalid{addrWithoutProtocol, err} } - var id ID - id, addrWithoutProtocol = ID(spl[0]), spl[1] + var id nodekey.ID + id, addrWithoutProtocol = nodekey.ID(spl[0]), spl[1] // get host and port host, portStr, err := net.SplitHostPort(addrWithoutProtocol) if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} + return nil, ErrInvalid{addrWithoutProtocol, err} } if len(host) == 0 { - return nil, ErrNetAddressInvalid{ + return nil, ErrInvalid{ addrWithoutProtocol, - errors.New("host is empty")} + ErrEmptyHost, + } } ip := net.ParseIP(host) if ip == nil { ips, err := net.LookupIP(host) if err != nil { - return nil, ErrNetAddressLookup{host, err} + return nil, ErrLookup{host, err} } ip = ips[0] } port, err := strconv.ParseUint(portStr, 10, 16) if err != nil { - return nil, ErrNetAddressInvalid{portStr, err} + return nil, ErrInvalid{portStr, err} } - na := NewNetAddressIPPort(ip, uint16(port)) + na := NewFromIPPort(ip, uint16(port)) na.ID = id return na, nil } -// NewNetAddressStrings returns an array of NetAddress'es build using +// NewFromStrings returns an array of NetAddrs built using // the provided strings. -func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) +func NewFromStrings(addrs []string) ([]*NetAddr, []error) { + netAddrs := make([]*NetAddr, 0) errs := make([]error, 0) for _, addr := range addrs { - netAddr, err := NewNetAddressString(addr) + netAddr, err := NewFromString(addr) if err != nil { errs = append(errs, err) } else { @@ -127,36 +129,37 @@ func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { return netAddrs, errs } -// NewNetAddressIPPort returns a new NetAddress using the provided IP +// NewFromIPPort returns a new NetAddr using the provided IP // and port number.
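A stripped-down, standard-library-only version of the "ID@IP:Port" parsing that NewFromString performs above, with ID validation and the DNS fallback omitted:

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// parseAddr splits "id@host:port" the way NewFromString does,
// minus the ID checks and host lookup.
func parseAddr(addr string) (id, host string, port uint16, err error) {
	spl := strings.Split(addr, "@")
	if len(spl) != 2 {
		return "", "", 0, fmt.Errorf("address (%s) does not contain ID", addr)
	}
	id = spl[0]
	host, portStr, err := net.SplitHostPort(spl[1])
	if err != nil {
		return "", "", 0, err
	}
	p, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return "", "", 0, err
	}
	return id, host, uint16(p), nil
}

func main() {
	id, host, port, err := parseAddr("deadbeef@127.0.0.1:26656")
	fmt.Println(id, host, port, err) // deadbeef 127.0.0.1 26656 <nil>
}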
-func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { - return &NetAddress{ +func NewFromIPPort(ip net.IP, port uint16) *NetAddr { + return &NetAddr{ IP: ip, Port: port, } } -// NetAddressFromProto converts a Protobuf NetAddress into a native struct. -func NetAddressFromProto(pb tmp2p.NetAddress) (*NetAddress, error) { +// NewFromProto converts a Protobuf NetAddress into a native struct. +func NewFromProto(pb tmp2p.NetAddress) (*NetAddr, error) { ip := net.ParseIP(pb.IP) if ip == nil { - return nil, fmt.Errorf("invalid IP address %v", pb.IP) + return nil, ErrInvalid{Addr: pb.IP, Err: ErrInvalidIP} } + if pb.Port >= 1<<16 { - return nil, fmt.Errorf("invalid port number %v", pb.Port) + return nil, ErrInvalid{Addr: pb.IP, Err: ErrInvalidPort{pb.Port}} } - return &NetAddress{ - ID: ID(pb.ID), + return &NetAddr{ + ID: nodekey.ID(pb.ID), IP: ip, Port: uint16(pb.Port), }, nil } -// NetAddressesFromProto converts a slice of Protobuf NetAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.NetAddress) ([]*NetAddress, error) { - nas := make([]*NetAddress, 0, len(pbs)) +// AddrsFromProtos converts a slice of Protobuf NetAddresses into a native slice. +func AddrsFromProtos(pbs []tmp2p.NetAddress) ([]*NetAddr, error) { + nas := make([]*NetAddr, 0, len(pbs)) for _, pb := range pbs { - na, err := NetAddressFromProto(pb) + na, err := NewFromProto(pb) if err != nil { return nil, err } @@ -165,8 +168,8 @@ func NetAddressesFromProto(pbs []tmp2p.NetAddress) ([]*NetAddress, error) { return nas, nil } -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf slice. -func NetAddressesToProto(nas []*NetAddress) []tmp2p.NetAddress { +// AddrsToProtos converts a slice of addresses into a Protobuf slice. +func AddrsToProtos(nas []*NetAddr) []tmp2p.NetAddress { pbs := make([]tmp2p.NetAddress, 0, len(nas)) for _, na := range nas { if na != nil { @@ -176,8 +179,8 @@ func NetAddressesToProto(nas []*NetAddress) []tmp2p.NetAddress { return pbs } -// ToProto converts a NetAddress to Protobuf. -func (na *NetAddress) ToProto() tmp2p.NetAddress { +// ToProto converts an Addr to Protobuf. +func (na *NetAddr) ToProto() tmp2p.NetAddress { return tmp2p.NetAddress{ ID: string(na.ID), IP: na.IP.String(), @@ -187,16 +190,16 @@ // Equals reports whether na and other are the same addresses, // including their ID, IP, and Port. -func (na *NetAddress) Equals(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { +func (na *NetAddr) Equals(other any) bool { + if o, ok := other.(*NetAddr); ok { return na.String() == o.String() } return false } // Same returns true if na has the same non-empty ID or DialString as other. -func (na *NetAddress) Same(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { +func (na *NetAddr) Same(other any) bool { + if o, ok := other.(*NetAddr); ok { if na.DialString() == o.DialString() { return true } @@ -207,23 +210,23 @@ func (na *NetAddress) Same(other interface{}) bool { return false } -// String representation: <ID>@<IP>:<PORT> -func (na *NetAddress) String() string { +// String representation: <ID>@<IP>:<PORT>.
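`ToProto` and `NewFromProto` are intended to be inverses for valid addresses; a sketch under that assumption (`roundTrip` is a hypothetical helper, not part of the diff):

```go
// roundTrip re-encodes an address through its Protobuf form.
// NewFromProto re-validates the IP string and the uint32 port range.
func roundTrip(addr *netaddr.NetAddr) (*netaddr.NetAddr, error) {
	pb := addr.ToProto()
	return netaddr.NewFromProto(pb)
}
```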
+func (na *NetAddr) String() string { if na == nil { - return EmptyNetAddress + return Empty } addrStr := na.DialString() if na.ID != "" { - addrStr = IDAddressString(na.ID, addrStr) + addrStr = IDAddrString(na.ID, addrStr) } return addrStr } -func (na *NetAddress) DialString() string { +func (na *NetAddr) DialString() string { if na == nil { - return "" + return Empty } return net.JoinHostPort( na.IP.String(), @@ -232,7 +235,7 @@ func (na *NetAddress) DialString() string { } // Dial calls net.Dial on the address. -func (na *NetAddress) Dial() (net.Conn, error) { +func (na *NetAddr) Dial() (net.Conn, error) { conn, err := net.Dial("tcp", na.DialString()) if err != nil { return nil, err @@ -241,7 +244,7 @@ func (na *NetAddress) Dial() (net.Conn, error) { } // DialTimeout calls net.DialTimeout on the address. -func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) { +func (na *NetAddr) DialTimeout(timeout time.Duration) (net.Conn, error) { conn, err := net.DialTimeout("tcp", na.DialString(), timeout) if err != nil { return nil, err @@ -250,7 +253,7 @@ func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) { } // Routable returns true if the address is routable. -func (na *NetAddress) Routable() bool { +func (na *NetAddr) Routable() bool { if err := na.Valid(); err != nil { return false } @@ -261,35 +264,35 @@ func (na *NetAddress) Routable() bool { // For IPv4 these are either a 0 or all bits set address. For IPv6 a zero // address or one that matches the RFC3849 documentation address format. -func (na *NetAddress) Valid() error { - if err := validateID(na.ID); err != nil { - return fmt.Errorf("invalid ID: %w", err) +func (na *NetAddr) Valid() error { + if err := ValidateID(na.ID); err != nil { + return ErrInvalidPeerID{na.ID, err} } if na.IP == nil { - return errors.New("no IP") + return ErrNoIP } if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) { - return errors.New("invalid IP") + return ErrInvalid{na.IP.String(), ErrInvalidIP} } return nil } // HasID returns true if the address has an ID. // NOTE: It does not check whether the ID is valid or not. -func (na *NetAddress) HasID() bool { +func (na *NetAddr) HasID() bool { return string(na.ID) != "" } // Local returns true if it is a local address. -func (na *NetAddress) Local() bool { +func (na *NetAddr) Local() bool { return na.IP.IsLoopback() || zero4.Contains(na.IP) } // ReachabilityTo checks whenever o can be reached from na. 
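A loopback address illustrates the split between the `Valid`, `Local`, and `Routable` checks just defined, before the reachability logic that follows (sketch; the printed results are what the checks above imply):

```go
func describeLoopback() {
	addr, err := netaddr.NewFromString(
		"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr.Valid())    // <nil>: loopback passes the validity checks
	fmt.Println(addr.Local())    // true: 127.0.0.1 is a loopback address
	fmt.Println(addr.Routable()) // false: local addresses are never routable
}
```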
-func (na *NetAddress) ReachabilityTo(o *NetAddress) int { +func (na *NetAddr) ReachabilityTo(o *NetAddr) int { const ( - Unreachable = 0 + unreachable = 0 Default = iota Teredo Ipv6Weak @@ -298,7 +301,7 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int { ) switch { case !na.Routable(): - return Unreachable + return unreachable case na.RFC4380(): switch { case !o.Routable(): @@ -345,32 +348,33 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int { // RFC4843: IPv6 ORCHID: (2001:10::/28) // RFC4862: IPv6 Autoconfig (FE80::/64) // RFC6052: IPv6 well known prefix (64:FF9B::/96) -// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96 -var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)} -var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)} -var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)} -var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)} -var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)} -var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)} -var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)} -var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)} -var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)} -var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)} -var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)} +// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96. var ( + rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)} + rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)} + rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)} + rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)} + rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)} + rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)} + rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)} + rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)} + rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)} + rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)} + rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)} + rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)} + zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)} + // onionCatNet defines the IPv6 address block used to support Tor. // bitcoind encodes a .onion address as a 16 byte number by decoding the // address prior to the .onion (i.e. the key hash) base32 into a ten // byte number. It then stores the first 6 bytes of the address as // 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43. // - // This is the same range used by OnionCat, which is part part of the + // This is the same range used by OnionCat, which is part of the // RFC4193 unique local IPv6 range. // // In summary the format is: - // { magic 6 bytes, 10 bytes base32 decode of key hash } + // { magic 6 bytes, 10 bytes base32 decode of key hash }. 
onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128) ) @@ -381,40 +385,60 @@ func ipNet(ip string, ones, bits int) net.IPNet { return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)} } -func (na *NetAddress) RFC1918() bool { +func (na *NetAddr) RFC1918() bool { return rfc1918_10.Contains(na.IP) || rfc1918_192.Contains(na.IP) || rfc1918_172.Contains(na.IP) } -func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } -func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } -func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } -func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } -func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } -func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } -func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } -func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } -func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } -func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) } +func (na *NetAddr) RFC3849() bool { return rfc3849.Contains(na.IP) } +func (na *NetAddr) RFC3927() bool { return rfc3927.Contains(na.IP) } +func (na *NetAddr) RFC3964() bool { return rfc3964.Contains(na.IP) } +func (na *NetAddr) RFC4193() bool { return rfc4193.Contains(na.IP) } +func (na *NetAddr) RFC4380() bool { return rfc4380.Contains(na.IP) } +func (na *NetAddr) RFC4843() bool { return rfc4843.Contains(na.IP) } +func (na *NetAddr) RFC4862() bool { return rfc4862.Contains(na.IP) } +func (na *NetAddr) RFC6052() bool { return rfc6052.Contains(na.IP) } +func (na *NetAddr) RFC6145() bool { return rfc6145.Contains(na.IP) } +func (na *NetAddr) OnionCatTor() bool { return onionCatNet.Contains(na.IP) } func removeProtocolIfDefined(addr string) string { if strings.Contains(addr, "://") { return strings.Split(addr, "://")[1] } return addr - } -func validateID(id ID) error { +func ValidateID(id nodekey.ID) error { if len(id) == 0 { - return errors.New("no ID") + return ErrNoIP } idBytes, err := hex.DecodeString(string(id)) if err != nil { return err } - if len(idBytes) != IDByteLength { - return fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength) + if len(idBytes) != nodekey.IDByteLength { + return ErrInvalidPeerIDLength{Got: len(idBytes), Expected: nodekey.IDByteLength} } return nil } + +// Used for testing. 
+func CreateRoutableAddr() (addr string, netAddr *NetAddr) { + for { + var err error + addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", + cmtrand.Bytes(20), + cmtrand.Int()%256, + cmtrand.Int()%256, + cmtrand.Int()%256, + cmtrand.Int()%256) + netAddr, err = NewFromString(addr) + if err != nil { + panic(err) + } + if netAddr.Routable() { + break + } + } + return addr, netAddr +} diff --git a/p2p/netaddress_test.go b/p2p/netaddr/netaddr_test.go similarity index 76% rename from p2p/netaddress_test.go rename to p2p/netaddr/netaddr_test.go index 65f9fb834ef..eac3e30bb0a 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddr/netaddr_test.go @@ -1,4 +1,4 @@ -package p2p +package netaddr import ( "net" @@ -9,11 +9,11 @@ import ( "github.com/stretchr/testify/require" ) -func TestNetAddress_String(t *testing.T) { +func Test_String(t *testing.T) { tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") - require.Nil(t, err) + require.NoError(t, err) - netAddr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) + netAddr := New("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) var wg sync.WaitGroup @@ -31,23 +31,23 @@ func TestNetAddress_String(t *testing.T) { require.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", s) } -func TestNewNetAddress(t *testing.T) { +func TestNew(t *testing.T) { tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") - require.Nil(t, err) + require.NoError(t, err) assert.Panics(t, func() { - NewNetAddress("", tcpAddr) + New("", tcpAddr) }) - addr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) + addr := New("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) assert.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", addr.String()) assert.NotPanics(t, func() { - NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000}) - }, "Calling NewNetAddress with UDPAddr should not panic in testing") + New("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000}) + }, "Calling New with UDPAddr should not panic in testing") } -func TestNewNetAddressString(t *testing.T) { +func TestNewFromString(t *testing.T) { testCases := []struct { name string addr string @@ -110,35 +110,35 @@ func TestNewNetAddressString(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressString(tc.addr) + addr, err := NewFromString(tc.addr) if tc.correct { - if assert.Nil(t, err, tc.addr) { + if assert.NoError(t, err, tc.addr) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, tc.expected, addr.String()) } } else { - assert.NotNil(t, err, tc.addr) + require.ErrorAs(t, err, &ErrInvalid{Addr: addr.String(), Err: err}) } }) } } -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ +func TestNewFromStrings(t *testing.T) { + addrs, errs := NewFromStrings([]string{ "127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080", + }) + assert.Len(t, addrs, 2) assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) } -func TestNewNetAddressIPPort(t *testing.T) { - addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) +func TestNewFromIPPort(t *testing.T) { + addr := NewFromIPPort(net.ParseIP("127.0.0.1"), 8080) assert.Equal(t, "127.0.0.1:8080", addr.String()) } -func TestNetAddressProperties(t *testing.T) { +func TestProperties(t 
*testing.T) { // TODO add more test cases testCases := []struct { addr string @@ -151,21 +151,21 @@ func TestNetAddressProperties(t *testing.T) { } for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) + addr, err := NewFromString(tc.addr) + require.NoError(t, err) err = addr.Valid() if tc.valid { - assert.NoError(t, err) + require.NoError(t, err) } else { - assert.Error(t, err) + require.Error(t, err) } assert.Equal(t, tc.local, addr.Local()) assert.Equal(t, tc.routable, addr.Routable()) } } -func TestNetAddressReachabilityTo(t *testing.T) { +func TestReachabilityTo(t *testing.T) { // TODO add more test cases testCases := []struct { addr string @@ -181,11 +181,11 @@ func TestNetAddressReachabilityTo(t *testing.T) { } for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) + addr, err := NewFromString(tc.addr) + require.NoError(t, err) - other, err := NewNetAddressString(tc.other) - require.Nil(t, err) + other, err := NewFromString(tc.other) + require.NoError(t, err) assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) } diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go deleted file mode 100644 index f1c43e35217..00000000000 --- a/p2p/node_info_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package p2p - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/cometbft/cometbft/crypto/ed25519" -) - -func TestNodeInfoValidate(t *testing.T) { - - // empty fails - ni := DefaultNodeInfo{} - assert.Error(t, ni.Validate()) - - channels := make([]byte, maxNumChannels) - for i := 0; i < maxNumChannels; i++ { - channels[i] = byte(i) - } - dupChannels := make([]byte, 5) - copy(dupChannels, channels[:5]) - dupChannels = append(dupChannels, testCh) - - nonASCII := "¢§µ" - emptyTab := "\t" - emptySpace := " " - - testCases := []struct { - testName string - malleateNodeInfo func(*DefaultNodeInfo) - expectErr bool - }{ - { - "Too Many Channels", - func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, //nolint: gocritic - true, - }, - {"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true}, - {"Good Channels", func(ni *DefaultNodeInfo) { ni.Channels = ni.Channels[:5] }, false}, - - {"Invalid NetAddress", func(ni *DefaultNodeInfo) { ni.ListenAddr = "not-an-address" }, true}, - {"Good NetAddress", func(ni *DefaultNodeInfo) { ni.ListenAddr = "0.0.0.0:26656" }, false}, - - {"Non-ASCII Version", func(ni *DefaultNodeInfo) { ni.Version = nonASCII }, true}, - {"Empty tab Version", func(ni *DefaultNodeInfo) { ni.Version = emptyTab }, true}, - {"Empty space Version", func(ni *DefaultNodeInfo) { ni.Version = emptySpace }, true}, - {"Empty Version", func(ni *DefaultNodeInfo) { ni.Version = "" }, false}, - - {"Non-ASCII Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = nonASCII }, true}, - {"Empty tab Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = emptyTab }, true}, - {"Empty space Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = emptySpace }, true}, - {"Empty Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = "" }, true}, - {"Good Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = "hey its me" }, false}, - - {"Non-ASCII TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = nonASCII }, true}, - {"Empty tab TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = emptyTab }, true}, - {"Empty space TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = emptySpace }, true}, - {"Empty TxIndex", func(ni *DefaultNodeInfo) { 
ni.Other.TxIndex = "" }, false}, - {"Off TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = "off" }, false}, - - {"Non-ASCII RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = nonASCII }, true}, - {"Empty tab RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = emptyTab }, true}, - {"Empty space RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = emptySpace }, true}, - {"Empty RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "" }, false}, - {"Good RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "0.0.0.0:26657" }, false}, - } - - nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} - name := "testing" - - // test case passes - ni = testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) - ni.Channels = channels - assert.NoError(t, ni.Validate()) - - for _, tc := range testCases { - ni := testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) - ni.Channels = channels - tc.malleateNodeInfo(&ni) - err := ni.Validate() - if tc.expectErr { - assert.Error(t, err, tc.testName) - } else { - assert.NoError(t, err, tc.testName) - } - } - -} - -func TestNodeInfoCompatible(t *testing.T) { - - nodeKey1 := NodeKey{PrivKey: ed25519.GenPrivKey()} - nodeKey2 := NodeKey{PrivKey: ed25519.GenPrivKey()} - name := "testing" - - var newTestChannel byte = 0x2 - - // test NodeInfo is compatible - ni1 := testNodeInfo(nodeKey1.ID(), name).(DefaultNodeInfo) - ni2 := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) - assert.NoError(t, ni1.CompatibleWith(ni2)) - - // add another channel; still compatible - ni2.Channels = append(ni2.Channels, newTestChannel) - assert.True(t, ni2.HasChannel(newTestChannel)) - assert.NoError(t, ni1.CompatibleWith(ni2)) - - // wrong NodeInfo type is not compatible - _, netAddr := CreateRoutableAddr() - ni3 := mockNodeInfo{netAddr} - assert.Error(t, ni1.CompatibleWith(ni3)) - - testCases := []struct { - testName string - malleateNodeInfo func(*DefaultNodeInfo) - }{ - {"Wrong block version", func(ni *DefaultNodeInfo) { ni.ProtocolVersion.Block++ }}, - {"Wrong network", func(ni *DefaultNodeInfo) { ni.Network += "-wrong" }}, - {"No common channels", func(ni *DefaultNodeInfo) { ni.Channels = []byte{newTestChannel} }}, - } - - for _, tc := range testCases { - ni := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) - tc.malleateNodeInfo(&ni) - assert.Error(t, ni1.CompatibleWith(ni)) - } -} diff --git a/p2p/nodeinfo/errors.go b/p2p/nodeinfo/errors.go new file mode 100644 index 00000000000..157edf884da --- /dev/null +++ b/p2p/nodeinfo/errors.go @@ -0,0 +1,96 @@ +package nodeinfo + +import ( + "errors" + "fmt" + + "github.com/cometbft/cometbft/libs/bytes" +) + +var ErrNoNodeInfo = errors.New("no node info found") + +type ErrInvalidNodeVersion struct { + Version string +} + +func (e ErrInvalidNodeVersion) Error() string { + return fmt.Sprintf("invalid version %s: version must be valid ASCII text without tabs", e.Version) +} + +type ErrDuplicateChannelID struct { + ID byte +} + +func (e ErrDuplicateChannelID) Error() string { + return fmt.Sprintf("channels contains duplicate channel id %v", e.ID) +} + +type ErrChannelsTooLong struct { + Length int + Max int +} + +func (e ErrChannelsTooLong) Error() string { + return fmt.Sprintf("channels is too long (max: %d, got: %d)", e.Max, e.Length) +} + +type ErrInvalidMoniker struct { + Moniker string +} + +func (e ErrInvalidMoniker) Error() string { + return fmt.Sprintf("moniker must be valid non-empty ASCII text without tabs, but got %v", e.Moniker) +} + +type ErrInvalidTxIndex struct { + TxIndex 
string +} + +func (e ErrInvalidTxIndex) Error() string { + return fmt.Sprintf("tx index must be either 'on', 'off', or empty string, got '%v'", e.TxIndex) +} + +type ErrInvalidRPCAddress struct { + RPCAddress string +} + +func (e ErrInvalidRPCAddress) Error() string { + return fmt.Sprintf("rpc address must be valid ASCII text without tabs, but got %v", e.RPCAddress) +} + +type ErrInvalidNodeInfoType struct { + Type string + Expected string +} + +func (e ErrInvalidNodeInfoType) Error() string { + return fmt.Sprintf("invalid NodeInfo type, Expected %s but got %s", e.Expected, e.Type) +} + +type ErrDifferentBlockVersion struct { + Other uint64 + Our uint64 +} + +func (e ErrDifferentBlockVersion) Error() string { + return fmt.Sprintf("peer is on a different Block version. Got %d, expected %d", + e.Other, e.Our) +} + +type ErrDifferentNetwork struct { + Other string + Our string +} + +func (e ErrDifferentNetwork) Error() string { + return fmt.Sprintf("peer is on a different network. Got %s, expected %s", e.Other, e.Our) +} + +type ErrNoCommonChannels struct { + OtherChannels bytes.HexBytes + OurChannels bytes.HexBytes +} + +func (e ErrNoCommonChannels) Error() string { + return fmt.Sprintf("no common channels between us (%v) and peer (%v)", e.OurChannels, e.OtherChannels) +} diff --git a/p2p/node_info.go b/p2p/nodeinfo/nodeinfo.go similarity index 57% rename from p2p/node_info.go rename to p2p/nodeinfo/nodeinfo.go index 59cf885cb1f..6e02cefb05d 100644 --- a/p2p/node_info.go +++ b/p2p/nodeinfo/nodeinfo.go @@ -1,15 +1,15 @@ -package p2p +package nodeinfo import ( "bytes" - "errors" "fmt" "reflect" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + cmtstrings "github.com/cometbft/cometbft/internal/strings" cmtbytes "github.com/cometbft/cometbft/libs/bytes" - cmtstrings "github.com/cometbft/cometbft/libs/strings" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" - "github.com/cometbft/cometbft/version" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" ) const ( @@ -17,33 +17,23 @@ const ( maxNumChannels = 16 // plenty of room for upgrades, for now ) -// Max size of the NodeInfo struct -func MaxNodeInfoSize() int { +// MaxSize returns the maximum size of the NodeInfo struct. +func MaxSize() int { return maxNodeInfoSize } -//------------------------------------------------------------- +// ------------------------------------------------------------- // NodeInfo exposes basic info of a node // and determines if we're compatible. type NodeInfo interface { - ID() ID - nodeInfoAddress - nodeInfoTransport -} - -type nodeInfoAddress interface { - NetAddress() (*NetAddress, error) -} - -// nodeInfoTransport validates a nodeInfo and checks -// our compatibility with it. It's for use in the handshake. -type nodeInfoTransport interface { + ID() nodekey.ID + NetAddr() (*na.NetAddr, error) Validate() error CompatibleWith(other NodeInfo) error } -//------------------------------------------------------------- +// ------------------------------------------------------------- // ProtocolVersion contains the protocol versions for the software. type ProtocolVersion struct { @@ -52,14 +42,6 @@ type ProtocolVersion struct { App uint64 `json:"app"` } -// defaultProtocolVersion populates the Block and P2P versions using -// the global values, but not the App. -var defaultProtocolVersion = NewProtocolVersion( - version.P2PProtocol, - version.BlockProtocol, - 0, -) - // NewProtocolVersion returns a fully populated ProtocolVersion. 
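These typed errors let callers branch on the failure reason instead of string-matching the message; a hedged sketch (`logHandshakeError` is a hypothetical helper, with `err` coming from the `Validate` and `CompatibleWith` methods defined below):

```go
func logHandshakeError(err error) {
	var tooLong nodeinfo.ErrChannelsTooLong
	if errors.As(err, &tooLong) {
		log.Printf("too many channels: got %d, max %d", tooLong.Length, tooLong.Max)
		return
	}
	var dup nodeinfo.ErrDuplicateChannelID
	if errors.As(err, &dup) {
		log.Printf("duplicate channel id %#x", dup.ID)
		return
	}
	log.Print(err)
}
```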
func NewProtocolVersion(p2p, block, app uint64) ProtocolVersion { return ProtocolVersion{ @@ -69,20 +51,20 @@ func NewProtocolVersion(p2p, block, app uint64) ProtocolVersion { } } -//------------------------------------------------------------- +// ------------------------------------------------------------- -// Assert DefaultNodeInfo satisfies NodeInfo -var _ NodeInfo = DefaultNodeInfo{} +// Assert Default satisfies NodeInfo. +var _ NodeInfo = Default{} -// DefaultNodeInfo is the basic node information exchanged +// Default is the basic node information exchanged // between two peers during the CometBFT P2P handshake. -type DefaultNodeInfo struct { +type Default struct { ProtocolVersion ProtocolVersion `json:"protocol_version"` // Authenticate - // TODO: replace with NetAddress - DefaultNodeID ID `json:"id"` // authenticated identifier - ListenAddr string `json:"listen_addr"` // accepting incoming + // TODO: replace with na.NetAddr + DefaultNodeID nodekey.ID `json:"id"` // authenticated identifier + ListenAddr string `json:"listen_addr"` // accepting incoming // Check compatibility. // Channels are HexBytes so easier to read as JSON @@ -91,22 +73,22 @@ type DefaultNodeInfo struct { Channels cmtbytes.HexBytes `json:"channels"` // channels this node knows about // ASCIIText fields - Moniker string `json:"moniker"` // arbitrary moniker - Other DefaultNodeInfoOther `json:"other"` // other application specific data + Moniker string `json:"moniker"` // arbitrary moniker + Other DefaultOther `json:"other"` // other application specific data } -// DefaultNodeInfoOther is the misc. applcation specific data -type DefaultNodeInfoOther struct { +// DefaultOther is the misc. application specific data. +type DefaultOther struct { TxIndex string `json:"tx_index"` RPCAddress string `json:"rpc_address"` } // ID returns the node's peer ID. -func (info DefaultNodeInfo) ID() ID { +func (info Default) ID() nodekey.ID { return info.DefaultNodeID } -// Validate checks the self-reported DefaultNodeInfo is safe. +// Validate checks the self-reported Default is safe. // It returns an error if there // are too many Channels, if there are any duplicate Channels, // if the ListenAddr is malformed, or if the ListenAddr is a host name @@ -119,12 +101,11 @@ func (info DefaultNodeInfo) ID() ID { // International clients could then use punycode (or we could use // url-encoding), and we just need to be careful with how we handle that in our // clients. (e.g. off by default). -func (info DefaultNodeInfo) Validate() error { - +func (info Default) Validate() error { // ID is already validated. // Validate ListenAddr. - _, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr)) + _, err := na.NewFromString(na.IDAddrString(info.ID(), info.ListenAddr)) if err != nil { return err } @@ -134,26 +115,26 @@ func (info DefaultNodeInfo) Validate() error { // Validate Version if len(info.Version) > 0 && (!cmtstrings.IsASCIIText(info.Version) || cmtstrings.ASCIITrim(info.Version) == "") { - - return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version) + return ErrInvalidNodeVersion{Version: info.Version} } // Validate Channels - ensure max and check for duplicates. if len(info.Channels) > maxNumChannels { - return fmt.Errorf("info.Channels is too long (%v). 
Max is %v", len(info.Channels), maxNumChannels) + return ErrChannelsTooLong{Length: len(info.Channels), Max: maxNumChannels} } + channels := make(map[byte]struct{}) for _, ch := range info.Channels { _, ok := channels[ch] if ok { - return fmt.Errorf("info.Channels contains duplicate channel id %v", ch) + return ErrDuplicateChannelID{ID: ch} } channels[ch] = struct{}{} } // Validate Moniker. if !cmtstrings.IsASCIIText(info.Moniker) || cmtstrings.ASCIITrim(info.Moniker) == "" { - return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker) + return ErrInvalidMoniker{Moniker: info.Moniker} } // Validate Other. @@ -162,34 +143,42 @@ func (info DefaultNodeInfo) Validate() error { switch txIndex { case "", "on", "off": default: - return fmt.Errorf("info.Other.TxIndex should be either 'on', 'off', or empty string, got '%v'", txIndex) + return ErrInvalidTxIndex{TxIndex: txIndex} } // XXX: Should we be more strict about address formats? rpcAddr := other.RPCAddress if len(rpcAddr) > 0 && (!cmtstrings.IsASCIIText(rpcAddr) || cmtstrings.ASCIITrim(rpcAddr) == "") { - return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr) + return ErrInvalidRPCAddress{RPCAddress: rpcAddr} } return nil } -// CompatibleWith checks if two DefaultNodeInfo are compatible with eachother. +// CompatibleWith checks if two Default are compatible with each other. // CONTRACT: two nodes are compatible if the Block version and network match // and they have at least one channel in common. -func (info DefaultNodeInfo) CompatibleWith(otherInfo NodeInfo) error { - other, ok := otherInfo.(DefaultNodeInfo) +func (info Default) CompatibleWith(otherInfo NodeInfo) error { + other, ok := otherInfo.(Default) if !ok { - return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(otherInfo)) + return ErrInvalidNodeInfoType{ + Type: reflect.TypeOf(otherInfo).String(), + Expected: fmt.Sprintf("%T", Default{}), + } } if info.ProtocolVersion.Block != other.ProtocolVersion.Block { - return fmt.Errorf("peer is on a different Block version. Got %v, expected %v", - other.ProtocolVersion.Block, info.ProtocolVersion.Block) + return ErrDifferentBlockVersion{ + Other: other.ProtocolVersion.Block, + Our: info.ProtocolVersion.Block, + } } // nodes must be on the same network if info.Network != other.Network { - return fmt.Errorf("peer is on a different network. Got %v, expected %v", other.Network, info.Network) + return ErrDifferentNetwork{ + Other: other.Network, + Our: info.Network, + } } // if we have no channels, we're just testing @@ -209,26 +198,28 @@ OUTER_LOOP: } } if !found { - return fmt.Errorf("peer has no common channels. Our channels: %v ; Peer channels: %v", info.Channels, other.Channels) + return ErrNoCommonChannels{ + OtherChannels: other.Channels, + OurChannels: info.Channels, + } } return nil } -// NetAddress returns a NetAddress derived from the DefaultNodeInfo - +// NetAddr returns a NetAddr derived from the Default - // it includes the authenticated peer ID and the self-reported // ListenAddr. Note that the ListenAddr is not authenticated and // may not match that address actually dialed if its an outbound peer. 
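With the typed errors, an incompatibility surfaced by `CompatibleWith` can be reported precisely (sketch; assumes two `Default` values obtained during the handshake):

```go
func explainIncompatibility(our, their nodeinfo.Default) {
	err := our.CompatibleWith(their)
	var diffNet nodeinfo.ErrDifferentNetwork
	if errors.As(err, &diffNet) {
		log.Printf("peer is on network %q, we are on %q", diffNet.Other, diffNet.Our)
	}
}
```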
-func (info DefaultNodeInfo) NetAddress() (*NetAddress, error) { - idAddr := IDAddressString(info.ID(), info.ListenAddr) - return NewNetAddressString(idAddr) +func (info Default) NetAddr() (*na.NetAddr, error) { + idAddr := na.IDAddrString(info.ID(), info.ListenAddr) + return na.NewFromString(idAddr) } -func (info DefaultNodeInfo) HasChannel(chID byte) bool { +func (info Default) HasChannel(chID byte) bool { return bytes.Contains(info.Channels, []byte{chID}) } -func (info DefaultNodeInfo) ToProto() *tmp2p.DefaultNodeInfo { - +func (info Default) ToProto() *tmp2p.DefaultNodeInfo { dni := new(tmp2p.DefaultNodeInfo) dni.ProtocolVersion = tmp2p.ProtocolVersion{ P2P: info.ProtocolVersion.P2P, @@ -250,23 +241,24 @@ func (info DefaultNodeInfo) ToProto() *tmp2p.DefaultNodeInfo { return dni } -func DefaultNodeInfoFromToProto(pb *tmp2p.DefaultNodeInfo) (DefaultNodeInfo, error) { +func DefaultFromToProto(pb *tmp2p.DefaultNodeInfo) (Default, error) { if pb == nil { - return DefaultNodeInfo{}, errors.New("nil node info") + return Default{}, ErrNoNodeInfo } - dni := DefaultNodeInfo{ + + dni := Default{ ProtocolVersion: ProtocolVersion{ P2P: pb.ProtocolVersion.P2P, Block: pb.ProtocolVersion.Block, App: pb.ProtocolVersion.App, }, - DefaultNodeID: ID(pb.DefaultNodeID), + DefaultNodeID: nodekey.ID(pb.DefaultNodeID), ListenAddr: pb.ListenAddr, Network: pb.Network, Version: pb.Version, Channels: pb.Channels, Moniker: pb.Moniker, - Other: DefaultNodeInfoOther{ + Other: DefaultOther{ TxIndex: pb.Other.TxIndex, RPCAddress: pb.Other.RPCAddress, }, diff --git a/p2p/nodeinfo/nodeinfo_test.go b/p2p/nodeinfo/nodeinfo_test.go new file mode 100644 index 00000000000..bc3c9bbd03b --- /dev/null +++ b/p2p/nodeinfo/nodeinfo_test.go @@ -0,0 +1,166 @@ +package nodeinfo + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/crypto/ed25519" + cmtnet "github.com/cometbft/cometbft/internal/net" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" +) + +const testCh = 0x01 + +type mockNodeInfo struct { + addr *na.NetAddr +} + +func (ni mockNodeInfo) ID() nodekey.ID { return ni.addr.ID } +func (ni mockNodeInfo) NetAddr() (*na.NetAddr, error) { return ni.addr, nil } +func (mockNodeInfo) Validate() error { return nil } +func (mockNodeInfo) CompatibleWith(NodeInfo) error { return nil } +func (mockNodeInfo) Handshake(net.Conn, time.Duration) (NodeInfo, error) { return nil, nil } + +func testNodeInfo(id nodekey.ID) NodeInfo { + return Default{ + ProtocolVersion: NewProtocolVersion(0, 0, 0), + DefaultNodeID: id, + ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + Network: "testing", + Version: "1.2.3-rc0-deadbeef", + Channels: []byte{testCh}, + Moniker: "testing", + Other: DefaultOther{ + TxIndex: "on", + RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + }, + } +} + +func getFreePort() int { + port, err := cmtnet.GetFreePort() + if err != nil { + panic(err) + } + return port +} + +func TestNodeInfoValidate(t *testing.T) { + // empty fails + ni := Default{} + require.Error(t, ni.Validate()) + + channels := make([]byte, maxNumChannels) + for i := 0; i < maxNumChannels; i++ { + channels[i] = byte(i) + } + dupChannels := make([]byte, 5) + copy(dupChannels, channels[:5]) + dupChannels = append(dupChannels, testCh) //nolint:makezero // huge errors when we don't do it the "wrong" way + + nonASCII := "¢§µ" + emptyTab := "\t" + emptySpace := " " + + testCases := []struct { + testName 
string + malleateNodeInfo func(*Default) + expectErr bool + }{ + { + "Too Many Channels", + func(ni *Default) { ni.Channels = append(channels, byte(maxNumChannels)) }, //nolint: makezero + true, + }, + {"Duplicate Channel", func(ni *Default) { ni.Channels = dupChannels }, true}, + {"Good Channels", func(ni *Default) { ni.Channels = ni.Channels[:5] }, false}, + + {"Invalid NetAddr", func(ni *Default) { ni.ListenAddr = "not-an-address" }, true}, + {"Good NetAddr", func(ni *Default) { ni.ListenAddr = "0.0.0.0:26656" }, false}, + + {"Non-ASCII Version", func(ni *Default) { ni.Version = nonASCII }, true}, + {"Empty tab Version", func(ni *Default) { ni.Version = emptyTab }, true}, + {"Empty space Version", func(ni *Default) { ni.Version = emptySpace }, true}, + {"Empty Version", func(ni *Default) { ni.Version = "" }, false}, + + {"Non-ASCII Moniker", func(ni *Default) { ni.Moniker = nonASCII }, true}, + {"Empty tab Moniker", func(ni *Default) { ni.Moniker = emptyTab }, true}, + {"Empty space Moniker", func(ni *Default) { ni.Moniker = emptySpace }, true}, + {"Empty Moniker", func(ni *Default) { ni.Moniker = "" }, true}, + {"Good Moniker", func(ni *Default) { ni.Moniker = "hey its me" }, false}, + + {"Non-ASCII TxIndex", func(ni *Default) { ni.Other.TxIndex = nonASCII }, true}, + {"Empty tab TxIndex", func(ni *Default) { ni.Other.TxIndex = emptyTab }, true}, + {"Empty space TxIndex", func(ni *Default) { ni.Other.TxIndex = emptySpace }, true}, + {"Empty TxIndex", func(ni *Default) { ni.Other.TxIndex = "" }, false}, + {"Off TxIndex", func(ni *Default) { ni.Other.TxIndex = "off" }, false}, + + {"Non-ASCII RPCAddress", func(ni *Default) { ni.Other.RPCAddress = nonASCII }, true}, + {"Empty tab RPCAddress", func(ni *Default) { ni.Other.RPCAddress = emptyTab }, true}, + {"Empty space RPCAddress", func(ni *Default) { ni.Other.RPCAddress = emptySpace }, true}, + {"Empty RPCAddress", func(ni *Default) { ni.Other.RPCAddress = "" }, false}, + {"Good RPCAddress", func(ni *Default) { ni.Other.RPCAddress = "0.0.0.0:26657" }, false}, + } + + nodeKey := nodekey.NodeKey{PrivKey: ed25519.GenPrivKey()} + + // test case passes + ni = testNodeInfo(nodeKey.ID()).(Default) + ni.Channels = channels + require.NoError(t, ni.Validate()) + + for _, tc := range testCases { + ni := testNodeInfo(nodeKey.ID()).(Default) + ni.Channels = channels + tc.malleateNodeInfo(&ni) + err := ni.Validate() + if tc.expectErr { + require.Error(t, err, tc.testName) + } else { + require.NoError(t, err, tc.testName) + } + } +} + +func TestNodeInfoCompatible(t *testing.T) { + nodeKey1 := nodekey.NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey2 := nodekey.NodeKey{PrivKey: ed25519.GenPrivKey()} + + var newTestChannel byte = 0x2 + + // test NodeInfo is compatible + ni1 := testNodeInfo(nodeKey1.ID()).(Default) + ni2 := testNodeInfo(nodeKey2.ID()).(Default) + require.NoError(t, ni1.CompatibleWith(ni2)) + + // add another channel; still compatible + ni2.Channels = append(ni2.Channels, newTestChannel) + assert.True(t, ni2.HasChannel(newTestChannel)) + require.NoError(t, ni1.CompatibleWith(ni2)) + + // wrong NodeInfo type is not compatible + _, netAddr := na.CreateRoutableAddr() + ni3 := mockNodeInfo{netAddr} + require.Error(t, ni1.CompatibleWith(ni3)) + + testCases := []struct { + testName string + malleateNodeInfo func(*Default) + }{ + {"Wrong block version", func(ni *Default) { ni.ProtocolVersion.Block++ }}, + {"Wrong network", func(ni *Default) { ni.Network += "-wrong" }}, + {"No common channels", func(ni *Default) { ni.Channels = 
[]byte{newTestChannel} }}, + } + + for _, tc := range testCases { + ni := testNodeInfo(nodeKey2.ID()).(Default) + tc.malleateNodeInfo(&ni) + require.Error(t, ni1.CompatibleWith(ni)) + } +} diff --git a/p2p/key.go b/p2p/nodekey/nodekey.go similarity index 80% rename from p2p/key.go rename to p2p/nodekey/nodekey.go index b30dca1dcde..149dbe4fb30 100644 --- a/p2p/key.go +++ b/p2p/nodekey/nodekey.go @@ -1,4 +1,4 @@ -package p2p +package nodekey import ( "bytes" @@ -8,18 +8,18 @@ import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" + cmtos "github.com/cometbft/cometbft/internal/os" cmtjson "github.com/cometbft/cometbft/libs/json" - cmtos "github.com/cometbft/cometbft/libs/os" ) -// ID is a hex-encoded crypto.Address +// ID is a hex-encoded crypto.Address. type ID string // IDByteLength is the length of a crypto.Address. Currently only 20. // TODO: support other length addresses ? const IDByteLength = crypto.AddressSize -//------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------ // Persistent peer ID // TODO: encrypt on disk @@ -34,7 +34,7 @@ func (nodeKey *NodeKey) ID() ID { return PubKeyToID(nodeKey.PubKey()) } -// PubKey returns the peer's PubKey +// PubKey returns the peer's PubKey. func (nodeKey *NodeKey) PubKey() crypto.PubKey { return nodeKey.PrivKey.PubKey() } @@ -45,11 +45,11 @@ func PubKeyToID(pubKey crypto.PubKey) ID { return ID(hex.EncodeToString(pubKey.Address())) } -// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If +// LoadOrGen attempts to load the NodeKey from the given filePath. If // the file does not exist, it generates and saves a new NodeKey. -func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { +func LoadOrGen(filePath string) (*NodeKey, error) { if cmtos.FileExists(filePath) { - nodeKey, err := LoadNodeKey(filePath) + nodeKey, err := Load(filePath) if err != nil { return nil, err } @@ -68,8 +68,8 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { return nodeKey, nil } -// LoadNodeKey loads NodeKey located in filePath. -func LoadNodeKey(filePath string) (*NodeKey, error) { +// Load loads NodeKey located in filePath. +func Load(filePath string) (*NodeKey, error) { jsonBytes, err := os.ReadFile(filePath) if err != nil { return nil, err @@ -88,14 +88,14 @@ func (nodeKey *NodeKey) SaveAs(filePath string) error { if err != nil { return err } - err = os.WriteFile(filePath, jsonBytes, 0600) + err = os.WriteFile(filePath, jsonBytes, 0o600) if err != nil { return err } return nil } -//------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------ // MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1. // It can be used as a Proof of Work target. 
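Call sites track the `nodekey` renames mechanically; node startup code ends up along these lines (sketch, path handling is illustrative):

```go
// was: p2p.LoadOrGenNodeKey(path), nodeKey.ID()
func loadNodeID(path string) (nodekey.ID, error) {
	key, err := nodekey.LoadOrGen(path)
	if err != nil {
		return "", err
	}
	return key.ID(), nil // hex-encoded address of the node's pubkey
}
```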
diff --git a/p2p/key_test.go b/p2p/nodekey/nodekey_test.go similarity index 59% rename from p2p/key_test.go rename to p2p/nodekey/nodekey_test.go index e87bfe88d63..674d4005887 100644 --- a/p2p/key_test.go +++ b/p2p/nodekey/nodekey_test.go @@ -1,4 +1,4 @@ -package p2p +package nodekey import ( "bytes" @@ -10,17 +10,17 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/crypto/ed25519" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func TestLoadOrGenNodeKey(t *testing.T) { filePath := filepath.Join(os.TempDir(), cmtrand.Str(12)+"_peer_id.json") - nodeKey, err := LoadOrGenNodeKey(filePath) - assert.Nil(t, err) + nodeKey, err := LoadOrGen(filePath) + require.NoError(t, err) - nodeKey2, err := LoadOrGenNodeKey(filePath) - assert.Nil(t, err) + nodeKey2, err := LoadOrGen(filePath) + require.NoError(t, err) assert.Equal(t, nodeKey, nodeKey2) } @@ -28,14 +28,14 @@ func TestLoadOrGenNodeKey(t *testing.T) { func TestLoadNodeKey(t *testing.T) { filePath := filepath.Join(os.TempDir(), cmtrand.Str(12)+"_peer_id.json") - _, err := LoadNodeKey(filePath) + _, err := Load(filePath) assert.True(t, os.IsNotExist(err)) - _, err = LoadOrGenNodeKey(filePath) + _, err = LoadOrGen(filePath) require.NoError(t, err) - nodeKey, err := LoadNodeKey(filePath) - assert.NoError(t, err) + nodeKey, err := Load(filePath) + require.NoError(t, err) assert.NotNil(t, nodeKey) } @@ -49,30 +49,29 @@ func TestNodeKeySaveAs(t *testing.T) { PrivKey: privKey, } err := nodeKey.SaveAs(filePath) - assert.NoError(t, err) + require.NoError(t, err) assert.FileExists(t, filePath) } -//---------------------------------------------------------- +// ---------------------------------------------------------- -func padBytes(bz []byte, targetBytes int) []byte { +func padBytes(bz []byte) []byte { + targetBytes := 20 return append(bz, bytes.Repeat([]byte{0xFF}, targetBytes-len(bz))...) } func TestPoWTarget(t *testing.T) { - - targetBytes := 20 cases := []struct { difficulty uint target []byte }{ - {0, padBytes([]byte{}, targetBytes)}, - {1, padBytes([]byte{127}, targetBytes)}, - {8, padBytes([]byte{0}, targetBytes)}, - {9, padBytes([]byte{0, 127}, targetBytes)}, - {10, padBytes([]byte{0, 63}, targetBytes)}, - {16, padBytes([]byte{0, 0}, targetBytes)}, - {17, padBytes([]byte{0, 0, 127}, targetBytes)}, + {0, padBytes([]byte{})}, + {1, padBytes([]byte{127})}, + {8, padBytes([]byte{0})}, + {9, padBytes([]byte{0, 127})}, + {10, padBytes([]byte{0, 63})}, + {16, padBytes([]byte{0, 0})}, + {17, padBytes([]byte{0, 0, 127})}, } for _, c := range cases { diff --git a/p2p/peer.go b/p2p/peer.go index dc88152df6d..5e2e77b7649 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -8,54 +8,61 @@ import ( "github.com/cosmos/gogoproto/proto" - "github.com/cometbft/cometbft/libs/cmap" + "github.com/cometbft/cometbft/internal/cmap" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/service" - - cmtconn "github.com/cometbft/cometbft/p2p/conn" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" + "github.com/cometbft/cometbft/types" ) //go:generate ../scripts/mockery_generate.sh Peer -const metricsTickerDuration = 10 * time.Second +// Same as the default Prometheus scrape interval in order to not lose +// granularity. 
+const metricsTickerDuration = 1 * time.Second // Peer is an interface representing a peer connected on a reactor. type Peer interface { service.Service FlushStop() - ID() ID // peer's cryptographic ID + ID() nodekey.ID // peer's cryptographic ID RemoteIP() net.IP // remote IP of the connection RemoteAddr() net.Addr // remote address of the connection IsOutbound() bool // did we dial the peer IsPersistent() bool // do we redial this peer when we disconnect - CloseConn() error // close original connection + // Conn returns the underlying connection. + Conn() net.Conn - NodeInfo() NodeInfo // peer's info - Status() cmtconn.ConnectionStatus - SocketAddr() *NetAddress // actual address of the socket + NodeInfo() ni.NodeInfo // peer's info + Status() tcpconn.ConnectionStatus + SocketAddr() *na.NetAddr // actual address of the socket - Send(Envelope) bool - TrySend(Envelope) bool + HasChannel(chID byte) bool // Does the peer implement this channel? + Send(e Envelope) bool // Send a message to the peer, blocking version + TrySend(e Envelope) bool // Send a message to the peer, non-blocking version - Set(string, interface{}) - Get(string) interface{} + Set(key string, value any) + Get(key string) any SetRemovalFailed() GetRemovalFailed() bool } -//---------------------------------------------------------- +// ---------------------------------------------------------- // peerConn contains the raw connection and its config. type peerConn struct { outbound bool persistent bool - conn net.Conn // source connection + conn net.Conn // Source connection - socketAddr *NetAddress + socketAddr *na.NetAddr // cached RemoteIP() ip net.IP @@ -64,9 +71,8 @@ type peerConn struct { func newPeerConn( outbound, persistent bool, conn net.Conn, - socketAddr *NetAddress, + socketAddr *na.NetAddr, ) peerConn { - return peerConn{ outbound: outbound, persistent: persistent, @@ -75,13 +81,14 @@ func newPeerConn( } } -// ID only exists for SecretConnection. -// NOTE: Will panic if conn is not *SecretConnection. -func (pc peerConn) ID() ID { - return PubKeyToID(pc.conn.(*cmtconn.SecretConnection).RemotePubKey()) +// ID returns the peer's ID. +// +// Only used in tests. +func (pc peerConn) ID() nodekey.ID { + return pc.socketAddr.ID } -// Return the IP from the connection RemoteAddr +// Return the IP from the connection RemoteAddr. 
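The `HasChannel` method newly exposed on `Peer` lets reactors skip peers that never advertised a channel, replacing the old internal `hasChannel` debug path. A reactor-side sketch (names are illustrative; it uses the `ForEach` iterator added to `IPeerSet` later in this diff):

```go
func broadcast(peers p2p.IPeerSet, chID byte, msg proto.Message) {
	peers.ForEach(func(peer p2p.Peer) {
		if !peer.HasChannel(chID) {
			return // peer does not implement this channel
		}
		peer.TrySend(p2p.Envelope{ChannelID: chID, Message: msg}) // non-blocking
	})
}
```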
func (pc peerConn) RemoteIP() net.IP { if pc.ip != nil { return pc.ip @@ -110,20 +117,19 @@ type peer struct { // raw peerConn and the multiplex connection peerConn - mconn *cmtconn.MConnection + mconn *tcpconn.MConnection // peer's node info and the channel it knows about // channels = nodeInfo.Channels - // cached to avoid copying nodeInfo in hasChannel - nodeInfo NodeInfo + // cached to avoid copying nodeInfo in HasChannel + nodeInfo ni.NodeInfo channels []byte // User data Data *cmap.CMap - metrics *Metrics - metricsTicker *time.Ticker - mlc *metricsLabelCache + metrics *Metrics + pendingMetrics *peerPendingMetricsCache // When removal of a peer fails, we set this flag removalAttemptFailed bool @@ -133,31 +139,30 @@ type PeerOption func(*peer) func newPeer( pc peerConn, - mConfig cmtconn.MConnConfig, - nodeInfo NodeInfo, + mConfig tcpconn.MConnConfig, + nodeInfo ni.NodeInfo, reactorsByCh map[byte]Reactor, msgTypeByChID map[byte]proto.Message, - chDescs []*cmtconn.ChannelDescriptor, - onPeerError func(Peer, interface{}), - mlc *metricsLabelCache, + streams []StreamDescriptor, + onPeerError func(Peer, any), options ...PeerOption, ) *peer { p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.(DefaultNodeInfo).Channels, - Data: cmap.NewCMap(), - metricsTicker: time.NewTicker(metricsTickerDuration), - metrics: NopMetrics(), - mlc: mlc, + peerConn: pc, + nodeInfo: nodeInfo, + channels: nodeInfo.(ni.Default).Channels, + Data: cmap.NewCMap(), + metrics: NopMetrics(), + pendingMetrics: newPeerPendingMetricsCache(), } + // TODO: rip this out from the peer p.mconn = createMConnection( pc.conn, p, reactorsByCh, msgTypeByChID, - chDescs, + streams, onPeerError, mConfig, ) @@ -178,7 +183,7 @@ func (p *peer) String() string { return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) } -//--------------------------------------------------- +// --------------------------------------------------- // Implements service.Service // SetLogger implements BaseService. @@ -203,27 +208,24 @@ func (p *peer) OnStart() error { // FlushStop mimics OnStop but additionally ensures that all successful // .Send() calls will get flushed before closing the connection. +// // NOTE: it is not safe to call this method more than once. func (p *peer) FlushStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() p.mconn.FlushStop() // stop everything and close the conn } // OnStop implements BaseService. func (p *peer) OnStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() if err := p.mconn.Stop(); err != nil { // stop everything and close the conn p.Logger.Debug("Error while stopping peer", "err", err) } } -//--------------------------------------------------- +// --------------------------------------------------- // Implements Peer // ID returns the peer's ID - the hex encoded hash of its pubkey. -func (p *peer) ID() ID { +func (p *peer) ID() nodekey.ID { return p.nodeInfo.ID() } @@ -232,13 +234,13 @@ func (p *peer) IsOutbound() bool { return p.peerConn.outbound } -// IsPersistent returns true if the peer is persitent, false otherwise. +// IsPersistent returns true if the peer is persistent, false otherwise. func (p *peer) IsPersistent() bool { return p.peerConn.persistent } // NodeInfo returns a copy of the peer's NodeInfo. -func (p *peer) NodeInfo() NodeInfo { +func (p *peer) NodeInfo() ni.NodeInfo { return p.nodeInfo } @@ -246,23 +248,27 @@ func (p *peer) NodeInfo() NodeInfo { // For outbound peers, it's the address dialed (after DNS resolution). 
// For inbound peers, it's the address returned by the underlying connection // (not what's reported in the peer's NodeInfo). -func (p *peer) SocketAddr() *NetAddress { +func (p *peer) SocketAddr() *na.NetAddr { return p.peerConn.socketAddr } // Status returns the peer's ConnectionStatus. -func (p *peer) Status() cmtconn.ConnectionStatus { +func (p *peer) Status() tcpconn.ConnectionStatus { return p.mconn.Status() } // Send msg bytes to the channel identified by chID byte. Returns false if the // send queue is full after timeout, specified by MConnection. +// +// thread safe. func (p *peer) Send(e Envelope) bool { return p.send(e.ChannelID, e.Message, p.mconn.Send) } // TrySend msg bytes to the channel identified by chID byte. Immediately returns // false if the send queue is full. +// +// thread safe. func (p *peer) TrySend(e Envelope) bool { return p.send(e.ChannelID, e.Message, p.mconn.TrySend) } @@ -270,11 +276,11 @@ func (p *peer) TrySend(e Envelope) bool { func (p *peer) send(chID byte, msg proto.Message, sendFunc func(byte, []byte) bool) bool { if !p.IsRunning() { return false - } else if !p.hasChannel(chID) { + } else if !p.HasChannel(chID) { return false } - metricLabelValue := p.mlc.ValueToMetricLabel(msg) - if w, ok := msg.(Wrapper); ok { + msgType := getMsgType(msg) + if w, ok := msg.(types.Wrapper); ok { msg = w.Wrap() } msgBytes, err := proto.Marshal(msg) @@ -284,49 +290,38 @@ func (p *peer) send(chID byte, msg proto.Message, sendFunc func(byte, []byte) bo } res := sendFunc(chID, msgBytes) if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) + p.pendingMetrics.AddPendingSendBytes(msgType, len(msgBytes)) } return res } // Get the data for a given key. -func (p *peer) Get(key string) interface{} { +// +// thread safe. +func (p *peer) Get(key string) any { return p.Data.Get(key) } // Set sets the data for the given key. -func (p *peer) Set(key string, data interface{}) { +// +// thread safe. +func (p *peer) Set(key string, data any) { p.Data.Set(key, data) } -// hasChannel returns true if the peer reported -// knowing about the given chID. -func (p *peer) hasChannel(chID byte) bool { +// HasChannel returns whether the peer reported implementing this channel. +func (p *peer) HasChannel(chID byte) bool { for _, ch := range p.channels { if ch == chID { return true } } - // NOTE: probably will want to remove this - // but could be helpful while the feature is new - p.Logger.Debug( - "Unknown channel for peer", - "channel", - chID, - "channels", - p.channels, - ) return false } -// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all. -func (p *peer) CloseConn() error { - return p.peerConn.conn.Close() +// Conn returns the underlying peer source connection. +func (p *peer) Conn() net.Conn { + return p.peerConn.conn } func (p *peer) SetRemovalFailed() { @@ -337,15 +332,10 @@ func (p *peer) GetRemovalFailed() bool { return p.removalAttemptFailed } -//--------------------------------------------------- +// --------------------------------------------------- // methods only used for testing // TODO: can we remove these? -// CloseConn closes the underlying connection -func (pc *peerConn) CloseConn() { - pc.conn.Close() -} - // RemoteAddr returns peer's remote network address. 
func (p *peer) RemoteAddr() net.Addr { return p.peerConn.conn.RemoteAddr() @@ -359,7 +349,7 @@ func (p *peer) CanSend(chID byte) bool { return p.mconn.CanSend(chID) } -//--------------------------------------------------- +// --------------------------------------------------- func PeerMetrics(metrics *Metrics) PeerOption { return func(p *peer) { @@ -368,23 +358,51 @@ func PeerMetrics(metrics *Metrics) PeerOption { } func (p *peer) metricsReporter() { + metricsTicker := time.NewTicker(metricsTickerDuration) + defer metricsTicker.Stop() + for { select { - case <-p.metricsTicker.C: + case <-metricsTicker.C: status := p.mconn.Status() var sendQueueSize float64 for _, chStatus := range status.Channels { sendQueueSize += float64(chStatus.SendQueueSize) } + p.metrics.RecvRateLimiterDelay.With("peer_id", string(p.ID())). + Add(status.RecvMonitor.SleepTime.Seconds()) + p.metrics.SendRateLimiterDelay.With("peer_id", string(p.ID())). + Add(status.SendMonitor.SleepTime.Seconds()) + p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) + // Report per peer, per message total bytes, since the last interval + func() { + p.pendingMetrics.mtx.Lock() + defer p.pendingMetrics.mtx.Unlock() + for _, entry := range p.pendingMetrics.perMessageCache { + if entry.pendingSendBytes > 0 { + p.metrics.MessageSendBytesTotal. + With("message_type", entry.label). + Add(float64(entry.pendingSendBytes)) + entry.pendingSendBytes = 0 + } + if entry.pendingRecvBytes > 0 { + p.metrics.MessageReceiveBytesTotal. + With("message_type", entry.label). + Add(float64(entry.pendingRecvBytes)) + entry.pendingRecvBytes = 0 + } + } + }() + case <-p.Quit(): return } } } -//------------------------------------------------------------------ +// ------------------------------------------------------------------ // helper funcs func createMConnection( @@ -392,11 +410,10 @@ func createMConnection( p *peer, reactorsByCh map[byte]Reactor, msgTypeByChID map[byte]proto.Message, - chDescs []*cmtconn.ChannelDescriptor, - onPeerError func(Peer, interface{}), - config cmtconn.MConnConfig, -) *cmtconn.MConnection { - + streamDescs []StreamDescriptor, + onPeerError func(Peer, any), + config tcpconn.MConnConfig, +) *tcpconn.MConnection { onReceive := func(chID byte, msgBytes []byte) { reactor := reactorsByCh[chID] if reactor == nil { @@ -408,20 +425,15 @@ func createMConnection( msg := proto.Clone(mt) err := proto.Unmarshal(msgBytes, msg) if err != nil { - panic(fmt.Errorf("unmarshaling message: %s into type: %s", err, reflect.TypeOf(mt))) + panic(fmt.Sprintf("unmarshaling message: %v into type: %s", err, reflect.TypeOf(mt))) } - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - if w, ok := msg.(Unwrapper); ok { + if w, ok := msg.(types.Unwrapper); ok { msg, err = w.Unwrap() if err != nil { - panic(fmt.Errorf("unwrapping message: %s", err)) + panic(fmt.Sprintf("unwrapping message: %v", err)) } } - p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) - p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes))) + p.pendingMetrics.AddPendingRecvBytes(getMsgType(msg), len(msgBytes)) reactor.Receive(Envelope{ ChannelID: chID, Src: p, @@ -429,15 +441,60 @@ func createMConnection( }) } - onError := func(r interface{}) { + onError := func(r any) { onPeerError(p, r) } - return cmtconn.NewMConnectionWithConfig( + // filter out non-tcpconn.ChannelDescriptor streams + tcpDescs := make([]*tcpconn.ChannelDescriptor, 0, 
len(streamDescs)) + for _, stream := range streamDescs { + d, ok := stream.(*tcpconn.ChannelDescriptor) + if !ok { + continue + } + tcpDescs = append(tcpDescs, d) + } + + return tcpconn.NewMConnectionWithConfig( conn, - chDescs, + tcpDescs, onReceive, onError, config, ) } + +func wrapPeer(c net.Conn, ni ni.NodeInfo, cfg peerConfig, socketAddr *na.NetAddr, mConfig tcpconn.MConnConfig) Peer { + persistent := false + if cfg.isPersistent != nil { + if cfg.outbound { + persistent = cfg.isPersistent(socketAddr) + } else { + selfReportedAddr, err := ni.NetAddr() + if err == nil { + persistent = cfg.isPersistent(selfReportedAddr) + } + } + } + + peerConn := newPeerConn( + cfg.outbound, + persistent, + c, + socketAddr, + ) + + p := newPeer( + peerConn, + mConfig, + ni, + cfg.reactorsByCh, + cfg.msgTypeByChID, + cfg.streamDescs, + cfg.onPeerError, + PeerMetrics(cfg.metrics), + ) + + return p +} diff --git a/p2p/peer_set.go b/p2p/peer_set.go index 0a7727e6f1d..68e6d60a471 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -3,25 +3,35 @@ package p2p import ( "net" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtsync "github.com/cometbft/cometbft/libs/sync" + "github.com/cometbft/cometbft/p2p/nodekey" ) // IPeerSet has a (immutable) subset of the methods of PeerSet. type IPeerSet interface { - Has(key ID) bool + // Has returns true if the set contains the peer referred to by this key. + Has(key nodekey.ID) bool + // HasIP returns true if the set contains the peer referred to by this IP. HasIP(ip net.IP) bool - Get(key ID) Peer - List() []Peer + // Get returns the peer with the given key, or nil if not found. + Get(key nodekey.ID) Peer + // Copy returns a copy of the peers list. + Copy() []Peer + // Size returns the number of peers in the PeerSet. Size() int + // ForEach iterates over the PeerSet and calls the given function for each peer. + ForEach(peer func(Peer)) + // Random returns a random peer from the PeerSet. + Random() Peer } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- -// PeerSet is a special structure for keeping a table of peers. -// Iteration over the peers is super fast and thread-safe. +// PeerSet is a special thread-safe structure for keeping a table of peers. type PeerSet struct { mtx cmtsync.Mutex - lookup map[ID]*peerSetItem + lookup map[nodekey.ID]*peerSetItem list []Peer } @@ -33,7 +43,7 @@ type peerSetItem struct { // NewPeerSet creates a new PeerSet with an initial list capacity of 256 items. func NewPeerSet() *PeerSet { return &PeerSet{ - lookup: make(map[ID]*peerSetItem), + lookup: make(map[nodekey.ID]*peerSetItem), list: make([]Peer, 0, 256), } } @@ -61,7 +71,7 @@ func (ps *PeerSet) Add(peer Peer) error { // Has returns true if the set contains the peer referred to by this // peerKey, otherwise false. -func (ps *PeerSet) Has(peerKey ID) bool { +func (ps *PeerSet) Has(peerKey nodekey.ID) bool { ps.mtx.Lock() _, ok := ps.lookup[peerKey] ps.mtx.Unlock() @@ -74,14 +84,8 @@ func (ps *PeerSet) HasIP(peerIP net.IP) bool { ps.mtx.Lock() defer ps.mtx.Unlock() - return ps.hasIP(peerIP) -} - -// hasIP does not acquire a lock so it can be used in public methods which -// already lock.
-func (ps *PeerSet) hasIP(peerIP net.IP) bool { - for _, item := range ps.lookup { - if item.peer.RemoteIP().Equal(peerIP) { + for _, peer := range ps.list { + if peer.RemoteIP().Equal(peerIP) { return true } } @@ -91,9 +95,10 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool { // Get looks up a peer by the provided peerKey. Returns nil if peer is not // found. -func (ps *PeerSet) Get(peerKey ID) Peer { +func (ps *PeerSet) Get(peerKey nodekey.ID) Peer { ps.mtx.Lock() defer ps.mtx.Unlock() + item, ok := ps.lookup[peerKey] if ok { return item.peer @@ -101,15 +106,13 @@ func (ps *PeerSet) Get(peerKey ID) Peer { return nil } -// Remove discards peer by its Key, if the peer was previously memoized. -// Returns true if the peer was removed, and false if it was not found. -// in the set. +// Remove removes the peer from the PeerSet, returning false if it was not found. func (ps *PeerSet) Remove(peer Peer) bool { ps.mtx.Lock() defer ps.mtx.Unlock() - item := ps.lookup[peer.ID()] - if item == nil { + item, ok := ps.lookup[peer.ID()] + if !ok || len(ps.list) == 0 { // Removing the peer has failed so we set a flag to mark that a removal was attempted. // This can happen when the peer add routine from the switch is running in // parallel to the receive routine of MConn. @@ -118,27 +121,24 @@ func (ps *PeerSet) Remove(peer Peer) bool { peer.SetRemovalFailed() return false } index := item.index - // Create a new copy of the list but with one less item. - // (we must copy because we'll be mutating the list). - newList := make([]Peer, len(ps.list)-1) - copy(newList, ps.list) - // If it's the last peer, that's an easy special case. - if index == len(ps.list)-1 { - ps.list = newList - delete(ps.lookup, peer.ID()) - return true - } - // Replace the popped item with the last item in the old list. - lastPeer := ps.list[len(ps.list)-1] - lastPeerKey := lastPeer.ID() - lastPeerItem := ps.lookup[lastPeerKey] - newList[index] = lastPeer - lastPeerItem.index = index - ps.list = newList + // Remove from ps.lookup. delete(ps.lookup, peer.ID()) + + // If it's not the last item. + if index != len(ps.list)-1 { + // Swap it with the last item. + lastPeer := ps.list[len(ps.list)-1] + lastPeerItem := ps.lookup[lastPeer.ID()] + lastPeerItem.index = index + ps.list[index] = lastPeer + } + + // Remove the last item from ps.list. + ps.list[len(ps.list)-1] = nil // nil the last entry of the slice to shorten, so it isn't reachable & can be GC'd. + ps.list = ps.list[:len(ps.list)-1] + return true } @@ -149,9 +149,36 @@ func (ps *PeerSet) Size() int { return len(ps.list) } -// List returns the threadsafe list of peers. -func (ps *PeerSet) List() []Peer { +// Copy returns a copy of the peers list. +// +// Note: there are no guarantees about the thread-safety of Peer objects. +func (ps *PeerSet) Copy() []Peer { ps.mtx.Lock() defer ps.mtx.Unlock() - return ps.list + + c := make([]Peer, len(ps.list)) + copy(c, ps.list) + return c +} + +// ForEach iterates over the PeerSet and calls the given function for each peer. +func (ps *PeerSet) ForEach(fn func(peer Peer)) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + for _, item := range ps.lookup { + fn(item.peer) + } +} + +// Random returns a random peer from the PeerSet.
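Remove above uses the classic swap-delete trick to keep removal O(1): overwrite the removed slot with the last element, fix the moved element's index in the lookup map, then shrink the slice, clearing the vacated tail slot so it can be garbage collected. A standalone sketch under simplified, illustrative types:

package main

import "fmt"

type set struct {
	lookup map[string]int // id -> index into list
	list   []string
}

func (s *set) remove(id string) bool {
	index, ok := s.lookup[id]
	if !ok {
		return false
	}
	delete(s.lookup, id)
	last := len(s.list) - 1
	if index != last {
		// Swap the last element into the removed slot and fix its index.
		moved := s.list[last]
		s.list[index] = moved
		s.lookup[moved] = index
	}
	s.list[last] = "" // clear the tail slot (would be nil for pointer elements)
	s.list = s.list[:last]
	return true
}

func main() {
	s := &set{lookup: map[string]int{"a": 0, "b": 1, "c": 2}, list: []string{"a", "b", "c"}}
	s.remove("a")
	fmt.Println(s.list, s.lookup) // [c b] map[b:1 c:0]
}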
+func (ps *PeerSet) Random() Peer { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if len(ps.list) == 0 { + return nil + } + + return ps.list[cmtrand.Int()%len(ps.list)] } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 64911ecebff..efb7483a2ac 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -9,47 +9,49 @@ import ( "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/libs/service" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" ) -// mockPeer for testing the PeerSet +// mockPeer for testing the PeerSet. type mockPeer struct { service.BaseService ip net.IP - id ID + id nodekey.ID } -func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(Envelope) bool { return true } -func (mp *mockPeer) Send(Envelope) bool { return true } -func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } -func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() ID { return mp.id } -func (mp *mockPeer) IsOutbound() bool { return false } -func (mp *mockPeer) IsPersistent() bool { return true } -func (mp *mockPeer) Get(s string) interface{} { return s } -func (mp *mockPeer) Set(string, interface{}) {} -func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) SocketAddr() *NetAddress { return nil } -func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *mockPeer) CloseConn() error { return nil } -func (mp *mockPeer) SetRemovalFailed() {} -func (mp *mockPeer) GetRemovalFailed() bool { return false } - -// Returns a mock peer +func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error +func (*mockPeer) HasChannel(byte) bool { return true } +func (*mockPeer) TrySend(Envelope) bool { return true } +func (*mockPeer) Send(Envelope) bool { return true } +func (*mockPeer) NodeInfo() ni.NodeInfo { return ni.Default{} } +func (*mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } +func (mp *mockPeer) ID() nodekey.ID { return mp.id } +func (*mockPeer) IsOutbound() bool { return false } +func (*mockPeer) IsPersistent() bool { return true } +func (*mockPeer) Get(s string) any { return s } +func (*mockPeer) Set(string, any) {} +func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } +func (*mockPeer) SocketAddr() *na.NetAddr { return nil } +func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } +func (*mockPeer) Conn() net.Conn { return nil } +func (*mockPeer) SetRemovalFailed() {} +func (*mockPeer) GetRemovalFailed() bool { return false } + +// Returns a mock peer. 
func newMockPeer(ip net.IP) *mockPeer { if ip == nil { ip = net.IP{127, 0, 0, 1} } - nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} + nk := nodekey.NodeKey{PrivKey: ed25519.GenPrivKey()} return &mockPeer{ ip: ip, - id: nodeKey.ID(), + id: nk.ID(), } } func TestPeerSetAddRemoveOne(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() var peerList []Peer @@ -68,7 +70,7 @@ func TestPeerSetAddRemoveOne(t *testing.T) { assert.True(t, removed) wantSize := n - i - 1 for j := 0; j < 2; j++ { - assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) + assert.False(t, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) // Test the route of removing the now non-existent element removed := peerSet.Remove(peerAtFront) @@ -89,18 +91,17 @@ func TestPeerSetAddRemoveOne(t *testing.T) { peerAtEnd := peerList[i] removed := peerSet.Remove(peerAtEnd) assert.True(t, removed) - assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) + assert.False(t, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) } } func TestPeerSetAddRemoveMany(t *testing.T) { - t.Parallel() peerSet := NewPeerSet() peers := []Peer{} - N := 100 - for i := 0; i < N; i++ { + n := 100 + for i := 0; i < n; i++ { peer := newMockPeer(net.IP{127, 0, 0, byte(i)}) if err := peerSet.Add(peer); err != nil { t.Errorf("failed to add new peer") @@ -124,7 +125,6 @@ func TestPeerSetAddRemoveMany(t *testing.T) { } func TestPeerSetAddDuplicate(t *testing.T) { - t.Parallel() peerSet := NewPeerSet() peer := newMockPeer(nil) @@ -164,8 +164,6 @@ func TestPeerSetAddDuplicate(t *testing.T) { } func TestPeerSetGet(t *testing.T) { - t.Parallel() - var ( peerSet = NewPeerSet() peer = newMockPeer(nil) @@ -185,7 +183,7 @@ func TestPeerSetGet(t *testing.T) { go func(i int) { defer wg.Done() have, want := peerSet.Get(peer.ID()), peer - assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) + assert.Equal(t, want, have, "%d: have %v, want %v", i, want, have) }(i) } wg.Wait() diff --git a/p2p/peer_test.go b/p2p/peer_test.go index ce45b7fe3da..9b85951a9af 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -1,6 +1,7 @@ package p2p import ( + "errors" "fmt" golog "log" "net" @@ -11,14 +12,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + p2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/proto/tendermint/p2p" - - "github.com/cometbft/cometbft/config" - cmtconn "github.com/cometbft/cometbft/p2p/conn" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) func TestPeerBasic(t *testing.T) { @@ -29,11 +32,11 @@ func TestPeerBasic(t *testing.T) { rp.Start() t.Cleanup(rp.Stop) - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, cmtconn.DefaultMConnConfig()) - require.Nil(err) + p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tcpconn.DefaultMConnConfig()) +
require.NoError(err) err = p.Start() - require.Nil(err) + require.NoError(err) t.Cleanup(func() { if err := p.Stop(); err != nil { t.Error(err) @@ -59,11 +62,11 @@ func TestPeerSend(t *testing.T) { rp.Start() t.Cleanup(rp.Stop) - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, cmtconn.DefaultMConnConfig()) - require.Nil(err) + p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tcpconn.DefaultMConnConfig()) + require.NoError(err) err = p.Start() - require.Nil(err) + require.NoError(err) t.Cleanup(func() { if err := p.Stop(); err != nil { @@ -76,37 +79,49 @@ func TestPeerSend(t *testing.T) { } func createOutboundPeerAndPerformHandshake( - addr *NetAddress, + addr *na.NetAddr, config *config.P2PConfig, - mConfig cmtconn.MConnConfig, + mConfig tcpconn.MConnConfig, ) (*peer, error) { - chDescs := []*cmtconn.ChannelDescriptor{ - {ID: testCh, Priority: 1}, - } - reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - msgTypeByChID := map[byte]proto.Message{ - testCh: &p2p.Message{}, - } - pk := ed25519.GenPrivKey() - pc, err := testOutboundPeerConn(addr, config, false, pk) + // create outbound peer connection + pc, err := testOutboundPeerConn(addr, config, false) if err != nil { return nil, err } - timeout := 1 * time.Second - ourNodeInfo := testNodeInfo(addr.ID, "host_peer") - peerNodeInfo, err := handshake(pc.conn, timeout, ourNodeInfo) + + // create dummy node info and perform handshake + var ( + timeout = 1 * time.Second + ourNodeID = nodekey.PubKeyToID(ed25519.GenPrivKey().PubKey()) + ourNodeInfo = testNodeInfo(ourNodeID, "host_peer") + ) + peerNodeInfo, err := handshake(ourNodeInfo, pc.conn, timeout) if err != nil { return nil, err } - p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, msgTypeByChID, chDescs, func(p Peer, r interface{}) {}, newMetricsLabelCache()) + // create peer + var ( + streamDescs = []StreamDescriptor{ + &tcpconn.ChannelDescriptor{ + ID: testCh, + Priority: 1, + MessageTypeI: &p2p.Message{}, + }, + } + reactorsByCh = map[byte]Reactor{testCh: NewTestReactor(streamDescs, true)} + msgTypeByChID = map[byte]proto.Message{ + testCh: &p2p.Message{}, + } + ) + p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, msgTypeByChID, streamDescs, func(_ Peer, _ any) {}) p.SetLogger(log.TestingLogger().With("peer", addr)) return p, nil } -func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { +func testDial(addr *na.NetAddr, cfg *config.P2PConfig) (net.Conn, error) { if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") + return nil, errors.New("dial err (peerConfig.DialFail == true)") } conn, err := addr.DialTimeout(cfg.DialTimeout) @@ -117,19 +132,18 @@ func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { } func testOutboundPeerConn( - addr *NetAddress, + addr *na.NetAddr, config *config.P2PConfig, persistent bool, - ourNodePrivKey crypto.PrivKey, ) (peerConn, error) { var pc peerConn conn, err := testDial(addr, config) if err != nil { return pc, fmt.Errorf("error creating peer: %w", err) } - pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) + pc, err = testPeerConn(conn, config, true, persistent, addr) if err != nil { if cerr := conn.Close(); cerr != nil { return pc, fmt.Errorf("%v: %w", cerr.Error(), err) @@ -151,18 +165,18 @@ func testOutboundPeerConn( type remotePeer struct { PrivKey crypto.PrivKey Config *config.P2PConfig - addr *NetAddress + addr *na.NetAddr channels bytes.HexBytes
listenAddr string listener net.Listener } -func (rp *remotePeer) Addr() *NetAddress { +func (rp *remotePeer) Addr() *na.NetAddr { return rp.addr } -func (rp *remotePeer) ID() ID { - return PubKeyToID(rp.PrivKey.PubKey()) +func (rp *remotePeer) ID() nodekey.ID { + return nodekey.PubKeyToID(rp.PrivKey.PubKey()) } func (rp *remotePeer) Start() { @@ -175,7 +189,7 @@ func (rp *remotePeer) Start() { golog.Fatalf("net.Listen tcp :0: %+v", e) } rp.listener = l - rp.addr = NewNetAddress(PubKeyToID(rp.PrivKey.PubKey()), l.Addr()) + rp.addr = na.New(nodekey.PubKeyToID(rp.PrivKey.PubKey()), l.Addr()) if rp.channels == nil { rp.channels = []byte{testCh} } @@ -186,20 +200,17 @@ func (rp *remotePeer) Stop() { rp.listener.Close() } -func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) { - conn, err := addr.DialTimeout(1 * time.Second) +func (rp *remotePeer) Dial(addr *na.NetAddr) (net.Conn, error) { + pc, err := testOutboundPeerConn(addr, rp.Config, false) if err != nil { return nil, err } - pc, err := testInboundPeerConn(conn, rp.Config, rp.PrivKey) - if err != nil { - return nil, err - } - _, err = handshake(pc.conn, time.Second, rp.nodeInfo()) + + _, err = handshake(rp.nodeInfo(), pc.conn, time.Second) if err != nil { return nil, err } - return conn, err + return pc.conn, err } func (rp *remotePeer) accept() { @@ -215,28 +226,26 @@ func (rp *remotePeer) accept() { return } - pc, err := testInboundPeerConn(conn, rp.Config, rp.PrivKey) + pc, err := testInboundPeerConn(conn, rp.Config) if err != nil { + _ = conn.Close() golog.Fatalf("Failed to create a peer: %+v", err) } - _, err = handshake(pc.conn, time.Second, rp.nodeInfo()) + _, err = handshake(rp.nodeInfo(), pc.conn, time.Second) if err != nil { - golog.Fatalf("Failed to perform handshake: %+v", err) + _ = pc.conn.Close() + golog.Printf("Failed to perform handshake: %+v", err) } conns = append(conns, conn) } } -func (rp *remotePeer) nodeInfo() NodeInfo { - return DefaultNodeInfo{ - ProtocolVersion: defaultProtocolVersion, - DefaultNodeID: rp.Addr().ID, - ListenAddr: rp.listener.Addr().String(), - Network: "testing", - Version: "1.2.3-rc0-deadbeef", - Channels: rp.channels, - Moniker: "remote_peer", - } +func (rp *remotePeer) nodeInfo() ni.NodeInfo { + la := rp.listener.Addr().String() + nodeInfo := testNodeInfo(rp.ID(), "remote_peer_"+la) + nodeInfo.ListenAddr = la + nodeInfo.Channels = rp.channels + return nodeInfo } diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 2dc3fd8a834..c1d05eb5a83 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -17,12 +17,13 @@ import ( "github.com/minio/highwayhash" "github.com/cometbft/cometbft/crypto" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" ) const ( @@ -38,18 +39,18 @@ type AddrBook interface { service.Service // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) + AddOurAddress(addr *na.NetAddr) // Check if it is our address - OurAddress(*p2p.NetAddress) bool + OurAddress(addr *na.NetAddr) bool - AddPrivateIDs([]string) + AddPrivateIDs(ids []string) // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) + 
AddAddress(addr *na.NetAddr, src *na.NetAddr) error + RemoveAddress(addr *na.NetAddr) // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool + HasAddress(addr *na.NetAddr) bool // Do we need more peers? NeedMoreAddrs() bool @@ -58,22 +59,22 @@ type AddrBook interface { Empty() bool // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress + PickAddress(biasTowardsNewAddrs int) *na.NetAddr // Mark address - MarkGood(p2p.ID) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list + MarkGood(id nodekey.ID) + MarkAttempt(addr *na.NetAddr) + MarkBad(addr *na.NetAddr, dur time.Duration) // Move peer to bad peers list // Add bad peers back to addrBook ReinstateBadPeers() - IsGood(*p2p.NetAddress) bool - IsBanned(*p2p.NetAddress) bool + IsGood(addr *na.NetAddr) bool + IsBanned(addr *na.NetAddr) bool // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress + GetSelection() []*na.NetAddr // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress + GetSelectionWithBias(biasTowardsNewAddrs int) []*na.NetAddr Size() int @@ -92,9 +93,9 @@ type addrBook struct { mtx cmtsync.Mutex rand *cmtrand.Rand ourAddrs map[string]struct{} - privateIDs map[p2p.ID]struct{} - addrLookup map[p2p.ID]*knownAddress // new & old - badPeers map[p2p.ID]*knownAddress // blacklisted peers + privateIDs map[nodekey.ID]struct{} + addrLookup map[nodekey.ID]*knownAddress // new & old + badPeers map[nodekey.ID]*knownAddress // banned peers bucketsOld []map[string]*knownAddress bucketsNew []map[string]*knownAddress nOld int @@ -124,9 +125,9 @@ func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { am := &addrBook{ rand: cmtrand.NewRand(), ourAddrs: make(map[string]struct{}), - privateIDs: make(map[p2p.ID]struct{}), - addrLookup: make(map[p2p.ID]*knownAddress), - badPeers: make(map[p2p.ID]*knownAddress), + privateIDs: make(map[nodekey.ID]struct{}), + addrLookup: make(map[nodekey.ID]*knownAddress), + badPeers: make(map[nodekey.ID]*knownAddress), filePath: filePath, routabilityStrict: routabilityStrict, } @@ -136,7 +137,7 @@ func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { } // Initialize the buckets. -// When modifying this, don't forget to update loadFromFile() +// When modifying this, don't forget to update loadFromFile(). func (a *addrBook) init() { a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits // New addr buckets @@ -154,36 +155,33 @@ func (a *addrBook) init() { // OnStart implements Service. func (a *addrBook) OnStart() error { - if err := a.BaseService.OnStart(); err != nil { - return err - } a.loadFromFile(a.filePath) - // wg.Add to ensure that any invocation of .Wait() - // later on will wait for saveRoutine to terminate. a.wg.Add(1) go a.saveRoutine() return nil } -// OnStop implements Service. -func (a *addrBook) OnStop() { - a.BaseService.OnStop() -} - -func (a *addrBook) Wait() { +// Stop overrides Service.Stop(). +func (a *addrBook) Stop() error { + // Closes the Service.Quit() channel. + // This enables a.saveRoutine() to quit. + if err := a.BaseService.Stop(); err != nil { + return err + } a.wg.Wait() + return nil } func (a *addrBook) FilePath() string { return a.filePath } -//------------------------------------------------------- +// ------------------------------------------------------- // AddOurAddress adds one of our addresses.
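The Stop override above replaces the old OnStop/Wait pair: closing the quit channel signals saveRoutine, and the WaitGroup makes Stop block until the goroutine's final work is done. A minimal sketch of this stop-and-wait pattern, illustrative rather than the service package's actual API:

package main

import (
	"fmt"
	"sync"
	"time"
)

type worker struct {
	quit chan struct{}
	wg   sync.WaitGroup
}

func (w *worker) start() {
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// periodic work (e.g. saving the book to disk)
			case <-w.quit:
				fmt.Println("final save, then exit")
				return
			}
		}
	}()
}

func (w *worker) stop() {
	close(w.quit) // analogous to BaseService.Stop closing Quit()
	w.wg.Wait()   // don't return until the routine has flushed
}

func main() {
	w := &worker{quit: make(chan struct{})}
	w.start()
	time.Sleep(25 * time.Millisecond)
	w.stop()
}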
-func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) { +func (a *addrBook) AddOurAddress(addr *na.NetAddr) { a.mtx.Lock() defer a.mtx.Unlock() @@ -192,7 +190,7 @@ func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) { } // OurAddress returns true if it is our address. -func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { +func (a *addrBook) OurAddress(addr *na.NetAddr) bool { a.mtx.Lock() defer a.mtx.Unlock() @@ -205,15 +203,15 @@ func (a *addrBook) AddPrivateIDs(ids []string) { defer a.mtx.Unlock() for _, id := range ids { - a.privateIDs[p2p.ID(id)] = struct{}{} + a.privateIDs[nodekey.ID(id)] = struct{}{} } } // AddAddress implements AddrBook // Add address to a "new" bucket. If it's already in one, only add it probabilistically. // Returns error if the addr is non-routable. Does not add self. -// NOTE: addr must not be nil -func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error { +// NOTE: addr must not be nil. +func (a *addrBook) AddAddress(addr *na.NetAddr, src *na.NetAddr) error { a.mtx.Lock() defer a.mtx.Unlock() @@ -221,7 +219,7 @@ func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error { } // RemoveAddress implements AddrBook - removes the address from the book. -func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { +func (a *addrBook) RemoveAddress(addr *na.NetAddr) { a.mtx.Lock() defer a.mtx.Unlock() @@ -230,15 +228,15 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { // IsGood returns true if peer was ever marked as good and hasn't // done anything wrong since then. -func (a *addrBook) IsGood(addr *p2p.NetAddress) bool { +func (a *addrBook) IsGood(addr *na.NetAddr) bool { a.mtx.Lock() defer a.mtx.Unlock() return a.addrLookup[addr.ID].isOld() } -// IsBanned returns true if the peer is currently banned +// IsBanned returns true if the peer is currently banned. -func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { +func (a *addrBook) IsBanned(addr *na.NetAddr) bool { a.mtx.Lock() _, ok := a.badPeers[addr.ID] a.mtx.Unlock() @@ -247,7 +245,7 @@ func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { } // HasAddress returns true if the address is in the book. -func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool { +func (a *addrBook) HasAddress(addr *na.NetAddr) bool { a.mtx.Lock() defer a.mtx.Unlock() @@ -272,7 +270,7 @@ func (a *addrBook) Empty() bool { // and determines how biased we are to pick an address from a new bucket. // PickAddress returns nil if the AddrBook is empty or if we try to pick // from an empty bucket. -func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { +func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *na.NetAddr { a.mtx.Lock() defer a.mtx.Unlock() @@ -322,7 +320,7 @@ func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { // MarkGood implements AddrBook - it marks the peer as good and // moves it into an "old" bucket. -func (a *addrBook) MarkGood(id p2p.ID) { +func (a *addrBook) MarkGood(id nodekey.ID) { a.mtx.Lock() defer a.mtx.Unlock() @@ -332,14 +330,12 @@ func (a *addrBook) MarkGood(id p2p.ID) { } ka.markGood() if ka.isNew() { - if err := a.moveToOld(ka); err != nil { - a.Logger.Error("Error moving address to old", "err", err) - } + a.moveToOld(ka) } } // MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address.
-func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { +func (a *addrBook) MarkAttempt(addr *na.NetAddr) { a.mtx.Lock() defer a.mtx.Unlock() @@ -352,7 +348,7 @@ func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { // MarkBad implements AddrBook. Kicks address out from book, places // the address in the badPeers pool. -func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { +func (a *addrBook) MarkBad(addr *na.NetAddr, banTime time.Duration) { a.mtx.Lock() defer a.mtx.Unlock() @@ -372,12 +368,7 @@ func (a *addrBook) ReinstateBadPeers() { continue } - bucket, err := a.calcNewBucket(ka.Addr, ka.Src) - if err != nil { - a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)", - "addr", ka.Addr, "err", err) - continue - } + bucket := a.calcNewBucket(ka.Addr, ka.Src) if err := a.addToNewBucket(ka, bucket); err != nil { a.Logger.Error("Error adding peer to new bucket", "err", err) @@ -391,7 +382,7 @@ func (a *addrBook) ReinstateBadPeers() { // GetSelection implements AddrBook. // It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. // Must never return a nil address. -func (a *addrBook) GetSelection() []*p2p.NetAddress { +func (a *addrBook) GetSelection() []*na.NetAddr { a.mtx.Lock() defer a.mtx.Unlock() @@ -410,7 +401,7 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress { // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, // could we just select a random numAddresses of indexes? - allAddr := make([]*p2p.NetAddress, bookSize) + allAddr := make([]*na.NetAddr, bookSize) i := 0 for _, ka := range a.addrLookup { allAddr[i] = ka.Addr @@ -441,7 +432,7 @@ func percentageOfNum(p, n int) int { // biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to // that range) and determines how biased we are to pick an address from a new // bucket. -func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { +func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*na.NetAddr { a.mtx.Lock() defer a.mtx.Unlock() @@ -473,7 +464,7 @@ func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddre return selection } -//------------------------------------------------ +// ------------------------------------------------ // Size returns the number of addresses in the book. func (a *addrBook) Size() int { @@ -487,7 +478,7 @@ func (a *addrBook) size() int { return a.nNew + a.nOld } -//---------------------------------------------------------- +// ---------------------------------------------------------- // Save persists the address book to disk. 
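GetSelectionWithBias above splits the selection budget between new and old buckets according to a percentage bias in [0, 100]. A sketch of that arithmetic with a hypothetical helper; note the real implementation also tops the "new" share back up when old addresses run short:

package main

import "fmt"

func split(budget, nNew, nOld, bias int) (fromNew, fromOld int) {
	if bias > 100 {
		bias = 100
	}
	if bias < 0 {
		bias = 0
	}
	fromNew = budget * bias / 100
	if fromNew > nNew { // not enough new addresses: the remainder comes from old
		fromNew = nNew
	}
	fromOld = budget - fromNew
	if fromOld > nOld { // clamp to what the old buckets can supply
		fromOld = nOld
	}
	return fromNew, fromOld
}

func main() {
	fmt.Println(split(10, 3, 50, 30)) // 3 7
}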
func (a *addrBook) Save() { @@ -498,20 +489,19 @@ func (a *addrBook) saveRoutine() { defer a.wg.Done() saveFileTicker := time.NewTicker(dumpAddressInterval) -out: + defer saveFileTicker.Stop() for { select { case <-saveFileTicker.C: - a.saveToFile(a.filePath) + a.Save() case <-a.Quit(): - break out + a.Save() + return } } - saveFileTicker.Stop() - a.saveToFile(a.filePath) } -//---------------------------------------------------------- +// ---------------------------------------------------------- func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress { switch bucketType { @@ -529,7 +519,7 @@ func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAd func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error { // Consistency check to ensure we don't add an already known address if ka.isOld() { - return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx} + return ErrAddrBookOldAddressNewBucket{ka.Addr, bucketIdx} } addrStr := ka.Addr.String() @@ -548,7 +538,7 @@ func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error { // Add to bucket. bucket[addrStr] = ka - // increment nNew if the peer doesnt already exist in a bucket + // increment nNew if the peer doesn't already exist in a bucket if ka.addBucketRef(bucketIdx) == 1 { a.nNew++ } @@ -626,7 +616,7 @@ func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { delete(a.addrLookup, ka.ID()) } -//---------------------------------------------------------- +// ---------------------------------------------------------- func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { bucket := a.getBucket(bucketType, bucketIdx) @@ -640,8 +630,8 @@ func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { } // adds the address to a "new" bucket. if its already in one, -// it only adds it probabilistically -func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { +// it only adds it probabilistically. +func (a *addrBook) addAddress(addr, src *na.NetAddr) error { if addr == nil || src == nil { return ErrAddrBookNilAddr{addr, src} } @@ -692,14 +682,12 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { ka = newKnownAddress(addr, src) } - bucket, err := a.calcNewBucket(addr, src) - if err != nil { - return err - } + bucket := a.calcNewBucket(addr, src) + return a.addToNewBucket(ka, bucket) } -func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { +func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*na.NetAddr { var buckets []map[string]*knownAddress switch bucketType { case bucketTypeNew: @@ -707,7 +695,7 @@ func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddre case bucketTypeOld: buckets = a.bucketsOld default: - panic("unexpected bucketType") + panic("unexpected bucket type") } total := 0 for _, bucket := range buckets { @@ -719,7 +707,7 @@ func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddre addresses = append(addresses, ka) } } - selection := make([]*p2p.NetAddress, 0, num) + selection := make([]*na.NetAddr, 0, num) chosenSet := make(map[string]bool, num) rand.Shuffle(total, func(i, j int) { addresses[i], addresses[j] = addresses[j], addresses[i] @@ -757,15 +745,15 @@ func (a *addrBook) expireNew(bucketIdx int) { // Promotes an address from new to old. If the destination bucket is full, // demote the oldest one to a "new" bucket. // TODO: Demote more probabilistically? 
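randomPickAddresses above shuffles the candidate list once and then walks it from the front, skipping duplicates, until enough addresses are chosen. A standalone sketch of that shuffle-then-take selection, using math/rand like the original:

package main

import (
	"fmt"
	"math/rand"
)

func pick(candidates []string, num int) []string {
	selection := make([]string, 0, num)
	chosen := make(map[string]bool, num)
	rand.Shuffle(len(candidates), func(i, j int) {
		candidates[i], candidates[j] = candidates[j], candidates[i]
	})
	for _, c := range candidates {
		if chosen[c] {
			continue
		}
		chosen[c] = true
		selection = append(selection, c)
		if len(selection) == num {
			break
		}
	}
	return selection
}

func main() {
	fmt.Println(pick([]string{"a", "b", "c", "d", "e"}, 3))
}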
-func (a *addrBook) moveToOld(ka *knownAddress) error { +func (a *addrBook) moveToOld(ka *knownAddress) { // Sanity check if ka.isOld() { a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return nil + return } if len(ka.Buckets) == 0 { a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return nil + return } // Remove from all (new) buckets. @@ -774,19 +762,15 @@ func (a *addrBook) moveToOld(ka *knownAddress) error { ka.BucketType = bucketTypeOld // Try to add it to its oldBucket destination. - oldBucketIdx, err := a.calcOldBucket(ka.Addr) - if err != nil { - return err - } + oldBucketIdx := a.calcOldBucket(ka.Addr) + added := a.addToOldBucket(ka, oldBucketIdx) if !added { // No room; move the oldest to a new bucket oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) - if err != nil { - return err - } + newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src) + if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { a.Logger.Error("Error adding peer to old bucket", "err", err) } @@ -797,10 +781,9 @@ func (a *addrBook) moveToOld(ka *knownAddress) error { a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) } } - return nil } -func (a *addrBook) removeAddress(addr *p2p.NetAddress) { +func (a *addrBook) removeAddress(addr *na.NetAddr) { ka := a.addrLookup[addr.ID] if ka == nil { return @@ -809,7 +792,7 @@ func (a *addrBook) removeAddress(addr *p2p.NetAddress) { a.removeFromAllBuckets(ka) } -func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { +func (a *addrBook) addBadPeer(addr *na.NetAddr, banTime time.Duration) bool { // check it exists in addrbook ka := a.addrLookup[addr.ID] // check address is not already there @@ -821,24 +804,21 @@ func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool // add to bad peer list ka.ban(banTime) a.badPeers[addr.ID] = ka - a.Logger.Info("Add address to blacklist", "addr", addr) + a.Logger.Info("Add address to denylist", "addr", addr) } return true } -//--------------------------------------------------------------------- +// --------------------------------------------------------------------- // calculate bucket placements -// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { +// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets. +func (a *addrBook) calcNewBucket(addr, src *na.NetAddr) int { data1 := []byte{} data1 = append(data1, []byte(a.key)...) data1 = append(data1, []byte(a.groupKey(addr))...) data1 = append(data1, []byte(a.groupKey(src))...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } + hash1 := a.hash(data1) hash64 := binary.BigEndian.Uint64(hash1) hash64 %= newBucketsPerGroup var hashbuf [8]byte @@ -848,23 +828,18 @@ func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { data2 = append(data2, a.groupKey(src)...) data2 = append(data2, hashbuf[:]...) 
- hash2, err := a.hash(data2) - if err != nil { - return 0, err - } + hash2 := a.hash(data2) result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) - return result, nil + return result } -// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { +// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets. +func (a *addrBook) calcOldBucket(addr *na.NetAddr) int { data1 := []byte{} data1 = append(data1, []byte(a.key)...) data1 = append(data1, []byte(addr.String())...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } + hash1 := a.hash(data1) + hash64 := binary.BigEndian.Uint64(hash1) hash64 %= oldBucketsPerGroup var hashbuf [8]byte @@ -874,23 +849,21 @@ func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { data2 = append(data2, a.groupKey(addr)...) data2 = append(data2, hashbuf[:]...) - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } + hash2 := a.hash(data2) + result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) - return result, nil + return result } // Return a string representing the network group of this address. // This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string // "local" for a local address and the string "unroutable" for an unroutable // address. -func (a *addrBook) groupKey(na *p2p.NetAddress) string { +func (a *addrBook) groupKey(na *na.NetAddr) string { return groupKeyFor(na, a.routabilityStrict) } -func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { +func groupKeyFor(na *na.NetAddr, routabilityStrict bool) string { if routabilityStrict && na.Local() { return "local" } @@ -940,8 +913,9 @@ func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { return na.IP.Mask(ipv6Mask).String() } -func (a *addrBook) hash(b []byte) ([]byte, error) { +// hash returns the hash of b. 
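groupKeyFor above reduces a routable IPv4 address to its /16 network group, so peers from the same subnet hash into the same bucket group. A stdlib-only sketch of that masking step:

package main

import (
	"fmt"
	"net"
)

func ipv4GroupKey(ip net.IP) string {
	// /16 mask: 1.2.3.4 -> "1.2.0.0"
	return ip.Mask(net.CIDRMask(16, 32)).String()
}

func main() {
	fmt.Println(ipv4GroupKey(net.ParseIP("1.2.3.4")))   // 1.2.0.0
	fmt.Println(ipv4GroupKey(net.ParseIP("1.2.200.9"))) // 1.2.0.0
}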
+func (a *addrBook) hash(b []byte) []byte { a.hasher.Reset() a.hasher.Write(b) - return a.hasher.Sum(nil), nil + return a.hasher.Sum(nil) } diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index c34ee412dbc..c9d918b26f1 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -12,16 +12,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtrand "github.com/cometbft/cometbft/libs/rand" - "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" ) // FIXME These tests should not rely on .(*addrBook) assertions func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) // 0 addresses @@ -32,7 +33,7 @@ func TestAddrBookPickAddress(t *testing.T) { addr := book.PickAddress(50) assert.Nil(t, addr, "expected no address") - randAddrs := randNetAddressPairs(t, 1) + randAddrs := randNetAddrPairs(t, 1) addrSrc := randAddrs[0] err := book.AddAddress(addrSrc.addr, addrSrc.src) require.NoError(t, err) @@ -54,11 +55,11 @@ func TestAddrBookPickAddress(t *testing.T) { // in this case, nNew==0 but we biased 100% to new, so we return nil addr = book.PickAddress(100) - assert.Nil(t, addr, "did not expected an address") + assert.Nil(t, addr, "did not expect an address") } func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) // 0 addresses @@ -74,7 +75,7 @@ func TestAddrBookSaveLoad(t *testing.T) { assert.True(t, book.Empty()) // 100 addresses - randAddrs := randNetAddressPairs(t, 100) + randAddrs := randNetAddrPairs(t, 100) for _, addrSrc := range randAddrs { err := book.AddAddress(addrSrc.addr, addrSrc.src) @@ -93,10 +94,10 @@ func TestAddrBookSaveLoad(t *testing.T) { } func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) - randAddrs := randNetAddressPairs(t, 100) + randAddrs := randNetAddrPairs(t, 100) book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -112,10 +113,10 @@ func TestAddrBookLookup(t *testing.T) { } func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) - randAddrs := randNetAddressPairs(t, 100) + randAddrs := randNetAddrPairs(t, 100) book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -152,17 +153,17 @@ func TestAddrBookPromoteToOld(t *testing.T) { t.Errorf("selection with bias could not be bigger than the book") } - assert.Equal(t, book.Size(), 100, "expecting book size to be 100") + assert.Equal(t, 100, book.Size(), "expecting book size to be 100") } func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) - randAddrs := randNetAddressPairs(t, 100) + randAddrs := randNetAddrPairs(t, 100) differentSrc := randIPv4Address(t) for _, addrSrc := range randAddrs { @@ -177,20 +178,22 @@ func TestAddrBookHandlesDuplicates(t *testing.T) { assert.Equal(t, 100, book.Size()) } -type netAddressPair struct { - addr *p2p.NetAddress - src 
*p2p.NetAddress +type netAddrPair struct { + addr *na.NetAddr + src *na.NetAddr } -func randNetAddressPairs(t *testing.T, n int) []netAddressPair { - randAddrs := make([]netAddressPair, n) +func randNetAddrPairs(t *testing.T, n int) []netAddrPair { + t.Helper() + randAddrs := make([]netAddrPair, n) for i := 0; i < n; i++ { - randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} + randAddrs[i] = netAddrPair{addr: randIPv4Address(t), src: randIPv4Address(t)} } return randAddrs } -func randIPv4Address(t *testing.T) *p2p.NetAddress { +func randIPv4Address(t *testing.T) *na.NetAddr { + t.Helper() for { ip := fmt.Sprintf("%v.%v.%v.%v", cmtrand.Intn(254)+1, @@ -199,10 +202,10 @@ func randIPv4Address(t *testing.T) *p2p.NetAddress { cmtrand.Intn(255), ) port := cmtrand.Intn(65535-1) + 1 - id := p2p.ID(hex.EncodeToString(cmtrand.Bytes(p2p.IDByteLength))) - idAddr := p2p.IDAddressString(id, fmt.Sprintf("%v:%v", ip, port)) - addr, err := p2p.NewNetAddressString(idAddr) - assert.Nil(t, err, "error generating rand network address") + id := nodekey.ID(hex.EncodeToString(cmtrand.Bytes(nodekey.IDByteLength))) + idAddr := na.IDAddrString(id, fmt.Sprintf("%v:%v", ip, port)) + addr, err := na.NewFromString(idAddr) + require.NoError(t, err) if addr.Routable() { return addr } @@ -210,7 +213,7 @@ func randIPv4Address(t *testing.T) *p2p.NetAddress { } func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -258,7 +261,7 @@ func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) { } func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -272,18 +275,18 @@ func TestAddrBookGetSelection(t *testing.T) { err := book.AddAddress(addr, addr) require.NoError(t, err) - assert.Equal(t, 1, len(book.GetSelection())) + assert.Len(t, book.GetSelection(), 1) assert.Equal(t, addr, book.GetSelection()[0]) // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) + randAddrs := randNetAddrPairs(t, 100) for _, addrSrc := range randAddrs { err := book.AddAddress(addrSrc.addr, addrSrc.src) require.NoError(t, err) } // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) + addrs := make(map[string]*na.NetAddr) selection := book.GetSelection() for _, addr := range selection { if dup, ok := addrs[addr.String()]; ok { @@ -300,7 +303,7 @@ func TestAddrBookGetSelection(t *testing.T) { func TestAddrBookGetSelectionWithBias(t *testing.T) { const biasTowardsNewAddrs = 30 - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -316,18 +319,18 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) { require.NoError(t, err) selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Equal(t, 1, len(selection)) + assert.Len(t, selection, 1) assert.Equal(t, addr, selection[0]) // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) + randAddrs := randNetAddrPairs(t, 100) for _, addrSrc := range randAddrs { err := book.AddAddress(addrSrc.addr, addrSrc.src) require.NoError(t, err) } // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) + addrs := make(map[string]*na.NetAddr) selection = book.GetSelectionWithBias(biasTowardsNewAddrs) for _, addr := range selection { if dup, ok 
:= addrs[addr.String()]; ok { @@ -383,7 +386,7 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) { } func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -399,8 +402,9 @@ func TestAddrBookHasAddress(t *testing.T) { assert.False(t, book.HasAddress(addr)) } -func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) { - addrs := make([]*p2p.NetAddress, numAddrs) +func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*na.NetAddr, []string) { + t.Helper() + addrs := make([]*na.NetAddr, numAddrs) for i := 0; i < numAddrs; i++ { addrs[i] = randIPv4Address(t) } @@ -413,7 +417,7 @@ func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []st } func TestBanBadPeers(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -428,8 +432,8 @@ func TestBanBadPeers(t *testing.T) { assert.True(t, book.IsBanned(addr)) err := book.AddAddress(addr, addr) - // book should not add address from the blacklist - assert.Error(t, err) + // book should not add address from the denylist + require.Error(t, err) time.Sleep(1 * time.Second) book.ReinstateBadPeers() @@ -440,7 +444,7 @@ func TestBanBadPeers(t *testing.T) { } func TestAddrBookEmpty(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -451,8 +455,8 @@ func TestAddrBookEmpty(t *testing.T) { book.AddOurAddress(randIPv4Address(t)) require.True(t, book.Empty()) // Check that book with private addrs is empty - _, privateIds := testCreatePrivateAddrs(t, 5) - book.AddPrivateIDs(privateIds) + _, privateIDs := testCreatePrivateAddrs(t, 5) + book.AddPrivateIDs(privateIDs) require.True(t, book.Empty()) // Check that book with address is not empty @@ -462,7 +466,7 @@ func TestAddrBookEmpty(t *testing.T) { } func TestPrivatePeers(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) book := NewAddrBook(fname, true) @@ -474,7 +478,7 @@ func TestPrivatePeers(t *testing.T) { // private addrs must not be added for _, addr := range addrs { err := book.AddAddress(addr, addr) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here _, ok := err.(ErrAddrBookPrivate) assert.True(t, ok) } @@ -482,13 +486,14 @@ func TestPrivatePeers(t *testing.T) { // addrs coming from private peers must not be added err := book.AddAddress(randIPv4Address(t), addrs[0]) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here _, ok := err.(ErrAddrBookPrivateSrc) assert.True(t, ok) } } func testAddrBookAddressSelection(t *testing.T, bookSize int) { + t.Helper() // generate all combinations of old (m) and new addresses for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { nBookNew := bookSize - nBookOld @@ -539,8 +544,7 @@ func testAddrBookAddressSelection(t *testing.T, bookSize int) { // Verify that the order of addresses is as expected // Get the sequence types and lengths of the selection - seqLens, seqTypes, err := analyseSelectionLayout(book, addrs) - assert.NoError(t, err, "%s", dbgStr) + seqLens, seqTypes := analyseSelectionLayout(book, addrs) // Build a list with the expected lengths 
of partitions and another with the expected types, e.g.: // expSeqLens = [10, 22], expSeqTypes = [1, 2] @@ -571,8 +575,8 @@ func testAddrBookAddressSelection(t *testing.T, bookSize int) { func TestMultipleAddrBookAddressSelection(t *testing.T) { // test books with smaller size, < N - const N = 32 - for bookSize := 1; bookSize < N; bookSize++ { + const n = 32 + for bookSize := 1; bookSize < n; bookSize++ { testAddrBookAddressSelection(t, bookSize) } @@ -589,7 +593,7 @@ func TestMultipleAddrBookAddressSelection(t *testing.T) { } func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { - fname := createTempFileName("addrbook_test") + fname := createTempFileName() defer deleteTempFile(fname) // This test adds a peer to the address book and marks it good @@ -598,38 +602,38 @@ func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5" peerRealIP := "1.1.1.1:26656" peerOverrideAttemptIP := "2.2.2.2:26656" - SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656" + srcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656" // There is a chance that AddAddress will ignore the new peer it's given. // So we repeat trying to override the peer several times, // to ensure we aren't in a case that got probabilistically ignored numOverrideAttempts := 10 - peerRealAddr, err := p2p.NewNetAddressString(peerID + "@" + peerRealIP) - require.Nil(t, err) + peerRealAddr, err := na.NewFromString(peerID + "@" + peerRealIP) + require.NoError(t, err) - peerOverrideAttemptAddr, err := p2p.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) - require.Nil(t, err) + peerOverrideAttemptAddr, err := na.NewFromString(peerID + "@" + peerOverrideAttemptIP) + require.NoError(t, err) - src, err := p2p.NewNetAddressString(SrcAddr) - require.Nil(t, err) + src, err := na.NewFromString(srcAddr) + require.NoError(t, err) book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) + require.NoError(t, err) book.MarkAttempt(peerRealAddr) book.MarkGood(peerRealAddr.ID) // Double check that adding a peer again doesn't error err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) + require.NoError(t, err) // Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2) // This should just be ignored, and not error. for i := 0; i < numOverrideAttempts; i++ { err = book.AddAddress(peerOverrideAttemptAddr, src) - require.Nil(t, err) + require.NoError(t, err) } // Now check that the IP was not overridden.
// This is done by sampling several peers from addr book @@ -675,7 +679,7 @@ func TestAddrBookGroupKey(t *testing.T) { for i, tc := range testCases { nip := net.ParseIP(tc.ip) - key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), false) + key := groupKeyFor(na.NewFromIPPort(nip, 26656), false) assert.Equal(t, tc.expKey, key, "#%d", i) } @@ -705,18 +709,20 @@ func TestAddrBookGroupKey(t *testing.T) { for i, tc := range testCases { nip := net.ParseIP(tc.ip) - key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), true) + key := groupKeyFor(na.NewFromIPPort(nip, 26656), true) assert.Equal(t, tc.expKey, key, "#%d", i) } } -func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { +func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*na.NetAddr, book *addrBook) { + t.Helper() nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) assert.Equal(t, m, nOld, "old addresses") assert.Equal(t, n, nNew, "new addresses") } -func createTempFileName(prefix string) string { +func createTempFileName() string { + prefix := "addrbook_test" f, err := os.CreateTemp("", prefix) if err != nil { panic(err) @@ -737,29 +743,30 @@ func deleteTempFile(fname string) { } func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - fname = createTempFileName("addrbook_test") + t.Helper() + fname = createTempFileName() book = NewAddrBook(fname, true).(*addrBook) book.SetLogger(log.TestingLogger()) assert.Zero(t, book.Size()) - randAddrs := randNetAddressPairs(t, nOld) + randAddrs := randNetAddrPairs(t, nOld) for _, addr := range randAddrs { err := book.AddAddress(addr.addr, addr.src) require.NoError(t, err) book.MarkGood(addr.addr.ID) } - randAddrs = randNetAddressPairs(t, nNew) + randAddrs = randNetAddrPairs(t, nNew) for _, addr := range randAddrs { err := book.AddAddress(addr.addr, addr.src) require.NoError(t, err) } - return + return book, fname } -func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) { +func countOldAndNewAddrsInSelection(addrs []*na.NetAddr, book *addrBook) (nOld, nNew int) { for _, addr := range addrs { if book.IsGood(addr) { nOld++ @@ -767,14 +774,14 @@ func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nO nNew++ } } - return + return nOld, nNew } // Analyze the layout of the selection specified by 'addrs' // Returns: // - seqLens - the lengths of the sequences of addresses of same type -// - seqTypes - the types of sequences in selection -func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) { +// - seqTypes - the types of sequences in selection. 
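analyseSelectionLayout, described above, is essentially a run-length encoding of the selection's old/new address types: it records the length and type of each maximal run of same-typed entries. A standalone sketch of that encoding over plain ints:

package main

import "fmt"

func runLengths(types []int) (seqLens, seqTypes []int) {
	prev, length := 0, 0
	for _, t := range types {
		if t != prev && length > 0 {
			// close the previous run
			seqLens = append(seqLens, length)
			seqTypes = append(seqTypes, prev)
			length = 0
		}
		prev = t
		length++
	}
	if length > 0 {
		seqLens = append(seqLens, length)
		seqTypes = append(seqTypes, prev)
	}
	return seqLens, seqTypes
}

func main() {
	// 1 = new, 2 = old; expect lengths [2 3 1], types [1 2 1]
	fmt.Println(runLengths([]int{1, 1, 2, 2, 2, 1}))
}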
+func analyseSelectionLayout(book *addrBook, addrs []*na.NetAddr) (seqLens, seqTypes []int) { // address types are: 0 - nil, 1 - new, 2 - old var ( prevType = 0 @@ -782,7 +789,7 @@ func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, s ) for _, addr := range addrs { - addrType := 0 + var addrType int if book.IsGood(addr) { addrType = 2 } else { @@ -800,5 +807,5 @@ func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, s seqLens = append(seqLens, currentSeqLen) seqTypes = append(seqTypes, prevType) - return + return seqLens, seqTypes } diff --git a/p2p/pex/bench_test.go b/p2p/pex/bench_test.go index 00d2724bfa1..64ac8e4cf1b 100644 --- a/p2p/pex/bench_test.go +++ b/p2p/pex/bench_test.go @@ -3,15 +3,15 @@ package pex import ( "testing" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" ) func BenchmarkAddrBook_hash(b *testing.B) { book := &addrBook{ ourAddrs: make(map[string]struct{}), - privateIDs: make(map[p2p.ID]struct{}), - addrLookup: make(map[p2p.ID]*knownAddress), - badPeers: make(map[p2p.ID]*knownAddress), + privateIDs: make(map[nodekey.ID]struct{}), + addrLookup: make(map[nodekey.ID]*knownAddress), + badPeers: make(map[nodekey.ID]*knownAddress), filePath: "", routabilityStrict: true, } @@ -19,6 +19,6 @@ func BenchmarkAddrBook_hash(b *testing.B) { msg := []byte(`foobar`) b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = book.hash(msg) + _ = book.hash(msg) } } diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index f4551292b80..84bf7813533 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -3,31 +3,39 @@ package pex import ( "errors" "fmt" + "time" - "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" +) + +var ( + ErrEmptyAddressBook = errors.New("address book is empty and couldn't resolve any seed nodes") + // ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. 
+ ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") ) type ErrAddrBookNonRoutable struct { - Addr *p2p.NetAddress + Addr *na.NetAddr } func (err ErrAddrBookNonRoutable) Error() string { return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) } -type errAddrBookOldAddressNewBucket struct { - Addr *p2p.NetAddress +type ErrAddrBookOldAddressNewBucket struct { + Addr *na.NetAddr BucketID int } -func (err errAddrBookOldAddressNewBucket) Error() string { +func (err ErrAddrBookOldAddressNewBucket) Error() string { return fmt.Sprintf("failed consistency check!"+ " Cannot add pre-existing address %v into new bucket %v", err.Addr, err.BucketID) } type ErrAddrBookSelf struct { - Addr *p2p.NetAddress + Addr *na.NetAddr } func (err ErrAddrBookSelf) Error() string { @@ -35,32 +43,32 @@ func (err ErrAddrBookSelf) Error() string { } type ErrAddrBookPrivate struct { - Addr *p2p.NetAddress + Addr *na.NetAddr } func (err ErrAddrBookPrivate) Error() string { return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) } -func (err ErrAddrBookPrivate) PrivateAddr() bool { +func (ErrAddrBookPrivate) PrivateAddr() bool { return true } type ErrAddrBookPrivateSrc struct { - Src *p2p.NetAddress + Src *na.NetAddr } func (err ErrAddrBookPrivateSrc) Error() string { return fmt.Sprintf("Cannot add peer coming from private peer with address %v", err.Src) } -func (err ErrAddrBookPrivateSrc) PrivateAddr() bool { +func (ErrAddrBookPrivateSrc) PrivateAddr() bool { return true } type ErrAddrBookNilAddr struct { - Addr *p2p.NetAddress - Src *p2p.NetAddress + Addr *na.NetAddr + Src *na.NetAddr } func (err ErrAddrBookNilAddr) Error() string { @@ -68,7 +76,7 @@ func (err ErrAddrBookNilAddr) Error() string { } type ErrAddrBookInvalidAddr struct { - Addr *p2p.NetAddress + Addr *na.NetAddr AddrErr error } @@ -76,14 +84,64 @@ func (err ErrAddrBookInvalidAddr) Error() string { return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) } -// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used +// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used. type ErrAddressBanned struct { - Addr *p2p.NetAddress + Addr *na.NetAddr } func (err ErrAddressBanned) Error() string { return fmt.Sprintf("Address: %v is currently banned", err.Addr) } -// ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. -var ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") +// ErrReceivedPEXRequestTooSoon is thrown when a peer sends a PEX request too soon after the last one. 
+type ErrReceivedPEXRequestTooSoon struct { + Peer nodekey.ID + LastReceived time.Time + Now time.Time + MinInterval time.Duration +} + +func (err ErrReceivedPEXRequestTooSoon) Error() string { + return fmt.Sprintf("received PEX request from peer %v too soon (last received %v, now %v, min interval %v), Disconnecting peer", + err.Peer, err.LastReceived, err.Now, err.MinInterval) +} + +type ErrMaxAttemptsToDial struct { + Max int +} + +func (e ErrMaxAttemptsToDial) Error() string { + return fmt.Sprintf("reached max attempts %d to dial", e.Max) +} + +type ErrTooEarlyToDial struct { + BackoffDuration time.Duration + LastDialed time.Time +} + +func (e ErrTooEarlyToDial) Error() string { + return fmt.Sprintf( + "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)", + e.BackoffDuration, e.LastDialed, time.Since(e.LastDialed)) +} + +type ErrFailedToDial struct { + TotalAttempts int + Err error +} + +func (e ErrFailedToDial) Error() string { + return fmt.Sprintf("failed to dial after %d attempts: %v", e.TotalAttempts, e.Err) +} + +func (e ErrFailedToDial) Unwrap() error { return e.Err } + +type ErrSeedNodeConfig struct { + Err error +} + +func (e ErrSeedNodeConfig) Error() string { + return fmt.Sprintf("failed to parse seed node config: %v", e.Err) +} + +func (e ErrSeedNodeConfig) Unwrap() error { return e.Err } diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 38eec9636b9..59c642b4783 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -5,7 +5,7 @@ import ( "fmt" "os" - "github.com/cometbft/cometbft/libs/tempfile" + "github.com/cometbft/cometbft/internal/tempfile" ) /* Loading & Saving */ @@ -35,7 +35,7 @@ func (a *addrBook) saveToFile(filePath string) { a.Logger.Error("Failed to save AddrBook to file", "err", err) return } - err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644) + err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0o644) if err != nil { a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) } diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go index a46e682d88b..e408f271ca5 100644 --- a/p2p/pex/known_address.go +++ b/p2p/pex/known_address.go @@ -3,23 +3,24 @@ package pex import ( "time" - "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" ) // knownAddress tracks information about a known network address // that is used to determine how viable an address is. 
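Editor's note: the dial errors above are now exported structs, and `ErrFailedToDial`/`ErrSeedNodeConfig` implement `Unwrap`. A small sketch, copying `ErrFailedToDial` as declared in the hunk, showing how `errors.As` can still reach the underlying cause through the wrapper:

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// Copy of the exported error type added above, shown in isolation.
type ErrFailedToDial struct {
	TotalAttempts int
	Err           error
}

func (e ErrFailedToDial) Error() string {
	return fmt.Sprintf("failed to dial after %d attempts: %v", e.TotalAttempts, e.Err)
}

// Unwrap exposes the cause to errors.Is and errors.As.
func (e ErrFailedToDial) Unwrap() error { return e.Err }

func main() {
	cause := &net.OpError{Op: "dial", Net: "tcp", Err: errors.New("connection refused")}
	var err error = ErrFailedToDial{TotalAttempts: 3, Err: cause}

	fmt.Println(err)

	// errors.As walks the Unwrap chain, so callers can still inspect the
	// network-level cause behind the structured dial error.
	var opErr *net.OpError
	if errors.As(err, &opErr) {
		fmt.Println("underlying op:", opErr.Op)
	}
}
```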
type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Buckets []int `json:"buckets"` - Attempts int32 `json:"attempts"` - BucketType byte `json:"bucket_type"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - LastBanTime time.Time `json:"last_ban_time"` + Addr *na.NetAddr `json:"addr"` + Src *na.NetAddr `json:"src"` + Buckets []int `json:"buckets"` + Attempts int32 `json:"attempts"` + BucketType byte `json:"bucket_type"` + LastAttempt time.Time `json:"last_attempt"` + LastSuccess time.Time `json:"last_success"` + LastBanTime time.Time `json:"last_ban_time"` } -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { +func newKnownAddress(addr *na.NetAddr, src *na.NetAddr) *knownAddress { return &knownAddress{ Addr: addr, Src: src, @@ -30,7 +31,7 @@ func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { } } -func (ka *knownAddress) ID() p2p.ID { +func (ka *knownAddress) ID() nodekey.ID { return ka.Addr.ID } diff --git a/p2p/pex/params.go b/p2p/pex/params.go index 29b4d45ab27..ac6c4ba4822 100644 --- a/p2p/pex/params.go +++ b/p2p/pex/params.go @@ -50,6 +50,6 @@ const ( minGetSelection = 32 // max addresses returned by GetSelection - // NOTE: this must match "maxMsgSize" + // NOTE: this must match "maxMsgSize". maxGetSelection = 250 ) diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 0457df2c626..49228046b85 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -6,39 +6,41 @@ import ( "sync" "time" - "github.com/cometbft/cometbft/libs/cmap" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + "github.com/cometbft/cometbft/internal/cmap" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/libs/service" "github.com/cometbft/cometbft/p2p" - "github.com/cometbft/cometbft/p2p/conn" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) type Peer = p2p.Peer const ( - // PexChannel is a channel for PEX messages + // PexChannel is a channel for PEX messages. PexChannel = byte(0x00) - // over-estimate of max NetAddress size + // over-estimate of max na.NetAddr size // hexID (40) + IP (16) + Port (2) + Name (100) ... // NOTE: dont use massive DNS name .. maxAddressSize = 256 // NOTE: amplificaiton factor! - // small request results in up to maxMsgSize response + // small request results in up to maxMsgSize response. maxMsgSize = maxAddressSize * maxGetSelection - // ensure we have enough peers + // ensure we have enough peers. defaultEnsurePeersPeriod = 30 * time.Second - // Seed/Crawler constants + // Seed/Crawler constants. // minTimeBetweenCrawls is a minimum time between attempts to crawl a peer. minTimeBetweenCrawls = 2 * time.Minute - // check some peers every this + // check some peers every this. crawlPeerPeriod = 30 * time.Second maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h) @@ -48,31 +50,14 @@ const ( // untrusted. biasToSelectNewPeers = 30 // 70 to select good peers - // if a peer is marked bad, it will be banned for at least this time period + // if a peer is marked bad, it will be banned for at least this time period. 
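Editor's note: params.go keeps `maxGetSelection` in lockstep with `maxMsgSize`, and the pex_reactor.go constants above define the amplification bound the NOTE in the code warns about. A quick check of the arithmetic, with the constants copied from the hunks:

```go
package main

import "fmt"

// Constants copied from the hunks above (pex_reactor.go and params.go).
const (
	maxAddressSize  = 256 // over-estimate of one serialized address
	maxGetSelection = 250 // max addresses returned by GetSelection
	maxMsgSize      = maxAddressSize * maxGetSelection
)

func main() {
	// A tiny pexRequest can trigger a response of up to maxMsgSize bytes;
	// this is the amplification factor the NOTE in the code warns about.
	fmt.Printf("worst-case PEX response: %d bytes (~%.1f KiB)\n",
		maxMsgSize, float64(maxMsgSize)/1024)
}
```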
defaultBanTime = 24 * time.Hour ) -type errMaxAttemptsToDial struct{} - -func (e errMaxAttemptsToDial) Error() string { - return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) -} - -type errTooEarlyToDial struct { - backoffDuration time.Duration - lastDialed time.Time -} - -func (e errTooEarlyToDial) Error() string { - return fmt.Sprintf( - "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)", - e.backoffDuration, e.lastDialed, time.Since(e.lastDialed)) -} - // Reactor handles PEX (peer exchange) and ensures that an // adequate number of peers are connected to the switch. // -// It uses `AddrBook` (address book) to store `NetAddress`es of the peers. +// It uses `AddrBook` (address book) to store `na.NetAddr`es of the peers. // // ## Preventing abuse // @@ -83,18 +68,20 @@ type Reactor struct { book AddrBook config *ReactorConfig + ensurePeersCh chan struct{} // Wakes up ensurePeersRoutine() ensurePeersPeriod time.Duration // TODO: should go in the config + peersRoutineWg sync.WaitGroup // maps to prevent abuse requestsSent *cmap.CMap // ID->struct{}: unanswered send requests lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us - seedAddrs []*p2p.NetAddress + seedAddrs []*na.NetAddr attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} // seed/crawled mode fields - crawlPeerInfos map[p2p.ID]crawlPeerInfo + crawlPeerInfos map[nodekey.ID]crawlPeerInfo } func (r *Reactor) minReceiveRequestInterval() time.Duration { @@ -131,19 +118,20 @@ func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { r := &Reactor{ book: b, config: config, + ensurePeersCh: make(chan struct{}), ensurePeersPeriod: defaultEnsurePeersPeriod, requestsSent: cmap.NewCMap(), lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[p2p.ID]crawlPeerInfo), + crawlPeerInfos: make(map[nodekey.ID]crawlPeerInfo), } r.BaseReactor = *p2p.NewBaseReactor("PEX", r) return r } -// OnStart implements BaseService +// OnStart implements BaseService. func (r *Reactor) OnStart() error { err := r.book.Start() - if err != nil && err != service.ErrAlreadyStarted { + if err != nil && !errors.Is(err, service.ErrAlreadyStarted) { return err } @@ -151,11 +139,12 @@ func (r *Reactor) OnStart() error { if err != nil { return err } else if numOnline == 0 && r.book.Empty() { - return errors.New("address book is empty and couldn't resolve any seed nodes") + return ErrEmptyAddressBook } r.seedAddrs = seedAddrs + r.peersRoutineWg.Add(1) // Check if this node should run // in seed/crawler mode if r.config.SeedMode { @@ -166,22 +155,27 @@ func (r *Reactor) OnStart() error { return nil } -// OnStop implements BaseService -func (r *Reactor) OnStop() { +// Stop overrides `Service.Stop()`. +func (r *Reactor) Stop() error { + if err := r.BaseReactor.Stop(); err != nil { + return err + } if err := r.book.Stop(); err != nil { - r.Logger.Error("Error stopping address book", "err", err) + return fmt.Errorf("can't stop address book: %w", err) } + r.peersRoutineWg.Wait() + return nil } -// GetChannels implements Reactor -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { - return []*conn.ChannelDescriptor{ - { +// StreamDescriptors implements Reactor. 
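Editor's note: the reactor now overrides `Stop()` and pairs it with `peersRoutineWg`: `OnStart` registers the background routine before spawning it, the routine defers `Done()`, and `Stop` blocks until it has fully exited. A minimal sketch of that shutdown pattern under hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// reactor is a hypothetical stand-in showing the WaitGroup lifecycle
// the diff adds around ensurePeersRoutine/crawlPeersRoutine.
type reactor struct {
	wg   sync.WaitGroup
	quit chan struct{}
}

func (r *reactor) OnStart() {
	r.quit = make(chan struct{})
	r.wg.Add(1) // registered before the goroutine starts, as in the hunk above
	go r.peersRoutine()
}

func (r *reactor) Stop() {
	close(r.quit)
	r.wg.Wait() // Stop now blocks until the routine has exited
	fmt.Println("stopped cleanly")
}

func (r *reactor) peersRoutine() {
	defer r.wg.Done()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// periodic work would go here
		case <-r.quit:
			return
		}
	}
}

func main() {
	r := &reactor{}
	r.OnStart()
	time.Sleep(30 * time.Millisecond)
	r.Stop()
}
```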
+func (*Reactor) StreamDescriptors() []p2p.StreamDescriptor { + return []p2p.StreamDescriptor{ + &tcpconn.ChannelDescriptor{ ID: PexChannel, Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, - MessageType: &tmp2p.Message{}, + MessageTypeI: &tmp2p.Message{}, }, } } @@ -198,9 +192,9 @@ func (r *Reactor) AddPeer(p Peer) { } } else { // inbound peer is its own source - addr, err := p.NodeInfo().NetAddress() + addr, err := p.NodeInfo().NetAddr() if err != nil { - r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p) + r.Logger.Error("Failed to get peer NetAddr", "err", err, "peer", p) return } @@ -215,7 +209,7 @@ func (r *Reactor) AddPeer(p Peer) { } // RemovePeer implements Reactor by resetting peer's requests info. -func (r *Reactor) RemovePeer(p Peer, _ interface{}) { +func (r *Reactor) RemovePeer(p Peer, _ any) { id := string(p.ID()) r.requestsSent.Delete(id) r.lastReceivedRequests.Delete(id) @@ -264,7 +258,6 @@ func (r *Reactor) Receive(e p2p.Envelope) { e.Src.FlushStop() r.Switch.StopPeerGracefully(e.Src) }() - } else { // Check we're not receiving requests too frequently. if err := r.receiveRequest(e.Src); err != nil { @@ -277,7 +270,7 @@ func (r *Reactor) Receive(e p2p.Envelope) { case *tmp2p.PexAddrs: // If we asked for addresses, add them to the book - addrs, err := p2p.NetAddressesFromProto(msg.Addrs) + addrs, err := na.AddrsFromProtos(msg.Addrs) if err != nil { r.Switch.StopPeerForError(e.Src, err) r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) @@ -286,7 +279,7 @@ func (r *Reactor) Receive(e p2p.Envelope) { err = r.ReceiveAddrs(addrs, e.Src) if err != nil { r.Switch.StopPeerForError(e.Src, err) - if err == ErrUnsolicitedList { + if errors.Is(err, ErrUnsolicitedList) { r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) } return @@ -297,7 +290,7 @@ func (r *Reactor) Receive(e p2p.Envelope) { } } -// enforces a minimum amount of time between requests +// enforces a minimum amount of time between requests. func (r *Reactor) receiveRequest(src Peer) error { id := string(src.ID()) v := r.lastReceivedRequests.Get(id) @@ -319,13 +312,12 @@ func (r *Reactor) receiveRequest(src Peer) error { now := time.Now() minInterval := r.minReceiveRequestInterval() if now.Sub(lastReceived) < minInterval { - return fmt.Errorf( - "peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", - src.ID(), - lastReceived, - now, - minInterval, - ) + return ErrReceivedPEXRequestTooSoon{ + Peer: src.ID(), + LastReceived: lastReceived, + Now: now, + MinInterval: minInterval, + } } r.lastReceivedRequests.Set(id, now) return nil @@ -346,29 +338,21 @@ func (r *Reactor) RequestAddrs(p Peer) { }) } -// ReceiveAddrs adds the given addrs to the addrbook if theres an open +// ReceiveAddrs adds the given addrs to the addrbook if there's an open // request for this peer and deletes the open request. // If there's no open request for the src peer, it returns an error. 
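Editor's note: `receiveRequest` above swaps its formatted error for the structured `ErrReceivedPEXRequestTooSoon`, but the rate-limit logic is unchanged: one PEX request per peer per `minInterval`. A compact sketch of that check; the `requestLimiter` type is hypothetical, and the real code keeps the timestamps in a concurrent map:

```go
package main

import (
	"fmt"
	"time"
)

// requestLimiter is a hypothetical single-threaded sketch of the check in
// receiveRequest: requests arriving faster than minInterval are rejected,
// which the real reactor punishes by disconnecting the peer.
type requestLimiter struct {
	lastReceived map[string]time.Time
	minInterval  time.Duration
}

func (l *requestLimiter) receiveRequest(peerID string) error {
	now := time.Now()
	if last, ok := l.lastReceived[peerID]; ok && now.Sub(last) < l.minInterval {
		return fmt.Errorf("received PEX request from peer %v too soon (last received %v, min interval %v)",
			peerID, last, l.minInterval)
	}
	l.lastReceived[peerID] = now
	return nil
}

func main() {
	l := &requestLimiter{
		lastReceived: make(map[string]time.Time),
		minInterval:  50 * time.Millisecond,
	}
	fmt.Println("first: ", l.receiveRequest("peer1")) // accepted
	fmt.Println("second:", l.receiveRequest("peer1")) // rejected: too soon
	time.Sleep(60 * time.Millisecond)
	fmt.Println("third: ", l.receiveRequest("peer1")) // accepted again
}
```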
-func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { +func (r *Reactor) ReceiveAddrs(addrs []*na.NetAddr, src Peer) error { id := string(src.ID()) if !r.requestsSent.Has(id) { return ErrUnsolicitedList } r.requestsSent.Delete(id) - srcAddr, err := src.NodeInfo().NetAddress() + srcAddr, err := src.NodeInfo().NetAddr() if err != nil { return err } - srcIsSeed := false - for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - srcIsSeed = true - break - } - } - for _, netAddr := range addrs { // NOTE: we check netAddr validity and routability in book#AddAddress. err = r.book.AddAddress(netAddr, srcAddr) @@ -378,21 +362,16 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // peer here too? continue } + } - // If this address came from a seed node, try to connect to it without - // waiting (#2093) - if srcIsSeed { - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Debug(err.Error(), "addr", addr) - } - } - }(netAddr) + // Try to connect to addresses coming from a seed node without waiting (#2093) + for _, seedAddr := range r.seedAddrs { + if seedAddr.Equals(srcAddr) { + select { + case r.ensurePeersCh <- struct{}{}: + default: + } + break } } @@ -400,10 +379,10 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { } // SendAddrs sends addrs to the peer. -func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { +func (*Reactor) SendAddrs(p Peer, netAddrs []*na.NetAddr) { e := p2p.Envelope{ ChannelID: PexChannel, - Message: &tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)}, + Message: &tmp2p.PexAddrs{Addrs: na.AddrsToProtos(netAddrs)}, } p.Send(e) } @@ -413,8 +392,10 @@ func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) { r.ensurePeersPeriod = d } -// Ensures that sufficient peers are connected. (continuous) +// Ensures that sufficient peers are connected. (continuous). func (r *Reactor) ensurePeersRoutine() { + defer r.peersRoutineWg.Done() + var ( seed = cmtrand.NewRand() jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) @@ -433,12 +414,16 @@ func (r *Reactor) ensurePeersRoutine() { // fire periodically ticker := time.NewTicker(r.ensurePeersPeriod) + defer ticker.Stop() for { select { case <-ticker.C: r.ensurePeers() + case <-r.ensurePeersCh: + r.ensurePeers() + case <-r.book.Quit(): + return case <-r.Quit(): - ticker.Stop() return } } @@ -471,11 +456,15 @@ func (r *Reactor) ensurePeers() { // NOTE: range here is [10, 90]. Too high ? 
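Editor's note: `ReceiveAddrs` above no longer spawns ad-hoc dial goroutines for seed-provided addresses; it nudges `ensurePeersRoutine` through `ensurePeersCh` with a non-blocking send. A runnable sketch of that wakeup pattern:

```go
package main

import (
	"fmt"
	"time"
)

// Sketch of the ensurePeersCh wakeup the diff adds: a periodic worker that
// can also be nudged to run immediately, without the caller ever blocking.
func main() {
	ensurePeersCh := make(chan struct{})
	quit := make(chan struct{})
	done := make(chan struct{})

	go func() {
		defer close(done)
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Println("periodic ensurePeers")
			case <-ensurePeersCh:
				fmt.Println("ensurePeers woken early by seed addresses")
			case <-quit:
				return
			}
		}
	}()

	time.Sleep(10 * time.Millisecond)
	// Non-blocking send, as in ReceiveAddrs: if the routine is already busy,
	// the nudge is simply dropped instead of blocking the receive path.
	select {
	case ensurePeersCh <- struct{}{}:
	default:
	}

	time.Sleep(50 * time.Millisecond)
	close(quit)
	<-done
}
```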
newBias := cmtmath.MinInt(out, 8)*10 + 10 - toDial := make(map[p2p.ID]*p2p.NetAddress) + toDial := make(map[nodekey.ID]*na.NetAddr) // Try maxAttempts times to pick numToDial addresses to dial maxAttempts := numToDial * 3 for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ { + if !r.IsRunning() || !r.book.IsRunning() { + return + } + try := r.book.PickAddress(newBias) if try == nil { continue @@ -494,11 +483,11 @@ func (r *Reactor) ensurePeers() { // Dial picked addresses for _, addr := range toDial { - go func(addr *p2p.NetAddress) { + go func(addr *na.NetAddr) { err := r.dialPeer(addr) if err != nil { switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial: + case ErrMaxAttemptsToDial, ErrTooEarlyToDial: r.Logger.Debug(err.Error(), "addr", addr) default: r.Logger.Debug(err.Error(), "addr", addr) @@ -513,12 +502,9 @@ func (r *Reactor) ensurePeers() { } if r.book.NeedMoreAddrs() { - // 1) Pick a random peer and ask for more. - peers := r.Switch.Peers().List() - peersCount := len(peers) - if peersCount > 0 { - peer := peers[cmtrand.Int()%peersCount] + peer := r.Switch.Peers().Random() + if peer != nil { r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) r.RequestAddrs(peer) } @@ -533,20 +519,20 @@ func (r *Reactor) ensurePeers() { } } -func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { +func (r *Reactor) dialAttemptsInfo(addr *na.NetAddr) (attempts int, lastDialed time.Time) { _attempts, ok := r.attemptsToDial.Load(addr.DialString()) if !ok { - return + return 0, time.Time{} } atd := _attempts.(_attemptsToDial) return atd.number, atd.lastDialed } -func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { +func (r *Reactor) dialPeer(addr *na.NetAddr) error { attempts, lastDialed := r.dialAttemptsInfo(addr) if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { r.book.MarkBad(addr, defaultBanTime) - return errMaxAttemptsToDial{} + return ErrMaxAttemptsToDial{Max: maxAttemptsToDial} } // exponential backoff if it's not our first attempt to dial given address @@ -556,7 +542,7 @@ func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) sinceLastDialed := time.Since(lastDialed) if sinceLastDialed < backoffDuration { - return errTooEarlyToDial{backoffDuration, lastDialed} + return ErrTooEarlyToDial{backoffDuration, lastDialed} } } @@ -574,7 +560,7 @@ func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { default: r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) } - return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) + return ErrFailedToDial{attempts + 1, err} } // cleanup any history @@ -583,7 +569,7 @@ func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { } // maxBackoffDurationForPeer caps the backoff duration for persistent peers. -func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration { +func (r *Reactor) maxBackoffDurationForPeer(addr *na.NetAddr, planned time.Duration) time.Duration { if r.config.PersistentPeersMaxDialPeriod > 0 && planned > r.config.PersistentPeersMaxDialPeriod && r.Switch.IsPeerPersistent(addr) { @@ -597,25 +583,25 @@ func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.D // return err if user provided any badly formatted seed addresses. // Doesn't error if the seed node can't be reached. 
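Editor's note: the exponential-backoff formula used by `dialPeer` sits outside this hunk, so the following is an assumption rather than the shipped code: a power-of-two-seconds schedule reproduces the figures in the `maxAttemptsToDial = 16` comment earlier in the file ("~35h in total", last attempt around 18h):

```go
package main

import (
	"fmt"
	"time"
)

// Assumed schedule: backoff of 2^attempt seconds per attempt. This matches
// the comment on maxAttemptsToDial but is not lifted from the diff itself.
func main() {
	const maxAttemptsToDial = 16

	var total time.Duration
	for attempt := 1; attempt <= maxAttemptsToDial; attempt++ {
		total += time.Duration(1<<uint(attempt)) * time.Second
	}
	last := time.Duration(1<<uint(maxAttemptsToDial)) * time.Second

	fmt.Printf("last backoff ≈ %v, total ≈ %v\n", last, total)
	// last backoff ≈ 18h12m16s, total ≈ 36h24m30s
}
```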
// numOnline returns -1 if no seed nodes were in the initial configuration. -func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) { +func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*na.NetAddr, err error) { lSeeds := len(r.config.Seeds) if lSeeds == 0 { return -1, nil, nil } - netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds) + netAddrs, errs := na.NewFromStrings(r.config.Seeds) numOnline = lSeeds - len(errs) for _, err := range errs { switch e := err.(type) { - case p2p.ErrNetAddressLookup: + case na.ErrLookup: r.Logger.Error("Connecting to seed failed", "err", e) default: - return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) + return 0, nil, ErrSeedNodeConfig{Err: err} } } return numOnline, netAddrs, nil } -// randomly dial seeds until we connect to one or exhaust them +// randomly dial seeds until we connect to one or exhaust them. func (r *Reactor) dialSeeds() { perm := cmtrand.Perm(len(r.seedAddrs)) // perm := r.Switch.rng.Perm(lSeeds) @@ -638,7 +624,7 @@ func (r *Reactor) dialSeeds() { // AttemptsToDial returns the number of attempts to dial specific address. It // returns 0 if never attempted or successfully connected. -func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { +func (r *Reactor) AttemptsToDial(addr *na.NetAddr) int { lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) if attempted { return lAttempts.(_attemptsToDial).number @@ -646,12 +632,14 @@ func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { return 0 } -//---------------------------------------------------------- +// ---------------------------------------------------------- // Explores the network searching for more peers. (continuous) // Seed/Crawler Mode causes this node to quickly disconnect // from peers, except other seed nodes. func (r *Reactor) crawlPeersRoutine() { + defer r.peersRoutineWg.Done() + // If we have any seed nodes, consult them first if len(r.seedAddrs) > 0 { r.dialSeeds() @@ -662,13 +650,15 @@ func (r *Reactor) crawlPeersRoutine() { // Fire periodically ticker := time.NewTicker(crawlPeerPeriod) - + defer ticker.Stop() for { select { case <-ticker.C: r.attemptDisconnects() r.crawlPeers(r.book.GetSelection()) r.cleanupCrawlPeerInfos() + case <-r.book.Quit(): + return case <-r.Quit(): return } @@ -685,13 +675,13 @@ func (r *Reactor) nodeHasSomePeersOrDialingAny() bool { // crawlPeerInfo handles temporary data needed for the network crawling // performed during seed/crawler mode. type crawlPeerInfo struct { - Addr *p2p.NetAddress `json:"addr"` + Addr *na.NetAddr `json:"addr"` // The last time we crawled the peer or attempted to do so. LastCrawled time.Time `json:"last_crawled"` } // crawlPeers will crawl the network looking for new peer addresses. 
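Editor's note: `dialSeeds` walks the configured seeds in a random permutation (`cmtrand.Perm`) and stops at the first successful connection. A sketch of that selection strategy with a stand-in `dial` function:

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// dialSeeds sketches the strategy in the hunk above: try seeds in random
// order, one success is enough. The dial callback is a stand-in.
func dialSeeds(seeds []string, dial func(string) error) error {
	perm := rand.Perm(len(seeds))
	for _, i := range perm {
		if err := dial(seeds[i]); err == nil {
			return nil // connected to one seed; stop here
		}
	}
	return errors.New("couldn't connect to any seed node")
}

func main() {
	seeds := []string{"seed-a:26656", "seed-b:26656", "seed-c:26656"}
	err := dialSeeds(seeds, func(addr string) error {
		fmt.Println("dialing", addr)
		if addr == "seed-b:26656" {
			return nil
		}
		return errors.New("unreachable")
	})
	fmt.Println("result:", err)
}
```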
-func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { +func (r *Reactor) crawlPeers(addrs []*na.NetAddr) { now := time.Now() for _, addr := range addrs { @@ -711,7 +701,7 @@ func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { err := r.dialPeer(addr) if err != nil { switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: + case ErrMaxAttemptsToDial, ErrTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: r.Logger.Debug(err.Error(), "addr", addr) default: r.Logger.Debug(err.Error(), "addr", addr) @@ -740,9 +730,9 @@ func (r *Reactor) cleanupCrawlPeerInfos() { } } -// attemptDisconnects checks if we've been with each peer long enough to disconnect +// attemptDisconnects checks if we've been with each peer long enough to disconnect. func (r *Reactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { + for _, peer := range r.Switch.Peers().Copy() { if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { continue } @@ -753,7 +743,7 @@ func (r *Reactor) attemptDisconnects() { } } -func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { +func markAddrInBookBasedOnErr(addr *na.NetAddr, book AddrBook, err error) { // TODO: detect more "bad peer" scenarios switch err.(type) { case p2p.ErrSwitchAuthenticationFailure: diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 03134995954..e06810e519f 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -12,11 +12,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/p2p/mock" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/types" ) var cfg *config.P2PConfig @@ -32,7 +34,7 @@ func TestPEXReactorBasic(t *testing.T) { defer teardownReactor(book) assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) + assert.NotEmpty(t, r.StreamDescriptors()) } func TestPEXReactorAddRemovePeer(t *testing.T) { @@ -66,19 +68,19 @@ func TestPEXReactorAddRemovePeer(t *testing.T) { // peers have different IP addresses, they all have the same underlying remote // IP: 127.0.0.1. 
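Editor's note: seed mode throttles crawling with `crawlPeerInfo.LastCrawled` and the `minTimeBetweenCrawls` constant from earlier in the file. A sketch of that bookkeeping; the `crawler` type here is hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

const minTimeBetweenCrawls = 2 * time.Minute // same constant as in the diff

// crawler is a hypothetical sketch of the crawl bookkeeping: remember when
// each address was last crawled and skip it if not enough time has passed.
type crawler struct {
	lastCrawled map[string]time.Time
}

func (c *crawler) shouldCrawl(addr string, now time.Time) bool {
	if last, ok := c.lastCrawled[addr]; ok && now.Sub(last) < minTimeBetweenCrawls {
		return false // crawled too recently
	}
	c.lastCrawled[addr] = now
	return true
}

func main() {
	c := &crawler{lastCrawled: make(map[string]time.Time)}
	now := time.Now()
	fmt.Println(c.shouldCrawl("peer1", now))                     // true
	fmt.Println(c.shouldCrawl("peer1", now.Add(30*time.Second))) // false
	fmt.Println(c.shouldCrawl("peer1", now.Add(3*time.Minute)))  // true
}
```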
func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) + n := 3 + switches := make([]*p2p.Switch, n) // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) - books := make([]AddrBook, N) + books := make([]AddrBook, n) logger := log.TestingLogger() // create switches - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { switches[i] = p2p.MakeSwitch(cfg, i, func(i int, sw *p2p.Switch) *p2p.Switch { books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) books[i].SetLogger(logger.With("pex", i)) @@ -89,14 +91,14 @@ func TestPEXReactorRunning(t *testing.T) { r := NewReactor(books[i], &ReactorConfig{}) r.SetLogger(logger.With("pex", i)) r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) + sw.AddReactor("PEX", r) return sw }) } addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NetAddress() + addr := switches[otherSwitchIndex].NetAddr() err := books[switchIndex].AddAddress(addr, addr) require.NoError(t, err) } @@ -107,10 +109,10 @@ func TestPEXReactorRunning(t *testing.T) { for _, sw := range switches { err := sw.Start() // start switch and reactors - require.Nil(t, err) + require.NoError(t, err) } - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) + assertPeersWithTimeout(t, switches, 10*time.Second, n-1) // stop them for _, s := range switches { @@ -204,20 +206,20 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { func TestCheckSeeds(t *testing.T) { // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) // 1. test creating peer with no seeds works peerSwitch := testCreateDefaultPeer(dir, 0) - require.Nil(t, peerSwitch.Start()) + require.NoError(t, peerSwitch.Start()) peerSwitch.Stop() //nolint:errcheck // ignore for tests // 2. create seed - seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) + seed := testCreateSeed(dir, 1, []*na.NetAddr{}, []*na.NetAddr{}) // 3. test create peer with online seed works peerSwitch = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peerSwitch.Start()) + require.NoError(t, peerSwitch.Start()) peerSwitch.Stop() //nolint:errcheck // ignore for tests // 4. test create peer with all seeds having unresolvable DNS fails @@ -236,67 +238,85 @@ func TestCheckSeeds(t *testing.T) { Seeds: []string{ "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String(), + seed.NetAddr().String(), }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peerSwitch.Start()) + require.NoError(t, peerSwitch.Start()) peerSwitch.Stop() //nolint:errcheck // ignore for tests } func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) // 1. create seed - seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - require.Nil(t, seed.Start()) + seed := testCreateSeed(dir, 0, []*na.NetAddr{}, []*na.NetAddr{}) + require.NoError(t, seed.Start()) defer seed.Stop() //nolint:errcheck // ignore for tests // 2. create usual peer with only seed configured. 
peer := testCreatePeerWithSeed(dir, 1, seed) - require.Nil(t, peer.Start()) + require.NoError(t, peer.Start()) defer peer.Stop() //nolint:errcheck // ignore for tests // 3. check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) + assertPeersWithTimeout(t, []*p2p.Switch{peer}, 3*time.Second, 1) } func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - defer peerSwitch.Stop() //nolint:errcheck // ignore for tests + // Default is 10, we need one connection for the seed node. + cfg.MaxNumOutboundPeers = 2 + + var id int + var knownAddrs []*na.NetAddr + + // 1. Create some peers + for id = 0; id < cfg.MaxNumOutboundPeers+1; id++ { + peer := testCreateDefaultPeer(dir, id) + require.NoError(t, peer.Start()) + addr := peer.NetAddr() + defer peer.Stop() //nolint:errcheck // ignore for tests - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) + knownAddrs = append(knownAddrs, addr) + } + + // 2. Create seed node which knows about the previous peers + seed := testCreateSeed(dir, id, knownAddrs, knownAddrs) + require.NoError(t, seed.Start()) defer seed.Stop() //nolint:errcheck // ignore for tests - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - defer secondPeer.Stop() //nolint:errcheck // ignore for tests + // 3. Create a node with only seed configured. + id++ + node := testCreatePeerWithSeed(dir, id, seed) + require.NoError(t, node.Start()) + defer node.Stop() //nolint:errcheck // ignore for tests - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) + // 4. Check that the node connects to seed immediately + assertPeersWithTimeout(t, []*p2p.Switch{node}, 3*time.Second, 1) - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) + // 5. Check that the node connects to the peers reported by the seed node + assertPeersWithTimeout(t, []*p2p.Switch{node}, 10*time.Second, cfg.MaxNumOutboundPeers) + + // 6. Assert that the configured maximum number of inbound/outbound peers + // are respected, see https://github.com/cometbft/cometbft/issues/486 + outbound, inbound, dialing := node.NumPeers() + assert.LessOrEqual(t, inbound, cfg.MaxNumInboundPeers) + assert.LessOrEqual(t, outbound, cfg.MaxNumOutboundPeers) + assert.Zero(t, dialing) } func TestPEXReactorSeedMode(t *testing.T) { // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} @@ -316,7 +336,7 @@ func TestPEXReactorSeedMode(t *testing.T) { defer peerSwitch.Stop() //nolint:errcheck // ignore for tests // 1. 
Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) + pexR.crawlPeers([]*na.NetAddr{peerSwitch.NetAddr()}) assert.Equal(t, 1, sw.Peers().Size()) assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) @@ -335,7 +355,7 @@ func TestPEXReactorSeedMode(t *testing.T) { func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} @@ -354,11 +374,11 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { require.NoError(t, peerSwitch.Start()) defer peerSwitch.Stop() //nolint:errcheck // ignore for tests - err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}) + err = sw.AddPersistentPeers([]string{peerSwitch.NetAddr().String()}) require.NoError(t, err) // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) + pexR.crawlPeers([]*na.NetAddr{peerSwitch.NetAddr()}) assert.Equal(t, 1, sw.Peers().Size()) assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) @@ -373,7 +393,7 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) pexR, book := createReactor(&ReactorConfig{SeedMode: true}) @@ -393,7 +413,7 @@ func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { // imitate maxAttemptsToDial reached pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) - pexR.crawlPeers([]*p2p.NetAddress{addr}) + pexR.crawlPeers([]*na.NetAddr{addr}) assert.False(t, book.HasAddress(addr)) } @@ -404,19 +424,19 @@ func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { // with FlushStop. Before a fix, this non-deterministically reproduced // https://github.com/tendermint/tendermint/issues/3231. func TestPEXReactorSeedModeFlushStop(t *testing.T) { - N := 2 - switches := make([]*p2p.Switch, N) + n := 2 + switches := make([]*p2p.Switch, n) // directory to store address books dir, err := os.MkdirTemp("", "pex_reactor") - require.Nil(t, err) + require.NoError(t, err) defer os.RemoveAll(dir) - books := make([]AddrBook, N) + books := make([]AddrBook, n) logger := log.TestingLogger() // create switches - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { switches[i] = p2p.MakeSwitch(cfg, i, func(i int, sw *p2p.Switch) *p2p.Switch { books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) books[i].SetLogger(logger.With("pex", i)) @@ -440,14 +460,14 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { for _, sw := range switches { err := sw.Start() // start switch and reactors - require.Nil(t, err) + require.NoError(t, err) } reactor := switches[0].Reactors()["pex"].(*Reactor) peerID := switches[1].NodeInfo().ID() - err = switches[1].DialPeerWithAddress(switches[0].NetAddress()) - assert.NoError(t, err) + err = switches[1].DialPeerWithAddress(switches[0].NetAddr()) + require.NoError(t, err) // sleep up to a second while waiting for the peer to send us a message. 
// this isn't perfect since it's possible the peer sends us a msg and we FlushStop @@ -462,7 +482,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { // by now the FlushStop should have happened. Try stopping the peer. // it should be safe to do this. - peers := switches[0].Peers().List() + peers := switches[0].Peers().Copy() for _, peer := range peers { err := peer.Stop() require.NoError(t, err) @@ -537,9 +557,12 @@ func TestPEXReactorDialPeer(t *testing.T) { func assertPeersWithTimeout( t *testing.T, switches []*p2p.Switch, - checkPeriod, timeout time.Duration, + timeout time.Duration, nPeers int, ) { + t.Helper() + checkPeriod := 10 * time.Millisecond + var ( ticker = time.NewTicker(checkPeriod) remaining = timeout @@ -579,55 +602,57 @@ func assertPeersWithTimeout( } } -// Creates a peer with the provided config +// Creates a peer with the provided config. func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch { - peer := p2p.MakeSwitch( + return p2p.MakeSwitch( cfg, id, - func(i int, sw *p2p.Switch) *p2p.Switch { + func(_ int, sw *p2p.Switch) *p2p.Switch { + logger := log.TestingLogger().With("pex", id) + book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) + book.SetLogger(logger) sw.SetAddrBook(book) - sw.SetLogger(log.TestingLogger()) + r := NewReactor(book, config) + r.SetLogger(logger) + + sw.SetLogger(logger) + sw.AddReactor("PEX", r) - r := NewReactor( - book, - config, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) return sw }, ) - return peer } -// Creates a peer with the default config +// Creates a peer with the default config. func testCreateDefaultPeer(dir string, id int) *p2p.Switch { return testCreatePeerWithConfig(dir, id, &ReactorConfig{}) } // Creates a seed which knows about the provided addresses / source address pairs. -// Starting and stopping the seed is left to the caller -func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch { +// Starting and stopping the seed is left to the caller. +func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*na.NetAddr) *p2p.Switch { seed := p2p.MakeSwitch( cfg, id, - func(i int, sw *p2p.Switch) *p2p.Switch { + func(_ int, sw *p2p.Switch) *p2p.Switch { + logger := log.TestingLogger().With("seed", id) + book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) - book.SetLogger(log.TestingLogger()) + book.SetLogger(logger) for j := 0; j < len(knownAddrs); j++ { book.AddAddress(knownAddrs[j], srcAddrs[j]) //nolint:errcheck // ignore for tests book.MarkGood(knownAddrs[j].ID) } sw.SetAddrBook(book) - sw.SetLogger(log.TestingLogger()) - r := NewReactor(book, &ReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) + r.SetLogger(logger) + + sw.SetLogger(logger) + sw.AddReactor("PEX", r) + return sw }, ) @@ -635,10 +660,10 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) } // Creates a peer which knows about the provided seed. -// Starting and stopping the peer is left to the caller +// Starting and stopping the peer is left to the caller. 
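Editor's note: `assertPeersWithTimeout` above drops its `checkPeriod` parameter (now hard-coded to 10ms) and gains `t.Helper()`, so failures are attributed to the calling test. A generic sketch of the same polling-assertion shape; `assertEventually` is a hypothetical name:

```go
package pex

import (
	"testing"
	"time"
)

// assertEventually is a hypothetical generic cousin of assertPeersWithTimeout:
// poll a condition on a fixed period until it holds or the timeout elapses.
func assertEventually(t *testing.T, timeout time.Duration, cond func() bool) {
	t.Helper() // failures point at the caller, not at this helper
	checkPeriod := 10 * time.Millisecond

	ticker := time.NewTicker(checkPeriod)
	defer ticker.Stop()
	deadline := time.After(timeout)

	for {
		select {
		case <-ticker.C:
			if cond() {
				return
			}
		case <-deadline:
			t.Fatal("condition not reached before timeout")
		}
	}
}
```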
func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { conf := &ReactorConfig{ - Seeds: []string{seed.NetAddress().String()}, + Seeds: []string{seed.NetAddr().String()}, } return testCreatePeerWithConfig(dir, id, conf) } @@ -654,7 +679,7 @@ func createReactor(conf *ReactorConfig) (r *Reactor, book AddrBook) { r = NewReactor(book, conf) r.SetLogger(log.TestingLogger()) - return + return r, book } func teardownReactor(book AddrBook) { @@ -666,7 +691,7 @@ func teardownReactor(book AddrBook) { } func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) + sw := p2p.MakeSwitch(cfg, 0, func(_ int, sw *p2p.Switch) *p2p.Switch { return sw }) sw.SetLogger(log.TestingLogger()) for _, r := range reactors { sw.AddReactor(r.String(), r) @@ -692,9 +717,7 @@ func TestPexVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - - w := tc.msg.(p2p.Wrapper).Wrap() + w := tc.msg.(types.Wrapper).Wrap() bz, err := proto.Marshal(w) require.NoError(t, err) diff --git a/p2p/switch.go b/p2p/switch.go index 68ad5669b3e..052fb32677b 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -1,34 +1,40 @@ package p2p import ( + "errors" "fmt" "math" - "sync" "time" "github.com/cosmos/gogoproto/proto" "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/libs/cmap" - "github.com/cometbft/cometbft/libs/rand" + "github.com/cometbft/cometbft/internal/cmap" + "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/service" - "github.com/cometbft/cometbft/p2p/conn" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/p2p/transport/tcp" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) const ( // wait a random amount of time from this interval - // before dialing peers or reconnecting to help prevent DoS + // before dialing peers or reconnecting to help prevent DoS. dialRandomizerIntervalMilliseconds = 3000 // repeatedly try to reconnect for a few minutes - // ie. 5 * 20 = 100s + // ie. 5 * 20 = 100s. reconnectAttempts = 20 reconnectInterval = 5 * time.Second // then move into exponential backoff mode for ~1day - // ie. 3**10 = 16hrs + // ie. 3**10 = 16hrs. reconnectBackOffAttempts = 10 reconnectBackOffBaseSeconds = 3 + + defaultFilterTimeout = 5 * time.Second ) // MConnConfig returns an MConnConfig with fields updated @@ -44,18 +50,18 @@ func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { return mConfig } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // An AddrBook represents an address book from the pex package, which is used // to store peer addresses. type AddrBook interface { - AddAddress(addr *NetAddress, src *NetAddress) error - AddPrivateIDs([]string) - AddOurAddress(*NetAddress) - OurAddress(*NetAddress) bool - MarkGood(ID) - RemoveAddress(*NetAddress) - HasAddress(*NetAddress) bool + AddAddress(addr *na.NetAddr, src *na.NetAddr) error + AddPrivateIDs(ids []string) + AddOurAddress(addr *na.NetAddr) + OurAddress(addr *na.NetAddr) bool + MarkGood(id nodekey.ID) + RemoveAddress(addr *na.NetAddr) + HasAddress(addr *na.NetAddr) bool Save() } @@ -63,7 +69,7 @@ type AddrBook interface { // fully setup. 
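Editor's note: the switch.go constants above define the reconnect schedule: a fixed-interval phase followed by base-3 exponential backoff (3^10 s ≈ 16h, roughly a day cumulatively), and a later hunk changes the backoff loop to count `i` from 1. The arithmetic, without the per-attempt random jitter the real code adds:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// Constants copied from the switch.go hunk above.
func main() {
	const (
		reconnectAttempts           = 20
		reconnectInterval           = 5 * time.Second
		reconnectBackOffAttempts    = 10
		reconnectBackOffBaseSeconds = 3
	)

	// Fixed phase: 20 attempts * 5s = ~100s, as the comment says.
	fixed := time.Duration(reconnectAttempts) * reconnectInterval

	var backoff time.Duration
	// The loop later in this diff now counts i from 1 to
	// reconnectBackOffAttempts, so the first backoff wait is 3^1 seconds.
	for i := 1; i <= reconnectBackOffAttempts; i++ {
		backoff += time.Duration(math.Pow(reconnectBackOffBaseSeconds, float64(i))) * time.Second
	}

	fmt.Printf("fixed phase: %v, backoff phase: ~%v\n", fixed, backoff)
}
```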
type PeerFilterFunc func(IPeerSet, Peer) error -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Switch handles peer connections and exposes an API to receive incoming messages // on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one @@ -74,18 +80,18 @@ type Switch struct { config *config.P2PConfig reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor + streamDescs []StreamDescriptor reactorsByCh map[byte]Reactor msgTypeByChID map[byte]proto.Message peers *PeerSet dialing *cmap.CMap reconnecting *cmap.CMap - nodeInfo NodeInfo // our node info - nodeKey *NodeKey // our node privkey + nodeInfo ni.NodeInfo // our node info + nodeKey *nodekey.NodeKey // our node privkey addrBook AddrBook // peers addresses with whom we'll maintain constant connection - persistentPeersAddrs []*NetAddress - unconditionalPeerIDs map[ID]struct{} + persistentPeersAddrs []*na.NetAddr + unconditionalPeerIDs map[nodekey.ID]struct{} transport Transport @@ -95,12 +101,11 @@ type Switch struct { rng *rand.Rand // seed for randomizing dial times and orders metrics *Metrics - mlc *metricsLabelCache } -// NetAddress returns the address the switch is listening on. -func (sw *Switch) NetAddress() *NetAddress { - addr := sw.transport.NetAddress() +// NetAddr returns the address the switch is listening on. +func (sw *Switch) NetAddr() *na.NetAddr { + addr := sw.transport.NetAddr() return &addr } @@ -113,11 +118,10 @@ func NewSwitch( transport Transport, options ...SwitchOption, ) *Switch { - sw := &Switch{ config: cfg, reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), + streamDescs: make([]StreamDescriptor, 0), reactorsByCh: make(map[byte]Reactor), msgTypeByChID: make(map[byte]proto.Message), peers: NewPeerSet(), @@ -126,9 +130,8 @@ func NewSwitch( metrics: NopMetrics(), transport: transport, filterTimeout: defaultFilterTimeout, - persistentPeersAddrs: make([]*NetAddress, 0), - unconditionalPeerIDs: make(map[ID]struct{}), - mlc: newMetricsLabelCache(), + persistentPeersAddrs: make([]*na.NetAddr, 0), + unconditionalPeerIDs: make(map[nodekey.ID]struct{}), } // Ensure we have a completely undeterministic PRNG. @@ -158,21 +161,21 @@ func WithMetrics(metrics *Metrics) SwitchOption { return func(sw *Switch) { sw.metrics = metrics } } -//--------------------------------------------------------------------- +// --------------------------------------------------------------------- // Switch setup // AddReactor adds the given reactor to the switch. // NOTE: Not goroutine safe. func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - for _, chDesc := range reactor.GetChannels() { - chID := chDesc.ID + for _, streamDesc := range reactor.StreamDescriptors() { + id := streamDesc.StreamID() // No two reactors can share the same channel. 
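Editor's note: `AddReactor` (continuing below) now registers `StreamDescriptor`s instead of channel descriptors, but keeps the invariant that no two reactors may claim the same stream ID, panicking at startup on a duplicate. A minimal sketch; `0x00` is `PexChannel` from the pex hunks, while the `MEMPOOL`/`0x30` entry is made up for illustration:

```go
package main

import "fmt"

// registry sketches AddReactor's stream bookkeeping.
type registry struct {
	reactorsByCh map[byte]string // stream ID -> reactor name
}

func (r *registry) addReactor(name string, streamIDs ...byte) {
	for _, id := range streamIDs {
		if existing, ok := r.reactorsByCh[id]; ok {
			// Duplicate claims are programming errors, caught at startup.
			panic(fmt.Sprintf("Stream %X has multiple reactors %v & %v", id, existing, name))
		}
		r.reactorsByCh[id] = name
	}
}

func main() {
	reg := &registry{reactorsByCh: make(map[byte]string)}
	reg.addReactor("PEX", 0x00)     // PexChannel
	reg.addReactor("MEMPOOL", 0x30) // hypothetical second reactor
	fmt.Println("registered:", reg.reactorsByCh)
	// reg.addReactor("ROGUE", 0x00) // would panic: stream 0 already claimed
}
```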
- if sw.reactorsByCh[chID] != nil { - panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) + if sw.reactorsByCh[id] != nil { + panic(fmt.Sprintf("Stream %X has multiple reactors %v & %v", id, sw.reactorsByCh[id], reactor)) } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - sw.msgTypeByChID[chID] = chDesc.MessageType + sw.streamDescs = append(sw.streamDescs, streamDesc) + sw.reactorsByCh[id] = reactor + sw.msgTypeByChID[id] = streamDesc.MessageType() } sw.reactors[name] = reactor reactor.SetSwitch(sw) @@ -182,16 +185,16 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { // RemoveReactor removes the given Reactor from the Switch. // NOTE: Not goroutine safe. func (sw *Switch) RemoveReactor(name string, reactor Reactor) { - for _, chDesc := range reactor.GetChannels() { + for _, streamDesc := range reactor.StreamDescriptors() { // remove channel description - for i := 0; i < len(sw.chDescs); i++ { - if chDesc.ID == sw.chDescs[i].ID { - sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...) + for i := 0; i < len(sw.streamDescs); i++ { + if streamDesc.StreamID() == sw.streamDescs[i].StreamID() { + sw.streamDescs = append(sw.streamDescs[:i], sw.streamDescs[i+1:]...) break } } - delete(sw.reactorsByCh, chDesc.ID) - delete(sw.msgTypeByChID, chDesc.ID) + delete(sw.reactorsByCh, streamDesc.StreamID()) + delete(sw.msgTypeByChID, streamDesc.StreamID()) } delete(sw.reactors, name) reactor.SetSwitch(nil) @@ -211,23 +214,23 @@ func (sw *Switch) Reactor(name string) Reactor { // SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. // NOTE: Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo NodeInfo) { +func (sw *Switch) SetNodeInfo(nodeInfo ni.NodeInfo) { sw.nodeInfo = nodeInfo } // NodeInfo returns the switch's NodeInfo. // NOTE: Not goroutine safe. -func (sw *Switch) NodeInfo() NodeInfo { +func (sw *Switch) NodeInfo() ni.NodeInfo { return sw.nodeInfo } // SetNodeKey sets the switch's private key for authenticated encryption. // NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey *NodeKey) { +func (sw *Switch) SetNodeKey(nodeKey *nodekey.NodeKey) { sw.nodeKey = nodeKey } -//--------------------------------------------------------------------- +// --------------------------------------------------------------------- // Service start/stop // OnStart implements BaseService. It starts all the reactors and peers. @@ -236,7 +239,7 @@ func (sw *Switch) OnStart() error { for _, reactor := range sw.reactors { err := reactor.Start() if err != nil { - return fmt.Errorf("failed to start %v: %w", reactor, err) + return ErrStart{reactor, err} } } @@ -249,7 +252,7 @@ func (sw *Switch) OnStart() error { // OnStop implements BaseService. It stops all peers and reactors. 
func (sw *Switch) OnStop() { // Stop peers - for _, p := range sw.peers.List() { + for _, p := range sw.peers.Copy() { sw.stopAndRemovePeer(p, nil) } @@ -257,64 +260,54 @@ func (sw *Switch) OnStop() { sw.Logger.Debug("Switch: Stopping reactors") for _, reactor := range sw.reactors { if err := reactor.Stop(); err != nil { - sw.Logger.Error("error while stopped reactor", "reactor", reactor, "error", err) + sw.Logger.Error("error while stopped reactor", "reactor", reactor, "err", err) } } } -//--------------------------------------------------------------------- +// --------------------------------------------------------------------- // Peers // Broadcast runs a go routine for each attempted send, which will block trying -// to send for defaultSendTimeoutSeconds. Returns a channel which receives -// success values for each attempted send (false if times out). Channel will be -// closed once msg bytes are sent to all peers (or time out). +// to send for defaultSendTimeoutSeconds. // // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(e Envelope) chan bool { - sw.Logger.Debug("Broadcast", "channel", e.ChannelID) - - peers := sw.peers.List() - var wg sync.WaitGroup - wg.Add(len(peers)) - successChan := make(chan bool, len(peers)) - - for _, peer := range peers { - go func(p Peer) { - defer wg.Done() - success := p.Send(e) - successChan <- success - }(peer) - } - - go func() { - wg.Wait() - close(successChan) - }() +func (sw *Switch) Broadcast(e Envelope) { + sw.peers.ForEach(func(p Peer) { + go func(peer Peer) { + success := peer.Send(e) + _ = success + }(p) + }) +} - return successChan +// TryBroadcast runs a go routine for each attempted send. +// If the send queue of the destination channel and peer are full, the message will not be sent. To make sure that messages are indeed sent to all destination, use `Broadcast`. +// +// NOTE: TryBroadcast uses goroutines, so order of broadcast may not be preserved. +func (sw *Switch) TryBroadcast(e Envelope) { + sw.peers.ForEach(func(p Peer) { + go func(peer Peer) { + peer.TrySend(e) + }(p) + }) } // NumPeers returns the count of outbound/inbound and outbound-dialing peers. // unconditional peers are not counted here. func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.IsOutbound() { - if !sw.IsPeerUnconditional(peer.ID()) { - outbound++ - } - } else { - if !sw.IsPeerUnconditional(peer.ID()) { - inbound++ - } + sw.peers.ForEach(func(peer Peer) { + if peer.IsOutbound() && !sw.IsPeerUnconditional(peer.ID()) { + outbound++ + } else if !sw.IsPeerUnconditional(peer.ID()) { + inbound++ } - } + }) dialing = sw.dialing.Size() - return + return outbound, inbound, dialing } -func (sw *Switch) IsPeerUnconditional(id ID) bool { +func (sw *Switch) IsPeerUnconditional(id nodekey.ID) bool { _, ok := sw.unconditionalPeerIDs[id] return ok } @@ -332,7 +325,7 @@ func (sw *Switch) Peers() IPeerSet { // StopPeerForError disconnects from a peer due to external error. // If the peer is persistent, it will attempt to reconnect. // TODO: make record depending on reason. 
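Editor's note: `Broadcast` above no longer returns a channel of per-peer success flags; it iterates the peer set via `ForEach` and fires one goroutine per send, discarding the results (`TryBroadcast` is the drop-on-full variant). A self-contained sketch with stand-in peer types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// fakePeer and fakePeerSet are stand-ins for the real Peer/PeerSet types.
type fakePeer struct{ id string }

func (p *fakePeer) Send(msg string) bool {
	fmt.Printf("sent %q to %s\n", msg, p.id)
	return true
}

type fakePeerSet struct {
	mtx   sync.RWMutex
	peers []*fakePeer
}

// ForEach visits each peer under the read lock, mirroring the PeerSet.ForEach
// the diff switches Broadcast over to.
func (ps *fakePeerSet) ForEach(fn func(*fakePeer)) {
	ps.mtx.RLock()
	defer ps.mtx.RUnlock()
	for _, p := range ps.peers {
		fn(p)
	}
}

func broadcast(ps *fakePeerSet, msg string) {
	ps.ForEach(func(p *fakePeer) {
		go func(pr *fakePeer) {
			_ = pr.Send(msg) // success value intentionally discarded, as above
		}(p)
	})
}

func main() {
	ps := &fakePeerSet{peers: []*fakePeer{{"a"}, {"b"}, {"c"}}}
	broadcast(ps, "block part")
	time.Sleep(50 * time.Millisecond) // demo only: let the goroutines finish
}
```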
-func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { +func (sw *Switch) StopPeerForError(peer Peer, reason any) { if !peer.IsRunning() { return } @@ -341,12 +334,12 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - var addr *NetAddress + var addr *na.NetAddr if peer.IsOutbound() { // socket address for outbound peers addr = peer.SocketAddr() } else { // self-reported address for inbound peers var err error - addr, err = peer.NodeInfo().NetAddress() + addr, err = peer.NodeInfo().NetAddr() if err != nil { sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", "peer", peer, "err", err) @@ -364,38 +357,45 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { sw.stopAndRemovePeer(peer, nil) } -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - sw.transport.Cleanup(peer) - if err := peer.Stop(); err != nil { - sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly +func (sw *Switch) stopAndRemovePeer(p Peer, reason any) { + // Returning early if the peer is already stopped prevents data races because + // this function may be called from multiple places at once. + if err := p.Stop(); err != nil { + sw.Logger.Error("error stopping peer", "peer", p.ID(), "err", err) + return } + // ignore errors because the peer is already stopped + _ = sw.transport.Cleanup(p.Conn()) + for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) + reactor.RemovePeer(p, reason) } // Removing a peer should go last to avoid a situation where a peer // reconnect to our node and the switch calls InitPeer before // RemovePeer is finished. // https://github.com/tendermint/tendermint/issues/3338 - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } else { + if !sw.peers.Remove(p) { // Removal of the peer has failed. The function above sets a flag within the peer to mark this. // We keep this message here as information to the developer. - sw.Logger.Debug("error on peer removal", ",", "peer", peer.ID()) + sw.Logger.Debug("error on peer removal", "peer", p.ID()) + return } + + sw.metrics.Peers.Add(float64(-1)) } // reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. +// with a fixed interval (approximately 2 minutes), then with +// exponential backoff (approximately close to 24 hours). // If no success after all that, it stops trying, and leaves it // to the PEX/Addrbook to find the peer with the addr again // NOTE: this will keep trying even if the handshake or auth fails. // TODO: be more explicit with error types so we only retry on certain failures // - ie. if we're getting ErrDuplicatePeer we can stop // because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { +func (sw *Switch) reconnectToPeer(addr *na.NetAddr) { if sw.reconnecting.Has(string(addr.ID)) { return } @@ -404,6 +404,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { start := time.Now() sw.Logger.Info("Reconnecting to peer", "addr", addr) + for i := 0; i < reconnectAttempts; i++ { if !sw.IsRunning() { return @@ -424,7 +425,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { sw.Logger.Error("Failed to reconnect to peer. 
Beginning exponential backoff", "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { + for i := 1; i <= reconnectBackOffAttempts; i++ { if !sw.IsRunning() { return } @@ -457,7 +458,7 @@ func (sw *Switch) MarkPeerAsGood(peer Peer) { } } -//--------------------------------------------------------------------- +// --------------------------------------------------------------------- // Dialing type privateAddr interface { @@ -465,24 +466,24 @@ type privateAddr interface { } func isPrivateAddr(err error) bool { - te, ok := err.(privateAddr) - return ok && te.PrivateAddr() + e, ok := err.(privateAddr) + return ok && e.PrivateAddr() } // DialPeersAsync dials a list of peers asynchronously in random order. // Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// It ignores ErrNetAddressLookup. However, if there are other errors, first +// It ignores na.ErrLookup. However, if there are other errors, first // encounter is returned. // Nop if there are no peers. func (sw *Switch) DialPeersAsync(peers []string) error { - netAddrs, errs := NewNetAddressStrings(peers) + netAddrs, errs := na.NewFromStrings(peers) // report all the errors for _, err := range errs { sw.Logger.Error("Error in peer's address", "err", err) } - // return first non-ErrNetAddressLookup error + // return first non-ErrLookup error for _, err := range errs { - if _, ok := err.(ErrNetAddressLookup); ok { + if errors.As(err, &na.ErrLookup{}) { continue } return err @@ -491,8 +492,8 @@ func (sw *Switch) DialPeersAsync(peers []string) error { return nil } -func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { - ourAddr := sw.NetAddress() +func (sw *Switch) dialPeersAsync(netAddrs []*na.NetAddr) { + ourAddr := sw.NetAddr() // TODO: this code feels like it's in the wrong place. // The integration tests depend on the addrBook being saved @@ -548,7 +549,7 @@ func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { // and authenticates successfully. // If we're currently dialing this address or it belongs to an existing peer, // ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { +func (sw *Switch) DialPeerWithAddress(addr *na.NetAddr) error { if sw.IsDialingOrExistingAddress(addr) { return ErrCurrentlyDialingOrExistingAddress{addr.String()} } @@ -559,7 +560,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { return sw.addOutboundPeerWithConfig(addr, sw.config) } -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] +// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds]. func (sw *Switch) randomSleep(interval time.Duration) { r := time.Duration(sw.rng.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond time.Sleep(r + interval) @@ -567,25 +568,25 @@ func (sw *Switch) randomSleep(interval time.Duration) { // IsDialingOrExistingAddress returns true if switch has a peer with the given // address or dialing it at the moment. -func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { +func (sw *Switch) IsDialingOrExistingAddress(addr *na.NetAddr) bool { return sw.dialing.Has(string(addr.ID)) || sw.peers.Has(addr.ID) || (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) } // AddPersistentPeers allows you to set persistent peers. It ignores -// ErrNetAddressLookup. However, if there are other errors, first encounter is +// na.ErrLookup. 
However, if there are other errors, first encounter is // returned. func (sw *Switch) AddPersistentPeers(addrs []string) error { sw.Logger.Info("Adding persistent peers", "addrs", addrs) - netAddrs, errs := NewNetAddressStrings(addrs) + netAddrs, errs := na.NewFromStrings(addrs) // report all the errors for _, err := range errs { sw.Logger.Error("Error in peer's address", "err", err) } - // return first non-ErrNetAddressLookup error + // return first non-ErrLookup error for _, err := range errs { - if _, ok := err.(ErrNetAddressLookup); ok { + if errors.As(err, &na.ErrLookup{}) { continue } return err @@ -596,23 +597,25 @@ func (sw *Switch) AddPersistentPeers(addrs []string) error { func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { sw.Logger.Info("Adding unconditional peer ids", "ids", ids) - for i, id := range ids { - err := validateID(ID(id)) + for _, id := range ids { + err := na.ValidateID(nodekey.ID(id)) if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) + return na.ErrInvalidPeerID{ID: nodekey.ID(id), Source: err} } - sw.unconditionalPeerIDs[ID(id)] = struct{}{} + + sw.unconditionalPeerIDs[nodekey.ID(id)] = struct{}{} } return nil } func (sw *Switch) AddPrivatePeerIDs(ids []string) error { validIDs := make([]string, 0, len(ids)) - for i, id := range ids { - err := validateID(ID(id)) + for _, id := range ids { + err := na.ValidateID(nodekey.ID(id)) if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) + return na.ErrInvalidPeerID{ID: nodekey.ID(id), Source: err} } + validIDs = append(validIDs, id) } @@ -621,7 +624,7 @@ func (sw *Switch) AddPrivatePeerIDs(ids []string) error { return nil } -func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { +func (sw *Switch) IsPeerPersistent(na *na.NetAddr) bool { for _, pa := range sw.persistentPeersAddrs { if pa.Equals(na) { return true @@ -632,26 +635,10 @@ func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { func (sw *Switch) acceptRoutine() { for { - p, err := sw.transport.Accept(peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - reactorsByCh: sw.reactorsByCh, - msgTypeByChID: sw.msgTypeByChID, - metrics: sw.metrics, - mlc: sw.mlc, - isPersistent: sw.IsPeerPersistent, - }) + conn, addr, err := sw.transport.Accept() if err != nil { switch err := err.(type) { - case ErrRejected: - if err.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - addr := err.Addr() - sw.addrBook.RemoveAddress(&addr) - sw.addrBook.AddOurAddress(&addr) - } - + case tcp.ErrRejected: sw.Logger.Info( "Inbound Peer rejected", "err", err, @@ -659,14 +646,14 @@ func (sw *Switch) acceptRoutine() { ) continue - case ErrFilterTimeout: + case tcp.ErrFilterTimeout: sw.Logger.Error( "Peer filter timed out", "err", err, ) continue - case ErrTransportClosed: + case tcp.ErrTransportClosed: sw.Logger.Error( "Stopped accept routine, as transport is closed", "numPeers", sw.peers.Size(), @@ -682,12 +669,49 @@ func (sw *Switch) acceptRoutine() { // So might as well panic and let process managers restart the node. // There's no point in letting the node run without the acceptRoutine, // since it won't be able to accept new connections. 
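Editor's note: `DialPeersAsync` and `AddPersistentPeers` now skip lookup failures with `errors.As(err, &na.ErrLookup{})` rather than a type assertion. The field layout of `na.ErrLookup` is not shown in this diff, so the struct below is a stand-in, but the filtering shape is the same:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrLookup is a stand-in for na.ErrLookup; its fields are assumed.
type ErrLookup struct {
	Addr string
	Err  error
}

func (e ErrLookup) Error() string {
	return fmt.Sprintf("lookup of %s failed: %v", e.Addr, e.Err)
}

func main() {
	errs := []error{
		ErrLookup{Addr: "bad.network.addr", Err: errors.New("no such host")},
		errors.New("some other address error"),
	}

	// Mirrors the filter above: DNS lookup failures are logged and skipped,
	// while the first error of any other kind is returned.
	for _, err := range errs {
		if errors.As(err, &ErrLookup{}) {
			fmt.Println("skipping:", err)
			continue
		}
		fmt.Println("returning first non-lookup error:", err)
		break
	}
}
```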
- panic(fmt.Errorf("accept routine exited: %v", err)) + panic(fmt.Sprintf("accept routine exited: %v", err)) } break } + nodeInfo, err := handshake(sw.nodeInfo, conn, sw.config.HandshakeTimeout) + if err != nil { + errRejected, ok := err.(ErrRejected) + if ok && errRejected.IsSelf() { + // Remove the given address from the address book and add to our addresses + // to avoid dialing in the future. + addr := errRejected.Addr() + sw.addrBook.RemoveAddress(&addr) + sw.addrBook.AddOurAddress(&addr) + } + + _ = sw.transport.Cleanup(conn) + + sw.Logger.Info( + "Inbound Peer rejected", + "err", errRejected, + "numPeers", sw.peers.Size(), + ) + + continue + } + + p := wrapPeer( + conn, + nodeInfo, + peerConfig{ + streamDescs: sw.streamDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + outbound: false, + }, + addr, + MConnConfig(sw.config)) + if !sw.IsPeerUnconditional(p.NodeInfo().ID()) { // Ignore connection if we already have enough peers. _, in, _ := sw.NumPeers() @@ -699,15 +723,14 @@ func (sw *Switch) acceptRoutine() { "max", sw.config.MaxNumInboundPeers, ) - sw.transport.Cleanup(p) + _ = sw.transport.Cleanup(conn) continue } - } if err := sw.addPeer(p); err != nil { - sw.transport.Cleanup(p) + _ = sw.transport.Cleanup(conn) if p.IsRunning() { _ = p.Stop() } @@ -726,7 +749,7 @@ func (sw *Switch) acceptRoutine() { // If peer is started successfully, reconnectLoop will start when // StopPeerForError is called. func (sw *Switch) addOutboundPeerWithConfig( - addr *NetAddress, + addr *na.NetAddr, cfg *config.P2PConfig, ) error { sw.Logger.Debug("Dialing peer", "address", addr) @@ -734,30 +757,11 @@ func (sw *Switch) addOutboundPeerWithConfig( // XXX(xla): Remove the leakage of test concerns in implementation. if cfg.TestDialFail { go sw.reconnectToPeer(addr) - return fmt.Errorf("dial err (peerConfig.DialFail == true)") + return errors.New("dial err (peerConfig.DialFail == true)") } - p, err := sw.transport.Dial(*addr, peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - msgTypeByChID: sw.msgTypeByChID, - metrics: sw.metrics, - mlc: sw.mlc, - }) + conn, err := sw.transport.Dial(*addr) if err != nil { - if e, ok := err.(ErrRejected); ok { - if e.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - - return err - } - } - // retry persistent peers after // any dial error besides IsSelf() if sw.IsPeerPersistent(addr) { @@ -767,8 +771,38 @@ func (sw *Switch) addOutboundPeerWithConfig( return err } + nodeInfo, err := handshake(sw.nodeInfo, conn, sw.config.HandshakeTimeout) + if err != nil { + errRejected, ok := err.(ErrRejected) + if ok && errRejected.IsSelf() { + // Remove the given address from the address book and add to our addresses + // to avoid dialing in the future. 
+ sw.addrBook.RemoveAddress(addr) + sw.addrBook.AddOurAddress(addr) + } + + _ = sw.transport.Cleanup(conn) + + return err + } + + p := wrapPeer( + conn, + nodeInfo, + peerConfig{ + streamDescs: sw.streamDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + outbound: true, + }, + addr, + MConnConfig(sw.config)) + if err := sw.addPeer(p); err != nil { - sw.transport.Cleanup(p) + _ = sw.transport.Cleanup(conn) if p.IsRunning() { _ = p.Stop() } @@ -799,7 +833,7 @@ func (sw *Switch) filterPeer(p Peer) error { return ErrRejected{id: p.ID(), err: err, isFiltered: true} } case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} + return tcp.ErrFilterTimeout{} } } @@ -842,8 +876,7 @@ func (sw *Switch) addPeer(p Peer) error { // so that if Receive errors, we will find the peer and remove it. // Add should not err since we already checked peers.Has(). if err := sw.peers.Add(p); err != nil { - switch err.(type) { - case ErrPeerRemoval: + if _, ok := err.(ErrPeerRemoval); ok { sw.Logger.Error("Error starting peer ", " err ", "Peer has already errored and removal was attempted.", "peer", p.ID()) diff --git a/p2p/switch_test.go b/p2p/switch_test.go index ad4040760fa..c1225c865bf 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -2,8 +2,8 @@ package p2p import ( "bytes" + "context" "errors" - "fmt" "io" "net" "net/http" @@ -19,12 +19,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + p2pproto "github.com/cometbft/cometbft/api/cometbft/p2p/v1" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/p2p/conn" - p2pproto "github.com/cometbft/cometbft/proto/tendermint/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/transport/tcp" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) var cfg *config.P2PConfig @@ -43,39 +45,38 @@ type PeerMessage struct { type TestReactor struct { BaseReactor - mtx cmtsync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage + mtx cmtsync.Mutex + streamDescriptors []StreamDescriptor + logMessages bool + msgsCounter int + msgsReceived map[byte][]PeerMessage } -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { +func NewTestReactor(descs []StreamDescriptor, logMessages bool) *TestReactor { tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), + streamDescriptors: descs, + logMessages: logMessages, + msgsReceived: make(map[byte][]PeerMessage), } tr.BaseReactor = *NewBaseReactor("TestReactor", tr) tr.SetLogger(log.TestingLogger()) return tr } -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels +func (tr *TestReactor) StreamDescriptors() []StreamDescriptor { + return tr.streamDescriptors } -func (tr *TestReactor) AddPeer(Peer) {} +func (*TestReactor) AddPeer(Peer) {} -func (tr *TestReactor) RemovePeer(Peer, interface{}) {} +func (*TestReactor) RemovePeer(Peer, any) {} func (tr *TestReactor) Receive(e Envelope) { if tr.logMessages { tr.mtx.Lock() - defer tr.mtx.Unlock() - fmt.Printf("Received: %X, %X\n", e.ChannelID, e.Message) tr.msgsReceived[e.ChannelID] = append(tr.msgsReceived[e.ChannelID], 
PeerMessage{Contents: e.Message, Counter: tr.msgsCounter}) tr.msgsCounter++ + tr.mtx.Unlock() } } @@ -85,10 +86,10 @@ func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { return tr.msgsReceived[chID] } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // convenience method for creating two switches connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn +// XXX: note this uses net.Pipe and not a proper TCP conn. func MakeSwitchPair(initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { // Create two switches that will be interconnected. switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) @@ -102,13 +103,29 @@ func initSwitchFunc(_ int, sw *Switch) *Switch { }) // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10, MessageType: &p2pproto.Message{}}, - {ID: byte(0x01), Priority: 10, MessageType: &p2pproto.Message{}}, + sw.AddReactor("foo", NewTestReactor([]StreamDescriptor{ + &tcpconn.ChannelDescriptor{ + ID: byte(0x00), + Priority: 1, + MessageTypeI: &p2pproto.Message{}, + }, + &tcpconn.ChannelDescriptor{ + ID: byte(0x01), + Priority: 2, + MessageTypeI: &p2pproto.Message{}, + }, }, true)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10, MessageType: &p2pproto.Message{}}, - {ID: byte(0x03), Priority: 10, MessageType: &p2pproto.Message{}}, + sw.AddReactor("bar", NewTestReactor([]StreamDescriptor{ + &tcpconn.ChannelDescriptor{ + ID: byte(0x02), + Priority: 3, + MessageTypeI: &p2pproto.Message{}, + }, + &tcpconn.ChannelDescriptor{ + ID: byte(0x03), + Priority: 4, + MessageTypeI: &p2pproto.Message{}, + }, }, true)) return sw @@ -117,12 +134,10 @@ func initSwitchFunc(_ int, sw *Switch) *Switch { func TestSwitches(t *testing.T) { s1, s2 := MakeSwitchPair(initSwitchFunc) t.Cleanup(func() { - if err := s1.Stop(); err != nil { + if err := s2.Stop(); err != nil { t.Error(err) } - }) - t.Cleanup(func() { - if err := s2.Stop(); err != nil { + if err := s1.Stop(); err != nil { t.Error(err) } }) @@ -138,7 +153,7 @@ func TestSwitches(t *testing.T) { ch0Msg := &p2pproto.PexAddrs{ Addrs: []p2pproto.NetAddress{ { - ID: "1", + ID: "0", }, }, } @@ -156,9 +171,11 @@ func TestSwitches(t *testing.T) { }, }, } + // Test broadcast and TryBroadcast on different channels in parallel. + // We have no channel capacity concerns, as each broadcast is on a distinct channel s1.Broadcast(Envelope{ChannelID: byte(0x00), Message: ch0Msg}) s1.Broadcast(Envelope{ChannelID: byte(0x01), Message: ch1Msg}) - s1.Broadcast(Envelope{ChannelID: byte(0x02), Message: ch2Msg}) + s1.TryBroadcast(Envelope{ChannelID: byte(0x02), Message: ch2Msg}) assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), @@ -181,24 +198,30 @@ func assertMsgReceivedWithTimeout( checkPeriod, timeout time.Duration, ) { + t.Helper() + ticker := time.NewTicker(checkPeriod) + defer ticker.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { select { case <-ticker.C: msgs := reactor.getMsgs(channel) - expectedBytes, err := proto.Marshal(msgs[0].Contents) - require.NoError(t, err) - gotBytes, err := proto.Marshal(msg) - require.NoError(t, err) - if len(msgs) > 0 { - if !bytes.Equal(expectedBytes, gotBytes) { - t.Fatalf("Unexpected message bytes. 
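Editorial note: `TryBroadcast` differs from `Broadcast` in how a full send queue is treated, mirroring `trySendBytes` vs `sendBytes` on the underlying channel (shown later in connection.go). A self-contained sketch of the two queueing disciplines:

```go
// Blocking vs non-blocking queueing behind Broadcast/TryBroadcast:
// the blocking variant waits (up to a timeout) for queue space, the
// non-blocking variant gives up immediately when the queue is full.
package main

import (
	"fmt"
	"time"
)

const sendTimeout = 100 * time.Millisecond // stands in for defaultSendTimeout

func sendBytes(q chan []byte, b []byte) bool {
	select {
	case q <- b:
		return true
	case <-time.After(sendTimeout):
		return false // queue stayed full for the whole timeout
	}
}

func trySendBytes(q chan []byte, b []byte) bool {
	select {
	case q <- b:
		return true
	default:
		return false // queue full right now: drop without waiting
	}
}

func main() {
	q := make(chan []byte, 1)
	fmt.Println(trySendBytes(q, []byte("a"))) // true: queue had room
	fmt.Println(trySendBytes(q, []byte("b"))) // false: queue is full
	fmt.Println(sendBytes(q, []byte("c")))    // false, but only after the timeout
}
```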
Wanted: %X, Got: %X", msg, msgs[0].Counter) + if len(msgs) != 0 { + got, err := proto.Marshal(msgs[0].Contents) + require.NoError(t, err) + wanted, err := proto.Marshal(msg) + require.NoError(t, err) + if !bytes.Equal(got, wanted) { + t.Fatalf("Unexpected message bytes. Wanted: %v, Got: %v", msg, msgs[0].Contents) } return } - - case <-time.After(timeout): - t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) + case <-ctx.Done(): + t.Fatalf("Expected to have received 1 message in channel #%v, but got 0", channel) } } } @@ -212,7 +235,7 @@ func TestSwitchFiltersOutItself(t *testing.T) { // addr should be rejected in addPeer based on the same ID err := s1.DialPeerWithAddress(rp.Addr()) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here if err, ok := err.(ErrRejected); ok { if !err.IsSelf() { t.Errorf("expected self to be rejected") @@ -234,7 +257,7 @@ func TestSwitchPeerFilter(t *testing.T) { var ( filters = []PeerFilterFunc{ func(_ IPeerSet, _ Peer) error { return nil }, - func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, + func(_ IPeerSet, _ Peer) error { return errors.New("denied") }, func(_ IPeerSet, _ Peer) error { return nil }, } sw = MakeSwitch( @@ -257,16 +280,25 @@ func TestSwitchPeerFilter(t *testing.T) { rp.Start() t.Cleanup(rp.Stop) - p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - }) + conn, err := sw.transport.Dial(*rp.Addr()) if err != nil { t.Fatal(err) } + p := wrapPeer(conn, + rp.nodeInfo(), + peerConfig{ + streamDescs: sw.streamDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + outbound: true, + }, + rp.Addr(), + MConnConfig(sw.config)) + err = sw.addPeer(p) if err, ok := err.(ErrRejected); ok { if !err.IsFiltered() { @@ -306,18 +338,27 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { rp.Start() defer rp.Stop() - p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - }) + conn, err := sw.transport.Dial(*rp.Addr()) if err != nil { t.Fatal(err) } + p := wrapPeer(conn, + rp.nodeInfo(), + peerConfig{ + streamDescs: sw.streamDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + outbound: true, + }, + rp.Addr(), + MConnConfig(sw.config)) + err = sw.addPeer(p) - if _, ok := err.(ErrFilterTimeout); !ok { + if _, ok := err.(tcp.ErrFilterTimeout); !ok { t.Errorf("expected ErrFilterTimeout") } } @@ -337,16 +378,25 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) { rp.Start() defer rp.Stop() - p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - }) + conn, err := sw.transport.Dial(*rp.Addr()) if err != nil { t.Fatal(err) } + p := wrapPeer(conn, + rp.nodeInfo(), + peerConfig{ + streamDescs: sw.streamDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + outbound: true, + }, + rp.Addr(), + MConnConfig(sw.config)) + if 
err := sw.addPeer(p); err != nil { t.Fatal(err) } @@ -362,6 +412,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) { } func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { + t.Helper() time.Sleep(timeout) if sw.Peers().Size() != 0 { t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) @@ -387,21 +438,30 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { rp.Start() defer rp.Stop() - p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - }) - require.Nil(err) + conn, err := sw.transport.Dial(*rp.Addr()) + require.NoError(err) + + p := wrapPeer(conn, + rp.nodeInfo(), + peerConfig{ + streamDescs: sw.streamDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + outbound: true, + }, + rp.Addr(), + MConnConfig(sw.config)) err = sw.addPeer(p) - require.Nil(err) + require.NoError(err) require.NotNil(sw.Peers().Get(rp.ID())) // simulate failure by closing connection - err = p.(*peer).CloseConn() + err = p.(*peer).Conn().Close() require.NoError(err) assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) @@ -440,11 +500,11 @@ func TestSwitchStopPeerForError(t *testing.T) { return initSwitchFunc(i, sw) }) - assert.Equal(t, len(sw1.Peers().List()), 1) + assert.Len(t, sw1.Peers().Copy(), 1) assert.EqualValues(t, 1, peersMetricValue()) // send messages to the peer from sw1 - p := sw1.Peers().List()[0] + p := sw1.Peers().Copy()[0] p.Send(Envelope{ ChannelID: 0x1, Message: &p2pproto.Message{}, @@ -459,9 +519,9 @@ func TestSwitchStopPeerForError(t *testing.T) { }) // now call StopPeerForError explicitly, eg. 
from a reactor - sw1.StopPeerForError(p, fmt.Errorf("some err")) + sw1.StopPeerForError(p, errors.New("some err")) - assert.Equal(t, len(sw1.Peers().List()), 0) + require.Empty(t, sw1.Peers().Copy()) assert.EqualValues(t, 0, peersMetricValue()) } @@ -484,11 +544,11 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { require.NoError(t, err) err = sw.DialPeerWithAddress(rp.Addr()) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, sw.Peers().Get(rp.ID())) - p := sw.Peers().List()[0] - err = p.(*peer).CloseConn() + p := sw.Peers().Copy()[0] + err = p.(*peer).Conn().Close() require.NoError(t, err) waitUntilSwitchHasAtLeastNPeers(sw, 1) @@ -509,7 +569,7 @@ conf := config.DefaultP2PConfig() conf.TestDialFail = true // will trigger a reconnect err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) - require.NotNil(t, err) + require.Error(t, err) // DialPeerWithAddres - sw.peerConfig resets the dialer waitUntilSwitchHasAtLeastNPeers(sw, 2) assert.Equal(t, 2, sw.Peers().Size()) @@ -533,7 +593,7 @@ func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { err = sw.AddPersistentPeers([]string{rp.Addr().String()}) require.NoError(t, err) - conn, err := rp.Dial(sw.NetAddress()) + conn, err := rp.Dial(sw.NetAddr()) require.NoError(t, err) time.Sleep(50 * time.Millisecond) require.NotNil(t, sw.Peers().Get(rp.ID())) @@ -582,7 +642,6 @@ func TestSwitchFullConnectivity(t *testing.T) { switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) defer func() { for _, sw := range switches { - sw := sw t.Cleanup(func() { if err := sw.Stop(); err != nil { t.Error(err) @@ -634,7 +693,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} peers = append(peers, peer) peer.Start() - c, err := peer.Dial(sw.NetAddress()) + c, err := peer.Dial(sw.NetAddr()) require.NoError(t, err) // spawn a reading routine to prevent connection from closing go func(c net.Conn) { @@ -653,19 +712,19 @@ // 2. check we close new connections if we already have MaxNumInboundPeers peers peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} peer.Start() - conn, err := peer.Dial(sw.NetAddress()) + conn, err := peer.Dial(sw.NetAddr()) require.NoError(t, err) // check conn is closed one := make([]byte, 1) _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) _, err = conn.Read(one) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) peer.Stop() // 3. check we connect to unconditional peers despite the limit.
for _, peer := range unconditionalPeers { - c, err := peer.Dial(sw.NetAddress()) + c, err := peer.Dial(sw.NetAddr()) require.NoError(t, err) // spawn a reading routine to prevent connection from closing go func(c net.Conn) { @@ -693,24 +752,26 @@ type errorTransport struct { acceptErr error } -func (et errorTransport) NetAddress() NetAddress { +var _ Transport = errorTransport{} + +func (errorTransport) NetAddr() na.NetAddr { panic("not implemented") } -func (et errorTransport) Accept(peerConfig) (Peer, error) { - return nil, et.acceptErr +func (et errorTransport) Accept() (net.Conn, *na.NetAddr, error) { + return nil, nil, et.acceptErr } -func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) { +func (errorTransport) Dial(na.NetAddr) (net.Conn, error) { panic("not implemented") } -func (errorTransport) Cleanup(Peer) { +func (errorTransport) Cleanup(net.Conn) error { panic("not implemented") } func TestSwitchAcceptRoutineErrorCases(t *testing.T) { - sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) + sw := NewSwitch(cfg, errorTransport{tcp.ErrFilterTimeout{}}) assert.NotPanics(t, func() { err := sw.Start() require.NoError(t, err) @@ -718,16 +779,16 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) { require.NoError(t, err) }) - sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) + // sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) + // assert.NotPanics(t, func() { + // err := sw.Start() + // require.NoError(t, err) + // err = sw.Stop() + // require.NoError(t, err) + // }) // TODO(melekes) check we remove our address from addrBook - sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) + sw = NewSwitch(cfg, errorTransport{tcp.ErrTransportClosed{}}) assert.NotPanics(t, func() { err := sw.Start() require.NoError(t, err) @@ -746,7 +807,7 @@ type mockReactor struct { initCalledBeforeRemoveFinished uint32 } -func (r *mockReactor) RemovePeer(Peer, interface{}) { +func (r *mockReactor) RemovePeer(Peer, any) { atomic.StoreUint32(&r.removePeerInProgress, 1) defer atomic.StoreUint32(&r.removePeerInProgress, 0) time.Sleep(100 * time.Millisecond) @@ -764,14 +825,14 @@ func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 } -// see stopAndRemovePeer -func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { +// see stopAndRemovePeer. 
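Editorial note: `errorTransport` above compiles against the new, much smaller `Transport` contract (`NetAddr`, `Accept`, `Dial`, `Cleanup`, defined later in p2p/transport.go). To show how little a conforming implementation now needs, here is a sketch of an in-memory transport over `net.Pipe`, written against a local copy of the interface, with a plain string standing in for `*na.NetAddr`; the `pipeTransport` name is made up for illustration:

```go
// A minimal in-memory transport against a local copy of the slimmed-down
// interface (the real Accept returns *na.NetAddr; a string stands in here).
package main

import (
	"fmt"
	"net"
)

type transport interface {
	Accept() (net.Conn, string, error)
	Dial(addr string) (net.Conn, error)
	Cleanup(conn net.Conn) error
}

// pipeTransport hands out the two ends of a net.Pipe: every Dial makes
// the matching conn available to the next Accept.
type pipeTransport struct {
	acceptc chan net.Conn
}

func newPipeTransport() *pipeTransport {
	return &pipeTransport{acceptc: make(chan net.Conn, 1)}
}

func (t *pipeTransport) Accept() (net.Conn, string, error) {
	return <-t.acceptc, "pipe", nil
}

func (t *pipeTransport) Dial(string) (net.Conn, error) {
	c1, c2 := net.Pipe()
	t.acceptc <- c2
	return c1, nil
}

func (*pipeTransport) Cleanup(conn net.Conn) error { return conn.Close() }

func main() {
	var tr transport = newPipeTransport()
	dialed, _ := tr.Dial("peer@anywhere")
	accepted, addr, _ := tr.Accept()
	go func() { _, _ = dialed.Write([]byte("hi")) }() // pipe writes block until read
	buf := make([]byte, 2)
	n, _ := accepted.Read(buf)
	fmt.Println(addr, string(buf[:n])) // pipe hi
	_ = tr.Cleanup(dialed)
	_ = tr.Cleanup(accepted)
}
```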
+func TestSwitch_InitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { // make reactor reactor := &mockReactor{} reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) // make switch - sw := MakeSwitch(cfg, 1, func(i int, sw *Switch) *Switch { + sw := MakeSwitch(cfg, 1, func(_ int, sw *Switch) *Switch { sw.AddReactor("mock", reactor) return sw }) @@ -787,7 +848,7 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() defer rp.Stop() - _, err = rp.Dial(sw.NetAddress()) + _, err = rp.Dial(sw.NetAddr()) require.NoError(t, err) // wait till the switch adds rp to the peer set, then stop the peer asynchronously @@ -800,7 +861,7 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { } // simulate peer reconnecting to us - _, err = rp.Dial(sw.NetAddress()) + _, err = rp.Dial(sw.NetAddr()) require.NoError(t, err) // wait till the switch adds rp to the peer set time.Sleep(50 * time.Millisecond) @@ -809,63 +870,68 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { assert.False(t, reactor.InitCalledBeforeRemoveFinished()) } -func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - +func makeSwitchForBenchmark(b *testing.B) *Switch { + b.Helper() + s1, s2 := MakeSwitchPair(initSwitchFunc) b.Cleanup(func() { - if err := s1.Stop(); err != nil { + if err := s2.Stop(); err != nil { b.Error(err) } - }) - - b.Cleanup(func() { - if err := s2.Stop(); err != nil { + if err := s1.Stop(); err != nil { b.Error(err) } }) - // Allow time for goroutines to boot up time.Sleep(1 * time.Second) + return s1 +} - b.ResetTimer() +func BenchmarkSwitchBroadcast(b *testing.B) { + sw := makeSwitchForBenchmark(b) + chMsg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "1", + }, + }, + } - numSuccess, numFailure := 0, 0 + b.ResetTimer() // Send random message from foo channel to another for i := 0; i < b.N; i++ { chID := byte(i % 4) - successChan := s1.Broadcast(Envelope{ChannelID: chID}) - for s := range successChan { - if s { - numSuccess++ - } else { - numFailure++ - } - } + sw.Broadcast(Envelope{ChannelID: chID, Message: chMsg}) } +} - b.Logf("success: %v, failure: %v", numSuccess, numFailure) +func BenchmarkSwitchTryBroadcast(b *testing.B) { + sw := makeSwitchForBenchmark(b) + chMsg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "1", + }, + }, + } + + b.ResetTimer() + + // Send random message from foo channel to another + for i := 0; i < b.N; i++ { + chID := byte(i % 4) + sw.TryBroadcast(Envelope{ChannelID: chID, Message: chMsg}) + } } func TestSwitchRemovalErr(t *testing.T) { sw1, sw2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { return initSwitchFunc(i, sw) }) - assert.Equal(t, len(sw1.Peers().List()), 1) - p := sw1.Peers().List()[0] + require.Len(t, sw1.Peers().Copy(), 1) + p := sw1.Peers().Copy()[0] - sw2.StopPeerForError(p, fmt.Errorf("peer should error")) + sw2.StopPeerForError(p, errors.New("peer should error")) assert.Equal(t, sw2.peers.Add(p).Error(), ErrPeerRemoval{}.Error()) } diff --git a/p2p/test_util.go b/p2p/test_util.go index 
3fbb68bb655..d548973e03a 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -5,35 +5,27 @@ import ( "net" "time" - "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/ed25519" + cmtnet "github.com/cometbft/cometbft/internal/net" "github.com/cometbft/cometbft/libs/log" - cmtnet "github.com/cometbft/cometbft/libs/net" - cmtrand "github.com/cometbft/cometbft/libs/rand" - - "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/p2p/conn" + "github.com/cometbft/cometbft/p2p/internal/fuzz" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) const testCh = 0x01 -//------------------------------------------------ - -type mockNodeInfo struct { - addr *NetAddress -} - -func (ni mockNodeInfo) ID() ID { return ni.addr.ID } -func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, nil } -func (ni mockNodeInfo) Validate() error { return nil } -func (ni mockNodeInfo) CompatibleWith(NodeInfo) error { return nil } +// ------------------------------------------------ func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { sw.peers.Add(peer) //nolint:errcheck // ignore error } func CreateRandomPeer(outbound bool) Peer { - addr, netAddr := CreateRoutableAddr() + addr, netAddr := na.CreateRoutableAddr() p := &peer{ peerConn: peerConn{ outbound: outbound, @@ -47,51 +39,49 @@ func CreateRandomPeer(outbound bool) Peer { return p } -func CreateRoutableAddr() (addr string, netAddr *NetAddress) { - for { - var err error - addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", - cmtrand.Bytes(20), - cmtrand.Int()%256, - cmtrand.Int()%256, - cmtrand.Int()%256, - cmtrand.Int()%256) - netAddr, err = NewNetAddressString(addr) - if err != nil { - panic(err) - } - if netAddr.Routable() { - break - } - } - return -} - -//------------------------------------------------------------------ +// ------------------------------------------------------------------ // Connects switches via arbitrary net.Conn. Used for testing. const TestHost = "localhost" -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. +// MakeConnectedSwitches returns n switches, initialized according to the +// initSwitch function, and connected according to the connect function. func MakeConnectedSwitches(cfg *config.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int), +) []*Switch { + switches := MakeSwitches(cfg, n, initSwitch) + return StartAndConnectSwitches(switches, connect) +} + +// MakeSwitches returns n switches. +// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). +func MakeSwitches( + cfg *config.P2PConfig, + n int, + initSwitch func(int, *Switch) *Switch, ) []*Switch { switches := make([]*Switch, n) for i := 0; i < n; i++ { switches[i] = MakeSwitch(cfg, i, initSwitch) } + return switches +} +// StartAndConnectSwitches connects the switches according to the connect function. +// If connect==Connect2Switches, the switches will be fully connected. +// NOTE: panics if any switch fails to start. 
+func StartAndConnectSwitches( + switches []*Switch, + connect func([]*Switch, int, int), +) []*Switch { if err := StartSwitches(switches); err != nil { panic(err) } - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { + for i := 0; i < len(switches); i++ { + for j := i + 1; j < len(switches); j++ { connect(switches, i, j) } } @@ -127,8 +117,42 @@ func Connect2Switches(switches []*Switch, i, j int) { <-doneCh } +// ConnectStarSwitches returns a connect func that wires switch c to switch j +// via net.Pipe(), producing a star topology centered on c. +func ConnectStarSwitches(c int) func([]*Switch, int, int) { + // Blocks until a connection is established. + // NOTE: caller ensures i and j are within bounds. + return func(switches []*Switch, i, j int) { + if i != c { + return + } + + switchI := switches[i] + switchJ := switches[j] + + c1, c2 := conn.NetPipe() + + doneCh := make(chan struct{}) + go func() { + err := switchI.addPeerWithConnection(c1) + if err != nil { + panic(err) + } + doneCh <- struct{}{} + }() + go func() { + err := switchJ.addPeerWithConnection(c2) + if err != nil { + panic(err) + } + doneCh <- struct{}{} + }() + <-doneCh + <-doneCh + } +} + func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := testInboundPeerConn(conn, sw.config, sw.nodeKey.PrivKey) + pc, err := testInboundPeerConn(conn, sw.config) if err != nil { if err := conn.Close(); err != nil { sw.Logger.Error("Error closing connection", "err", err) @@ -136,10 +160,10 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error { return err } - ni, err := handshake(conn, time.Second, sw.nodeInfo) + ni, err := handshake(sw.nodeInfo, conn, time.Second) if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) + if cErr := conn.Close(); cErr != nil { + sw.Logger.Error("Error closing connection", "err", cErr) } return err } @@ -150,13 +174,14 @@ ni, sw.reactorsByCh, sw.msgTypeByChID, - sw.chDescs, + sw.streamDescs, sw.StopPeerForError, - sw.mlc, ) if err = sw.addPeer(p); err != nil { - pc.CloseConn() + if cErr := conn.Close(); cErr != nil { + sw.Logger.Error("Error closing connection", "err", cErr) + } return err } @@ -181,18 +206,18 @@ func MakeSwitch( initSwitch func(int, *Switch) *Switch, opts ...SwitchOption, ) *Switch { - nodeKey := NodeKey{ + nk := nodekey.NodeKey{ PrivKey: ed25519.GenPrivKey(), } - nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i)) - addr, err := NewNetAddressString( - IDAddressString(nodeKey.ID(), nodeInfo.(DefaultNodeInfo).ListenAddr), + nodeInfo := testNodeInfo(nk.ID(), fmt.Sprintf("node%d", i)) + addr, err := na.NewFromString( + na.IDAddrString(nk.ID(), nodeInfo.ListenAddr), ) if err != nil { panic(err) } - t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg)) + t := &mockTransport{} if err := t.Listen(*addr); err != nil { panic(err) @@ -201,17 +226,15 @@ // TODO: let the config be passed in? sw := initSwitch(i, NewSwitch(cfg, t, opts...)) sw.SetLogger(log.TestingLogger().With("switch", i)) - sw.SetNodeKey(&nodeKey) + sw.SetNodeKey(&nk) - ni := nodeInfo.(DefaultNodeInfo) + // reset channels for ch := range sw.reactorsByCh { - ni.Channels = append(ni.Channels, ch) + if ch != testCh { + nodeInfo.Channels = append(nodeInfo.Channels, ch) + } } - nodeInfo = ni - // TODO: We need to setup reactors ahead of time so the NodeInfo is properly - // populated and we don't have to do those awkward overrides and setters.
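Editorial note: because `ConnectStarSwitches(c)` does nothing unless `i == c`, feeding it to the all-pairs loop in `StartAndConnectSwitches` degenerates the mesh into a star centered on switch `c`. A hypothetical test using the helpers exactly as defined in this diff (test name and assertions are illustrative only):

```go
package p2p

import (
	"testing"

	"github.com/cometbft/cometbft/config"
)

// TestStarTopologySketch wires a 5-node star with switch 0 at the hub.
// Only pairs (0, j) get connected; the callback returns early for i != 0.
func TestStarTopologySketch(t *testing.T) {
	cfg := config.DefaultP2PConfig()
	switches := MakeSwitches(cfg, 5, initSwitchFunc)
	switches = StartAndConnectSwitches(switches, ConnectStarSwitches(0))
	t.Cleanup(func() {
		for _, sw := range switches {
			_ = sw.Stop()
		}
	})
	// The hub should see the 4 leaves; each leaf sees only the hub.
	if got := switches[0].Peers().Size(); got != 4 {
		t.Fatalf("hub: want 4 peers, got %d", got)
	}
}
```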
- t.nodeInfo = nodeInfo sw.SetNodeInfo(nodeInfo) return sw @@ -220,67 +243,33 @@ func MakeSwitch( func testInboundPeerConn( conn net.Conn, config *config.P2PConfig, - ourNodePrivKey crypto.PrivKey, + // ourNodePrivKey crypto.PrivKey, ) (peerConn, error) { - return testPeerConn(conn, config, false, false, ourNodePrivKey, nil) + return testPeerConn(conn, config, false, false, nil) } func testPeerConn( rawConn net.Conn, cfg *config.P2PConfig, outbound, persistent bool, - ourNodePrivKey crypto.PrivKey, - socketAddr *NetAddress, + // _ourNodePrivKey crypto.PrivKey, + socketAddr *na.NetAddr, ) (pc peerConn, err error) { conn := rawConn // Fuzz connection if cfg.TestFuzz { // so we have time to do peer handshakes and get set up - conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) - } - - // Encrypt connection - conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey) - if err != nil { - return pc, fmt.Errorf("error creating peer: %w", err) + conn = fuzz.ConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) } // Only the information we already have return newPeerConn(outbound, persistent, conn, socketAddr), nil } -//---------------------------------------------------------------- +// ---------------------------------------------------------------- // rand node info -func testNodeInfo(id ID, name string) NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") -} - -func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo { - return DefaultNodeInfo{ - ProtocolVersion: defaultProtocolVersion, - DefaultNodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - Network: network, - Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, - Moniker: name, - Other: DefaultNodeInfoOther{ - TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - }, - } -} - -func getFreePort() int { - port, err := cmtnet.GetFreePort() - if err != nil { - panic(err) - } - return port -} - type AddrBookMock struct { Addrs map[string]struct{} OurAddrs map[string]struct{} @@ -289,27 +278,65 @@ type AddrBookMock struct { var _ AddrBook = (*AddrBookMock)(nil) -func (book *AddrBookMock) AddAddress(addr *NetAddress, _ *NetAddress) error { +func (book *AddrBookMock) AddAddress(addr *na.NetAddr, _ *na.NetAddr) error { book.Addrs[addr.String()] = struct{}{} return nil } -func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } -func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { + +func (book *AddrBookMock) AddOurAddress(addr *na.NetAddr) { + book.OurAddrs[addr.String()] = struct{}{} +} + +func (book *AddrBookMock) OurAddress(addr *na.NetAddr) bool { _, ok := book.OurAddrs[addr.String()] return ok } -func (book *AddrBookMock) MarkGood(ID) {} -func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { +func (*AddrBookMock) MarkGood(nodekey.ID) {} +func (book *AddrBookMock) HasAddress(addr *na.NetAddr) bool { _, ok := book.Addrs[addr.String()] return ok } -func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { +func (book *AddrBookMock) RemoveAddress(addr *na.NetAddr) { delete(book.Addrs, addr.String()) } -func (book *AddrBookMock) Save() {} +func (*AddrBookMock) Save() {} func (book *AddrBookMock) AddPrivateIDs(addrs []string) { for _, addr := range addrs { book.PrivateAddrs[addr] = struct{}{} } } + +type mockNodeInfo struct { + addr *na.NetAddr +} + +func (ni mockNodeInfo) ID() nodekey.ID { return ni.addr.ID } +func (ni mockNodeInfo) NetAddr() (*na.NetAddr, error) { return 
ni.addr, nil } +func (mockNodeInfo) Validate() error { return nil } +func (mockNodeInfo) CompatibleWith(ni.NodeInfo) error { return nil } +func (mockNodeInfo) Handshake(net.Conn, time.Duration) (ni.NodeInfo, error) { return nil, nil } + +func testNodeInfo(id nodekey.ID, name string) ni.Default { + return ni.Default{ + ProtocolVersion: ni.NewProtocolVersion(0, 0, 0), + DefaultNodeID: id, + ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + Network: "testing", + Version: "1.2.3-rc0-deadbeef", + Channels: []byte{testCh}, + Moniker: name, + Other: ni.DefaultOther{ + TxIndex: "on", + RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + }, + } +} + +func getFreePort() int { + port, err := cmtnet.GetFreePort() + if err != nil { + panic(err) + } + return port +} diff --git a/p2p/transport.go b/p2p/transport.go index d6043da3beb..327e8ad2ce6 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -1,41 +1,13 @@ package p2p import ( - "context" - "fmt" "net" - "time" - - "golang.org/x/net/netutil" "github.com/cosmos/gogoproto/proto" - "github.com/cometbft/cometbft/crypto" - "github.com/cometbft/cometbft/libs/protoio" - "github.com/cometbft/cometbft/p2p/conn" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" -) - -const ( - defaultDialTimeout = time.Second - defaultFilterTimeout = 5 * time.Second - defaultHandshakeTimeout = 3 * time.Second + na "github.com/cometbft/cometbft/p2p/netaddr" ) -// IPResolver is a behavior subset of net.Resolver. -type IPResolver interface { - LookupIPAddr(context.Context, string) ([]net.IPAddr, error) -} - -// accept is the container to carry the upgraded connection and NodeInfo from an -// asynchronously running routine to the Accept method. -type accept struct { - netAddr *NetAddress - conn net.Conn - nodeInfo NodeInfo - err error -} - // peerConfig is used to bundle data we need to fully setup a Peer with an // MConn, provided by the caller of Accept and Dial (currently the Switch). This // a temporary measure until reactor setup is less dynamic and we introduce the @@ -43,576 +15,33 @@ type accept struct { // events. // TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour. type peerConfig struct { - chDescs []*conn.ChannelDescriptor - onPeerError func(Peer, interface{}) + streamDescs []StreamDescriptor + onPeerError func(Peer, any) outbound bool // isPersistent allows you to set a function, which, given socket address // (for outbound peers) OR self-reported address (for inbound peers), tells // if the peer is persistent or not. - isPersistent func(*NetAddress) bool + isPersistent func(*na.NetAddr) bool reactorsByCh map[byte]Reactor msgTypeByChID map[byte]proto.Message metrics *Metrics - mlc *metricsLabelCache } // Transport emits and connects to Peers. The implementation of Peer is left to // the transport. Each transport is also responsible to filter establishing // peers specific to its domain. type Transport interface { - // Listening address. - NetAddress() NetAddress - - // Accept returns a newly connected Peer. - Accept(peerConfig) (Peer, error) - - // Dial connects to the Peer for the address. - Dial(NetAddress, peerConfig) (Peer, error) - - // Cleanup any resources associated with Peer. - Cleanup(Peer) -} - -// transportLifecycle bundles the methods for callers to control start and stop -// behavior. -type transportLifecycle interface { - Close() error - Listen(NetAddress) error -} - -// ConnFilterFunc to be implemented by filter hooks after a new connection has -// been established. 
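Editorial note: `cmtnet.GetFreePort` itself is not shown in this diff; helpers like it conventionally bind port 0 so the kernel assigns a free port, then close the listener. A sketch of that convention (not the actual `internal/net` implementation):

```go
// Typical GetFreePort-style helper: bind port 0, let the kernel pick,
// read the chosen port back, and release it for the caller to reuse.
package main

import (
	"fmt"
	"net"
)

func getFreePort() (int, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	// Closing frees the port again; a small race window remains until
	// the caller actually binds it.
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := getFreePort()
	fmt.Println(port, err)
}
```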
The set of exisiting connections is passed along together -// with all resolved IPs for the new connection. -type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error - -// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection -// and refuses new ones if they come from a known ip. -func ConnDuplicateIPFilter() ConnFilterFunc { - return func(cs ConnSet, c net.Conn, ips []net.IP) error { - for _, ip := range ips { - if cs.HasIP(ip) { - return ErrRejected{ - conn: c, - err: fmt.Errorf("ip<%v> already connected", ip), - isDuplicate: true, - } - } - } - - return nil - } -} - -// MultiplexTransportOption sets an optional parameter on the -// MultiplexTransport. -type MultiplexTransportOption func(*MultiplexTransport) - -// MultiplexTransportConnFilters sets the filters for rejection new connections. -func MultiplexTransportConnFilters( - filters ...ConnFilterFunc, -) MultiplexTransportOption { - return func(mt *MultiplexTransport) { mt.connFilters = filters } -} - -// MultiplexTransportFilterTimeout sets the timeout waited for filter calls to -// return. -func MultiplexTransportFilterTimeout( - timeout time.Duration, -) MultiplexTransportOption { - return func(mt *MultiplexTransport) { mt.filterTimeout = timeout } -} - -// MultiplexTransportResolver sets the Resolver used for ip lokkups, defaults to -// net.DefaultResolver. -func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption { - return func(mt *MultiplexTransport) { mt.resolver = resolver } -} - -// MultiplexTransportMaxIncomingConnections sets the maximum number of -// simultaneous connections (incoming). Default: 0 (unlimited) -func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption { - return func(mt *MultiplexTransport) { mt.maxIncomingConnections = n } -} - -// MultiplexTransport accepts and dials tcp connections and upgrades them to -// multiplexed peers. -type MultiplexTransport struct { - netAddr NetAddress - listener net.Listener - maxIncomingConnections int // see MaxIncomingConnections - - acceptc chan accept - closec chan struct{} - - // Lookup table for duplicate ip and id checks. - conns ConnSet - connFilters []ConnFilterFunc - - dialTimeout time.Duration - filterTimeout time.Duration - handshakeTimeout time.Duration - nodeInfo NodeInfo - nodeKey NodeKey - resolver IPResolver - - // TODO(xla): This config is still needed as we parameterise peerConn and - // peer currently. All relevant configuration should be refactored into options - // with sane defaults. - mConfig conn.MConnConfig -} - -// Test multiplexTransport for interface completeness. -var _ Transport = (*MultiplexTransport)(nil) -var _ transportLifecycle = (*MultiplexTransport)(nil) - -// NewMultiplexTransport returns a tcp connected multiplexed peer. -func NewMultiplexTransport( - nodeInfo NodeInfo, - nodeKey NodeKey, - mConfig conn.MConnConfig, -) *MultiplexTransport { - return &MultiplexTransport{ - acceptc: make(chan accept), - closec: make(chan struct{}), - dialTimeout: defaultDialTimeout, - filterTimeout: defaultFilterTimeout, - handshakeTimeout: defaultHandshakeTimeout, - mConfig: mConfig, - nodeInfo: nodeInfo, - nodeKey: nodeKey, - conns: NewConnSet(), - resolver: net.DefaultResolver, - } -} - -// NetAddress implements Transport. -func (mt *MultiplexTransport) NetAddress() NetAddress { - return mt.netAddr -} - -// Accept implements Transport. 
-func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { - select { - // This case should never have any side-effectful/blocking operations to - // ensure that quality peers are ready to be used. - case a := <-mt.acceptc: - if a.err != nil { - return nil, a.err - } - - cfg.outbound = false - - return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil - case <-mt.closec: - return nil, ErrTransportClosed{} - } -} - -// Dial implements Transport. -func (mt *MultiplexTransport) Dial( - addr NetAddress, - cfg peerConfig, -) (Peer, error) { - c, err := addr.DialTimeout(mt.dialTimeout) - if err != nil { - return nil, err - } - - if mt.mConfig.TestFuzz { - // so we have time to do peer handshakes and get set up. - c = FuzzConnAfterFromConfig(c, 10*time.Second, mt.mConfig.TestFuzzConfig) - } - - // TODO(xla): Evaluate if we should apply filters if we explicitly dial. - if err := mt.filterConn(c); err != nil { - return nil, err - } - - secretConn, nodeInfo, err := mt.upgrade(c, &addr) - if err != nil { - return nil, err - } - - cfg.outbound = true - - p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr) - - return p, nil -} - -// Close implements transportLifecycle. -func (mt *MultiplexTransport) Close() error { - close(mt.closec) - - if mt.listener != nil { - return mt.listener.Close() - } - - return nil -} - -// Listen implements transportLifecycle. -func (mt *MultiplexTransport) Listen(addr NetAddress) error { - ln, err := net.Listen("tcp", addr.DialString()) - if err != nil { - return err - } - - if mt.maxIncomingConnections > 0 { - ln = netutil.LimitListener(ln, mt.maxIncomingConnections) - } - - mt.netAddr = addr - mt.listener = ln - - go mt.acceptPeers() - - return nil -} - -// AddChannel registers a channel to nodeInfo. -// NOTE: NodeInfo must be of type DefaultNodeInfo else channels won't be updated -// This is a bit messy at the moment but is cleaned up in the following version -// when NodeInfo changes from an interface to a concrete type -func (mt *MultiplexTransport) AddChannel(chID byte) { - if ni, ok := mt.nodeInfo.(DefaultNodeInfo); ok { - if !ni.HasChannel(chID) { - ni.Channels = append(ni.Channels, chID) - } - mt.nodeInfo = ni - } -} - -func (mt *MultiplexTransport) acceptPeers() { - for { - c, err := mt.listener.Accept() - if err != nil { - // If Close() has been called, silently exit. - select { - case _, ok := <-mt.closec: - if !ok { - return - } - default: - // Transport is not closed - } - - mt.acceptc <- accept{err: err} - return - } - - // Connection upgrade and filtering should be asynchronous to avoid - // Head-of-line blocking[0]. - // Reference: https://github.com/tendermint/tendermint/issues/2047 - // - // [0] https://en.wikipedia.org/wiki/Head-of-line_blocking - go func(c net.Conn) { - defer func() { - if r := recover(); r != nil { - err := ErrRejected{ - conn: c, - err: fmt.Errorf("recovered from panic: %v", r), - isAuthFailure: true, - } - select { - case mt.acceptc <- accept{err: err}: - case <-mt.closec: - // Give up if the transport was closed. - _ = c.Close() - return - } - } - }() - - var ( - nodeInfo NodeInfo - secretConn *conn.SecretConnection - netAddr *NetAddress - ) - - err := mt.filterConn(c) - if err == nil { - secretConn, nodeInfo, err = mt.upgrade(c, nil) - if err == nil { - addr := c.RemoteAddr() - id := PubKeyToID(secretConn.RemotePubKey()) - netAddr = NewNetAddress(id, addr) - } - } - - select { - case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}: - // Make the upgraded peer available. 
- case <-mt.closec: - // Give up if the transport was closed. - _ = c.Close() - return - } - }(c) - } -} - -// Cleanup removes the given address from the connections set and -// closes the connection. -func (mt *MultiplexTransport) Cleanup(p Peer) { - mt.conns.RemoveAddr(p.RemoteAddr()) - _ = p.CloseConn() -} - -func (mt *MultiplexTransport) cleanup(c net.Conn) error { - mt.conns.Remove(c) - - return c.Close() -} - -func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) { - defer func() { - if err != nil { - _ = c.Close() - } - }() - - // Reject if connection is already present. - if mt.conns.Has(c) { - return ErrRejected{conn: c, isDuplicate: true} - } - - // Resolve ips for incoming conn. - ips, err := resolveIPs(mt.resolver, c) - if err != nil { - return err - } - - errc := make(chan error, len(mt.connFilters)) - - for _, f := range mt.connFilters { - go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) { - errc <- f(mt.conns, c, ips) - }(f, c, ips, errc) - } - - for i := 0; i < cap(errc); i++ { - select { - case err := <-errc: - if err != nil { - return ErrRejected{conn: c, err: err, isFiltered: true} - } - case <-time.After(mt.filterTimeout): - return ErrFilterTimeout{} - } - - } - - mt.conns.Set(c, ips) - - return nil -} - -func (mt *MultiplexTransport) upgrade( - c net.Conn, - dialedAddr *NetAddress, -) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) { - defer func() { - if err != nil { - _ = mt.cleanup(c) - } - }() - - secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey) - if err != nil { - return nil, nil, ErrRejected{ - conn: c, - err: fmt.Errorf("secret conn failed: %v", err), - isAuthFailure: true, - } - } - - // For outgoing conns, ensure connection key matches dialed key. - connID := PubKeyToID(secretConn.RemotePubKey()) - if dialedAddr != nil { - if dialedID := dialedAddr.ID; connID != dialedID { - return nil, nil, ErrRejected{ - conn: c, - id: connID, - err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) mismatch", - connID, - dialedID, - ), - isAuthFailure: true, - } - } - } - - nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo) - if err != nil { - return nil, nil, ErrRejected{ - conn: c, - err: fmt.Errorf("handshake failed: %v", err), - isAuthFailure: true, - } - } - - if err := nodeInfo.Validate(); err != nil { - return nil, nil, ErrRejected{ - conn: c, - err: err, - isNodeInfoInvalid: true, - } - } - - // Ensure connection key matches self reported key. - if connID != nodeInfo.ID() { - return nil, nil, ErrRejected{ - conn: c, - id: connID, - err: fmt.Errorf( - "conn.ID (%v) NodeInfo.ID (%v) mismatch", - connID, - nodeInfo.ID(), - ), - isAuthFailure: true, - } - } - - // Reject self. 
- if mt.nodeInfo.ID() == nodeInfo.ID() { - return nil, nil, ErrRejected{ - addr: *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()), - conn: c, - id: nodeInfo.ID(), - isSelf: true, - } - } - - if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil { - return nil, nil, ErrRejected{ - conn: c, - err: err, - id: nodeInfo.ID(), - isIncompatible: true, - } - } - - return secretConn, nodeInfo, nil -} - -func (mt *MultiplexTransport) wrapPeer( - c net.Conn, - ni NodeInfo, - cfg peerConfig, - socketAddr *NetAddress, -) Peer { - - persistent := false - if cfg.isPersistent != nil { - if cfg.outbound { - persistent = cfg.isPersistent(socketAddr) - } else { - selfReportedAddr, err := ni.NetAddress() - if err == nil { - persistent = cfg.isPersistent(selfReportedAddr) - } - } - } - - peerConn := newPeerConn( - cfg.outbound, - persistent, - c, - socketAddr, - ) - - p := newPeer( - peerConn, - mt.mConfig, - ni, - cfg.reactorsByCh, - cfg.msgTypeByChID, - cfg.chDescs, - cfg.onPeerError, - cfg.mlc, - PeerMetrics(cfg.metrics), - ) - - return p -} - -func handshake( - c net.Conn, - timeout time.Duration, - nodeInfo NodeInfo, -) (NodeInfo, error) { - if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { - return nil, err - } - - var ( - errc = make(chan error, 2) - - pbpeerNodeInfo tmp2p.DefaultNodeInfo - peerNodeInfo DefaultNodeInfo - ourNodeInfo = nodeInfo.(DefaultNodeInfo) - ) - - go func(errc chan<- error, c net.Conn) { - _, err := protoio.NewDelimitedWriter(c).WriteMsg(ourNodeInfo.ToProto()) - errc <- err - }(errc, c) - go func(errc chan<- error, c net.Conn) { - protoReader := protoio.NewDelimitedReader(c, MaxNodeInfoSize()) - _, err := protoReader.ReadMsg(&pbpeerNodeInfo) - errc <- err - }(errc, c) - - for i := 0; i < cap(errc); i++ { - err := <-errc - if err != nil { - return nil, err - } - } - - peerNodeInfo, err := DefaultNodeInfoFromToProto(&pbpeerNodeInfo) - if err != nil { - return nil, err - } - - return peerNodeInfo, c.SetDeadline(time.Time{}) -} - -func upgradeSecretConn( - c net.Conn, - timeout time.Duration, - privKey crypto.PrivKey, -) (*conn.SecretConnection, error) { - if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { - return nil, err - } - - sc, err := conn.MakeSecretConnection(c, privKey) - if err != nil { - return nil, err - } - - return sc, sc.SetDeadline(time.Time{}) -} - -func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) { - host, _, err := net.SplitHostPort(c.RemoteAddr().String()) - if err != nil { - return nil, err - } - - addrs, err := resolver.LookupIPAddr(context.Background(), host) - if err != nil { - return nil, err - } + // NetAddr returns the network address of the local node. + NetAddr() na.NetAddr - ips := []net.IP{} + // Accept waits for and returns the next connection to the local node. + Accept() (net.Conn, *na.NetAddr, error) - for _, addr := range addrs { - ips = append(ips, addr.IP) - } + // Dial dials the given address and returns a connection. + Dial(addr na.NetAddr) (net.Conn, error) - return ips, nil + // Cleanup any resources associated with the given connection. + // + // Must be run when the peer is dropped for any reason. 
+ Cleanup(conn net.Conn) error } diff --git a/p2p/transport/tcp/conn/channel_descriptor.go b/p2p/transport/tcp/conn/channel_descriptor.go new file mode 100644 index 00000000000..77ee32b8558 --- /dev/null +++ b/p2p/transport/tcp/conn/channel_descriptor.go @@ -0,0 +1,41 @@ +package conn + +import "github.com/cosmos/gogoproto/proto" + +const ( + defaultRecvBufferCapacity = 4096 + defaultRecvMessageCapacity = 22020096 // 21MB +) + +type ChannelDescriptor struct { + ID byte + Priority int + SendQueueCapacity int + RecvBufferCapacity int + RecvMessageCapacity int + MessageTypeI proto.Message +} + +// StreamID returns the channel ID. Implements p2p.StreamDescriptor. +func (d ChannelDescriptor) StreamID() byte { + return d.ID +} + +// MessageType returns the message type. Implements p2p.StreamDescriptor. +func (d ChannelDescriptor) MessageType() proto.Message { + return d.MessageTypeI +} + +func (d ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { + if d.SendQueueCapacity == 0 { + d.SendQueueCapacity = defaultSendQueueCapacity + } + if d.RecvBufferCapacity == 0 { + d.RecvBufferCapacity = defaultRecvBufferCapacity + } + if d.RecvMessageCapacity == 0 { + d.RecvMessageCapacity = defaultRecvMessageCapacity + } + filled = d + return filled +} diff --git a/p2p/transport/tcp/conn/conn_go110.go b/p2p/transport/tcp/conn/conn_go110.go new file mode 100644 index 00000000000..e39f3fed828 --- /dev/null +++ b/p2p/transport/tcp/conn/conn_go110.go @@ -0,0 +1,7 @@ +package conn + +import "net" + +func NetPipe() (net.Conn, net.Conn) { + return net.Pipe() +} diff --git a/p2p/conn/connection.go b/p2p/transport/tcp/conn/connection.go similarity index 83% rename from p2p/conn/connection.go rename to p2p/transport/tcp/conn/connection.go index 7bd8e34dc13..72541f0c3c5 100644 --- a/p2p/conn/connection.go +++ b/p2p/transport/tcp/conn/connection.go @@ -14,15 +14,14 @@ import ( "github.com/cosmos/gogoproto/proto" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" "github.com/cometbft/cometbft/config" - flow "github.com/cometbft/cometbft/libs/flowrate" + flow "github.com/cometbft/cometbft/internal/flowrate" + "github.com/cometbft/cometbft/internal/timer" "github.com/cometbft/cometbft/libs/log" - cmtmath "github.com/cometbft/cometbft/libs/math" "github.com/cometbft/cometbft/libs/protoio" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/libs/timer" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) const ( @@ -35,22 +34,20 @@ const ( // some of these defaults are written in the user config // flushThrottle, sendRate, recvRate - // TODO: remove values present in config - defaultFlushThrottle = 100 * time.Millisecond - - defaultSendQueueCapacity = 1 - defaultRecvBufferCapacity = 4096 - defaultRecvMessageCapacity = 22020096 // 21MB - defaultSendRate = int64(512000) // 500KB/s - defaultRecvRate = int64(512000) // 500KB/s - defaultSendTimeout = 10 * time.Second - defaultPingInterval = 60 * time.Second - defaultPongTimeout = 45 * time.Second + // TODO: remove values present in config. 
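Editorial note: `FillDefaults` in the new channel_descriptor.go uses a value receiver and returns the filled copy, so the result must be assigned back; explicitly set fields survive, zero fields pick up the package defaults. A usage sketch against the new `p2p/transport/tcp/conn` package:

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/p2p/transport/tcp/conn"
)

func main() {
	desc := conn.ChannelDescriptor{
		ID:                0x40,
		Priority:          5,
		SendQueueCapacity: 100, // explicitly set: preserved by FillDefaults
		// RecvBufferCapacity and RecvMessageCapacity left zero on purpose.
	}
	// Value receiver returning the filled copy: assign the result back.
	desc = desc.FillDefaults()
	fmt.Println(desc.SendQueueCapacity)   // 100 (kept)
	fmt.Println(desc.RecvBufferCapacity)  // 4096 (defaultRecvBufferCapacity)
	fmt.Println(desc.RecvMessageCapacity) // 22020096 (default, 21MB)
}
```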
+ defaultFlushThrottle = 10 * time.Millisecond + + defaultSendQueueCapacity = 1 + defaultSendRate = int64(512000) // 500KB/s + defaultRecvRate = int64(512000) // 500KB/s + defaultSendTimeout = 10 * time.Second + defaultPingInterval = 60 * time.Second + defaultPongTimeout = 45 * time.Second ) type ( receiveCbFunc func(chID byte, msgBytes []byte) - errorCbFunc func(interface{}) + errorCbFunc func(any) ) /* @@ -155,7 +152,7 @@ func DefaultMConnConfig() MConnConfig { } } -// NewMConnection wraps net.Conn and creates multiplex connection +// NewMConnection wraps net.Conn and creates multiplex connection. func NewMConnection( conn net.Conn, chDescs []*ChannelDescriptor, @@ -170,7 +167,7 @@ func NewMConnection( DefaultMConnConfig()) } -// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config +// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config. func NewMConnectionWithConfig( conn net.Conn, chDescs []*ChannelDescriptor, @@ -223,7 +220,7 @@ func (c *MConnection) SetLogger(l log.Logger) { } } -// OnStart implements BaseService +// OnStart implements BaseService. func (c *MConnection) OnStart() error { if err := c.BaseService.OnStart(); err != nil { return err @@ -290,9 +287,10 @@ func (c *MConnection) FlushStop() { // Send and flush all pending msgs. // Since sendRoutine has exited, we can call this // safely - eof := c.sendSomePacketMsgs() + w := protoio.NewDelimitedWriter(c.bufConnWriter) + eof := c.sendSomePacketMsgs(w) for !eof { - eof = c.sendSomePacketMsgs() + eof = c.sendSomePacketMsgs(w) } c.flush() @@ -309,7 +307,7 @@ func (c *MConnection) FlushStop() { // c.Stop() } -// OnStop implements BaseService +// OnStop implements BaseService. func (c *MConnection) OnStop() { if c.stopServices() { return @@ -343,7 +341,7 @@ func (c *MConnection) _recover() { } } -func (c *MConnection) stopForError(r interface{}) { +func (c *MConnection) stopForError(r any) { if err := c.Stop(); err != nil { c.Logger.Error("Error stopping connection", "err", err) } @@ -481,7 +479,7 @@ FOR_LOOP: break FOR_LOOP case <-c.send: // Send some PacketMsgs - eof := c.sendSomePacketMsgs() + eof := c.sendSomePacketMsgs(protoWriter) if !eof { // Keep sendRoutine awake. select { @@ -508,56 +506,79 @@ FOR_LOOP: // Returns true if messages from channels were exhausted. // Blocks in accordance to .sendMonitor throttling. -func (c *MConnection) sendSomePacketMsgs() bool { +func (c *MConnection) sendSomePacketMsgs(w protoio.Writer) bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. - c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + c.sendMonitor.Limit(c._maxPacketMsgSize, c.config.SendRate, true) // Now send some PacketMsgs. - for i := 0; i < numBatchPacketMsgs; i++ { - if c.sendPacketMsg() { + return c.sendBatchPacketMsgs(w, numBatchPacketMsgs) +} + +// Returns true if messages from channels were exhausted. +func (c *MConnection) sendBatchPacketMsgs(w protoio.Writer, batchSize int) bool { + // Send a batch of PacketMsgs. + totalBytesWritten := 0 + defer func() { + if totalBytesWritten > 0 { + c.sendMonitor.Update(totalBytesWritten) + } + }() + for i := 0; i < batchSize; i++ { + channel := selectChannelToGossipOn(c.channels) + // nothing to send across any channel. 
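Editorial note: `sendBatchPacketMsgs` restructures the rate accounting around batches: `sendMonitor.Limit` is consulted once up front, and a deferred `sendMonitor.Update` reports the whole batch's bytes in one call instead of once per packet. A standalone sketch of that limit-then-batch-then-update shape, with a trivial stand-in in place of the real flowrate monitor:

```go
// Batch accounting shape from sendBatchPacketMsgs: block on the limiter
// once, write up to batchSize packets, report total bytes in one Update.
package main

import "fmt"

type monitor struct{ total int }

// Limit would block until the configured rate allows a write; elided here.
func (*monitor) Limit(wantBytes int, rate int64, block bool) {}

// Update records the bytes actually written.
func (m *monitor) Update(n int) { m.total += n }

func sendBatch(m *monitor, packets [][]byte, batchSize int) bool {
	m.Limit(1024, 512000, true) // once per batch, not per packet

	totalBytesWritten := 0
	defer func() {
		if totalBytesWritten > 0 {
			m.Update(totalBytesWritten) // single Update for the whole batch
		}
	}()
	for i := 0; i < batchSize && i < len(packets); i++ {
		totalBytesWritten += len(packets[i]) // stands in for writePacketMsgTo
	}
	return len(packets) <= batchSize // true means channels were exhausted
}

func main() {
	m := &monitor{}
	exhausted := sendBatch(m, [][]byte{{1, 2}, {3, 4, 5}}, 10)
	fmt.Println(m.total, exhausted) // 5 true
}
```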
+ if channel == nil { return true } + bytesWritten, err := c.sendPacketMsgOnChannel(w, channel) + if err { + return true + } + totalBytesWritten += bytesWritten } return false } -// Returns true if messages from channels were exhausted. -func (c *MConnection) sendPacketMsg() bool { +// selects a channel to gossip our next message on. +// TODO: Make "batchChannelToGossipOn", so we can do our proto marshaling overheads in parallel, +// and we can avoid re-checking for `isSendPending`. +// We can easily mock the recentlySent differences for the batch choosing. +func selectChannelToGossipOn(channels []*Channel) *Channel { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. var leastRatio float32 = math.MaxFloat32 var leastChannel *Channel - for _, channel := range c.channels { + for _, channel := range channels { // If nothing to send, skip this channel + // TODO: Skip continually looking for isSendPending on channels we've already skipped in this batch-send. if !channel.isSendPending() { continue } // Get ratio, and keep track of lowest ratio. + // TODO: RecentlySent right now is bytes. This should be refactored to num messages to fix + // gossip prioritization bugs. ratio := float32(channel.recentlySent) / float32(channel.desc.Priority) if ratio < leastRatio { leastRatio = ratio leastChannel = channel } } + return leastChannel +} - // Nothing to send? - if leastChannel == nil { - return true - } - // c.Logger.Info("Found a msgPacket to send") - +// returns (num_bytes_written, error_occurred). +func (c *MConnection) sendPacketMsgOnChannel(w protoio.Writer, sendChannel *Channel) (int, bool) { // Make & send a PacketMsg from this channel - _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) + n, err := sendChannel.writePacketMsgTo(w) if err != nil { c.Logger.Error("Failed to write PacketMsg", "err", err) c.stopForError(err) - return true + return n, true } - c.sendMonitor.Update(_n) + // TODO: Change this to only add flush signals at the start and end of the batch. c.flushTimer.Set() - return false + return n, false } // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer. @@ -595,7 +616,7 @@ FOR_LOOP: c.recvMonitor.Update(_n) if err != nil { // stopServices was invoked and we are shutting down - // receiving is excpected to fail since we will close the connection + // receiving is expected to fail since we will close the connection select { case <-c.quitRecvRoutine: break FOR_LOOP @@ -603,7 +624,7 @@ FOR_LOOP: } if c.IsRunning() { - if err == io.EOF { + if errors.Is(err, io.EOF) { c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c) } else { c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) @@ -664,13 +685,9 @@ FOR_LOOP: // Cleanup close(c.pong) - //nolint:revive - for range c.pong { - // Drain - } } -// not goroutine-safe +// not goroutine-safe. func (c *MConnection) stopPongTimer() { if c.pongTimer != nil { _ = c.pongTimer.Stop() @@ -678,7 +695,7 @@ func (c *MConnection) stopPongTimer() { } } -// maxPacketMsgSize returns a maximum size of PacketMsg +// maxPacketMsgSize returns a maximum size of PacketMsg. 
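`selectChannelToGossipOn` picks the pending channel with the smallest `recentlySent/priority` ratio, so a high-priority channel regains sending rights faster after a burst than a low-priority one. A self-contained sketch of the same arithmetic over toy values (the names and numbers are invented for illustration, not taken from the diff):

```go
package main

import (
	"fmt"
	"math"
)

// toyChannel mirrors just the fields the selector inspects.
type toyChannel struct {
	name         string
	recentlySent int64 // exponential moving average of bytes sent
	priority     int
	sendPending  bool
}

func main() {
	channels := []toyChannel{
		{"consensus", 40_000, 10, true}, // ratio 4000
		{"mempool", 30_000, 5, true},    // ratio 6000
		{"evidence", 0, 1, false},       // skipped: nothing queued
	}

	leastRatio := float32(math.MaxFloat32)
	var pick string
	for _, ch := range channels {
		if !ch.sendPending {
			continue // nothing to send on this channel
		}
		ratio := float32(ch.recentlySent) / float32(ch.priority)
		if ratio < leastRatio {
			leastRatio, pick = ratio, ch.name
		}
	}
	fmt.Println(pick) // consensus: lowest recentlySent/priority wins
}
```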
func (c *MConnection) maxPacketMsgSize() int { bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{ ChannelID: 0x01, @@ -713,7 +730,6 @@ func (c *MConnection) Status() ConnectionStatus { status.RecvMonitor = c.recvMonitor.Status() status.Channels = make([]ChannelStatus, len(c.channels)) for i, channel := range c.channels { - channel := channel status.Channels[i] = ChannelStatus{ ID: channel.desc.ID, SendQueueCapacity: cap(channel.sendQueue), @@ -725,30 +741,7 @@ func (c *MConnection) Status() ConnectionStatus { return status } -//----------------------------------------------------------------------------- - -type ChannelDescriptor struct { - ID byte - Priority int - SendQueueCapacity int - RecvBufferCapacity int - RecvMessageCapacity int - MessageType proto.Message -} - -func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { - if chDesc.SendQueueCapacity == 0 { - chDesc.SendQueueCapacity = defaultSendQueueCapacity - } - if chDesc.RecvBufferCapacity == 0 { - chDesc.RecvBufferCapacity = defaultRecvBufferCapacity - } - if chDesc.RecvMessageCapacity == 0 { - chDesc.RecvMessageCapacity = defaultRecvMessageCapacity - } - filled = chDesc - return -} +// ----------------------------------------------------------------------------- // TODO: lowercase. // NOTE: not goroutine-safe. @@ -761,6 +754,10 @@ type Channel struct { sending []byte recentlySent int64 // exponential moving average + nextPacketMsg *tmp2p.PacketMsg + nextP2pWrapperPacketMsg *tmp2p.Packet_PacketMsg + nextPacket *tmp2p.Packet + maxPacketMsgPayloadSize int Logger log.Logger @@ -776,6 +773,9 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), + nextPacketMsg: &tmp2p.PacketMsg{ChannelID: int32(desc.ID)}, + nextP2pWrapperPacketMsg: &tmp2p.Packet_PacketMsg{}, + nextPacket: &tmp2p.Packet{}, maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, } } @@ -786,7 +786,7 @@ func (ch *Channel) SetLogger(l log.Logger) { // Queues message to send to this channel. // Goroutine-safe -// Times out (and returns false) after defaultSendTimeout +// Times out (and returns false) after defaultSendTimeout. func (ch *Channel) sendBytes(bytes []byte) bool { select { case ch.sendQueue <- bytes: @@ -799,7 +799,7 @@ func (ch *Channel) sendBytes(bytes []byte) bool { // Queues message to send to this channel. // Nonblocking, returns true if successful. -// Goroutine-safe +// Goroutine-safe. func (ch *Channel) trySendBytes(bytes []byte) bool { select { case ch.sendQueue <- bytes: @@ -810,7 +810,7 @@ func (ch *Channel) trySendBytes(bytes []byte) bool { } } -// Goroutine-safe +// Goroutine-safe. func (ch *Channel) loadSendQueueSize() (size int) { return int(atomic.LoadInt32(&ch.sendQueueSize)) } @@ -822,8 +822,8 @@ func (ch *Channel) canSend() bool { } // Returns true if any PacketMsgs are pending to be sent. -// Call before calling nextPacketMsg() -// Goroutine-safe +// Call before calling updateNextPacket +// Goroutine-safe. func (ch *Channel) isSendPending() bool { if len(ch.sending) == 0 { if len(ch.sendQueue) == 0 { @@ -834,41 +834,48 @@ func (ch *Channel) isSendPending() bool { return true } -// Creates a new PacketMsg to send. -// Not goroutine-safe -func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { - packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)} +// Updates the nextPacket proto message for us to send. +// Not goroutine-safe. 
+func (ch *Channel) updateNextPacket() { maxSize := ch.maxPacketMsgPayloadSize - packet.Data = ch.sending[:cmtmath.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { - packet.EOF = true + ch.nextPacketMsg.Data = ch.sending + ch.nextPacketMsg.EOF = true ch.sending = nil atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize } else { - packet.EOF = false - ch.sending = ch.sending[cmtmath.MinInt(maxSize, len(ch.sending)):] + ch.nextPacketMsg.Data = ch.sending[:maxSize] + ch.nextPacketMsg.EOF = false + ch.sending = ch.sending[maxSize:] } - return packet + + ch.nextP2pWrapperPacketMsg.PacketMsg = ch.nextPacketMsg + ch.nextPacket.Sum = ch.nextP2pWrapperPacketMsg } // Writes next PacketMsg to w and updates c.recentlySent. -// Not goroutine-safe -func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { - packet := ch.nextPacketMsg() - n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet)) +// Not goroutine-safe. +func (ch *Channel) writePacketMsgTo(w protoio.Writer) (n int, err error) { + ch.updateNextPacket() + n, err = w.WriteMsg(ch.nextPacket) + if err != nil { + err = ErrPacketWrite{Source: err} + } + atomic.AddInt64(&ch.recentlySent, int64(n)) - return + return n, err } // Handles incoming PacketMsgs. It returns a message bytes if message is // complete. NOTE message bytes may change on next call to recvPacketMsg. -// Not goroutine-safe +// Not goroutine-safe. func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) recvCap, recvReceived := ch.desc.RecvMessageCapacity, len(ch.recving)+len(packet.Data) if recvCap < recvReceived { - return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived) + return nil, ErrPacketTooBig{Max: recvCap, Received: recvReceived} } + ch.recving = append(ch.recving, packet.Data...) if packet.EOF { msgBytes := ch.recving @@ -884,44 +891,38 @@ func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { } // Call this periodically to update stats for throttling purposes. -// Not goroutine-safe +// Not goroutine-safe. func (ch *Channel) updateStats() { // Exponential decay of stats. // TODO: optimize. atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8)) } -//---------------------------------------- +// ---------------------------------------- // Packet // mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message. 
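`updateNextPacket` now fills a `PacketMsg` owned by the `Channel` (preallocated in `newChannel` above) instead of allocating a fresh envelope per fragment, and the `EOF` flag marks the final fragment of a logical message. The fragmentation rule is easy to state in isolation; in this sketch 1024 stands in for `MaxPacketMsgPayloadSize`, and the loop is an illustration rather than the package code:

```go
package main

import "fmt"

func main() {
	const maxSize = 1024 // stand-in for config.MaxPacketMsgPayloadSize
	sending := make([]byte, 2500)

	for len(sending) > maxSize {
		fmt.Printf("PacketMsg: %d bytes, EOF=false\n", maxSize)
		sending = sending[maxSize:]
	}
	fmt.Printf("PacketMsg: %d bytes, EOF=true\n", len(sending))
	// 1024/false, 1024/false, 452/true: the receiver appends fragments
	// to its recving buffer and only surfaces the message on EOF.
}
```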
func mustWrapPacket(pb proto.Message) *tmp2p.Packet { - var msg tmp2p.Packet + msg := &tmp2p.Packet{} + mustWrapPacketInto(pb, msg) + return msg +} +func mustWrapPacketInto(pb proto.Message, dst *tmp2p.Packet) { switch pb := pb.(type) { - case *tmp2p.Packet: // already a packet - msg = *pb case *tmp2p.PacketPing: - msg = tmp2p.Packet{ - Sum: &tmp2p.Packet_PacketPing{ - PacketPing: pb, - }, + dst.Sum = &tmp2p.Packet_PacketPing{ + PacketPing: pb, } case *tmp2p.PacketPong: - msg = tmp2p.Packet{ - Sum: &tmp2p.Packet_PacketPong{ - PacketPong: pb, - }, + dst.Sum = &tmp2p.Packet_PacketPong{ + PacketPong: pb, } case *tmp2p.PacketMsg: - msg = tmp2p.Packet{ - Sum: &tmp2p.Packet_PacketMsg{ - PacketMsg: pb, - }, + dst.Sum = &tmp2p.Packet_PacketMsg{ + PacketMsg: pb, } default: panic(fmt.Errorf("unknown packet type %T", pb)) } - - return &msg } diff --git a/p2p/conn/connection_test.go b/p2p/transport/tcp/conn/connection_test.go similarity index 90% rename from p2p/conn/connection_test.go rename to p2p/transport/tcp/conn/connection_test.go index 731fe5c5459..661af80b8f7 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/transport/tcp/conn/connection_test.go @@ -11,18 +11,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + pbtypes "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/protoio" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" - "github.com/cometbft/cometbft/proto/tendermint/types" ) const maxPingPongPacketSize = 1024 // bytes func createTestMConnection(conn net.Conn) *MConnection { - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(_ byte, _ []byte) { } - onError := func(r interface{}) { + onError := func(_ any) { } c := createMConnectionWithCallbacks(conn, onReceive, onError) c.SetLogger(log.TestingLogger()) @@ -32,7 +32,7 @@ func createTestMConnection(conn net.Conn) *MConnection { func createMConnectionWithCallbacks( conn net.Conn, onReceive func(chID byte, msgBytes []byte), - onError func(r interface{}), + onError func(r any), ) *MConnection { cfg := DefaultMConnConfig() cfg.PingInterval = 90 * time.Millisecond @@ -50,7 +50,7 @@ func TestMConnectionSendFlushStop(t *testing.T) { clientConn := createTestMConnection(client) err := clientConn.Start() - require.Nil(t, err) + require.NoError(t, err) defer clientConn.Stop() //nolint:errcheck // ignore for tests msg := []byte("abc") @@ -88,7 +88,7 @@ func TestMConnectionSend(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests msg := []byte("Ant-Man") @@ -118,21 +118,21 @@ func TestMConnectionReceive(t *testing.T) { defer client.Close() receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + errorsCh := make(chan any) + onReceive := func(_ byte, msgBytes []byte) { receivedCh <- msgBytes } - onError := func(r interface{}) { + onError := func(r any) { errorsCh <- r } mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn1.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn1.Stop() //nolint:errcheck // ignore for tests mconn2 := createTestMConnection(server) err = mconn2.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn2.Stop() //nolint:errcheck // ignore for tests msg := []byte("Cyclops") @@ -155,7 
+155,7 @@ func TestMConnectionStatus(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests status := mconn.Status() @@ -169,16 +169,16 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { defer client.Close() receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + errorsCh := make(chan any) + onReceive := func(_ byte, msgBytes []byte) { receivedCh <- msgBytes } - onError := func(r interface{}) { + onError := func(r any) { errorsCh <- r } mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests serverGotPing := make(chan struct{}) @@ -208,16 +208,16 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { defer client.Close() receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + errorsCh := make(chan any) + onReceive := func(_ byte, msgBytes []byte) { receivedCh <- msgBytes } - onError := func(r interface{}) { + onError := func(r any) { errorsCh <- r } mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests // sending 3 pongs in a row (abuse) @@ -263,16 +263,16 @@ func TestMConnectionMultiplePings(t *testing.T) { defer client.Close() receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + errorsCh := make(chan any) + onReceive := func(_ byte, msgBytes []byte) { receivedCh <- msgBytes } - onError := func(r interface{}) { + onError := func(r any) { errorsCh <- r } mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests // sending 3 pings in a row (abuse) @@ -312,16 +312,16 @@ func TestMConnectionPingPongs(t *testing.T) { defer client.Close() receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + errorsCh := make(chan any) + onReceive := func(_ byte, msgBytes []byte) { receivedCh <- msgBytes } - onError := func(r interface{}) { + onError := func(r any) { errorsCh <- r } mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests serverGotPing := make(chan struct{}) @@ -370,16 +370,16 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { defer client.Close() receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + errorsCh := make(chan any) + onReceive := func(_ byte, msgBytes []byte) { receivedCh <- msgBytes } - onError := func(r interface{}) { + onError := func(r any) { errorsCh <- r } mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests if err := client.Close(); err != nil { @@ -398,10 +398,11 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { } func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) 
(*MConnection, *MConnection) { + t.Helper() server, client := NetPipe() - onReceive := func(chID byte, msgBytes []byte) {} - onError := func(r interface{}) {} + onReceive := func(_ byte, _ []byte) {} + onError := func(_ any) {} // create client conn with two channels chDescs := []*ChannelDescriptor{ @@ -411,18 +412,18 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) ( mconnClient := NewMConnection(client, chDescs, onReceive, onError) mconnClient.SetLogger(log.TestingLogger().With("module", "client")) err := mconnClient.Start() - require.Nil(t, err) + require.NoError(t, err) // create server conn with 1 channel // it fires on chOnErr when there's an error serverLogger := log.TestingLogger().With("module", "server") - onError = func(r interface{}) { + onError = func(_ any) { chOnErr <- struct{}{} } mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) mconnServer.SetLogger(serverLogger) err = mconnServer.Start() - require.Nil(t, err) + require.NoError(t, err) return mconnClient, mconnServer } @@ -495,15 +496,15 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { defer mconnClient.Stop() //nolint:errcheck // ignore for tests defer mconnServer.Stop() //nolint:errcheck // ignore for tests - mconnServer.onReceive = func(chID byte, msgBytes []byte) { + mconnServer.onReceive = func(_ byte, _ []byte) { chOnRcv <- struct{}{} } client := mconnClient.conn protoWriter := protoio.NewDelimitedWriter(client) - // send msg thats just right - var packet = tmp2p.PacketMsg{ + // send msg that's just right + packet := tmp2p.PacketMsg{ ChannelID: 0x01, EOF: true, Data: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), @@ -513,7 +514,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { require.NoError(t, err) assert.True(t, expectSend(chOnRcv), "msg just right") - // send msg thats too long + // send msg that's too long packet = tmp2p.PacketMsg{ ChannelID: 0x01, EOF: true, @@ -532,7 +533,7 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { defer mconnServer.Stop() //nolint:errcheck // ignore for tests // send msg with unknown msg type - _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"}) + _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&pbtypes.Header{ChainID: "x"}) require.NoError(t, err) assert.True(t, expectSend(chOnErr), "unknown msg type") } @@ -544,7 +545,7 @@ func TestMConnectionTrySend(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() - require.Nil(t, err) + require.NoError(t, err) defer mconn.Stop() //nolint:errcheck // ignore for tests msg := []byte("Semicolon-Woman") @@ -566,7 +567,6 @@ func TestMConnectionTrySend(t *testing.T) { //nolint:lll //ignore line length for tests func TestConnVectors(t *testing.T) { - testCases := []struct { testName string msg proto.Message @@ -578,8 +578,6 @@ func TestConnVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - pm := mustWrapPacket(tc.msg) bz, err := pm.Marshal() require.NoError(t, err, tc.testName) @@ -595,14 +593,14 @@ func TestMConnectionChannelOverflow(t *testing.T) { mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) - mconnServer.onReceive = func(chID byte, msgBytes []byte) { + mconnServer.onReceive = func(_ byte, _ []byte) { chOnRcv <- struct{}{} } client := mconnClient.conn protoWriter := protoio.NewDelimitedWriter(client) - var packet = tmp2p.PacketMsg{ + packet := tmp2p.PacketMsg{ 
ChannelID: 0x01, EOF: true, Data: []byte(`42`), @@ -615,7 +613,6 @@ func TestMConnectionChannelOverflow(t *testing.T) { _, err = protoWriter.WriteMsg(mustWrapPacket(&packet)) require.NoError(t, err) assert.False(t, expectSend(chOnRcv)) - } type stopper interface { @@ -623,6 +620,7 @@ type stopper interface { } func stopAll(t *testing.T, stoppers ...stopper) func() { + t.Helper() return func() { for _, s := range stoppers { if err := s.Stop(); err != nil { diff --git a/p2p/transport/tcp/conn/errors.go b/p2p/transport/tcp/conn/errors.go new file mode 100644 index 00000000000..01ee5586a1d --- /dev/null +++ b/p2p/transport/tcp/conn/errors.go @@ -0,0 +1,64 @@ +package conn + +import ( + "errors" + "fmt" +) + +var ( + ErrInvalidSecretConnKeySend = errors.New("send invalid secret connection key") + ErrInvalidSecretConnKeyRecv = errors.New("invalid receive SecretConnection Key") + ErrChallengeVerification = errors.New("challenge verification failed") +) + +// ErrPacketWrite Packet error when writing. +type ErrPacketWrite struct { + Source error +} + +func (e ErrPacketWrite) Error() string { + return fmt.Sprintf("failed to write packet message: %v", e.Source) +} + +func (e ErrPacketWrite) Unwrap() error { + return e.Source +} + +type ErrUnexpectedPubKeyType struct { + Expected string + Got string +} + +func (e ErrUnexpectedPubKeyType) Error() string { + return fmt.Sprintf("expected pubkey type %s, got %s", e.Expected, e.Got) +} + +type ErrDecryptFrame struct { + Source error +} + +func (e ErrDecryptFrame) Error() string { + return fmt.Sprintf("SecretConnection: failed to decrypt the frame: %v", e.Source) +} + +func (e ErrDecryptFrame) Unwrap() error { + return e.Source +} + +type ErrPacketTooBig struct { + Received int + Max int +} + +func (e ErrPacketTooBig) Error() string { + return fmt.Sprintf("received message exceeds available capacity (max: %d, got: %d)", e.Max, e.Received) +} + +type ErrChunkTooBig struct { + Received int + Max int +} + +func (e ErrChunkTooBig) Error() string { + return fmt.Sprintf("chunk too big (max: %d, got %d)", e.Max, e.Received) +} diff --git a/p2p/conn/evil_secret_connection_test.go b/p2p/transport/tcp/conn/evil_secret_connection_test.go similarity index 81% rename from p2p/conn/evil_secret_connection_test.go rename to p2p/transport/tcp/conn/evil_secret_connection_test.go index 6cb3a9b71db..6aeeb6b661a 100644 --- a/p2p/conn/evil_secret_connection_test.go +++ b/p2p/transport/tcp/conn/evil_secret_connection_test.go @@ -1,6 +1,7 @@ package conn import ( + "bufio" "bytes" "errors" "io" @@ -9,13 +10,14 @@ import ( gogotypes "github.com/cosmos/gogoproto/types" "github.com/oasisprotocol/curve25519-voi/primitives/merlin" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/crypto/chacha20poly1305" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/libs/protoio" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) type buffer struct { @@ -34,7 +36,7 @@ func (b *buffer) Bytes() []byte { return b.next.Bytes() } -func (b *buffer) Close() error { +func (*buffer) Close() error { return nil } @@ -176,7 +178,7 @@ func (c *evilConn) Write(data []byte) (n int, err error) { } } -func (c *evilConn) Close() error { +func (*evilConn) Close() error { return nil } @@ -221,12 +223,18 @@ func (c *evilConn) signChallenge() []byte { b := &buffer{} c.secretConn = 
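The new errors.go above replaces ad-hoc `fmt.Errorf` strings with typed errors, which callers can match structurally instead of by substring. A caller-side sketch, assuming the renamed `p2p/transport/tcp/conn` import path from this PR; the wrapping in `main` simulates an error travelling up through a receive path:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cometbft/cometbft/p2p/transport/tcp/conn"
)

func main() {
	// Pretend a receive routine wrapped the typed error on its way up.
	err := fmt.Errorf("recvRoutine: %w",
		conn.ErrPacketTooBig{Received: 30 << 20, Max: 22020096})

	var tooBig conn.ErrPacketTooBig
	if errors.As(err, &tooBig) {
		fmt.Printf("rejecting message: %d > %d\n", tooBig.Received, tooBig.Max)
	}
	if errors.Is(err, conn.ErrChallengeVerification) {
		fmt.Println("handshake failed") // not reached in this example
	}
}
```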
&SecretConnection{ - conn: b, - recvBuffer: nil, - recvNonce: new([aeadNonceSize]byte), - sendNonce: new([aeadNonceSize]byte), - recvAead: recvAead, - sendAead: sendAead, + conn: b, + connWriter: bufio.NewWriterSize(b, defaultWriteBufferSize), + connReader: b, + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + recvFrame: make([]byte, totalFrameSize), + recvSealedFrame: make([]byte, totalFrameSize+aeadSizeOverhead), + sendFrame: make([]byte, totalFrameSize), + sendSealedFrame: make([]byte, totalFrameSize+aeadSizeOverhead), } c.buffer = b @@ -243,28 +251,26 @@ func (c *evilConn) signChallenge() []byte { // MakeSecretConnection errors at different stages. func TestMakeSecretConnection(t *testing.T) { testCases := []struct { - name string - conn *evilConn - errMsg string + name string + conn *evilConn + checkError func(error) bool // Function to check if the error matches the expectation }{ - {"refuse to share ethimeral key", newEvilConn(false, false, false, false), "EOF"}, - {"share bad ethimeral key", newEvilConn(true, true, false, false), "wrong wireType"}, - {"refuse to share auth signature", newEvilConn(true, false, false, false), "EOF"}, - {"share bad auth signature", newEvilConn(true, false, true, true), "failed to decrypt SecretConnection"}, - {"all good", newEvilConn(true, false, true, false), ""}, + {"refuse to share ethimeral key", newEvilConn(false, false, false, false), func(err error) bool { return errors.Is(err, io.EOF) }}, + {"share bad ethimeral key", newEvilConn(true, true, false, false), func(err error) bool { return assert.Contains(t, err.Error(), "wrong wireType") }}, + {"refuse to share auth signature", newEvilConn(true, false, false, false), func(err error) bool { return errors.Is(err, io.EOF) }}, + {"share bad auth signature", newEvilConn(true, false, true, true), func(err error) bool { return errors.As(err, &ErrDecryptFrame{}) }}, + // fails with the introduction of changes PR #3419 + // {"all good", newEvilConn(true, false, true, false), func(err error) bool { return err == nil }}, } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { privKey := ed25519.GenPrivKey() _, err := MakeSecretConnection(tc.conn, privKey) - if tc.errMsg != "" { - if assert.Error(t, err) { - assert.Contains(t, err.Error(), tc.errMsg) - } + if tc.checkError != nil { + assert.True(t, tc.checkError(err)) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } diff --git a/p2p/conn/secret_connection.go b/p2p/transport/tcp/conn/secret_connection.go similarity index 76% rename from p2p/conn/secret_connection.go rename to p2p/transport/tcp/conn/secret_connection.go index 65bcc543af4..f92d9fa4ab0 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/transport/tcp/conn/secret_connection.go @@ -1,36 +1,35 @@ package conn import ( + "bufio" "bytes" "crypto/cipher" crand "crypto/rand" "crypto/sha256" "encoding/binary" "errors" - "fmt" "io" "math" "net" "time" gogotypes "github.com/cosmos/gogoproto/types" - pool "github.com/libp2p/go-buffer-pool" "github.com/oasisprotocol/curve25519-voi/primitives/merlin" "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/curve25519" "golang.org/x/crypto/hkdf" "golang.org/x/crypto/nacl/box" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" cryptoenc "github.com/cometbft/cometbft/crypto/encoding" - "github.com/cometbft/cometbft/libs/async" + 
"github.com/cometbft/cometbft/internal/async" "github.com/cometbft/cometbft/libs/protoio" cmtsync "github.com/cometbft/cometbft/libs/sync" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) -// 4 + 1024 == 1028 total frame size +// 4 + 1024 == 1028 total frame size. const ( dataLenSize = 4 dataMaxSize = 1024 @@ -43,31 +42,38 @@ const ( labelEphemeralUpperPublicKey = "EPHEMERAL_UPPER_PUBLIC_KEY" labelDHSecret = "DH_SECRET" labelSecretConnectionMac = "SECRET_CONNECTION_MAC" + + defaultWriteBufferSize = 128 * 1024 + // try to read the biggest logical packet we can get, in one read. + // biggest logical packet is encoding_overhead(64kb). + defaultReadBufferSize = 65 * 1024 ) var ( - ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer") - + ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer") secretConnKeyAndChallengeGen = []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN") ) // SecretConnection implements net.Conn. // It is an implementation of the STS protocol. -// See https://github.com/cometbft/cometbft/blob/0.1/docs/sts-final.pdf for -// details on the protocol. +// For more details regarding this implementation of the STS protocol, please refer to: +// https://github.com/cometbft/cometbft/blob/main/spec/p2p/legacy-docs/peer.md#authenticated-encryption-handshake. +// +// The original STS protocol, which inspired this implementation: +// https://citeseerx.ist.psu.edu/document?rapid=rep1&type=pdf&doi=b852bc961328ce74f7231a4b569eec1ab6c3cf50. # codespell:ignore // // Consumers of the SecretConnection are responsible for authenticating // the remote peer's pubkey against known information, like a nodeID. -// Otherwise they are vulnerable to MITM. -// (TODO(ismail): see also https://github.com/tendermint/tendermint/issues/3010) type SecretConnection struct { - // immutable recvAead cipher.AEAD sendAead cipher.AEAD remPubKey crypto.PubKey - conn io.ReadWriteCloser + + conn io.ReadWriteCloser + connWriter *bufio.Writer + connReader io.Reader // net.Conn must be thread safe: // https://golang.org/pkg/net/#Conn. @@ -76,23 +82,24 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx cmtsync.Mutex - recvBuffer []byte - recvNonce *[aeadNonceSize]byte - - sendMtx cmtsync.Mutex - sendNonce *[aeadNonceSize]byte + recvMtx cmtsync.Mutex + recvBuffer []byte + recvNonce *[aeadNonceSize]byte + recvFrame []byte + recvSealedFrame []byte + + sendMtx cmtsync.Mutex + sendNonce *[aeadNonceSize]byte + sendFrame []byte + sendSealedFrame []byte } // MakeSecretConnection performs handshake and returns a new authenticated // SecretConnection. // Returns nil if there is an error in handshake. -// Caller should call conn.Close() -// See docs/sts-final.pdf for more information. +// Caller should call conn.Close(). func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) { - var ( - locPubKey = locPrivKey.PubKey() - ) + locPubKey := locPrivKey.PubKey() // Generate ephemeral keys for perfect forward secrecy. locEphPub, locEphPriv := genEphKeys() @@ -113,8 +120,8 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* transcript.AppendMessage(labelEphemeralLowerPublicKey, loEphPub[:]) transcript.AppendMessage(labelEphemeralUpperPublicKey, hiEphPub[:]) - // Check if the local ephemeral public key was the least, lexicographically - // sorted. 
+ // Check if the local ephemeral public key was the least, + // lexicographically sorted. locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:]) // Compute common diffie hellman secret using X25519. @@ -125,9 +132,8 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* transcript.AppendMessage(labelDHSecret, dhSecret[:]) - // Generate the secret used for receiving, sending, challenge via HKDF-SHA2 - // on the transcript state (which itself also uses HKDF-SHA2 to derive a key - // from the dhSecret). + // Generate the secret used for receiving, sending, challenge via + // HKDF-SHA2 on the dhSecret. recvSecret, sendSecret := deriveSecrets(dhSecret, locIsLeast) const challengeSize = 32 @@ -136,20 +142,27 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* sendAead, err := chacha20poly1305.New(sendSecret[:]) if err != nil { - return nil, errors.New("invalid send SecretConnection Key") + return nil, ErrInvalidSecretConnKeySend } + recvAead, err := chacha20poly1305.New(recvSecret[:]) if err != nil { - return nil, errors.New("invalid receive SecretConnection Key") + return nil, ErrInvalidSecretConnKeyRecv } sc := &SecretConnection{ - conn: conn, - recvBuffer: nil, - recvNonce: new([aeadNonceSize]byte), - sendNonce: new([aeadNonceSize]byte), - recvAead: recvAead, - sendAead: sendAead, + conn: conn, + connWriter: bufio.NewWriterSize(conn, defaultWriteBufferSize), + connReader: bufio.NewReaderSize(conn, defaultReadBufferSize), + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + recvFrame: make([]byte, totalFrameSize), + recvSealedFrame: make([]byte, aeadSizeOverhead+totalFrameSize), + sendFrame: make([]byte, totalFrameSize), + sendSealedFrame: make([]byte, aeadSizeOverhead+totalFrameSize), } // Sign the challenge bytes for authentication. @@ -165,11 +178,16 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* } remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig + // Usage in your function if _, ok := remPubKey.(ed25519.PubKey); !ok { - return nil, fmt.Errorf("expected ed25519 pubkey, got %T", remPubKey) + return nil, ErrUnexpectedPubKeyType{ + Expected: ed25519.KeyType, + Got: remPubKey.Type(), + } } + if !remPubKey.VerifySignature(challenge[:], remSignature) { - return nil, errors.New("challenge verification failed") + return nil, ErrChallengeVerification } // We've authorized. @@ -177,7 +195,7 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* return sc, nil } -// RemotePubKey returns authenticated remote pubkey +// RemotePubKey returns authenticated remote pubkey. 
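The handshake above is symmetric, so both ends simply call `MakeSecretConnection` concurrently over the same pipe. A minimal pairing sketch using the `NetPipe` helper added earlier in this PR (error handling is trimmed and the goroutine plumbing is illustrative, not production code):

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/crypto/ed25519"
	"github.com/cometbft/cometbft/p2p/transport/tcp/conn"
)

func main() {
	a, b := conn.NetPipe()

	done := make(chan struct{})
	go func() {
		defer close(done)
		if _, err := conn.MakeSecretConnection(a, ed25519.GenPrivKey()); err != nil {
			fmt.Println("side A:", err)
		}
	}()

	sc, err := conn.MakeSecretConnection(b, ed25519.GenPrivKey())
	<-done
	if err != nil {
		fmt.Println("side B:", err)
		return
	}
	// The peer's long-term key is now authenticated and available.
	fmt.Println("remote pubkey:", sc.RemotePubKey())
}
```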
func (sc *SecretConnection) RemotePubKey() crypto.PubKey { return sc.remPubKey } @@ -187,15 +205,10 @@ func (sc *SecretConnection) RemotePubKey() crypto.PubKey { func (sc *SecretConnection) Write(data []byte) (n int, err error) { sc.sendMtx.Lock() defer sc.sendMtx.Unlock() + sealedFrame, frame := sc.sendSealedFrame, sc.sendFrame for 0 < len(data) { if err := func() error { - var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) - var frame = pool.Get(totalFrameSize) - defer func() { - pool.Put(sealedFrame) - pool.Put(frame) - }() var chunk []byte if dataMaxSize < len(data) { chunk = data[:dataMaxSize] @@ -213,16 +226,18 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { incrNonce(sc.sendNonce) // end encryption - _, err = sc.conn.Write(sealedFrame) + _, err = sc.connWriter.Write(sealedFrame) if err != nil { return err } + n += len(chunk) return nil }(); err != nil { return n, err } } + sc.connWriter.Flush() return n, err } @@ -239,31 +254,34 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { } // read off the conn - var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) - defer pool.Put(sealedFrame) - _, err = io.ReadFull(sc.conn, sealedFrame) + sealedFrame := sc.recvSealedFrame + _, err = io.ReadFull(sc.connReader, sealedFrame) if err != nil { return n, err } // decrypt the frame. // reads and updates the sc.recvNonce - var frame = pool.Get(totalFrameSize) - defer pool.Put(frame) + frame := sc.recvFrame _, err = sc.recvAead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil) if err != nil { - return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) + return n, ErrDecryptFrame{Source: err} } + incrNonce(sc.recvNonce) // end decryption // copy checkLength worth into data, // set recvBuffer to the rest. - var chunkLength = binary.LittleEndian.Uint32(frame) // read the first four bytes + chunkLength := binary.LittleEndian.Uint32(frame) // read the first four bytes if chunkLength > dataMaxSize { - return 0, errors.New("chunkLength is greater than dataMaxSize") + return 0, ErrChunkTooBig{ + Received: int(chunkLength), + Max: dataMaxSize, + } } - var chunk = frame[dataLenSize : dataLenSize+chunkLength] + + chunk := frame[dataLenSize : dataLenSize+chunkLength] n = copy(data, chunk) if n < len(chunk) { sc.recvBuffer = make([]byte, len(chunk)-n) @@ -272,7 +290,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { return n, err } -// Implements net.Conn +// Implements net.Conn. 
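With the constants above, every sealed frame on the wire has a fixed size regardless of how full it is: a 4-byte length prefix, up to 1024 data bytes, plus the 16-byte ChaCha20-Poly1305 tag (the 16 is the AEAD's standard overhead, stated here as background rather than quoted from this diff). A quick back-of-the-envelope check of the resulting overhead for one `Write` call:

```go
package main

import "fmt"

func main() {
	const (
		dataLenSize      = 4
		dataMaxSize      = 1024
		totalFrameSize   = dataLenSize + dataMaxSize // 1028
		aeadSizeOverhead = 16                        // Poly1305 tag
	)

	payload := 4096 // bytes handed to a single Write call
	frames := (payload + dataMaxSize - 1) / dataMaxSize
	onWire := frames * (totalFrameSize + aeadSizeOverhead)
	fmt.Printf("%d frames, %d bytes on the wire (+%.1f%%)\n",
		frames, onWire, 100*float64(onWire-payload)/float64(payload))
	// 4 frames, 4176 bytes on the wire (+2.0%)
}
```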
func (sc *SecretConnection) Close() error { return sc.conn.Close() } func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() } func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() } @@ -280,27 +298,24 @@ func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net func (sc *SecretConnection) SetReadDeadline(t time.Time) error { return sc.conn.(net.Conn).SetReadDeadline(t) } + func (sc *SecretConnection) SetWriteDeadline(t time.Time) error { return sc.conn.(net.Conn).SetWriteDeadline(t) } func genEphKeys() (ephPub, ephPriv *[32]byte) { var err error - // TODO: Probably not a problem but ask Tony: different from the rust implementation (uses x25519-dalek), - // we do not "clamp" the private key scalar: - // see: https://github.com/dalek-cryptography/x25519-dalek/blob/34676d336049df2bba763cc076a75e47ae1f170f/src/x25519.rs#L56-L74 ephPub, ephPriv, err = box.GenerateKey(crand.Reader) if err != nil { - panic("Could not generate ephemeral key-pair") + panic("failed to generate ephemeral key-pair") } - return + return ephPub, ephPriv } func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) { - // Send our pubkey and receive theirs in tandem. - var trs, _ = async.Parallel( - func(_ int) (val interface{}, abort bool, err error) { + trs, _ := async.Parallel( + func(_ int) (val any, abort bool, err error) { lc := *locEphPub _, err = protoio.NewDelimitedWriter(conn).WriteMsg(&gogotypes.BytesValue{Value: lc[:]}) if err != nil { @@ -308,7 +323,7 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt } return nil, false, nil }, - func(_ int) (val interface{}, abort bool, err error) { + func(_ int) (val any, abort bool, err error) { var bytes gogotypes.BytesValue _, err = protoio.NewDelimitedReader(conn, 1024*1024).ReadMsg(&bytes) if err != nil { @@ -328,7 +343,7 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt } // Otherwise: - var _remEphPub = trs.FirstValue().([32]byte) + _remEphPub := trs.FirstValue().([32]byte) return &_remEphPub, nil } @@ -360,7 +375,7 @@ func deriveSecrets( copy(recvSecret[:], res[aeadKeySize:aeadKeySize*2]) } - return + return recvSecret, sendSecret } // computeDHSecret computes a Diffie-Hellman shared secret key @@ -383,7 +398,7 @@ func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { lo = bar hi = foo } - return + return lo, hi } func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) ([]byte, error) { @@ -400,10 +415,9 @@ type authSigMessage struct { } func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte) (recvMsg authSigMessage, err error) { - // Send our info and receive theirs in tandem. 
- var trs, _ = async.Parallel( - func(_ int) (val interface{}, abort bool, err error) { + trs, _ := async.Parallel( + func(_ int) (val any, abort bool, err error) { pbpk, err := cryptoenc.PubKeyToProto(pubKey) if err != nil { return nil, true, err @@ -414,7 +428,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte } return nil, false, nil }, - func(_ int) (val interface{}, abort bool, err error) { + func(_ int) (val any, abort bool, err error) { var pba tmp2p.AuthSigMessage _, err = protoio.NewDelimitedReader(sc, 1024*1024).ReadMsg(&pba) if err != nil { @@ -440,11 +454,11 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return recvMsg, err } - var _recvMsg = trs.FirstValue().(authSigMessage) + _recvMsg := trs.FirstValue().(authSigMessage) return _recvMsg, nil } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // Increment nonce little-endian by 1 with wraparound. // Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four diff --git a/p2p/conn/secret_connection_test.go b/p2p/transport/tcp/conn/secret_connection_test.go similarity index 86% rename from p2p/conn/secret_connection_test.go rename to p2p/transport/tcp/conn/secret_connection_test.go index 0cd1241b24a..20c0feed9a2 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/transport/tcp/conn/secret_connection_test.go @@ -2,7 +2,9 @@ package conn import ( "bufio" + "bytes" "encoding/hex" + "errors" "flag" "fmt" "io" @@ -19,14 +21,13 @@ import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" - "github.com/cometbft/cometbft/crypto/sr25519" - "github.com/cometbft/cometbft/libs/async" - cmtos "github.com/cometbft/cometbft/libs/os" - cmtrand "github.com/cometbft/cometbft/libs/rand" + "github.com/cometbft/cometbft/internal/async" + cmtos "github.com/cometbft/cometbft/internal/os" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) // Run go test -update from within this module -// to update the golden test vector file +// to update the golden test vector file. 
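Both `shareEphPubKey` and `shareAuthSignature` run their write and read halves in tandem via `async.Parallel`: over a synchronous transport, two peers that each wrote before reading would deadlock. The shape of that pattern with plain goroutines (a sketch of the idea; the real code uses the internal async package, and the single-byte "keys" are stand-ins):

```go
package main

import (
	"fmt"
	"net"
)

// exchange writes our byte while concurrently reading the peer's.
// Sequential write-then-read on both ends of net.Pipe would deadlock,
// since an unbuffered pipe blocks each Write until the peer Reads.
func exchange(c net.Conn, local byte) (byte, error) {
	writeErr := make(chan error, 1)
	go func() {
		_, err := c.Write([]byte{local})
		writeErr <- err
	}()

	buf := make([]byte, 1)
	if _, err := c.Read(buf); err != nil {
		return 0, err
	}
	return buf[0], <-writeErr
}

func main() {
	a, b := net.Pipe()
	done := make(chan byte, 1)
	go func() {
		r, _ := exchange(b, 'B')
		done <- r
	}()
	r, _ := exchange(a, 'A')
	fmt.Printf("A saw %c, B saw %c\n", r, <-done)
}
```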
var update = flag.Bool("update", false, "update .golden files") type kvstoreConn struct { @@ -38,7 +39,7 @@ func (drw kvstoreConn) Close() (err error) { err2 := drw.PipeWriter.CloseWithError(io.EOF) err1 := drw.PipeReader.Close() if err2 != nil { - return err + return err2 } return err1 } @@ -49,9 +50,8 @@ type privKeyWithNilPubKey struct { func (pk privKeyWithNilPubKey) Bytes() []byte { return pk.orig.Bytes() } func (pk privKeyWithNilPubKey) Sign(msg []byte) ([]byte, error) { return pk.orig.Sign(msg) } -func (pk privKeyWithNilPubKey) PubKey() crypto.PubKey { return nil } -func (pk privKeyWithNilPubKey) Equals(pk2 crypto.PrivKey) bool { return pk.orig.Equals(pk2) } -func (pk privKeyWithNilPubKey) Type() string { return "privKeyWithNilPubKey" } +func (privKeyWithNilPubKey) PubKey() crypto.PubKey { return nil } +func (privKeyWithNilPubKey) Type() string { return "privKeyWithNilPubKey" } func TestSecretConnectionHandshake(t *testing.T) { fooSecConn, barSecConn := makeSecretConnPair(t) @@ -119,9 +119,9 @@ func TestSecretConnectionReadWrite(t *testing.T) { } // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa - genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) async.Task { - return func(_ int) (interface{}, bool, error) { - // Initiate cryptographic private key and secret connection trhough nodeConn. + genNodeRunner := func(nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) async.Task { + return func(_ int) (any, bool, error) { + // Initiate cryptographic private key and secret connection through nodeConn. nodePrvKey := ed25519.GenPrivKey() nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) if err != nil { @@ -130,7 +130,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { } // In parallel, handle some reads and writes. trs, ok := async.Parallel( - func(_ int) (interface{}, bool, error) { + func(_ int) (any, bool, error) { // Node writes: for _, nodeWrite := range nodeWrites { n, err := nodeSecretConn.Write([]byte(nodeWrite)) @@ -150,12 +150,12 @@ func TestSecretConnectionReadWrite(t *testing.T) { } return nil, false, nil }, - func(_ int) (interface{}, bool, error) { + func(_ int) (any, bool, error) { // Node reads: readBuffer := make([]byte, dataMaxSize) for { n, err := nodeSecretConn.Read(readBuffer) - if err == io.EOF { + if errors.Is(err, io.EOF) { if err := nodeConn.PipeReader.Close(); err != nil { t.Error(err) return nil, true, err @@ -183,10 +183,10 @@ func TestSecretConnectionReadWrite(t *testing.T) { // Run foo & bar in parallel trs, ok := async.Parallel( - genNodeRunner("foo", fooConn, fooWrites, &fooReads), - genNodeRunner("bar", barConn, barWrites, &barReads), + genNodeRunner(fooConn, fooWrites, &fooReads), + genNodeRunner(barConn, barWrites, &barReads), ) - require.Nil(t, trs.FirstError()) + require.NoError(t, trs.FirstError()) require.True(t, ok, "unexpected task abortion") // A helper to ensure that the writes and reads match. 
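A recurring mechanical change in these tests is `err == io.EOF` becoming `errors.Is(err, io.EOF)`. The identity comparison breaks as soon as any layer wraps the error with `%w`; `errors.Is` walks the wrap chain. A two-line demonstration of the difference:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("read failed: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false: identity check misses the wrap
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: Is unwraps the %w chain
}
```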
@@ -242,15 +242,15 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { line := scanner.Text() params := strings.Split(line, ",") randSecretVector, err := hex.DecodeString(params[0]) - require.Nil(t, err) + require.NoError(t, err) randSecret := new([32]byte) copy((*randSecret)[:], randSecretVector) locIsLeast, err := strconv.ParseBool(params[1]) - require.Nil(t, err) + require.NoError(t, err) expectedRecvSecret, err := hex.DecodeString(params[2]) - require.Nil(t, err) + require.NoError(t, err) expectedSendSecret, err := hex.DecodeString(params[3]) - require.Nil(t, err) + require.NoError(t, err) recvSecret, sendSecret := deriveSecrets(randSecret, locIsLeast) require.Equal(t, expectedRecvSecret, (*recvSecret)[:], "Recv Secrets aren't equal") @@ -272,21 +272,8 @@ func TestNilPubkey(t *testing.T) { assert.Equal(t, "encoding: unsupported key ", err.Error()) } -func TestNonEd25519Pubkey(t *testing.T) { - fooConn, barConn := makeKVStoreConnPair() - defer fooConn.Close() - defer barConn.Close() - fooPrvKey := ed25519.GenPrivKey() - barPrvKey := sr25519.GenPrivKey() - - go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests - - _, err := MakeSecretConnection(barConn, barPrvKey) - require.Error(t, err) - assert.Contains(t, err.Error(), "unsupported key") -} - func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n int) { + t.Helper() defer wg.Done() for i := 0; i < n; i++ { _, err := conn.Write([]byte(txt)) @@ -298,17 +285,18 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n i } func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) { + t.Helper() readBuffer := make([]byte, dataMaxSize) for i := 0; i < n; i++ { _, err := conn.Read(readBuffer) - assert.NoError(t, err) + require.NoError(t, err) } wg.Done() } // Creates the data for a test vector file. // The file format is: -// Hex(diffie_hellman_secret), loc_is_least, Hex(recvSecret), Hex(sendSecret), Hex(challenge) +// Hex(diffie_hellman_secret), loc_is_least, Hex(recvSecret), Hex(sendSecret), Hex(challenge). func createGoldenTestVectors(*testing.T) string { data := "" for i := 0; i < 32; i++ { @@ -325,7 +313,7 @@ func createGoldenTestVectors(*testing.T) string { return data } -// Each returned ReadWriteCloser is akin to a net.Connection +// Each returned ReadWriteCloser is akin to a net.Connection. func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { barReader, fooWriter := io.Pipe() fooReader, barWriter := io.Pipe() @@ -333,6 +321,7 @@ func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { } func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { + tb.Helper() var ( fooConn, barConn = makeKVStoreConnPair() fooPrvKey = ed25519.GenPrivKey() @@ -343,14 +332,14 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection // Make connections from both sides in parallel. trs, ok := async.Parallel( - func(_ int) (val interface{}, abort bool, err error) { + func(_ int) (val any, abort bool, err error) { fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) if err != nil { tb.Errorf("failed to establish SecretConnection for foo: %v", err) return nil, true, err } remotePubBytes := fooSecConn.RemotePubKey() - if !remotePubBytes.Equals(barPubKey) { + if !bytes.Equal(remotePubBytes.Bytes(), barPubKey.Bytes()) { err = fmt.Errorf("unexpected fooSecConn.RemotePubKey. 
Expected %v, got %v", barPubKey, fooSecConn.RemotePubKey()) tb.Error(err) @@ -358,14 +347,14 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection } return nil, false, nil }, - func(_ int) (val interface{}, abort bool, err error) { + func(_ int) (val any, abort bool, err error) { barSecConn, err = MakeSecretConnection(barConn, barPrvKey) if barSecConn == nil { tb.Errorf("failed to establish SecretConnection for bar: %v", err) return nil, true, err } remotePubBytes := barSecConn.RemotePubKey() - if !remotePubBytes.Equals(fooPubKey) { + if !bytes.Equal(remotePubBytes.Bytes(), fooPubKey.Bytes()) { err = fmt.Errorf("unexpected barSecConn.RemotePubKey. Expected %v, got %v", fooPubKey, barSecConn.RemotePubKey()) tb.Error(err) @@ -375,7 +364,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection }, ) - require.Nil(tb, trs.FirstError()) + require.NoError(tb, trs.FirstError()) require.True(tb, ok, "Unexpected task abortion") return fooSecConn, barSecConn @@ -405,7 +394,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) { readBuffer := make([]byte, dataMaxSize) for { _, err := barSecConn.Read(readBuffer) - if err == io.EOF { + if errors.Is(err, io.EOF) { return } else if err != nil { b.Errorf("failed to read from barSecConn: %v", err) @@ -464,7 +453,7 @@ func BenchmarkReadSecretConnection(b *testing.B) { readBuffer := make([]byte, dataMaxSize) _, err := barSecConn.Read(readBuffer) - if err == io.EOF { + if errors.Is(err, io.EOF) { return } else if err != nil { b.Fatalf("Failed to read from barSecConn: %v", err) diff --git a/p2p/conn/testdata/TestDeriveSecretsAndChallengeGolden.golden b/p2p/transport/tcp/conn/testdata/TestDeriveSecretsAndChallengeGolden.golden similarity index 100% rename from p2p/conn/testdata/TestDeriveSecretsAndChallengeGolden.golden rename to p2p/transport/tcp/conn/testdata/TestDeriveSecretsAndChallengeGolden.golden diff --git a/p2p/conn_set.go b/p2p/transport/tcp/conn_set.go similarity index 89% rename from p2p/conn_set.go rename to p2p/transport/tcp/conn_set.go index 44eff4a0cf1..de74dead0ab 100644 --- a/p2p/conn_set.go +++ b/p2p/transport/tcp/conn_set.go @@ -1,4 +1,4 @@ -package p2p +package tcp import ( "net" @@ -8,11 +8,11 @@ import ( // ConnSet is a lookup table for connections and all their ips. type ConnSet interface { - Has(net.Conn) bool - HasIP(net.IP) bool - Set(net.Conn, []net.IP) - Remove(net.Conn) - RemoveAddr(net.Addr) + Has(conn net.Conn) bool + HasIP(ip net.IP) bool + Set(conn net.Conn, ip []net.IP) + Remove(conn net.Conn) + RemoveAddr(addr net.Addr) } type connSetItem struct { diff --git a/p2p/transport/tcp/errors.go b/p2p/transport/tcp/errors.go new file mode 100644 index 00000000000..787b4345bd4 --- /dev/null +++ b/p2p/transport/tcp/errors.go @@ -0,0 +1,83 @@ +package tcp + +import ( + "fmt" + "net" + + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" +) + +// ErrTransportClosed is raised when the Transport has been closed. +type ErrTransportClosed struct{} + +func (ErrTransportClosed) Error() string { + return "transport has been closed" +} + +// ErrFilterTimeout indicates that a filter operation timed out. +type ErrFilterTimeout struct{} + +func (ErrFilterTimeout) Error() string { + return "filter timed out" +} + +// ErrRejected indicates that a Peer was rejected carrying additional +// information as to the reason. 
+type ErrRejected struct { + addr na.NetAddr + conn net.Conn + err error + id nodekey.ID + isAuthFailure bool + isDuplicate bool + isFiltered bool +} + +// Addr returns the network address for the rejected Peer. +func (e ErrRejected) Addr() na.NetAddr { + return e.addr +} + +func (e ErrRejected) Error() string { + if e.isAuthFailure { + return fmt.Sprintf("auth failure: %s", e.err) + } + + if e.isDuplicate { + if e.conn != nil { + return fmt.Sprintf( + "duplicate CONN<%s>", + e.conn.RemoteAddr().String(), + ) + } + if e.id != "" { + return fmt.Sprintf("duplicate ID<%v>", e.id) + } + } + + if e.isFiltered { + if e.conn != nil { + return fmt.Sprintf( + "filtered CONN<%s>: %s", + e.conn.RemoteAddr().String(), + e.err, + ) + } + + if e.id != "" { + return fmt.Sprintf("filtered ID<%v>: %s", e.id, e.err) + } + } + + return e.err.Error() +} + +// IsAuthFailure when Peer authentication was unsuccessful. +func (e ErrRejected) IsAuthFailure() bool { return e.isAuthFailure } + +// IsDuplicate when Peer ID or IP are present already. +func (e ErrRejected) IsDuplicate() bool { return e.isDuplicate } + +// IsFiltered when Peer ID or IP was filtered. +func (e ErrRejected) IsFiltered() bool { return e.isFiltered } diff --git a/p2p/transport/tcp/tcp.go b/p2p/transport/tcp/tcp.go new file mode 100644 index 00000000000..9a6f795e3f8 --- /dev/null +++ b/p2p/transport/tcp/tcp.go @@ -0,0 +1,418 @@ +package tcp + +import ( + "context" + "fmt" + "net" + "time" + + "golang.org/x/net/netutil" + + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/p2p/internal/fuzz" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" +) + +const ( + defaultDialTimeout = time.Second + defaultFilterTimeout = 5 * time.Second + defaultHandshakeTimeout = 3 * time.Second +) + +// IPResolver is a behavior subset of net.Resolver. +type IPResolver interface { + LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) +} + +// accept is the container to carry the upgraded connection from an +// asynchronously running routine to the Accept method. +type accept struct { + netAddr *na.NetAddr + conn net.Conn + err error +} + +// transportLifecycle bundles the methods for callers to control start and stop +// behavior. +type transportLifecycle interface { + Close() error + Listen(addr na.NetAddr) error +} + +// ConnFilterFunc to be implemented by filter hooks after a new connection has +// been established. The set of existing connections is passed along together +// with all resolved IPs for the new connection. +type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error + +// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection +// and refuses new ones if they come from a known ip. +func ConnDuplicateIPFilter() ConnFilterFunc { + return func(cs ConnSet, c net.Conn, ips []net.IP) error { + for _, ip := range ips { + if cs.HasIP(ip) { + return ErrRejected{ + conn: c, + err: fmt.Errorf("ip<%v> already connected", ip), + isDuplicate: true, + } + } + } + + return nil + } +} + +// MultiplexTransportOption sets an optional parameter on the +// MultiplexTransport. +type MultiplexTransportOption func(*MultiplexTransport) + +// MultiplexTransportConnFilters sets the filters for rejecting new connections.
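`ErrRejected` above carries the rejection reason as flags rather than as distinct error types, so callers first match the type with `errors.As` and then branch on the `Is*` accessors. A hedged sketch of how an accept loop might classify failures (the returned strings are placeholders; `ErrRejected` values can only be constructed inside the package, so `main` just exercises the fallback path):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cometbft/cometbft/p2p/transport/tcp"
)

// classify shows how an accept loop might react to a rejection.
func classify(err error) string {
	var rej tcp.ErrRejected
	if !errors.As(err, &rej) {
		return "transient: " + err.Error()
	}
	switch {
	case rej.IsAuthFailure():
		return "drop and mark bad: " + rej.Error()
	case rej.IsDuplicate():
		return "already connected: " + rej.Error()
	case rej.IsFiltered():
		return "filtered: " + rej.Error()
	default:
		return rej.Error()
	}
}

func main() {
	fmt.Println(classify(errors.New("connection reset")))
}
```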
+func MultiplexTransportConnFilters( + filters ...ConnFilterFunc, +) MultiplexTransportOption { + return func(mt *MultiplexTransport) { mt.connFilters = filters } +} + +// MultiplexTransportFilterTimeout sets the timeout waited for filter calls to +// return. +func MultiplexTransportFilterTimeout( + timeout time.Duration, +) MultiplexTransportOption { + return func(mt *MultiplexTransport) { mt.filterTimeout = timeout } +} + +// MultiplexTransportResolver sets the Resolver used for ip lookups, defaults to +// net.DefaultResolver. +func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption { + return func(mt *MultiplexTransport) { mt.resolver = resolver } +} + +// MultiplexTransportMaxIncomingConnections sets the maximum number of +// simultaneous connections (incoming). Default: 0 (unlimited). +func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption { + return func(mt *MultiplexTransport) { mt.maxIncomingConnections = n } +} + +// MultiplexTransport accepts and dials tcp connections and upgrades them to +// multiplexed peers. +type MultiplexTransport struct { + netAddr na.NetAddr + listener net.Listener + maxIncomingConnections int // see MaxIncomingConnections + + acceptc chan accept + closec chan struct{} + + // Lookup table for duplicate ip and id checks. + conns ConnSet + connFilters []ConnFilterFunc + + dialTimeout time.Duration + filterTimeout time.Duration + handshakeTimeout time.Duration + nodeKey nodekey.NodeKey + resolver IPResolver + + // TODO(xla): This config is still needed as we parameterise peerConn and + // peer currently. All relevant configuration should be refactored into options + // with sane defaults. + mConfig conn.MConnConfig +} + +// Test multiplexTransport for interface completeness. +var ( + _ transportLifecycle = (*MultiplexTransport)(nil) +) + +// NewMultiplexTransport returns a tcp connected multiplexed peer. +func NewMultiplexTransport( + nodeKey nodekey.NodeKey, + mConfig conn.MConnConfig, +) *MultiplexTransport { + return &MultiplexTransport{ + acceptc: make(chan accept), + closec: make(chan struct{}), + dialTimeout: defaultDialTimeout, + filterTimeout: defaultFilterTimeout, + handshakeTimeout: defaultHandshakeTimeout, + mConfig: mConfig, + nodeKey: nodeKey, + conns: NewConnSet(), + resolver: net.DefaultResolver, + } +} + +// NetAddr implements Transport. +func (mt *MultiplexTransport) NetAddr() na.NetAddr { + return mt.netAddr +} + +// Accept implements Transport. +func (mt *MultiplexTransport) Accept() (net.Conn, *na.NetAddr, error) { + select { + // This case should never have any side-effectful/blocking operations to + // ensure that quality peers are ready to be used. + case a := <-mt.acceptc: + if a.err != nil { + return nil, nil, a.err + } + + // cfg.outbound = false + + return a.conn, a.netAddr, nil + case <-mt.closec: + return nil, nil, ErrTransportClosed{} + } +} + +// Dial implements Transport. +func (mt *MultiplexTransport) Dial( + addr na.NetAddr, +) (net.Conn, error) { + c, err := addr.DialTimeout(mt.dialTimeout) + if err != nil { + return nil, err + } + + if mt.mConfig.TestFuzz { + // so we have time to do peer handshakes and get set up. + c = fuzz.ConnAfterFromConfig(c, 10*time.Second, mt.mConfig.TestFuzzConfig) + } + + // TODO(xla): Evaluate if we should apply filters if we explicitly dial.
+ if err := mt.filterConn(c); err != nil { + return nil, err + } + + secretConn, err := mt.upgrade(c, &addr) + if err != nil { + return nil, err + } + + return secretConn, nil +} + +// Close implements transportLifecycle. +func (mt *MultiplexTransport) Close() error { + close(mt.closec) + + if mt.listener != nil { + return mt.listener.Close() + } + + return nil +} + +// Listen implements transportLifecycle. +func (mt *MultiplexTransport) Listen(addr na.NetAddr) error { + ln, err := net.Listen("tcp", addr.DialString()) + if err != nil { + return err + } + + if mt.maxIncomingConnections > 0 { + ln = netutil.LimitListener(ln, mt.maxIncomingConnections) + } + + mt.netAddr = addr + mt.listener = ln + + go mt.acceptPeers() + + return nil +} + +func (mt *MultiplexTransport) acceptPeers() { + for { + c, err := mt.listener.Accept() + if err != nil { + // If Close() has been called, silently exit. + select { + case _, ok := <-mt.closec: + if !ok { + return + } + default: + // Transport is not closed + } + + mt.acceptc <- accept{err: err} + return + } + + // Connection upgrade and filtering should be asynchronous to avoid + // Head-of-line blocking[0]. + // Reference: https://github.com/tendermint/tendermint/issues/2047 + // + // [0] https://en.wikipedia.org/wiki/Head-of-line_blocking + go func(c net.Conn) { + defer func() { + if r := recover(); r != nil { + err := ErrRejected{ + conn: c, + err: fmt.Errorf("recovered from panic: %v", r), + isAuthFailure: true, + } + select { + case mt.acceptc <- accept{err: err}: + case <-mt.closec: + // Give up if the transport was closed. + _ = c.Close() + return + } + } + }() + + var ( + secretConn *conn.SecretConnection + netAddr *na.NetAddr + ) + + err := mt.filterConn(c) + if err == nil { + secretConn, err = mt.upgrade(c, nil) + if err == nil { + addr := c.RemoteAddr() + id := nodekey.PubKeyToID(secretConn.RemotePubKey()) + netAddr = na.New(id, addr) + } + } + + select { + case mt.acceptc <- accept{netAddr, secretConn, err}: + // Make the upgraded peer available. + case <-mt.closec: + // Give up if the transport was closed. + _ = c.Close() + return + } + }(c) + } +} + +// Cleanup removes the given address from the connections set and +// closes the connection. +func (mt *MultiplexTransport) Cleanup(c net.Conn) error { + mt.conns.Remove(c) + return c.Close() +} + +func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) { + defer func() { + if err != nil { + _ = c.Close() + } + }() + + // Reject if connection is already present. + if mt.conns.Has(c) { + return ErrRejected{conn: c, isDuplicate: true} + } + + // Resolve ips for incoming conn. 
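// (Descriptive note, not part of this patch: each registered filter runs in
// its own goroutine below, reporting into a channel buffered to the number of
// filters. The loop then collects one result per filter, rejecting the
// connection on the first error and returning ErrFilterTimeout if any single
// wait exceeds filterTimeout.)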
+ ips, err := resolveIPs(mt.resolver, c) + if err != nil { + return err + } + + errc := make(chan error, len(mt.connFilters)) + + for _, f := range mt.connFilters { + go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) { + errc <- f(mt.conns, c, ips) + }(f, c, ips, errc) + } + + for i := 0; i < cap(errc); i++ { + select { + case err := <-errc: + if err != nil { + return ErrRejected{conn: c, err: err, isFiltered: true} + } + case <-time.After(mt.filterTimeout): + return ErrFilterTimeout{} + } + } + + mt.conns.Set(c, ips) + + return nil +} + +func (mt *MultiplexTransport) upgrade( + c net.Conn, + dialedAddr *na.NetAddr, +) (secretConn *conn.SecretConnection, err error) { + defer func() { + if err != nil { + _ = mt.Cleanup(c) + } + }() + + secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey) + if err != nil { + return nil, ErrRejected{ + conn: c, + err: fmt.Errorf("secret conn failed: %w", err), + isAuthFailure: true, + } + } + + // For outgoing conns, ensure connection key matches dialed key. + connID := nodekey.PubKeyToID(secretConn.RemotePubKey()) + if dialedAddr != nil { + if dialedID := dialedAddr.ID; connID != dialedID { + return nil, ErrRejected{ + conn: c, + id: connID, + err: fmt.Errorf( + "conn.ID (%v) dialed ID (%v) mismatch", + connID, + dialedID, + ), + isAuthFailure: true, + } + } + } + + return secretConn, nil +} + +func upgradeSecretConn( + c net.Conn, + timeout time.Duration, + privKey crypto.PrivKey, +) (*conn.SecretConnection, error) { + if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { + return nil, err + } + + sc, err := conn.MakeSecretConnection(c, privKey) + if err != nil { + return nil, err + } + + return sc, sc.SetDeadline(time.Time{}) +} + +func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) { + host, _, err := net.SplitHostPort(c.RemoteAddr().String()) + if err != nil { + return nil, err + } + + addrs, err := resolver.LookupIPAddr(context.Background(), host) + if err != nil { + return nil, err + } + + ips := []net.IP{} + + for _, addr := range addrs { + ips = append(ips, addr.IP) + } + + return ips, nil +} diff --git a/p2p/transport/tcp/tcp_test.go b/p2p/transport/tcp/tcp_test.go new file mode 100644 index 00000000000..644bba97c53 --- /dev/null +++ b/p2p/transport/tcp/tcp_test.go @@ -0,0 +1,440 @@ +package tcp + +import ( + "errors" + "math/rand" + "net" + "runtime" + "strings" + "testing" + "time" + + "github.com/cometbft/cometbft/crypto/ed25519" + na "github.com/cometbft/cometbft/p2p/netaddr" + "github.com/cometbft/cometbft/p2p/nodekey" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" +) + +// newMultiplexTransport returns a tcp connected multiplexed peer +// using the default MConnConfig. It's a convenience function used +// for testing. 
+func newMultiplexTransport( + nodeKey nodekey.NodeKey, +) *MultiplexTransport { + return NewMultiplexTransport( + nodeKey, conn.DefaultMConnConfig(), + ) +} + +func TestTransportMultiplex_ConnFilter(t *testing.T) { + mt := newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: ed25519.GenPrivKey(), + }, + ) + id := mt.nodeKey.ID() + + MultiplexTransportConnFilters( + func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, + func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, + func(_ ConnSet, _ net.Conn, _ []net.IP) error { + return errors.New("rejected") + }, + )(mt) + + addr, err := na.NewFromString(na.IDAddrString(id, "127.0.0.1:0")) + if err != nil { + t.Fatal(err) + } + + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + errc := make(chan error) + + go func() { + addr := na.New(id, mt.listener.Addr()) + + _, err := addr.Dial() + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + _, _, err = mt.Accept() + if e, ok := err.(ErrRejected); ok { + if !e.IsFiltered() { + t.Errorf("expected peer to be filtered, got %v", err) + } + } else { + t.Errorf("expected ErrRejected, got %v", err) + } +} + +func TestTransportMultiplex_ConnFilterTimeout(t *testing.T) { + mt := newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: ed25519.GenPrivKey(), + }, + ) + id := mt.nodeKey.ID() + + MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) + MultiplexTransportConnFilters( + func(_ ConnSet, _ net.Conn, _ []net.IP) error { + time.Sleep(1 * time.Second) + return nil + }, + )(mt) + + addr, err := na.NewFromString(na.IDAddrString(id, "127.0.0.1:0")) + if err != nil { + t.Fatal(err) + } + + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + errc := make(chan error) + go func() { + addr := na.New(id, mt.listener.Addr()) + + _, err := addr.Dial() + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + _, _, err = mt.Accept() + if _, ok := err.(ErrFilterTimeout); !ok { + t.Errorf("expected ErrFilterTimeout, got %v", err) + } +} + +func TestTransportMultiplex_MaxIncomingConnections(t *testing.T) { + pv := ed25519.GenPrivKey() + id := nodekey.PubKeyToID(pv.PubKey()) + mt := newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: pv, + }, + ) + + MultiplexTransportMaxIncomingConnections(0)(mt) + + addr, err := na.NewFromString(na.IDAddrString(id, "127.0.0.1:0")) + if err != nil { + t.Fatal(err) + } + const maxIncomingConns = 2 + MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt) + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + laddr := na.New(mt.nodeKey.ID(), mt.listener.Addr()) + + // Connect more peers than max + for i := 0; i <= maxIncomingConns; i++ { + errc := make(chan error) + go testDialer(*laddr, errc) + + err = <-errc + if i < maxIncomingConns { + if err != nil { + t.Errorf("dialer connection failed: %v", err) + } + _, _, err = mt.Accept() + if err != nil { + t.Errorf("connection failed: %v", err) + } + } else if err == nil || !strings.Contains(err.Error(), "i/o timeout") { + // mt actually blocks forever on trying to accept a new peer into a full channel so + // expect the dialer to encounter a timeout error. Calling mt.Accept will block until + // mt is closed. 
+ t.Errorf("expected i/o timeout error, got %v", err) + } + } +} + +func TestTransportMultiplex_AcceptMultiple(t *testing.T) { + mt := testSetupMultiplexTransport(t) + laddr := na.New(mt.nodeKey.ID(), mt.listener.Addr()) + + var ( + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + nDialers = seed.Intn(64) + 64 + errc = make(chan error, nDialers) + ) + + // Setup dialers. + for i := 0; i < nDialers; i++ { + go testDialer(*laddr, errc) + } + + // Catch connection errors. + for i := 0; i < nDialers; i++ { + if err := <-errc; err != nil { + t.Fatal(err) + } + } + + conns := []net.Conn{} + + // Accept all connections. + for i := 0; i < cap(errc); i++ { + c, _, err := mt.Accept() + if err != nil { + t.Fatal(err) + } + + conns = append(conns, c) + } + + if have, want := len(conns), cap(errc); have != want { + t.Errorf("have %v, want %v", have, want) + } + + for _, c := range conns { + if err := mt.Cleanup(c); err != nil { + t.Fatal(err) + } + } + + if err := mt.Close(); err != nil { + t.Errorf("close errored: %v", err) + } +} + +func testDialer(dialAddr na.NetAddr, errc chan error) { + var ( + pv = ed25519.GenPrivKey() + dialer = newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: pv, + }, + ) + ) + + _, err := dialer.Dial(dialAddr) + if err != nil { + errc <- err + return + } + + // Signal that the connection was established. + errc <- nil +} + +func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + var ( + fastNodePV = ed25519.GenPrivKey() + errc = make(chan error) + fastc = make(chan struct{}) + slowc = make(chan struct{}) + slowdonec = make(chan struct{}) + ) + + // Simulate slow Peer. + go func() { + addr := na.New(mt.nodeKey.ID(), mt.listener.Addr()) + + c, err := addr.Dial() + if err != nil { + errc <- err + return + } + + close(slowc) + defer func() { + close(slowdonec) + }() + + // Make sure we switch to fast peer goroutine. + runtime.Gosched() + + select { + case <-fastc: + // Fast peer connected. + case <-time.After(200 * time.Millisecond): + // We error if the fast peer didn't succeed. + errc <- errors.New("fast peer timed out") + } + + _, err = upgradeSecretConn(c, 200*time.Millisecond, ed25519.GenPrivKey()) + if err != nil { + errc <- err + return + } + }() + + // Simulate fast Peer. 
+ go func() { + <-slowc + + dialer := newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: fastNodePV, + }, + ) + addr := na.New(mt.nodeKey.ID(), mt.listener.Addr()) + + _, err := dialer.Dial(*addr) + if err != nil { + errc <- err + return + } + + close(fastc) + <-slowdonec + close(errc) + }() + + if err := <-errc; err != nil { + t.Logf("connection failed: %v", err) + } + + _, _, err := mt.Accept() + if err != nil { + t.Fatal(err) + } +} + +func TestTransportMultiplexDialRejectWrongID(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + var ( + pv = ed25519.GenPrivKey() + dialer = newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: pv, + }, + ) + ) + + wrongID := nodekey.PubKeyToID(ed25519.GenPrivKey().PubKey()) + addr := na.New(wrongID, mt.listener.Addr()) + + _, err := dialer.Dial(*addr) + if err != nil { + t.Logf("connection failed: %v", err) + if e, ok := err.(ErrRejected); ok { + if !e.IsAuthFailure() { + t.Errorf("expected auth failure, got %v", e) + } + } else { + t.Errorf("expected ErrRejected, got %v", err) + } + } +} + +func TestTransportConnDuplicateIPFilter(t *testing.T) { + filter := ConnDuplicateIPFilter() + + if err := filter(nil, &testTransportConn{}, nil); err != nil { + t.Fatal(err) + } + + var ( + c = &testTransportConn{} + cs = NewConnSet() + ) + + cs.Set(c, []net.IP{ + {10, 0, 10, 1}, + {10, 0, 10, 2}, + {10, 0, 10, 3}, + }) + + if err := filter(cs, c, []net.IP{ + {10, 0, 10, 2}, + }); err == nil { + t.Errorf("expected Peer to be rejected as duplicate") + } +} + +// create listener. +func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { + t.Helper() + + var ( + pv = ed25519.GenPrivKey() + id = nodekey.PubKeyToID(pv.PubKey()) + mt = newMultiplexTransport( + nodekey.NodeKey{ + PrivKey: pv, + }, + ) + ) + + addr, err := na.NewFromString(na.IDAddrString(id, "127.0.0.1:0")) + if err != nil { + t.Fatal(err) + } + + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + // give the listener some time to get ready + time.Sleep(20 * time.Millisecond) + + return mt +} + +type testTransportAddr struct{} + +func (*testTransportAddr) Network() string { return "tcp" } +func (*testTransportAddr) String() string { return "test.local:1234" } + +type testTransportConn struct{} + +func (*testTransportConn) Close() error { + return errors.New("close() not implemented") +} + +func (*testTransportConn) LocalAddr() net.Addr { + return &testTransportAddr{} +} + +func (*testTransportConn) RemoteAddr() net.Addr { + return &testTransportAddr{} +} + +func (*testTransportConn) Read(_ []byte) (int, error) { + return -1, errors.New("read() not implemented") +} + +func (*testTransportConn) SetDeadline(_ time.Time) error { + return errors.New("setDeadline() not implemented") +} + +func (*testTransportConn) SetReadDeadline(_ time.Time) error { + return errors.New("setReadDeadline() not implemented") +} + +func (*testTransportConn) SetWriteDeadline(_ time.Time) error { + return errors.New("setWriteDeadline() not implemented") +} + +func (*testTransportConn) Write(_ []byte) (int, error) { + return -1, errors.New("write() not implemented") +} diff --git a/p2p/transport_test.go b/p2p/transport_test.go deleted file mode 100644 index 3c404f6a8dd..00000000000 --- a/p2p/transport_test.go +++ /dev/null @@ -1,706 +0,0 @@ -package p2p - -import ( - "fmt" - "math/rand" - "net" - "reflect" - "runtime" - "strings" - "testing" - "time" - - "github.com/cometbft/cometbft/crypto/ed25519" - "github.com/cometbft/cometbft/libs/protoio" - "github.com/cometbft/cometbft/p2p/conn" - tmp2p 
"github.com/cometbft/cometbft/proto/tendermint/p2p" -) - -var defaultNodeName = "host_peer" - -func emptyNodeInfo() NodeInfo { - return DefaultNodeInfo{} -} - -// newMultiplexTransport returns a tcp connected multiplexed peer -// using the default MConnConfig. It's a convenience function used -// for testing. -func newMultiplexTransport( - nodeInfo NodeInfo, - nodeKey NodeKey, -) *MultiplexTransport { - return NewMultiplexTransport( - nodeInfo, nodeKey, conn.DefaultMConnConfig(), - ) -} - -func TestTransportMultiplexConnFilter(t *testing.T) { - mt := newMultiplexTransport( - emptyNodeInfo(), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, - ) - id := mt.nodeKey.ID() - - MultiplexTransportConnFilters( - func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, - func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, - func(_ ConnSet, _ net.Conn, _ []net.IP) error { - return fmt.Errorf("rejected") - }, - )(mt) - - addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) - if err != nil { - t.Fatal(err) - } - - if err := mt.Listen(*addr); err != nil { - t.Fatal(err) - } - - errc := make(chan error) - - go func() { - addr := NewNetAddress(id, mt.listener.Addr()) - - _, err := addr.Dial() - if err != nil { - errc <- err - return - } - - close(errc) - }() - - if err := <-errc; err != nil { - t.Errorf("connection failed: %v", err) - } - - _, err = mt.Accept(peerConfig{}) - if e, ok := err.(ErrRejected); ok { - if !e.IsFiltered() { - t.Errorf("expected peer to be filtered, got %v", err) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func TestTransportMultiplexConnFilterTimeout(t *testing.T) { - mt := newMultiplexTransport( - emptyNodeInfo(), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, - ) - id := mt.nodeKey.ID() - - MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) - MultiplexTransportConnFilters( - func(_ ConnSet, _ net.Conn, _ []net.IP) error { - time.Sleep(1 * time.Second) - return nil - }, - )(mt) - - addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) - if err != nil { - t.Fatal(err) - } - - if err := mt.Listen(*addr); err != nil { - t.Fatal(err) - } - - errc := make(chan error) - go func() { - addr := NewNetAddress(id, mt.listener.Addr()) - - _, err := addr.Dial() - if err != nil { - errc <- err - return - } - - close(errc) - }() - - if err := <-errc; err != nil { - t.Errorf("connection failed: %v", err) - } - - _, err = mt.Accept(peerConfig{}) - if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout, got %v", err) - } -} - -func TestTransportMultiplexMaxIncomingConnections(t *testing.T) { - pv := ed25519.GenPrivKey() - id := PubKeyToID(pv.PubKey()) - mt := newMultiplexTransport( - testNodeInfo( - id, "transport", - ), - NodeKey{ - PrivKey: pv, - }, - ) - - MultiplexTransportMaxIncomingConnections(0)(mt) - - addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) - if err != nil { - t.Fatal(err) - } - const maxIncomingConns = 2 - MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt) - if err := mt.Listen(*addr); err != nil { - t.Fatal(err) - } - - laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - // Connect more peers than max - for i := 0; i <= maxIncomingConns; i++ { - errc := make(chan error) - go testDialer(*laddr, errc) - - err = <-errc - if i < maxIncomingConns { - if err != nil { - t.Errorf("dialer connection failed: %v", err) - } - _, err = mt.Accept(peerConfig{}) - if err != nil { - t.Errorf("connection failed: %v", err) - } - } else if err == 
nil || !strings.Contains(err.Error(), "i/o timeout") { - // mt actually blocks forever on trying to accept a new peer into a full channel so - // expect the dialer to encounter a timeout error. Calling mt.Accept will block until - // mt is closed. - t.Errorf("expected i/o timeout error, got %v", err) - } - } -} - -func TestTransportMultiplexAcceptMultiple(t *testing.T) { - mt := testSetupMultiplexTransport(t) - laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - var ( - seed = rand.New(rand.NewSource(time.Now().UnixNano())) - nDialers = seed.Intn(64) + 64 - errc = make(chan error, nDialers) - ) - - // Setup dialers. - for i := 0; i < nDialers; i++ { - go testDialer(*laddr, errc) - } - - // Catch connection errors. - for i := 0; i < nDialers; i++ { - if err := <-errc; err != nil { - t.Fatal(err) - } - } - - ps := []Peer{} - - // Accept all peers. - for i := 0; i < cap(errc); i++ { - p, err := mt.Accept(peerConfig{}) - if err != nil { - t.Fatal(err) - } - - if err := p.Start(); err != nil { - t.Fatal(err) - } - - ps = append(ps, p) - } - - if have, want := len(ps), cap(errc); have != want { - t.Errorf("have %v, want %v", have, want) - } - - // Stop all peers. - for _, p := range ps { - if err := p.Stop(); err != nil { - t.Fatal(err) - } - } - - if err := mt.Close(); err != nil { - t.Errorf("close errored: %v", err) - } -} - -func testDialer(dialAddr NetAddress, errc chan error) { - var ( - pv = ed25519.GenPrivKey() - dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), - NodeKey{ - PrivKey: pv, - }, - ) - ) - - _, err := dialer.Dial(dialAddr, peerConfig{}) - if err != nil { - errc <- err - return - } - - // Signal that the connection was established. - errc <- nil -} - -func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { - mt := testSetupMultiplexTransport(t) - - var ( - fastNodePV = ed25519.GenPrivKey() - fastNodeInfo = testNodeInfo(PubKeyToID(fastNodePV.PubKey()), "fastnode") - errc = make(chan error) - fastc = make(chan struct{}) - slowc = make(chan struct{}) - slowdonec = make(chan struct{}) - ) - - // Simulate slow Peer. - go func() { - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - c, err := addr.Dial() - if err != nil { - errc <- err - return - } - - close(slowc) - defer func() { - close(slowdonec) - }() - - // Make sure we switch to fast peer goroutine. - runtime.Gosched() - - select { - case <-fastc: - // Fast peer connected. - case <-time.After(200 * time.Millisecond): - // We error if the fast peer didn't succeed. - errc <- fmt.Errorf("fast peer timed out") - } - - sc, err := upgradeSecretConn(c, 200*time.Millisecond, ed25519.GenPrivKey()) - if err != nil { - errc <- err - return - } - - _, err = handshake(sc, 200*time.Millisecond, - testNodeInfo( - PubKeyToID(ed25519.GenPrivKey().PubKey()), - "slow_peer", - )) - if err != nil { - errc <- err - } - }() - - // Simulate fast Peer. 
- go func() { - <-slowc - - var ( - dialer = newMultiplexTransport( - fastNodeInfo, - NodeKey{ - PrivKey: fastNodePV, - }, - ) - ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - _, err := dialer.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - close(fastc) - <-slowdonec - close(errc) - }() - - if err := <-errc; err != nil { - t.Logf("connection failed: %v", err) - } - - p, err := mt.Accept(peerConfig{}) - if err != nil { - t.Fatal(err) - } - - if have, want := p.NodeInfo(), fastNodeInfo; !reflect.DeepEqual(have, want) { - t.Errorf("have %v, want %v", have, want) - } -} - -func TestTransportMultiplexValidateNodeInfo(t *testing.T) { - mt := testSetupMultiplexTransport(t) - - errc := make(chan error) - - go func() { - var ( - pv = ed25519.GenPrivKey() - dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty - NodeKey{ - PrivKey: pv, - }, - ) - ) - - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - _, err := dialer.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - close(errc) - }() - - if err := <-errc; err != nil { - t.Errorf("connection failed: %v", err) - } - - _, err := mt.Accept(peerConfig{}) - if e, ok := err.(ErrRejected); ok { - if !e.IsNodeInfoInvalid() { - t.Errorf("expected NodeInfo to be invalid, got %v", err) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func TestTransportMultiplexRejectMissmatchID(t *testing.T) { - mt := testSetupMultiplexTransport(t) - - errc := make(chan error) - - go func() { - dialer := newMultiplexTransport( - testNodeInfo( - PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer", - ), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, - ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - _, err := dialer.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - close(errc) - }() - - if err := <-errc; err != nil { - t.Errorf("connection failed: %v", err) - } - - _, err := mt.Accept(peerConfig{}) - if e, ok := err.(ErrRejected); ok { - if !e.IsAuthFailure() { - t.Errorf("expected auth failure, got %v", e) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func TestTransportMultiplexDialRejectWrongID(t *testing.T) { - mt := testSetupMultiplexTransport(t) - - var ( - pv = ed25519.GenPrivKey() - dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty - NodeKey{ - PrivKey: pv, - }, - ) - ) - - wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey()) - addr := NewNetAddress(wrongID, mt.listener.Addr()) - - _, err := dialer.Dial(*addr, peerConfig{}) - if err != nil { - t.Logf("connection failed: %v", err) - if e, ok := err.(ErrRejected); ok { - if !e.IsAuthFailure() { - t.Errorf("expected auth failure, got %v", e) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } - } -} - -func TestTransportMultiplexRejectIncompatible(t *testing.T) { - mt := testSetupMultiplexTransport(t) - - errc := make(chan error) - - go func() { - var ( - pv = ed25519.GenPrivKey() - dialer = newMultiplexTransport( - testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"), - NodeKey{ - PrivKey: pv, - }, - ) - ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - _, err := dialer.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - close(errc) - }() - - _, err := mt.Accept(peerConfig{}) - if e, ok := err.(ErrRejected); ok { - if !e.IsIncompatible() { - t.Errorf("expected to 
reject incompatible, got %v", e) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func TestTransportMultiplexRejectSelf(t *testing.T) { - mt := testSetupMultiplexTransport(t) - - errc := make(chan error) - - go func() { - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - - _, err := mt.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - close(errc) - }() - - if err := <-errc; err != nil { - if e, ok := err.(ErrRejected); ok { - if !e.IsSelf() { - t.Errorf("expected to reject self, got: %v", e) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } - } else { - t.Errorf("expected connection failure") - } - - _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { - t.Errorf("expected to reject self, got: %v", err) - } - } else { - t.Errorf("expected ErrRejected, got %v", nil) - } -} - -func TestTransportConnDuplicateIPFilter(t *testing.T) { - filter := ConnDuplicateIPFilter() - - if err := filter(nil, &testTransportConn{}, nil); err != nil { - t.Fatal(err) - } - - var ( - c = &testTransportConn{} - cs = NewConnSet() - ) - - cs.Set(c, []net.IP{ - {10, 0, 10, 1}, - {10, 0, 10, 2}, - {10, 0, 10, 3}, - }) - - if err := filter(cs, c, []net.IP{ - {10, 0, 10, 2}, - }); err == nil { - t.Errorf("expected Peer to be rejected as duplicate") - } -} - -func TestTransportHandshake(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - var ( - peerPV = ed25519.GenPrivKey() - peerNodeInfo = testNodeInfo(PubKeyToID(peerPV.PubKey()), defaultNodeName) - ) - - go func() { - c, err := net.Dial(ln.Addr().Network(), ln.Addr().String()) - if err != nil { - t.Error(err) - return - } - - go func(c net.Conn) { - _, err := protoio.NewDelimitedWriter(c).WriteMsg(peerNodeInfo.(DefaultNodeInfo).ToProto()) - if err != nil { - t.Error(err) - } - }(c) - go func(c net.Conn) { - var ( - // ni DefaultNodeInfo - pbni tmp2p.DefaultNodeInfo - ) - - protoReader := protoio.NewDelimitedReader(c, MaxNodeInfoSize()) - _, err := protoReader.ReadMsg(&pbni) - if err != nil { - t.Error(err) - } - - _, err = DefaultNodeInfoFromToProto(&pbni) - if err != nil { - t.Error(err) - } - }(c) - }() - - c, err := ln.Accept() - if err != nil { - t.Fatal(err) - } - - ni, err := handshake(c, 20*time.Millisecond, emptyNodeInfo()) - if err != nil { - t.Fatal(err) - } - - if have, want := ni, peerNodeInfo; !reflect.DeepEqual(have, want) { - t.Errorf("have %v, want %v", have, want) - } -} - -func TestTransportAddChannel(t *testing.T) { - mt := newMultiplexTransport( - emptyNodeInfo(), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, - ) - testChannel := byte(0x01) - - mt.AddChannel(testChannel) - if !mt.nodeInfo.(DefaultNodeInfo).HasChannel(testChannel) { - t.Errorf("missing added channel %v. 
Got %v", testChannel, mt.nodeInfo.(DefaultNodeInfo).Channels) - } -} - -// create listener -func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { - var ( - pv = ed25519.GenPrivKey() - id = PubKeyToID(pv.PubKey()) - mt = newMultiplexTransport( - testNodeInfo( - id, "transport", - ), - NodeKey{ - PrivKey: pv, - }, - ) - ) - - addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) - if err != nil { - t.Fatal(err) - } - - if err := mt.Listen(*addr); err != nil { - t.Fatal(err) - } - - // give the listener some time to get ready - time.Sleep(20 * time.Millisecond) - - return mt -} - -type testTransportAddr struct{} - -func (a *testTransportAddr) Network() string { return "tcp" } -func (a *testTransportAddr) String() string { return "test.local:1234" } - -type testTransportConn struct{} - -func (c *testTransportConn) Close() error { - return fmt.Errorf("close() not implemented") -} - -func (c *testTransportConn) LocalAddr() net.Addr { - return &testTransportAddr{} -} - -func (c *testTransportConn) RemoteAddr() net.Addr { - return &testTransportAddr{} -} - -func (c *testTransportConn) Read(_ []byte) (int, error) { - return -1, fmt.Errorf("read() not implemented") -} - -func (c *testTransportConn) SetDeadline(_ time.Time) error { - return fmt.Errorf("setDeadline() not implemented") -} - -func (c *testTransportConn) SetReadDeadline(_ time.Time) error { - return fmt.Errorf("setReadDeadline() not implemented") -} - -func (c *testTransportConn) SetWriteDeadline(_ time.Time) error { - return fmt.Errorf("setWriteDeadline() not implemented") -} - -func (c *testTransportConn) Write(_ []byte) (int, error) { - return -1, fmt.Errorf("write() not implemented") -} diff --git a/p2p/types.go b/p2p/types.go index 48a6746ceba..6c7c1bed41f 100644 --- a/p2p/types.go +++ b/p2p/types.go @@ -3,12 +3,14 @@ package p2p import ( "github.com/cosmos/gogoproto/proto" - "github.com/cometbft/cometbft/p2p/conn" - tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" + tmp2p "github.com/cometbft/cometbft/api/cometbft/p2p/v1" + "github.com/cometbft/cometbft/p2p/transport/tcp/conn" + "github.com/cometbft/cometbft/types" ) -type ChannelDescriptor = conn.ChannelDescriptor -type ConnectionStatus = conn.ConnectionStatus +type ( + ConnectionStatus = conn.ConnectionStatus +) // Envelope contains a message with sender routing info. type Envelope struct { @@ -17,25 +19,16 @@ type Envelope struct { ChannelID byte } -// Unwrapper is a Protobuf message that can contain a variety of inner messages -// (e.g. via oneof fields). If a Channel's message type implements Unwrapper, the -// p2p layer will automatically unwrap inbound messages so that reactors do not have to do this themselves. -type Unwrapper interface { - proto.Message - - // Unwrap will unwrap the inner message contained in this message. - Unwrap() (proto.Message, error) -} - -// Wrapper is a companion type to Unwrapper. It is a Protobuf message that can contain a variety of inner messages. The p2p layer will automatically wrap outbound messages so that the reactors do not have to do it themselves. -type Wrapper interface { - proto.Message - - // Wrap will take the underlying message and wrap it in its wrapper type. - Wrap() proto.Message -} - var ( - _ Wrapper = &tmp2p.PexRequest{} - _ Wrapper = &tmp2p.PexAddrs{} + _ types.Wrapper = &tmp2p.PexRequest{} + _ types.Wrapper = &tmp2p.PexAddrs{} ) + +// StreamDescriptor describes a data stream. This could be a substream within a +// multiplexed TCP connection, QUIC stream, etc. 
+type StreamDescriptor interface { + // StreamID returns the ID of the stream. + StreamID() byte + // MessageType returns the type of the message sent/received on this stream. + MessageType() proto.Message +} diff --git a/privval/errors.go b/privval/errors.go index 906321dd487..28d64f0b086 100644 --- a/privval/errors.go +++ b/privval/errors.go @@ -9,9 +9,9 @@ import ( type EndpointTimeoutError struct{} // Implement the net.Error interface. -func (e EndpointTimeoutError) Error() string { return "endpoint connection timed out" } -func (e EndpointTimeoutError) Timeout() bool { return true } -func (e EndpointTimeoutError) Temporary() bool { return true } +func (EndpointTimeoutError) Error() string { return "endpoint connection timed out" } +func (EndpointTimeoutError) Timeout() bool { return true } +func (EndpointTimeoutError) Temporary() bool { return true } // Socket errors. var ( diff --git a/privval/file.go b/privval/file.go index a092e38b087..b0828387cc5 100644 --- a/privval/file.go +++ b/privval/file.go @@ -9,14 +9,14 @@ import ( "github.com/cosmos/gogoproto/proto" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" + cmtos "github.com/cometbft/cometbft/internal/os" + "github.com/cometbft/cometbft/internal/tempfile" cmtbytes "github.com/cometbft/cometbft/libs/bytes" cmtjson "github.com/cometbft/cometbft/libs/json" - cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/libs/protoio" - "github.com/cometbft/cometbft/libs/tempfile" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -32,16 +32,16 @@ const ( // A vote is either stepPrevote or stepPrecommit. func voteToStep(vote *cmtproto.Vote) int8 { switch vote.Type { - case cmtproto.PrevoteType: + case types.PrevoteType: return stepPrevote - case cmtproto.PrecommitType: + case types.PrecommitType: return stepPrecommit default: panic(fmt.Sprintf("Unknown vote type: %v", vote.Type)) } } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // FilePVKey stores the immutable part of PrivValidator. type FilePVKey struct { @@ -64,12 +64,12 @@ func (pvKey FilePVKey) Save() { panic(err) } - if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0600); err != nil { + if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600); err != nil { panic(err) } } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // FilePVLastSignState stores the mutable part of PrivValidator. type FilePVLastSignState struct { @@ -98,37 +98,44 @@ func (lss *FilePVLastSignState) reset() { // we have already signed for this HRS, and can reuse the existing signature). // It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { - if lss.Height > height { return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) } - if lss.Height == height { - if lss.Round > round { - return false, fmt.Errorf("round regression at height %v. 
Got %v, last round %v", height, round, lss.Round) - } + if lss.Height != height { + return false, nil + } - if lss.Round == round { - if lss.Step > step { - return false, fmt.Errorf( - "step regression at height %v round %v. Got %v, last step %v", - height, - round, - step, - lss.Step, - ) - } else if lss.Step == step { - if lss.SignBytes != nil { - if lss.Signature == nil { - panic("pv: Signature is nil but SignBytes is not!") - } - return true, nil - } - return false, errors.New("no SignBytes found") - } - } + if lss.Round > round { + return false, fmt.Errorf("round regression at height %v. Got %v, last round %v", height, round, lss.Round) + } + + if lss.Round != round { + return false, nil + } + + if lss.Step > step { + return false, fmt.Errorf( + "step regression at height %v round %v. Got %v, last step %v", + height, + round, + step, + lss.Step, + ) + } + + if lss.Step < step { + return false, nil + } + + if lss.SignBytes == nil { + return false, errors.New("no SignBytes found") + } + + if lss.Signature == nil { + panic("pv: Signature is nil but SignBytes is not!") } - return false, nil + return true, nil } // Save persists the FilePvLastSignState to its filePath. @@ -141,13 +148,13 @@ func (lss *FilePVLastSignState) Save() { if err != nil { panic(err) } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600) if err != nil { panic(err) } } -//------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------- // FilePV implements PrivValidator using data persisted to disk // to prevent double signing. @@ -175,10 +182,18 @@ func NewFilePV(privKey crypto.PrivKey, keyFilePath, stateFilePath string) *FileP } } -// GenFilePV generates a new validator with randomly generated private key -// and sets the filePaths, but does not call Save(). -func GenFilePV(keyFilePath, stateFilePath string) *FilePV { - return NewFilePV(ed25519.GenPrivKey(), keyFilePath, stateFilePath) +// GenFilePV calls NewFilePV with a random private key of one of the crypto libraries supported by CometBFT. +func GenFilePV(keyFilePath, stateFilePath string, keyGen func() (crypto.PrivKey, error)) (*FilePV, error) { + if keyGen == nil { + keyGen = func() (crypto.PrivKey, error) { + return ed25519.GenPrivKey(), nil + } + } + key, err := keyGen() + if err != nil { + return nil, err + } + return NewFilePV(key, keyFilePath, stateFilePath), nil } // LoadFilePV loads a FilePV from the filePaths. The FilePV handles double @@ -234,15 +249,19 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { // LoadOrGenFilePV loads a FilePV from the given filePaths // or else generates a new one and saves it to the filePaths. -func LoadOrGenFilePV(keyFilePath, stateFilePath string) *FilePV { +func LoadOrGenFilePV(keyFilePath, stateFilePath string, keyGenF func() (crypto.PrivKey, error)) (*FilePV, error) { var pv *FilePV if cmtos.FileExists(keyFilePath) { pv = LoadFilePV(keyFilePath, stateFilePath) } else { - pv = GenFilePV(keyFilePath, stateFilePath) + var err error + pv, err = GenFilePV(keyFilePath, stateFilePath, keyGenF) + if err != nil { + return nil, err + } pv.Save() } - return pv + return pv, nil } // GetAddress returns the address of the validator. @@ -259,8 +278,8 @@ func (pv *FilePV) GetPubKey() (crypto.PubKey, error) { // SignVote signs a canonical representation of the vote, along with the // chainID. Implements PrivValidator. 
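// (Illustrative sketch, not part of this patch, for the GenFilePV change
// above: the new keyGen parameter lets a caller plug in any supported key
// type, while passing nil keeps the previous ed25519 default. secp256k1 is
// used here only as an assumed example; error handling elided.)
//
//	pv, err := privval.GenFilePV(keyFilePath, stateFilePath,
//		func() (crypto.PrivKey, error) {
//			return secp256k1.GenPrivKey(), nil
//		})
//	if err != nil {
//		// handle error
//	}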
-func (pv *FilePV) SignVote(chainID string, vote *cmtproto.Vote) error { - if err := pv.signVote(chainID, vote); err != nil { +func (pv *FilePV) SignVote(chainID string, vote *cmtproto.Vote, signExtension bool) error { + if err := pv.signVote(chainID, vote, signExtension); err != nil { return fmt.Errorf("error signing vote: %v", err) } return nil @@ -275,6 +294,11 @@ func (pv *FilePV) SignProposal(chainID string, proposal *cmtproto.Proposal) erro return nil } +// SignBytes signs the given bytes. Implements PrivValidator. +func (pv *FilePV) SignBytes(bytes []byte) ([]byte, error) { + return pv.Key.PrivKey.Sign(bytes) +} + // Save persists the FilePV to disk. func (pv *FilePV) Save() { pv.Key.Save() @@ -299,13 +323,13 @@ func (pv *FilePV) String() string { ) } -//------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------ // signVote checks if the vote is good to sign and sets the vote signature. // It may need to set the timestamp as well if the vote is otherwise the same as // a previously signed vote (ie. we crashed after signing but before the vote hit the WAL). // Extension signatures are always signed for non-nil precommits (even if the data is empty). -func (pv *FilePV) signVote(chainID string, vote *cmtproto.Vote) error { +func (pv *FilePV) signVote(chainID string, vote *cmtproto.Vote, signExtension bool) error { height, round, step := vote.Height, vote.Round, voteToStep(vote) lss := pv.LastSignState @@ -317,20 +341,24 @@ func (pv *FilePV) signVote(chainID string, vote *cmtproto.Vote) error { signBytes := types.VoteSignBytes(chainID, vote) - // Vote extensions are non-deterministic, so it is possible that an - // application may have created a different extension. We therefore always - // re-sign the vote extensions of precommits. For prevotes and nil - // precommits, the extension signature will always be empty. - // Even if the signed over data is empty, we still add the signature - var extSig []byte - if vote.Type == cmtproto.PrecommitType && !types.ProtoBlockIDIsNil(&vote.BlockID) { - extSignBytes := types.VoteExtensionSignBytes(chainID, vote) - extSig, err = pv.Key.PrivKey.Sign(extSignBytes) - if err != nil { - return err + if signExtension { + // Vote extensions are non-deterministic, so it is possible that an + // application may have created a different extension. We therefore always + // re-sign the vote extensions of precommits. For prevotes and nil + // precommits, the extension signature will always be empty. 
+ // Even if the signed over data is empty, we still add the signature + var extSig []byte + if vote.Type == types.PrecommitType && !types.ProtoBlockIDIsNil(&vote.BlockID) { + extSignBytes := types.VoteExtensionSignBytes(chainID, vote) + extSig, err = pv.Key.PrivKey.Sign(extSignBytes) + if err != nil { + return err + } + } else if len(vote.Extension) > 0 { + return errors.New("unexpected vote extension - extensions are only allowed in non-nil precommits") } - } else if len(vote.Extension) > 0 { - return errors.New("unexpected vote extension - extensions are only allowed in non-nil precommits") + + vote.ExtensionSignature = extSig } // We might crash before writing to the wal, @@ -347,11 +375,9 @@ func (pv *FilePV) signVote(chainID string, vote *cmtproto.Vote) error { vote.Timestamp = timestamp vote.Signature = lss.Signature } else { - err = fmt.Errorf("conflicting data") + err = errors.New("conflicting data") } - vote.ExtensionSignature = extSig - return err } @@ -362,7 +388,6 @@ func (pv *FilePV) signVote(chainID string, vote *cmtproto.Vote) error { } pv.saveSigned(height, round, step, signBytes, sig) vote.Signature = sig - vote.ExtensionSignature = extSig return nil } @@ -394,7 +419,7 @@ func (pv *FilePV) signProposal(chainID string, proposal *cmtproto.Proposal) erro proposal.Timestamp = timestamp proposal.Signature = lss.Signature } else { - err = fmt.Errorf("conflicting data") + err = errors.New("conflicting data") } return err } @@ -409,10 +434,10 @@ func (pv *FilePV) signProposal(chainID string, proposal *cmtproto.Proposal) erro return nil } -// Persist height/round/step and signature +// Persist height/round/step and signature. func (pv *FilePV) saveSigned(height int64, round int32, step int8, - signBytes []byte, sig []byte) { - + signBytes []byte, sig []byte, +) { pv.LastSignState.Height = height pv.LastSignState.Round = round pv.LastSignState.Step = step @@ -421,7 +446,7 @@ func (pv *FilePV) saveSigned(height int64, round int32, step int8, pv.LastSignState.Save() } -//----------------------------------------------------------------------------------------- +// ----------------------------------------------------------------------------------------- // Returns the timestamp from the lastSignBytes. // Returns true if the only difference in the votes is their timestamp. @@ -446,7 +471,7 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T } // returns the timestamp from the lastSignBytes. -// returns true if the only difference in the proposals is their timestamp +// returns true if the only difference in the proposals is their timestamp. 
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { var lastProposal, newProposal cmtproto.CanonicalProposal if err := protoio.UnmarshalDelimited(lastSignBytes, &lastProposal); err != nil { diff --git a/privval/file_test.go b/privval/file_test.go index 6d8df0270fd..7d2f5ccff1d 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -10,73 +10,96 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/tmhash" + kt "github.com/cometbft/cometbft/internal/keytypes" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtjson "github.com/cometbft/cometbft/libs/json" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" ) func TestGenLoadValidator(t *testing.T) { - privVal, tempKeyFileName, tempStateFileName := newTestFilePV(t) - - height := int64(100) - privVal.LastSignState.Height = height - privVal.Save() - addr := privVal.GetAddress() - - privVal = LoadFilePV(tempKeyFileName, tempStateFileName) - assert.Equal(t, addr, privVal.GetAddress(), "expected privval addr to be the same") - assert.Equal(t, height, privVal.LastSignState.Height, "expected privval.LastHeight to have been saved") + for _, keyType := range kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + privVal, tempKeyFileName, tempStateFileName := newTestFilePV(t, keyGenF) + + height := int64(100) + privVal.LastSignState.Height = height + privVal.Save() + addr := privVal.GetAddress() + + privVal = LoadFilePV(tempKeyFileName, tempStateFileName) + assert.Equal(t, addr, privVal.GetAddress(), "expected privval addr to be the same") + assert.Equal(t, height, privVal.LastSignState.Height, "expected privval.LastHeight to have been saved") + }) + } } func TestResetValidator(t *testing.T) { - privVal, _, tempStateFileName := newTestFilePV(t) - emptyState := FilePVLastSignState{filePath: tempStateFileName} - - // new priv val has empty state - assert.Equal(t, privVal.LastSignState, emptyState) - - // test vote - height, round := int64(10), int32(1) - voteType := cmtproto.PrevoteType - randBytes := cmtrand.Bytes(tmhash.Size) - blockID := types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}} - vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID, nil) - err := privVal.SignVote("mychainid", vote.ToProto()) - assert.NoError(t, err, "expected no error signing vote") - - // priv val after signing is not same as empty - assert.NotEqual(t, privVal.LastSignState, emptyState) - - // priv val after AcceptNewConnection is same as empty - privVal.Reset() - assert.Equal(t, privVal.LastSignState, emptyState) + for _, keyType := range kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + privVal, _, tempStateFileName := newTestFilePV(t, keyGenF) + emptyState := FilePVLastSignState{filePath: tempStateFileName} + + // new priv val has empty state + assert.Equal(t, privVal.LastSignState, emptyState) + + // test vote + height, round := int64(10), int32(1) + voteType := types.PrevoteType + randBytes := cmtrand.Bytes(tmhash.Size) + blockID := types.BlockID{Hash: randBytes, PartSetHeader: 
types.PartSetHeader{}} + vote := newVote(privVal.Key.Address, height, round, voteType, blockID) + err := privVal.SignVote("mychainid", vote.ToProto(), false) + require.NoError(t, err, "expected no error signing vote") + + // priv val after signing is not same as empty + assert.NotEqual(t, privVal.LastSignState, emptyState) + + // priv val after AcceptNewConnection is same as empty + privVal.Reset() + assert.Equal(t, privVal.LastSignState, emptyState) + }) + } } func TestLoadOrGenValidator(t *testing.T) { - assert := assert.New(t) - - tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := os.CreateTemp("", "priv_validator_state_") - require.Nil(t, err) - - tempKeyFilePath := tempKeyFile.Name() - if err := os.Remove(tempKeyFilePath); err != nil { - t.Error(err) - } - tempStateFilePath := tempStateFile.Name() - if err := os.Remove(tempStateFilePath); err != nil { - t.Error(err) + for _, keyType := range kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + assert := assert.New(t) + + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_"+keyType+"_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", "priv_validator_state_"+keyType+"_") + require.NoError(t, err) + + tempKeyFilePath := tempKeyFile.Name() + err = os.Remove(tempKeyFilePath) + require.NoError(t, err) + tempStateFilePath := tempStateFile.Name() + err = os.Remove(tempStateFilePath) + require.NoError(t, err) + + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + privVal, err := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath, keyGenF) + require.NoError(t, err) + addr := privVal.GetAddress() + // passing nil because we won't generate this time, so doesn't matter + privVal, err = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath, nil) + require.NoError(t, err) + assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") + }) } - - privVal := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) - addr := privVal.GetAddress() - privVal = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) - assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") } func TestUnmarshalValidatorState(t *testing.T) { @@ -91,7 +114,7 @@ func TestUnmarshalValidatorState(t *testing.T) { val := FilePVLastSignState{} err := cmtjson.Unmarshal([]byte(serialized), &val) - require.Nil(err, "%+v", err) + require.NoError(err, "%+v", err) // make sure the values match assert.EqualValues(val.Height, 1) @@ -100,7 +123,7 @@ func TestUnmarshalValidatorState(t *testing.T) { // export it and make sure it is the same out, err := cmtjson.Marshal(val) - require.Nil(err, "%+v", err) + require.NoError(err, "%+v", err) assert.JSONEq(serialized, string(out)) } @@ -130,7 +153,7 @@ func TestUnmarshalValidatorKey(t *testing.T) { val := FilePVKey{} err := cmtjson.Unmarshal([]byte(serialized), &val) - require.Nil(err, "%+v", err) + require.NoError(err, "%+v", err) // make sure the values match assert.EqualValues(addr, val.Address) @@ -139,110 +162,173 @@ func TestUnmarshalValidatorKey(t *testing.T) { // export it and make sure it is the same out, err := cmtjson.Marshal(val) - require.Nil(err, "%+v", err) + require.NoError(err, "%+v", err) assert.JSONEq(serialized, string(out)) } func TestSignVote(t *testing.T) { - assert := assert.New(t) - - privVal, _, _ := newTestFilePV(t) - - randbytes := cmtrand.Bytes(tmhash.Size) - randbytes2 := cmtrand.Bytes(tmhash.Size) - - block1 := types.BlockID{Hash: randbytes, - PartSetHeader: 
types.PartSetHeader{Total: 5, Hash: randbytes}} - block2 := types.BlockID{Hash: randbytes2, - PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} - - height, round := int64(10), int32(1) - voteType := cmtproto.PrevoteType - - // sign a vote for first time - vote := newVote(privVal.Key.Address, 0, height, round, voteType, block1, nil) - v := vote.ToProto() - err := privVal.SignVote("mychainid", v) - assert.NoError(err, "expected no error signing vote") - - // try to sign the same vote again; should be fine - err = privVal.SignVote("mychainid", v) - assert.NoError(err, "expected no error on signing same vote") - - // now try some bad votes - cases := []*types.Vote{ - newVote(privVal.Key.Address, 0, height, round-1, voteType, block1, nil), // round regression - newVote(privVal.Key.Address, 0, height-1, round, voteType, block1, nil), // height regression - newVote(privVal.Key.Address, 0, height-2, round+4, voteType, block1, nil), // height regression and different round - newVote(privVal.Key.Address, 0, height, round, voteType, block2, nil), // different block - } - - for _, c := range cases { - cpb := c.ToProto() - err = privVal.SignVote("mychainid", cpb) - assert.Error(err, "expected error on signing conflicting vote") + for _, keyType := range kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + assert := assert.New(t) + chainID := "mychainid" + keyType + + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + privVal, _, _ := newTestFilePV(t, keyGenF) + + randbytes := cmtrand.Bytes(tmhash.Size) + randbytes2 := cmtrand.Bytes(tmhash.Size) + + block1 := types.BlockID{ + Hash: randbytes, + PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}, + } + block2 := types.BlockID{ + Hash: randbytes2, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}, + } + + height, round := int64(10), int32(1) + voteType := types.PrevoteType + + // sign a vote for first time + vote := newVote(privVal.Key.Address, height, round, voteType, block1) + v := vote.ToProto() + err := privVal.SignVote(chainID, v, false) + require.NoError(t, err, "expected no error signing vote") + vote.Signature = v.Signature + err = vote.ValidateBasic() + require.NoError(t, err) + + // Verify vote signature + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + err = vote.Verify(chainID, pubKey) + require.NoError(t, err) + + // try to sign the same vote again; should be fine + err = privVal.SignVote(chainID, v, false) + require.NoError(t, err, "expected no error on signing same vote") + + // now try some bad votes + cases := []*types.Vote{ + newVote(privVal.Key.Address, height, round-1, voteType, block1), // round regression + newVote(privVal.Key.Address, height-1, round, voteType, block1), // height regression + newVote(privVal.Key.Address, height-2, round+4, voteType, block1), // height regression and different round + newVote(privVal.Key.Address, height, round, voteType, block2), // different block + } + + for _, c := range cases { + cpb := c.ToProto() + err = privVal.SignVote(chainID, cpb, false) + require.Error(t, err, "expected error on signing conflicting vote") + } + + // try signing a vote with a different time stamp + sig := vote.Signature + vote.Signature = nil + vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) + v2 := vote.ToProto() + err = privVal.SignVote(chainID, v2, false) + require.NoError(t, err) + assert.Equal(sig, v2.Signature) + }) } - - // try signing a vote with a different time stamp - sig := vote.Signature - 
vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) - err = privVal.SignVote("mychainid", v) - assert.NoError(err) - assert.Equal(sig, vote.Signature) } func TestSignProposal(t *testing.T) { - assert := assert.New(t) - - privVal, _, _ := newTestFilePV(t) - - randbytes := cmtrand.Bytes(tmhash.Size) - randbytes2 := cmtrand.Bytes(tmhash.Size) - - block1 := types.BlockID{Hash: randbytes, - PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} - block2 := types.BlockID{Hash: randbytes2, - PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} - height, round := int64(10), int32(1) - - // sign a proposal for first time - proposal := newProposal(height, round, block1) - pbp := proposal.ToProto() - err := privVal.SignProposal("mychainid", pbp) - assert.NoError(err, "expected no error signing proposal") - - // try to sign the same proposal again; should be fine - err = privVal.SignProposal("mychainid", pbp) - assert.NoError(err, "expected no error on signing same proposal") - - // now try some bad Proposals - cases := []*types.Proposal{ - newProposal(height, round-1, block1), // round regression - newProposal(height-1, round, block1), // height regression - newProposal(height-2, round+4, block1), // height regression and different round - newProposal(height, round, block2), // different block + for _, keyType := range kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + assert := assert.New(t) + chainID := "mychainid" + keyType + + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + privVal, _, _ := newTestFilePV(t, keyGenF) + + randbytes := cmtrand.Bytes(tmhash.Size) + randbytes2 := cmtrand.Bytes(tmhash.Size) + + block1 := types.BlockID{ + Hash: randbytes, + PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}, + } + block2 := types.BlockID{ + Hash: randbytes2, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}, + } + height, round := int64(10), int32(1) + + // sign a proposal for first time + proposal := newProposal(height, round, block1) + pbp := proposal.ToProto() + err := privVal.SignProposal(chainID, pbp) + sig := pbp.Signature + require.NoError(t, err, "expected no error signing proposal") + + // try to sign the same proposal again; should be fine + err = privVal.SignProposal(chainID, pbp) + require.NoError(t, err, "expected no error on signing same proposal") + + // Verify proposal signature + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + assert.True(pubKey.VerifySignature(types.ProposalSignBytes(chainID, pbp), sig)) + + // now try some bad Proposals + cases := []*types.Proposal{ + newProposal(height, round-1, block1), // round regression + newProposal(height-1, round, block1), // height regression + newProposal(height-2, round+4, block1), // height regression and different round + newProposal(height, round, block2), // different block + } + + for _, c := range cases { + err = privVal.SignProposal(chainID, c.ToProto()) + require.Error(t, err, "expected error on signing conflicting proposal") + } + + // try signing a proposal with a different time stamp + proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) + pbp2 := proposal.ToProto() + err = privVal.SignProposal(chainID, pbp2) + require.NoError(t, err) + assert.Equal(sig, pbp2.Signature) + }) } +} - for _, c := range cases { - err = privVal.SignProposal("mychainid", c.ToProto()) - assert.Error(err, "expected error on signing conflicting proposal") +func TestSignBytes(t *testing.T) { + for _, keyType := range 
kt.ListSupportedKeyTypes() { + t.Run(keyType, func(t *testing.T) { + keyGenF := func() (crypto.PrivKey, error) { + return kt.GenPrivKey(keyType) + } + privVal, _, _ := newTestFilePV(t, keyGenF) + testBytes := []byte("test bytes for signing TODO: REMOVE ME AFTER FIXING BLS") + + // Sign the test bytes + sig, err := privVal.SignBytes(testBytes) + require.NoError(t, err, "expected no error signing bytes") + + // Verify the signature + pubKey, err := privVal.GetPubKey() + require.NoError(t, err, "expected no error getting public key") + assert.True(t, pubKey.VerifySignature(testBytes, sig), "signature verification failed") + }) } - - // try signing a proposal with a different time stamp - sig := proposal.Signature - proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) - err = privVal.SignProposal("mychainid", pbp) - assert.NoError(err) - assert.Equal(sig, proposal.Signature) } func TestDifferByTimestamp(t *testing.T) { tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") - require.Nil(t, err) + require.NoError(t, err) tempStateFile, err := os.CreateTemp("", "priv_validator_state_") - require.Nil(t, err) + require.NoError(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), nil) + require.NoError(t, err) randbytes := cmtrand.Bytes(tmhash.Size) block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} height, round := int64(10), int32(1) @@ -253,7 +339,7 @@ func TestDifferByTimestamp(t *testing.T) { proposal := newProposal(height, round, block1) pb := proposal.ToProto() err := privVal.SignProposal(chainID, pb) - assert.NoError(t, err, "expected no error signing proposal") + require.NoError(t, err, "expected no error signing proposal") signBytes := types.ProposalSignBytes(chainID, pb) sig := proposal.Signature @@ -264,7 +350,7 @@ func TestDifferByTimestamp(t *testing.T) { var emptySig []byte proposal.Signature = emptySig err = privVal.SignProposal("mychainid", pb) - assert.NoError(t, err, "expected no error on signing same proposal") + require.NoError(t, err, "expected no error on signing same proposal") assert.Equal(t, timeStamp, pb.Timestamp) assert.Equal(t, signBytes, types.ProposalSignBytes(chainID, pb)) @@ -273,12 +359,12 @@ func TestDifferByTimestamp(t *testing.T) { // test vote { - voteType := cmtproto.PrevoteType + voteType := types.PrevoteType blockID := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{}} - vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID, nil) + vote := newVote(privVal.Key.Address, height, round, voteType, blockID) v := vote.ToProto() - err := privVal.SignVote("mychainid", v) - assert.NoError(t, err, "expected no error signing vote") + err := privVal.SignVote("mychainid", v, false) + require.NoError(t, err, "expected no error signing vote") signBytes := types.VoteSignBytes(chainID, v) sig := v.Signature @@ -290,8 +376,8 @@ func TestDifferByTimestamp(t *testing.T) { var emptySig []byte v.Signature = emptySig v.ExtensionSignature = emptySig - err = privVal.SignVote("mychainid", v) - assert.NoError(t, err, "expected no error on signing same vote") + err = privVal.SignVote("mychainid", v, false) + require.NoError(t, err, "expected no error on signing same vote") assert.Equal(t, timeStamp, v.Timestamp) assert.Equal(t, signBytes, types.VoteSignBytes(chainID, v)) @@ -300,10 +386,10 @@ func TestDifferByTimestamp(t *testing.T) { } } -func TestVoteExtensionsAreAlwaysSigned(t *testing.T) 
{ - privVal, _, _ := newTestFilePV(t) +func TestVoteExtensionsAreSignedIfSignExtensionIsTrue(t *testing.T) { + privVal, _, _ := newTestFilePV(t, nil) pubKey, err := privVal.GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) block := types.BlockID{ Hash: cmtrand.Bytes(tmhash.Size), @@ -311,14 +397,14 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { } height, round := int64(10), int32(1) - voteType := cmtproto.PrecommitType + voteType := types.PrecommitType // We initially sign this vote without an extension - vote1 := newVote(privVal.Key.Address, 0, height, round, voteType, block, nil) + vote1 := newVote(privVal.Key.Address, height, round, voteType, block) vpb1 := vote1.ToProto() - err = privVal.SignVote("mychainid", vpb1) - assert.NoError(t, err, "expected no error signing vote") + err = privVal.SignVote("mychainid", vpb1, true) + require.NoError(t, err, "expected no error signing vote") assert.NotNil(t, vpb1.ExtensionSignature) vesb1 := types.VoteExtensionSignBytes("mychainid", vpb1) @@ -330,8 +416,8 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { vote2.Extension = []byte("new extension") vpb2 := vote2.ToProto() - err = privVal.SignVote("mychainid", vpb2) - assert.NoError(t, err, "expected no error signing same vote with manipulated vote extension") + err = privVal.SignVote("mychainid", vpb2, true) + require.NoError(t, err, "expected no error signing same vote with manipulated vote extension") // We need to ensure that a valid new extension signature has been created // that validates against the vote extension sign bytes with the new @@ -349,8 +435,8 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { vpb2.Signature = nil vpb2.ExtensionSignature = nil - err = privVal.SignVote("mychainid", vpb2) - assert.NoError(t, err, "expected no error signing same vote with manipulated timestamp and vote extension") + err = privVal.SignVote("mychainid", vpb2, true) + require.NoError(t, err, "expected no error signing same vote with manipulated timestamp and vote extension") assert.Equal(t, expectedTimestamp, vpb2.Timestamp) vesb3 := types.VoteExtensionSignBytes("mychainid", vpb2) @@ -358,17 +444,37 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { assert.False(t, pubKey.VerifySignature(vesb1, vpb2.ExtensionSignature)) } -func newVote(addr types.Address, idx int32, height int64, round int32, - typ cmtproto.SignedMsgType, blockID types.BlockID, extension []byte) *types.Vote { +func TestVoteExtensionsAreNotSignedIfSignExtensionIsFalse(t *testing.T) { + privVal, _, _ := newTestFilePV(t, nil) + + block := types.BlockID{ + Hash: cmtrand.Bytes(tmhash.Size), + PartSetHeader: types.PartSetHeader{Total: 5, Hash: cmtrand.Bytes(tmhash.Size)}, + } + + height, round := int64(10), int32(1) + voteType := types.PrecommitType + + // We initially sign this vote without an extension + vote1 := newVote(privVal.Key.Address, height, round, voteType, block) + vpb1 := vote1.ToProto() + + err := privVal.SignVote("mychainid", vpb1, false) + require.NoError(t, err, "expected no error signing vote") + assert.Nil(t, vpb1.ExtensionSignature) +} + +func newVote(addr types.Address, height int64, round int32, + typ types.SignedMsgType, blockID types.BlockID, +) *types.Vote { return &types.Vote{ ValidatorAddress: addr, - ValidatorIndex: idx, + ValidatorIndex: 0, Height: height, Round: round, Type: typ, Timestamp: cmttime.Now(), BlockID: blockID, - Extension: extension, } } @@ -381,13 +487,15 @@ func newProposal(height int64, round int32, blockID types.BlockID) *types.Propos } } -func 
newTestFilePV(t *testing.T) (*FilePV, string, string) { +func newTestFilePV(t *testing.T, keyGenF func() (crypto.PrivKey, error)) (*FilePV, string, string) { + t.Helper() tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") require.NoError(t, err) tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") require.NoError(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), keyGenF) + require.NoError(t, err) return privVal, tempKeyFile.Name(), tempStateFile.Name() } diff --git a/privval/msgs.go b/privval/msgs.go index 4b440e612df..b4bcd232efc 100644 --- a/privval/msgs.go +++ b/privval/msgs.go @@ -5,33 +5,37 @@ import ( "github.com/cosmos/gogoproto/proto" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" + pvproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" ) // TODO: Add ChainIDRequest -func mustWrapMsg(pb proto.Message) privvalproto.Message { - msg := privvalproto.Message{} +func mustWrapMsg(pb proto.Message) pvproto.Message { + msg := pvproto.Message{} switch pb := pb.(type) { - case *privvalproto.Message: + case *pvproto.Message: msg = *pb - case *privvalproto.PubKeyRequest: - msg.Sum = &privvalproto.Message_PubKeyRequest{PubKeyRequest: pb} - case *privvalproto.PubKeyResponse: - msg.Sum = &privvalproto.Message_PubKeyResponse{PubKeyResponse: pb} - case *privvalproto.SignVoteRequest: - msg.Sum = &privvalproto.Message_SignVoteRequest{SignVoteRequest: pb} - case *privvalproto.SignedVoteResponse: - msg.Sum = &privvalproto.Message_SignedVoteResponse{SignedVoteResponse: pb} - case *privvalproto.SignedProposalResponse: - msg.Sum = &privvalproto.Message_SignedProposalResponse{SignedProposalResponse: pb} - case *privvalproto.SignProposalRequest: - msg.Sum = &privvalproto.Message_SignProposalRequest{SignProposalRequest: pb} - case *privvalproto.PingRequest: - msg.Sum = &privvalproto.Message_PingRequest{PingRequest: pb} - case *privvalproto.PingResponse: - msg.Sum = &privvalproto.Message_PingResponse{PingResponse: pb} + case *pvproto.PubKeyRequest: + msg.Sum = &pvproto.Message_PubKeyRequest{PubKeyRequest: pb} + case *pvproto.PubKeyResponse: + msg.Sum = &pvproto.Message_PubKeyResponse{PubKeyResponse: pb} + case *pvproto.SignVoteRequest: + msg.Sum = &pvproto.Message_SignVoteRequest{SignVoteRequest: pb} + case *pvproto.SignBytesRequest: + msg.Sum = &pvproto.Message_SignBytesRequest{SignBytesRequest: pb} + case *pvproto.SignBytesResponse: + msg.Sum = &pvproto.Message_SignBytesResponse{SignBytesResponse: pb} + case *pvproto.SignedVoteResponse: + msg.Sum = &pvproto.Message_SignedVoteResponse{SignedVoteResponse: pb} + case *pvproto.SignedProposalResponse: + msg.Sum = &pvproto.Message_SignedProposalResponse{SignedProposalResponse: pb} + case *pvproto.SignProposalRequest: + msg.Sum = &pvproto.Message_SignProposalRequest{SignProposalRequest: pb} + case *pvproto.PingRequest: + msg.Sum = &pvproto.Message_PingRequest{PingRequest: pb} + case *pvproto.PingResponse: + msg.Sum = &pvproto.Message_PingResponse{PingResponse: pb} default: panic(fmt.Errorf("unknown message type %T", pb)) } diff --git a/privval/msgs_test.go b/privval/msgs_test.go index b305e208050..d62bfac3bcf 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -8,13 +8,11 @@ import ( "github.com/cosmos/gogoproto/proto" "github.com/stretchr/testify/require" + privproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" 
"github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/crypto/tmhash" - cryptoproto "github.com/cometbft/cometbft/proto/tendermint/crypto" - privproto "github.com/cometbft/cometbft/proto/tendermint/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" ) @@ -22,7 +20,7 @@ var stamp = time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC) func exampleVote() *types.Vote { return &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 3, Round: 2, BlockID: types.BlockID{Hash: tmhash.Sum([]byte("blockID_hash")), PartSetHeader: types.PartSetHeader{Total: 1000000, Hash: tmhash.Sum([]byte("blockID_part_set_header_hash"))}}, @@ -34,9 +32,8 @@ func exampleVote() *types.Vote { } func exampleProposal() *types.Proposal { - return &types.Proposal{ - Type: cmtproto.SignedMsgType(1), + Type: types.SignedMsgType(1), Height: 3, Round: 2, Timestamp: stamp, @@ -55,8 +52,6 @@ func exampleProposal() *types.Proposal { //nolint:lll // ignore line length for tests func TestPrivvalVectors(t *testing.T) { pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey() - ppk, err := cryptoenc.PubKeyToProto(pk) - require.NoError(t, err) // Generate a simple vote vote := exampleVote() @@ -66,7 +61,7 @@ func TestPrivvalVectors(t *testing.T) { proposal := exampleProposal() proposalpb := proposal.ToProto() - // Create a Reuseable remote error + // Create a reusable remote error remoteError := &privproto.RemoteSignerError{Code: 1, Description: "it's a error"} testCases := []struct { @@ -77,8 +72,8 @@ func TestPrivvalVectors(t *testing.T) { {"ping request", &privproto.PingRequest{}, "3a00"}, {"ping response", &privproto.PingResponse{}, "4200"}, {"pubKey request", &privproto.PubKeyRequest{}, "0a00"}, - {"pubKey response", &privproto.PubKeyResponse{PubKey: ppk, Error: nil}, "12240a220a20556a436f1218d30942efe798420f51dc9b6a311b929c578257457d05c5fcf230"}, - {"pubKey response with error", &privproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, + {"pubKey response", &privproto.PubKeyResponse{PubKeyType: pk.Type(), PubKeyBytes: pk.Bytes(), Error: nil}, "122b1a20556a436f1218d30942efe798420f51dc9b6a311b929c578257457d05c5fcf230220765643235353139"}, + {"pubKey response with error", &privproto.PubKeyResponse{PubKeyType: "", PubKeyBytes: []byte{}, Error: remoteError}, "121212100801120c697427732061206572726f72"}, {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, "1a81010a7f080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb034a09657874656e73696f6e"}, {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "2281010a7f080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb034a09657874656e73696f6e"}, {"Vote Response with error", &privproto.SignedVoteResponse{Vote: cmtproto.Vote{}, Error: remoteError}, "22250a11220212002a0b088092b8c398feffffff0112100801120c697427732061206572726f72"}, @@ -88,8 +83,6 @@ func TestPrivvalVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - pm := 
mustWrapMsg(tc.msg) bz, err := pm.Marshal() require.NoError(t, err, tc.testName) diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go index 271e146474c..8c6b01e3afc 100644 --- a/privval/retry_signer_client.go +++ b/privval/retry_signer_client.go @@ -4,8 +4,8 @@ import ( "fmt" "time" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" ) @@ -37,7 +37,7 @@ func (sc *RetrySignerClient) WaitForConnection(maxWait time.Duration) error { return sc.next.WaitForConnection(maxWait) } -//-------------------------------------------------------- +// -------------------------------------------------------- // Implement PrivValidator func (sc *RetrySignerClient) Ping() error { @@ -63,10 +63,10 @@ func (sc *RetrySignerClient) GetPubKey() (crypto.PubKey, error) { return nil, fmt.Errorf("exhausted all attempts to get pubkey: %w", err) } -func (sc *RetrySignerClient) SignVote(chainID string, vote *cmtproto.Vote) error { +func (sc *RetrySignerClient) SignVote(chainID string, vote *cmtproto.Vote, signExtension bool) error { var err error for i := 0; i < sc.retries || sc.retries == 0; i++ { - err = sc.next.SignVote(chainID, vote) + err = sc.next.SignVote(chainID, vote, signExtension) if err == nil { return nil } @@ -94,3 +94,22 @@ func (sc *RetrySignerClient) SignProposal(chainID string, proposal *cmtproto.Pro } return fmt.Errorf("exhausted all attempts to sign proposal: %w", err) } + +func (sc *RetrySignerClient) SignBytes(bytes []byte) ([]byte, error) { + var ( + sig []byte + err error + ) + for i := 0; i < sc.retries || sc.retries == 0; i++ { + sig, err = sc.next.SignBytes(bytes) + if err == nil { + return sig, nil + } + // If remote signer errors, we don't retry. + if _, ok := err.(*RemoteSignerError); ok { + return nil, err + } + time.Sleep(sc.timeout) + } + return nil, fmt.Errorf("exhausted all attempts to sign bytes: %w", err) +} diff --git a/privval/signer_client.go b/privval/signer_client.go index 062e6cdf4d7..bd548b825ed 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -4,17 +4,16 @@ import ( "fmt" "time" + pvproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" - cmterrors "github.com/cometbft/cometbft/types/errors" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" ) // SignerClient implements PrivValidator. -// Handles remote validator connections that provide signing services +// Handles remote validator connections that provide signing services. type SignerClient struct { endpoint *SignerListenerEndpoint chainID string @@ -23,7 +22,7 @@ type SignerClient struct { var _ types.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. -// it will start the endpoint (if not already started) +// It will start the endpoint (if not already started). 
func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { if !endpoint.IsRunning() { if err := endpoint.Start(); err != nil { @@ -34,27 +33,27 @@ func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerC return &SignerClient{endpoint: endpoint, chainID: chainID}, nil } -// Close closes the underlying connection +// Close closes the underlying connection. func (sc *SignerClient) Close() error { return sc.endpoint.Close() } -// IsConnected indicates with the signer is connected to a remote signing service +// IsConnected indicates whether the signer is connected to a remote signing service. func (sc *SignerClient) IsConnected() bool { return sc.endpoint.IsConnected() } -// WaitForConnection waits maxWait for a connection or returns a timeout error +// WaitForConnection waits maxWait for a connection or returns a timeout error. func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error { return sc.endpoint.WaitForConnection(maxWait) } -//-------------------------------------------------------- +// -------------------------------------------------------- // Implement PrivValidator -// Ping sends a ping request to the remote signer +// Ping sends a ping request to the remote signer. func (sc *SignerClient) Ping() error { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) + response, err := sc.endpoint.SendRequest(mustWrapMsg(&pvproto.PingRequest{})) if err != nil { sc.endpoint.Logger.Error("SignerClient::Ping", "err", err) return nil @@ -69,9 +68,9 @@ func (sc *SignerClient) Ping() error { } // GetPubKey retrieves a public key from a remote signer -// returns an error if client is not able to provide the key +// returns an error if the client is not able to provide the key. func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PubKeyRequest{ChainId: sc.chainID})) + response, err := sc.endpoint.SendRequest(mustWrapMsg(&pvproto.PubKeyRequest{ChainId: sc.chainID})) if err != nil { return nil, fmt.Errorf("send: %w", err) } @@ -84,7 +83,7 @@ func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - pk, err := cryptoenc.PubKeyFromProto(resp.PubKey) + pk, err := cryptoenc.PubKeyFromTypeAndBytes(resp.PubKeyType, resp.PubKeyBytes) if err != nil { return nil, err } @@ -92,9 +91,9 @@ func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { return pk, nil } -// SignVote requests a remote signer to sign a vote -func (sc *SignerClient) SignVote(chainID string, vote *cmtproto.Vote) error { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.SignVoteRequest{Vote: vote, ChainId: chainID})) +// SignVote requests a remote signer to sign a vote. +func (sc *SignerClient) SignVote(chainID string, vote *cmtproto.Vote, signExtension bool) error { + response, err := sc.endpoint.SendRequest(mustWrapMsg(&pvproto.SignVoteRequest{Vote: vote, ChainId: chainID, SkipExtensionSigning: !signExtension})) if err != nil { return err } @@ -112,10 +111,10 @@ func (sc *SignerClient) SignVote(chainID string, vote *cmtproto.Vote) error { return nil } -// SignProposal requests a remote signer to sign a proposal +// SignProposal requests a remote signer to sign a proposal. 
func (sc *SignerClient) SignProposal(chainID string, proposal *cmtproto.Proposal) error { response, err := sc.endpoint.SendRequest(mustWrapMsg( - &privvalproto.SignProposalRequest{Proposal: proposal, ChainId: chainID}, + &pvproto.SignProposalRequest{Proposal: proposal, ChainId: chainID}, )) if err != nil { return err @@ -133,3 +132,21 @@ func (sc *SignerClient) SignProposal(chainID string, proposal *cmtproto.Proposal return nil } + +// SignBytes requests a remote signer to sign bytes. +func (sc *SignerClient) SignBytes(bytes []byte) ([]byte, error) { + response, err := sc.endpoint.SendRequest(mustWrapMsg(&pvproto.SignBytesRequest{Value: bytes})) + if err != nil { + return nil, err + } + + resp := response.GetSignBytesResponse() + if resp == nil { + return nil, cmterrors.ErrRequiredField{Field: "response"} + } + if resp.Error != nil { + return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} + } + + return resp.Signature, nil +} diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 14739cc1ccd..ef4742c927a 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -8,14 +8,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + privvalproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cryptoproto "github.com/cometbft/cometbft/proto/tendermint/crypto" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/types" cmterrors "github.com/cometbft/cometbft/types/errors" + cmttime "github.com/cometbft/cometbft/types/time" ) type signerTestCase struct { @@ -26,6 +25,7 @@ type signerTestCase struct { } func getSignerTestCases(t *testing.T) []signerTestCase { + t.Helper() testCases := make([]signerTestCase, 0) // Get test cases for each possible dialer (DialTCP / DialUnix / etc) @@ -58,16 +58,15 @@ func getSignerTestCases(t *testing.T) []signerTestCase { func TestSignerClose(t *testing.T) { for _, tc := range getSignerTestCases(t) { err := tc.signerClient.Close() - assert.NoError(t, err) + require.NoError(t, err) err = tc.signerServer.Stop() - assert.NoError(t, err) + require.NoError(t, err) } } func TestSignerPing(t *testing.T) { for _, tc := range getSignerTestCases(t) { - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -80,13 +79,12 @@ func TestSignerPing(t *testing.T) { }) err := tc.signerClient.Ping() - assert.NoError(t, err) + require.NoError(t, err) } } func TestSignerGetPubKey(t *testing.T) { for _, tc := range getSignerTestCases(t) { - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -117,10 +115,10 @@ func TestSignerGetPubKey(t *testing.T) { func TestSignerProposal(t *testing.T) { for _, tc := range getSignerTestCases(t) { - ts := time.Now() + ts := cmttime.Now() hash := cmtrand.Bytes(tmhash.Size) have := &types.Proposal{ - Type: cmtproto.ProposalType, + Type: types.ProposalType, Height: 1, Round: 2, POLRound: 2, @@ -128,7 +126,7 @@ func TestSignerProposal(t *testing.T) { Timestamp: ts, } want := &types.Proposal{ - Type: cmtproto.ProposalType, + Type: types.ProposalType, Height: 1, Round: 2, POLRound: 2, @@ -136,7 +134,6 @@ func TestSignerProposal(t *testing.T) { Timestamp: ts, 
} - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -157,11 +154,11 @@ func TestSignerProposal(t *testing.T) { func TestSignerVote(t *testing.T) { for _, tc := range getSignerTestCases(t) { - ts := time.Now() + ts := cmttime.Now() hash := cmtrand.Bytes(tmhash.Size) valAddr := cmtrand.Bytes(crypto.AddressSize) want := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -171,7 +168,7 @@ func TestSignerVote(t *testing.T) { } have := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -180,7 +177,6 @@ func TestSignerVote(t *testing.T) { ValidatorIndex: 1, } - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -192,20 +188,22 @@ func TestSignerVote(t *testing.T) { } }) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto(), false)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto(), false)) assert.Equal(t, want.Signature, have.Signature) + assert.Nil(t, have.Signature) + assert.Equal(t, want.ExtensionSignature, have.ExtensionSignature) } } func TestSignerVoteResetDeadline(t *testing.T) { for _, tc := range getSignerTestCases(t) { - ts := time.Now() + ts := cmttime.Now() hash := cmtrand.Bytes(tmhash.Size) valAddr := cmtrand.Bytes(crypto.AddressSize) want := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -215,7 +213,7 @@ func TestSignerVoteResetDeadline(t *testing.T) { } have := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -224,7 +222,6 @@ func TestSignerVoteResetDeadline(t *testing.T) { ValidatorIndex: 1, } - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -238,28 +235,32 @@ func TestSignerVoteResetDeadline(t *testing.T) { time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto(), false)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto(), false)) assert.Equal(t, want.Signature, have.Signature) + assert.Nil(t, have.Signature) + assert.Equal(t, want.ExtensionSignature, have.ExtensionSignature) // TODO(jleni): Clarify what is actually being tested // This would exceed the deadline if it was not extended by the previous message time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto(), false)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto(), false)) assert.Equal(t, want.Signature, have.Signature) + assert.Nil(t, have.Signature) + assert.Equal(t, want.ExtensionSignature, have.ExtensionSignature) } } func 
TestSignerVoteKeepAlive(t *testing.T) { for _, tc := range getSignerTestCases(t) { - ts := time.Now() + ts := cmttime.Now() hash := cmtrand.Bytes(tmhash.Size) valAddr := cmtrand.Bytes(crypto.AddressSize) want := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -269,7 +270,7 @@ func TestSignerVoteKeepAlive(t *testing.T) { } have := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -278,7 +279,6 @@ func TestSignerVoteKeepAlive(t *testing.T) { ValidatorIndex: 1, } - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -299,10 +299,12 @@ func TestSignerVoteKeepAlive(t *testing.T) { time.Sleep(testTimeoutReadWrite * 3) tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto(), false)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto(), false)) assert.Equal(t, want.Signature, have.Signature) + assert.Nil(t, have.Signature) + assert.Equal(t, want.ExtensionSignature, have.ExtensionSignature) } } @@ -312,7 +314,6 @@ func TestSignerSignProposalErrors(t *testing.T) { tc.signerServer.privVal = types.NewErroringMockPV() tc.mockPV = types.NewErroringMockPV() - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -324,10 +325,10 @@ func TestSignerSignProposalErrors(t *testing.T) { } }) - ts := time.Now() + ts := cmttime.Now() hash := cmtrand.Bytes(tmhash.Size) proposal := &types.Proposal{ - Type: cmtproto.ProposalType, + Type: types.ProposalType, Height: 1, Round: 2, POLRound: 2, @@ -349,11 +350,11 @@ func TestSignerSignProposalErrors(t *testing.T) { func TestSignerSignVoteErrors(t *testing.T) { for _, tc := range getSignerTestCases(t) { - ts := time.Now() + ts := cmttime.Now() hash := cmtrand.Bytes(tmhash.Size) valAddr := cmtrand.Bytes(crypto.AddressSize) vote := &types.Vote{ - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, Height: 1, Round: 2, BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, @@ -367,7 +368,6 @@ func TestSignerSignVoteErrors(t *testing.T) { tc.signerServer.privVal = types.NewErroringMockPV() tc.mockPV = types.NewErroringMockPV() - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -379,13 +379,13 @@ func TestSignerSignVoteErrors(t *testing.T) { } }) - err := tc.signerClient.SignVote(tc.chainID, vote.ToProto()) + err := tc.signerClient.SignVote(tc.chainID, vote.ToProto(), false) require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - err = tc.mockPV.SignVote(tc.chainID, vote.ToProto()) + err = tc.mockPV.SignVote(tc.chainID, vote.ToProto(), false) require.Error(t, err) - err = tc.signerClient.SignVote(tc.chainID, vote.ToProto()) + err = tc.signerClient.SignVote(tc.chainID, vote.ToProto(), false) require.Error(t, err) } } @@ -397,11 +397,11 @@ func brokenHandler(_ types.PrivValidator, request privvalproto.Message, _ string switch r := request.Sum.(type) { // This is broken and will answer most requests with a pubkey response case 
*privvalproto.Message_PubKeyRequest: - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKeyType: "", PubKeyBytes: []byte{}, Error: nil}) case *privvalproto.Message_SignVoteRequest: - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKeyType: "", PubKeyBytes: []byte{}, Error: nil}) case *privvalproto.Message_SignProposalRequest: - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKeyType: "", PubKeyBytes: []byte{}, Error: nil}) case *privvalproto.Message_PingRequest: err, res = nil, mustWrapMsg(&privvalproto.PingResponse{}) default: @@ -418,7 +418,6 @@ func TestSignerUnexpectedResponse(t *testing.T) { tc.signerServer.SetRequestHandler(brokenHandler) - tc := tc t.Cleanup(func() { if err := tc.signerServer.Stop(); err != nil { t.Error(err) @@ -430,10 +429,79 @@ func TestSignerUnexpectedResponse(t *testing.T) { } }) - ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: cmtproto.PrecommitType} + ts := cmttime.Now() + want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + + e := tc.signerClient.SignVote(tc.chainID, want.ToProto(), false) + require.ErrorIs(t, e, cmterrors.ErrRequiredField{Field: "response"}) + } +} + +func TestSignerVoteExtension(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := cmttime.Now() + hash := cmtrand.Bytes(tmhash.Size) + valAddr := cmtrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: types.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + Extension: []byte("hello"), + } + + have := &types.Vote{ + Type: types.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + Extension: []byte("world"), + } + + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) + + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto(), true)) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto(), true)) - e := tc.signerClient.SignVote(tc.chainID, want.ToProto()) - assert.ErrorIs(t, e, cmterrors.ErrRequiredField{Field: "response"}) + assert.Equal(t, want.Signature, have.Signature) + assert.Equal(t, want.ExtensionSignature, have.ExtensionSignature) + } +} + +func TestSignerSignBytes(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) + + bytes := cmtrand.Bytes(32) + signature, err := tc.signerClient.SignBytes(bytes) + require.NoError(t, err) + + pubKey, err := tc.mockPV.GetPubKey() + require.NoError(t, err) + require.True(t, pubKey.VerifySignature(bytes, signature)) } } diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go index 9afb3aaa3ff..2b2646025d2 100644 --- a/privval/signer_dialer_endpoint.go +++ b/privval/signer_dialer_endpoint.go @@ -52,7 +52,6 @@ func NewSignerDialerEndpoint( 
dialer SocketDialer, options ...SignerServiceEndpointOption, ) *SignerDialerEndpoint { - sd := &SignerDialerEndpoint{ dialer: dialer, retryWait: defaultRetryWaitMilliseconds * time.Millisecond, @@ -77,17 +76,17 @@ func (sd *SignerDialerEndpoint) ensureConnection() error { retries := 0 for retries < sd.maxConnRetries { conn, err := sd.dialer() - if err != nil { retries++ sd.Logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err) // Wait between retries time.Sleep(sd.retryWait) - } else { - sd.SetConnection(conn) - sd.Logger.Debug("SignerDialer: Connection Ready") - return nil + continue } + + sd.SetConnection(conn) + sd.Logger.Debug("SignerDialer: Connection Ready") + return nil } sd.Logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries) diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index 2b4abe2dd96..e4d11a30fa9 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -5,10 +5,10 @@ import ( "net" "time" + privvalproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" "github.com/cometbft/cometbft/libs/protoio" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" ) const ( @@ -30,14 +30,14 @@ func (se *signerEndpoint) Close() error { return nil } -// IsConnected indicates if there is an active connection +// IsConnected indicates if there is an active connection. func (se *signerEndpoint) IsConnected() bool { se.connMtx.Lock() defer se.connMtx.Unlock() return se.isConnected() } -// TryGetConnection retrieves a connection if it is already available +// GetAvailableConnection retrieves a connection if it is already available. func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net.Conn) bool { se.connMtx.Lock() defer se.connMtx.Unlock() @@ -51,13 +51,11 @@ func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net. return false } -// TryGetConnection retrieves a connection if it is already available +// WaitConnection waits for the connection to be available. func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, maxWait time.Duration) error { - se.connMtx.Lock() - defer se.connMtx.Unlock() - select { - case se.conn = <-connectionAvailableCh: + case conn := <-connectionAvailableCh: + se.SetConnection(conn) case <-time.After(maxWait): return ErrConnectionTimeout } @@ -65,21 +63,21 @@ func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, ma return nil } -// SetConnection replaces the current connection object +// SetConnection replaces the current connection object. func (se *signerEndpoint) SetConnection(newConnection net.Conn) { se.connMtx.Lock() defer se.connMtx.Unlock() se.conn = newConnection } -// IsConnected indicates if there is an active connection +// DropConnection closes the current connection if it exists. func (se *signerEndpoint) DropConnection() { se.connMtx.Lock() defer se.connMtx.Unlock() se.dropConnection() } -// ReadMessage reads a message from the endpoint +// ReadMessage reads a message from the endpoint. 
func (se *signerEndpoint) ReadMessage() (msg privvalproto.Message, err error) { se.connMtx.Lock() defer se.connMtx.Unlock() @@ -92,7 +90,7 @@ func (se *signerEndpoint) ReadMessage() (msg privvalproto.Message, err error) { err = se.conn.SetReadDeadline(deadline) if err != nil { - return + return msg, err } const maxRemoteSignerMsgSize = 1024 * 10 protoReader := protoio.NewDelimitedReader(se.conn, maxRemoteSignerMsgSize) @@ -108,10 +106,10 @@ func (se *signerEndpoint) ReadMessage() (msg privvalproto.Message, err error) { se.dropConnection() } - return + return msg, err } -// WriteMessage writes a message from the endpoint +// WriteMessage writes a message to the endpoint. func (se *signerEndpoint) WriteMessage(msg privvalproto.Message) (err error) { se.connMtx.Lock() defer se.connMtx.Unlock() @@ -126,7 +124,7 @@ func (se *signerEndpoint) WriteMessage(msg privvalproto.Message) (err error) { deadline := time.Now().Add(se.timeoutReadWrite) err = se.conn.SetWriteDeadline(deadline) if err != nil { - return + return err } _, err = protoWriter.WriteMsg(&msg) @@ -139,7 +137,7 @@ func (se *signerEndpoint) WriteMessage(msg privvalproto.Message) (err error) { se.dropConnection() } - return + return err } func (se *signerEndpoint) isConnected() bool { diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 9b6b033cc5f..ea4a0e8414f 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -1,14 +1,15 @@ package privval import ( - "fmt" + "errors" "net" + "sync/atomic" "time" + privvalproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" ) // SignerListenerEndpointOption sets an optional parameter on the SignerListenerEndpoint. @@ -17,7 +18,7 @@ type SignerListenerEndpointOption func(*SignerListenerEndpoint) // SignerListenerEndpointTimeoutReadWrite sets the read and write timeout for // connections from external signing processes. // -// Default: 5s +// Default: 5s. func SignerListenerEndpointTimeoutReadWrite(timeout time.Duration) SignerListenerEndpointOption { return func(sl *SignerListenerEndpoint) { sl.signerEndpoint.timeoutReadWrite = timeout } } @@ -34,9 +35,10 @@ type SignerListenerEndpoint struct { connectRequestCh chan struct{} connectionAvailableCh chan net.Conn - timeoutAccept time.Duration - pingTimer *time.Ticker - pingInterval time.Duration + timeoutAccept time.Duration + acceptFailCount atomic.Uint32 + pingTimer *time.Ticker + pingInterval time.Duration instanceMtx cmtsync.Mutex // Ensures instance public methods access, i.e. SendRequest } @@ -64,10 +66,10 @@ func NewSignerListenerEndpoint( // OnStart implements service.Service. func (sl *SignerListenerEndpoint) OnStart() error { - sl.connectRequestCh = make(chan struct{}) + sl.connectRequestCh = make(chan struct{}, 1) // Buffer of 1 to allow `serviceLoop` to re-trigger itself. sl.connectionAvailableCh = make(chan net.Conn) - // NOTE: ping timeout must be less than read/write timeout + // NOTE: ping timeout must be less than read/write timeout. sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond sl.pingTimer = time.NewTicker(sl.pingInterval) @@ -79,7 +81,7 @@ func (sl *SignerListenerEndpoint) OnStart() error { return nil } -// OnStop implements service.Service +// OnStop implements service.Service. 
func (sl *SignerListenerEndpoint) OnStop() { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() @@ -96,14 +98,14 @@ func (sl *SignerListenerEndpoint) OnStop() { sl.pingTimer.Stop() } -// WaitForConnection waits maxWait for a connection or returns a timeout error +// WaitForConnection waits maxWait for a connection or returns a timeout error. func (sl *SignerListenerEndpoint) WaitForConnection(maxWait time.Duration) error { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() return sl.ensureConnection(maxWait) } -// SendRequest ensures there is a connection, sends a request and waits for a response +// SendRequest ensures there is a connection, sends a request and waits for a response. func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*privvalproto.Message, error) { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() @@ -152,16 +154,18 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { if !sl.IsRunning() || sl.listener == nil { - return nil, fmt.Errorf("endpoint is closing") + return nil, errors.New("endpoint is closing") } // wait for a new conn sl.Logger.Info("SignerListener: Listening for new connection") conn, err := sl.listener.Accept() if err != nil { + sl.acceptFailCount.Add(1) return nil, err } + sl.acceptFailCount.Store(0) return conn, nil } @@ -181,23 +185,27 @@ func (sl *SignerListenerEndpoint) serviceLoop() { for { select { case <-sl.connectRequestCh: - { - conn, err := sl.acceptNewConnection() - if err == nil { - sl.Logger.Info("SignerListener: Connected") - - // We have a good connection, wait for someone that needs one otherwise cancellation - select { - case sl.connectionAvailableCh <- conn: - case <-sl.Quit(): - return - } - } + // On start, listen timeouts can queue a duplicate connect request + // while the first request connects. Drop the duplicate request. + if sl.IsConnected() { + sl.Logger.Debug("SignerListener: Connected. 
Drop Listen Request") + continue + } - select { - case sl.connectRequestCh <- struct{}{}: - default: - } + // Listen for remote signer + conn, err := sl.acceptNewConnection() + if err != nil { + sl.Logger.Error("SignerListener: Error accepting connection", "err", err, "failures", sl.acceptFailCount.Load()) + sl.triggerConnect() + continue + } + + // We have a good connection, wait for someone that needs one otherwise cancellation + sl.Logger.Info("SignerListener: Connected") + select { + case sl.connectionAvailableCh <- conn: + case <-sl.Quit(): + return } case <-sl.Quit(): return diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index c4e4c6b247e..979a2a7920c 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -1,6 +1,7 @@ package privval import ( + "errors" "net" "testing" "time" @@ -9,9 +10,9 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/crypto/ed25519" + cmtnet "github.com/cometbft/cometbft/internal/net" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" - cmtnet "github.com/cometbft/cometbft/libs/net" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/types" ) @@ -145,6 +146,89 @@ func TestRetryConnToRemoteSigner(t *testing.T) { } } +func TestDuplicateListenReject(t *testing.T) { + for _, tc := range getDialerTestCases(t) { + var ( + logger = log.TestingLogger() + chainID = cmtrand.Str(12) + mockPV = types.NewMockPV() + endpointIsOpenCh = make(chan struct{}) + thisConnTimeout = testTimeoutReadWrite + listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout) + ) + listenerEndpoint.timeoutAccept = defaultTimeoutAcceptSeconds / 2 * time.Second + + dialerEndpoint := NewSignerDialerEndpoint( + logger, + tc.dialer, + ) + SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) + SignerDialerEndpointConnRetries(10)(dialerEndpoint) + + signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) + + startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + t.Cleanup(func() { + if err := listenerEndpoint.Stop(); err != nil { + t.Error(err) + } + }) + + require.NoError(t, signerServer.Start()) + assert.True(t, signerServer.IsRunning()) + + <-endpointIsOpenCh + if err := signerServer.Stop(); err != nil { + t.Error(err) + } + + dialerEndpoint2 := NewSignerDialerEndpoint( + logger, + tc.dialer, + ) + signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) + + // let some pings pass + require.NoError(t, signerServer2.Start()) + assert.True(t, signerServer2.IsRunning()) + + // wait for successful connection + for { + if listenerEndpoint.IsConnected() { + break + } + } + + // simulate ensureConnection, bypass triggerConnect default drop with multiple messages + time.Sleep(100 * time.Millisecond) + listenerEndpoint.triggerConnect() + time.Sleep(100 * time.Millisecond) + listenerEndpoint.triggerConnect() + time.Sleep(100 * time.Millisecond) + listenerEndpoint.triggerConnect() + + // simulate validator node running long enough for privval listen timeout multiple times + // up to 1 timeout error is possible due to timing differences + // Run 3 times longer than timeout to generate at least 2 accept errors + time.Sleep(3 * defaultTimeoutAcceptSeconds * time.Second) + t.Cleanup(func() { + if err := signerServer2.Stop(); err != nil { + t.Error(err) + } + }) + + // after connect, there should not be more than 1 accept fail + 
assert.LessOrEqual(t, listenerEndpoint.acceptFailCount.Load(), uint32(1)) + + // give the client some time to re-establish the conn to the remote signer + // should see something like this in the logs: + // + // E[10016-01-10|17:12:46.128] Ping err="remote signer timed out" + // I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal + time.Sleep(testTimeoutReadWrite * 2) + } +} + func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { proto, address := cmtnet.ProtocolAndAddress(addr) @@ -176,6 +260,7 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite } func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { + t.Helper() go func(sle *SignerListenerEndpoint) { require.NoError(t, sle.Start()) assert.True(t, sle.IsRunning()) @@ -188,7 +273,7 @@ func getMockEndpoints( addr string, socketDialer SocketDialer, ) (*SignerListenerEndpoint, *SignerDialerEndpoint) { - + t.Helper() var ( logger = log.TestingLogger() endpointIsOpenCh = make(chan struct{}) @@ -213,3 +298,28 @@ func getMockEndpoints( return listenerEndpoint, dialerEndpoint } + +func TestSignerListenerEndpointServiceLoop(t *testing.T) { + listenerEndpoint := NewSignerListenerEndpoint( + log.TestingLogger(), + &testListener{initialErrs: 5}, + ) + + require.NoError(t, listenerEndpoint.Start()) + require.NoError(t, listenerEndpoint.WaitForConnection(time.Second)) +} + +type testListener struct { + net.Listener + initialErrs int +} + +func (l *testListener) Accept() (net.Conn, error) { + if l.initialErrs > 0 { + l.initialErrs-- + + return nil, errors.New("accept error") + } + + return nil, nil // Note this doesn't actually return a valid connection, it just doesn't error. 
+} diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index b0cbe127c90..a24a34e751e 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -3,92 +3,95 @@ package privval import ( "fmt" + pvproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" - cryptoproto "github.com/cometbft/cometbft/proto/tendermint/crypto" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/types" ) func DefaultValidationRequestHandler( privVal types.PrivValidator, - req privvalproto.Message, + req pvproto.Message, chainID string, -) (privvalproto.Message, error) { +) (pvproto.Message, error) { var ( - res privvalproto.Message + res pvproto.Message err error ) switch r := req.Sum.(type) { - case *privvalproto.Message_PubKeyRequest: + case *pvproto.Message_PubKeyRequest: if r.PubKeyRequest.GetChainId() != chainID { - res = mustWrapMsg(&privvalproto.PubKeyResponse{ - PubKey: cryptoproto.PublicKey{}, Error: &privvalproto.RemoteSignerError{ - Code: 0, Description: "unable to provide pubkey"}}) - return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.PubKeyRequest.GetChainId(), chainID) + return chainIDMismatchError(r.PubKeyRequest.GetChainId(), chainID) } var pubKey crypto.PubKey - pubKey, err = privVal.GetPubKey() - if err != nil { - return res, err - } - pk, err := cryptoenc.PubKeyToProto(pubKey) - if err != nil { - return res, err - } + pubKey, err = privVal.GetPubKey() if err != nil { - res = mustWrapMsg(&privvalproto.PubKeyResponse{ - PubKey: cryptoproto.PublicKey{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + res = mustWrapMsg(&pvproto.PubKeyResponse{ + PubKeyType: "", PubKeyBytes: []byte{}, Error: &pvproto.RemoteSignerError{ + Code: 0, Description: err.Error(), + }, + }) } else { - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: pk, Error: nil}) + res = mustWrapMsg(&pvproto.PubKeyResponse{PubKeyType: pubKey.Type(), PubKeyBytes: pubKey.Bytes(), Error: nil}) } - - case *privvalproto.Message_SignVoteRequest: + case *pvproto.Message_SignVoteRequest: if r.SignVoteRequest.ChainId != chainID { - res = mustWrapMsg(&privvalproto.SignedVoteResponse{ - Vote: cmtproto.Vote{}, Error: &privvalproto.RemoteSignerError{ - Code: 0, Description: "unable to sign vote"}}) - return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.SignVoteRequest.GetChainId(), chainID) + return chainIDMismatchError(r.SignVoteRequest.GetChainId(), chainID) } vote := r.SignVoteRequest.Vote - err = privVal.SignVote(chainID, vote) + err = privVal.SignVote(chainID, vote, !r.SignVoteRequest.SkipExtensionSigning) if err != nil { - res = mustWrapMsg(&privvalproto.SignedVoteResponse{ - Vote: cmtproto.Vote{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + res = mustWrapMsg(&pvproto.SignedVoteResponse{ + Vote: cmtproto.Vote{}, Error: &pvproto.RemoteSignerError{Code: 0, Description: err.Error()}, + }) } else { - res = mustWrapMsg(&privvalproto.SignedVoteResponse{Vote: *vote, Error: nil}) + res = mustWrapMsg(&pvproto.SignedVoteResponse{Vote: *vote, Error: nil}) } - - case *privvalproto.Message_SignProposalRequest: + case *pvproto.Message_SignProposalRequest: if r.SignProposalRequest.GetChainId() != chainID { - res = 
mustWrapMsg(&privvalproto.SignedProposalResponse{ - Proposal: cmtproto.Proposal{}, Error: &privvalproto.RemoteSignerError{ - Code: 0, - Description: "unable to sign proposal"}}) - return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.SignProposalRequest.GetChainId(), chainID) + return chainIDMismatchError(r.SignProposalRequest.GetChainId(), chainID) } proposal := r.SignProposalRequest.Proposal err = privVal.SignProposal(chainID, proposal) if err != nil { - res = mustWrapMsg(&privvalproto.SignedProposalResponse{ - Proposal: cmtproto.Proposal{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + res = mustWrapMsg(&pvproto.SignedProposalResponse{ + Proposal: cmtproto.Proposal{}, Error: &pvproto.RemoteSignerError{Code: 0, Description: err.Error()}, + }) } else { - res = mustWrapMsg(&privvalproto.SignedProposalResponse{Proposal: *proposal, Error: nil}) + res = mustWrapMsg(&pvproto.SignedProposalResponse{Proposal: *proposal, Error: nil}) } - case *privvalproto.Message_PingRequest: - err, res = nil, mustWrapMsg(&privvalproto.PingResponse{}) + case *pvproto.Message_SignBytesRequest: + var signature []byte + signature, err = privVal.SignBytes(r.SignBytesRequest.Value) + if err != nil { + res = mustWrapMsg(&pvproto.SignBytesResponse{ + Signature: nil, Error: &pvproto.RemoteSignerError{Code: 0, Description: err.Error()}, + }) + } else { + res = mustWrapMsg(&pvproto.SignBytesResponse{Signature: signature, Error: nil}) + } + case *pvproto.Message_PingRequest: + err, res = nil, mustWrapMsg(&pvproto.PingResponse{}) default: err = fmt.Errorf("unknown msg: %v", r) } return res, err } + +func chainIDMismatchError(want, got string) (pvproto.Message, error) { + res := mustWrapMsg(&pvproto.PubKeyResponse{ + PubKeyType: "", PubKeyBytes: []byte{}, Error: &pvproto.RemoteSignerError{ + Code: 0, Description: "unable to serve request", + }, + }) + return res, fmt.Errorf("want chainID: %s, got chainID: %s", want, got) +} diff --git a/privval/signer_server.go b/privval/signer_server.go index 8c9abe717d4..dd38296e398 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -3,17 +3,18 @@ package privval import ( "io" + privvalproto "github.com/cometbft/cometbft/api/cometbft/privval/v1" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" - privvalproto "github.com/cometbft/cometbft/proto/tendermint/privval" "github.com/cometbft/cometbft/types" ) -// ValidationRequestHandlerFunc handles different remoteSigner requests +// ValidationRequestHandlerFunc handles different remoteSigner requests. type ValidationRequestHandlerFunc func( privVal types.PrivValidator, requestMessage privvalproto.Message, - chainID string) (privvalproto.Message, error) + chainID string, +) (privvalproto.Message, error) type SignerServer struct { service.BaseService @@ -51,7 +52,7 @@ func (ss *SignerServer) OnStop() { _ = ss.endpoint.Close() } -// SetRequestHandler override the default function that is used to service requests +// SetRequestHandler overrides the default function that is used to service requests. 
func (ss *SignerServer) SetRequestHandler(validationRequestHandler ValidationRequestHandlerFunc) { ss.handlerMtx.Lock() defer ss.handlerMtx.Unlock() @@ -72,8 +73,7 @@ func (ss *SignerServer) servicePendingRequest() { } var res privvalproto.Message - { - // limit the scope of the lock + func() { ss.handlerMtx.Lock() defer ss.handlerMtx.Unlock() res, err = ss.validationRequestHandler(ss.privVal, req, ss.chainID) @@ -81,7 +81,7 @@ func (ss *SignerServer) servicePendingRequest() { // only log the error; we'll reply with an error in res ss.Logger.Error("SignerServer: handleMessage", "err", err) } - } + }() err = ss.endpoint.WriteMessage(res) if err != nil { diff --git a/privval/socket_dialers.go b/privval/socket_dialers.go index d49231c72c0..a29e1f416c0 100644 --- a/privval/socket_dialers.go +++ b/privval/socket_dialers.go @@ -6,8 +6,8 @@ import ( "time" "github.com/cometbft/cometbft/crypto" - cmtnet "github.com/cometbft/cometbft/libs/net" - p2pconn "github.com/cometbft/cometbft/p2p/conn" + cmtnet "github.com/cometbft/cometbft/internal/net" + p2pconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) // Socket errors. diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index f167a1daed3..cd3d9158794 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -12,10 +12,11 @@ import ( ) func getDialerTestCases(t *testing.T) []dialerTestCase { + t.Helper() tcpAddr := GetFreeLocalhostAddrPort() unixFilePath, err := testUnixAddr() require.NoError(t, err) - unixAddr := fmt.Sprintf("unix://%s", unixFilePath) + unixAddr := "unix://" + unixFilePath return []dialerTestCase{ { @@ -34,7 +35,7 @@ func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) { tcpAddr := GetFreeLocalhostAddrPort() dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() - assert.Error(t, err) + require.Error(t, err) assert.True(t, IsConnTimeout(err)) } @@ -42,7 +43,7 @@ func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) { tcpAddr := GetFreeLocalhostAddrPort() dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() - assert.Error(t, err) + require.Error(t, err) err = fmt.Errorf("%v: %w", err, ErrConnectionTimeout) assert.True(t, IsConnTimeout(err)) } diff --git a/privval/socket_listeners.go b/privval/socket_listeners.go index 6d406bd6925..ce9d91f3eef 100644 --- a/privval/socket_listeners.go +++ b/privval/socket_listeners.go @@ -5,7 +5,7 @@ import ( "time" "github.com/cometbft/cometbft/crypto/ed25519" - p2pconn "github.com/cometbft/cometbft/p2p/conn" + p2pconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) const ( @@ -18,7 +18,7 @@ type timeoutError interface { Timeout() bool } -//------------------------------------------------------------------ +// ------------------------------------------------------------------ // TCP Listener // TCPListenerOption sets an optional parameter on the tcpListener. @@ -84,7 +84,7 @@ func (ln *TCPListener) Accept() (net.Conn, error) { return secretConn, nil } -//------------------------------------------------------------------ +// ------------------------------------------------------------------ // Unix Listener // unixListener implements net.Listener. @@ -145,7 +145,7 @@ func (ln *UnixListener) Accept() (net.Conn, error) { return conn, nil } -//------------------------------------------------------------------ +// ------------------------------------------------------------------ // Connection // timeoutConn implements net.Conn. 
@@ -171,7 +171,7 @@ func (c timeoutConn) Read(b []byte) (n int, err error) { deadline := time.Now().Add(c.timeout) err = c.Conn.SetReadDeadline(deadline) if err != nil { - return + return 0, err } return c.Conn.Read(b) @@ -183,7 +183,7 @@ func (c timeoutConn) Write(b []byte) (n int, err error) { deadline := time.Now().Add(c.timeout) err = c.Conn.SetWriteDeadline(deadline) if err != nil { - return + return 0, err } return c.Conn.Write(b) diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 28d94300d0e..2547dfc9142 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -9,14 +9,14 @@ import ( "github.com/cometbft/cometbft/crypto/ed25519" ) -//------------------------------------------- +// ------------------------------------------- // helper funcs func newPrivKey() ed25519.PrivKey { return ed25519.GenPrivKey() } -//------------------------------------------- +// ------------------------------------------- // tests type listenerTestCase struct { @@ -26,7 +26,7 @@ type listenerTestCase struct { } // testUnixAddr will attempt to obtain a platform-independent temporary file -// name for a Unix socket +// name for a Unix socket. func testUnixAddr() (string, error) { f, err := os.CreateTemp("", "cometbft-privval-test-*") if err != nil { @@ -39,6 +39,7 @@ func testUnixAddr() (string, error) { } func tcpListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) listenerTestCase { + t.Helper() ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) @@ -55,6 +56,7 @@ func tcpListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Dura } func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) listenerTestCase { + t.Helper() addr, err := testUnixAddr() if err != nil { t.Fatal(err) @@ -75,6 +77,7 @@ func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Dur } func listenerTestCases(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) []listenerTestCase { + t.Helper() return []listenerTestCase{ tcpListenerTestCase(t, timeoutAccept, timeoutReadWrite), unixListenerTestCase(t, timeoutAccept, timeoutReadWrite), @@ -104,12 +107,16 @@ func TestListenerTimeoutReadWrite(t *testing.T) { // Note: this controls how long this test actually runs. timeoutReadWrite = 10 * time.Millisecond ) + for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) { go func(dialer SocketDialer) { - _, err := dialer() + conn, err := dialer() if err != nil { panic(err) } + // Add a delay before closing the connection + time.Sleep(2 * timeoutReadWrite) + conn.Close() }(tc.dialer) c, err := tc.listener.Accept() diff --git a/privval/utils.go b/privval/utils.go index b7d84a9e928..6754a927537 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -6,8 +6,8 @@ import ( "net" "github.com/cometbft/cometbft/crypto/ed25519" + cmtnet "github.com/cometbft/cometbft/internal/net" "github.com/cometbft/cometbft/libs/log" - cmtnet "github.com/cometbft/cometbft/libs/net" ) // IsConnTimeout returns a boolean indicating whether the error is known to @@ -25,7 +25,7 @@ func IsConnTimeout(err error) bool { } } -// NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address +// NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address. 
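The `timeoutConn` hunks above replace bare `return` statements with explicit `return 0, err`: with named result parameters a bare `return` compiles, but it obscures which values are actually returned. A standalone sketch of the deadline-refreshing wrapper, built only on the standard library (the type name is illustrative):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// deadlineConn wraps a net.Conn and refreshes the read/write deadline
// before every operation, mirroring privval's timeoutConn.
type deadlineConn struct {
	net.Conn
	timeout time.Duration
}

func (c deadlineConn) Read(b []byte) (int, error) {
	if err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil {
		return 0, err // explicit, rather than a bare return with named results
	}
	return c.Conn.Read(b)
}

func (c deadlineConn) Write(b []byte) (int, error) {
	if err := c.Conn.SetWriteDeadline(time.Now().Add(c.timeout)); err != nil {
		return 0, err
	}
	return c.Conn.Write(b)
}

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()

	c := deadlineConn{Conn: a, timeout: time.Second}
	go func() { _, _ = b.Write([]byte("hi")) }()

	buf := make([]byte, 2)
	n, err := c.Read(buf)
	fmt.Println(n, err)
}
```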
func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEndpoint, error) { var listener net.Listener @@ -52,7 +52,7 @@ func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEnd return pve, nil } -// GetFreeLocalhostAddrPort returns a free localhost:port address +// GetFreeLocalhostAddrPort returns a free localhost:port address. func GetFreeLocalhostAddrPort() string { port, err := cmtnet.GetFreePort() if err != nil { diff --git a/proto/README.md b/proto/README.md index fcce452a244..a8cfaba4e90 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,41 +1,85 @@ -# Protocol Buffers - -This sections defines the types and messages shared across implementations. The -definition of the data structures are located in the -[core/data\_structures](../spec/core/data_structures.md) for the core data types -and ABCI definitions are located in the [ABCI](../spec/abci/README.md) section. - -## Process of Updates - -The `.proto` files within this section are core to the protocol and updates must -be treated as such. - -### Steps - -1. Make an issue with the proposed change. Within in the issue members from - both the CometBFT and tendermint-rs team will leave comments. If there is not - consensus on the change an [RFC](../docs/rfc/README.md) may be requested. - 1. Submission of an RFC as a pull request should be made to facilitate - further discussion. - 2. Merge the RFC. -2. Make the necessary changes to the `.proto` file(s), [core data - structures](../spec/core/data_structures.md) and/or [ABCI - protocol](../spec/abci). -3. Open issues within CometBFT and Tendermint-rs repos. This is used to notify - the teams that a change occurred in the spec. - 1. Tag the issue with a spec version label. This will notify the team the - changed has been made on master but has not entered a release. - -### Versioning - -The spec repo aims to be versioned. Once it has been versioned, updates to the -protobuf files will live on master. After a certain amount of time, decided on -by CometBFT and tendermint-rs team leads, a release will be made on the spec -repo. The spec may contain minor releases as well, depending on the -implementation these changes may lead to a breaking change. If so, the -implementation team should open an issue within the spec repo requiring a major -release of the spec. - -If the steps above were followed each implementation should have issues tagged -with a spec change label. Once all issues have been completed the team should -signify their readiness for release. +[NB]: # ( + Ensure that all hyperlinks in this doc are absolute URLs, not relative ones, + as this doc gets published to the Buf registry and relative URLs will fail + to resolve. +) + +# CometBFT Protocol Buffers Definitions + +This is the set of [Protobuf][protobuf] definitions of types used by various +parts of [CometBFT]: + +- The [Application Blockchain Interface][abci] (ABCI), especially in the context + of _remote_ applications. +- The P2P layer, in how CometBFT nodes interact with each other over the + network. +- In interaction with remote signers ("privval"). +- The RPC, in that the native JSON serialization of certain Protobuf types is + used when accepting and responding to RPC requests. +- The storage layer, in how data is serialized to and deserialized from on-disk + storage. + +The canonical Protobuf definitions live in the `proto` folder of the relevant +release branch of CometBFT. These definitions are published to the [Buf +registry][buf] for integrators' convenience. 
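`GetFreeLocalhostAddrPort` relies on the usual free-port trick: bind to port 0 and let the kernel pick an unused port. A sketch of the idea using nothing beyond the standard library; this is not the actual `cmtnet.GetFreePort` implementation, just the underlying technique:

```go
package main

import (
	"fmt"
	"net"
)

// freeLocalhostAddr asks the OS for an unused TCP port by listening on
// port 0, then immediately releases it and returns the full address.
// Note the inherent race: another process may grab the port between
// Close and reuse, which is an accepted trade-off in test helpers.
func freeLocalhostAddr() (string, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return "", err
	}
	defer ln.Close()
	return ln.Addr().String(), nil
}

func main() {
	addr, err := freeLocalhostAddr()
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // e.g. 127.0.0.1:54321
}
```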
For detailed instructions on how to publish
+the files to the Buf registry, please refer to the [RELEASES.md](../RELEASES.md) document.
+
+The Protobuf files are organized under two domains: `cometbft` and `tendermint`.
+The `cometbft.*` packages use version suffixes to let application developers
+target versions of the protocols as they have evolved between CometBFT releases.
+
+## Which CometBFT release does each package belong to?
+
+As of the 1.0.0 release, the entire set of Protobuf definitions used by CometBFT
+is published in packages suffixed with `.v1`. Earlier revisions of the
+definitions, where they differed, are provided alongside in `.v1beta`_N_
+packages. The correspondence between package suffixes and releases is as follows:
+
+| Domain          | 0.34      | 0.37      | 0.38      | 1.0  |
+|-----------------|-----------|-----------|-----------|------|
+| `abci`          | `v1beta1` | `v1beta2` | `v1beta3` | `v1` |
+| `blocksync`     |           | `v1beta1` | `v1`      | `v1` |
+| `consensus`     | `v1beta1` | `v1beta1` | `v1beta1` | `v1` |
+| `crypto`        | `v1`      | `v1`      | `v1`      | `v1` |
+| `libs/bits`     | `v1`      | `v1`      | `v1`      | `v1` |
+| `mempool`       | `v1`      | `v1`      | `v1`      | `v1` |
+| `p2p`           | `v1`      | `v1`      | `v1`      | `v1` |
+| `privval`       | `v1beta1` | `v1beta1` | `v1beta2` | `v1` |
+| `rpc/grpc`[^1]  | `v1beta1` | `v1beta2` | `v1beta3` |      |
+| `state`         | `v1beta1` | `v1beta2` | `v1beta3` | `v1` |
+| `statesync`     | `v1`      | `v1`      | `v1`      | `v1` |
+| `types`         | `v1beta1` | `v1beta2` | `v1`      | `v1` |
+| `version`       | `v1`      | `v1`      | `v1`      | `v1` |
+
+[^1]: Retired in 1.0
+
+## Why does CometBFT provide `tendermint` Protobuf definitions?
+
+This is a result of CometBFT being a fork of [Tendermint Core][tmcore], and of
+the project's aim to make integrators' transition from Tendermint Core to
+CometBFT as painless as possible.
+
+As of CometBFT v1, however, the project will transition to using and providing a
+`cometbft` package of Protobuf definitions (see [\#1330]).
+
+Protobuf definitions for each respective release are also, for convenience,
+published to a corresponding branch in the `tendermint/tendermint` Buf
+repository, as listed in the table below.
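Before the branch table below, a Go-level illustration of the version-suffix scheme: the suffixed proto packages map onto import paths in the `api` module via the `go_package` options in these files. The type names are the gogoproto-generated ones and are assumptions insofar as this diff does not show the generated code:

```go
package main

import (
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
	abciv1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1"
)

func main() {
	// Current (1.0) revision of the ABCI types.
	echo := abciv1.EchoRequest{Message: "hello"}
	// Legacy (0.34-era) revision, kept for compatibility.
	legacyEcho := abciv1beta1.RequestEcho{Message: "hello"}
	fmt.Println(echo.Message, legacyEcho.Message)
}
```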
+ +| CometBFT version | Canonical Protobufs | Buf registry | +|------------------|---------------------------------------------|-------------------------------------------| +| v0.38.x | [v0.38.x Protobuf definitions][v038-protos] | [Buf repository v0.38.x branch][v038-buf] | +| v0.37.x | [v0.37.x Protobuf definitions][v037-protos] | [Buf repository v0.37.x branch][v037-buf] | +| v0.34.x | [v0.34.x Protobuf definitions][v034-protos] | [Buf repository v0.34.x branch][v034-buf] | + +[protobuf]: https://protobuf.dev/ +[CometBFT]: https://github.com/cometbft/cometbft +[abci]: https://github.com/cometbft/cometbft/tree/main/spec/abci +[buf]: https://buf.build/tendermint/tendermint +[tmcore]: https://github.com/tendermint/tendermint +[\#1330]: https://github.com/cometbft/cometbft/issues/1330 +[v034-protos]: https://github.com/cometbft/cometbft/tree/v0.34.x/proto +[v034-buf]: https://buf.build/tendermint/tendermint/docs/v0.34.x +[v037-protos]: https://github.com/cometbft/cometbft/tree/v0.37.x/proto +[v037-buf]: https://buf.build/tendermint/tendermint/docs/v0.37.x +[v038-protos]: https://github.com/cometbft/cometbft/tree/v0.38.x/proto +[v038-buf]: https://buf.build/tendermint/tendermint/docs/v0.38.x diff --git a/proto/buf.lock b/proto/buf.lock index f2b69369858..6bce5f5fc58 100644 --- a/proto/buf.lock +++ b/proto/buf.lock @@ -4,4 +4,5 @@ deps: - remote: buf.build owner: cosmos repository: gogo-proto - commit: 6652e3443c3b4504bb3bf82e73a7e409 + commit: 88ef6483f90f478fb938c37dde52ece3 + digest: shake256:89c45df2aa11e0cff97b0d695436713db3d993d76792e9f8dc1ae90e6ab9a9bec55503d48ceedd6b86069ab07d3041b32001b2bfe0227fa725dd515ff381e5ba diff --git a/proto/buf.yaml b/proto/buf.yaml index 6b8fa247afe..75155dc0b9c 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -1,10 +1,51 @@ version: v1 +name: buf.build/cometbft/cometbft deps: - buf.build/cosmos/gogo-proto breaking: use: - FILE +build: + excludes: + - tendermint lint: use: - - BASIC + - DEFAULT + - COMMENTS - FILE_LOWER_SNAKE_CASE + except: + - COMMENT_FIELD + ignore_only: + ENUM_VALUE_PREFIX: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + ENUM_ZERO_VALUE_SUFFIX: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + PACKAGE_VERSION_SUFFIX: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + RPC_REQUEST_RESPONSE_UNIQUE: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + - cometbft/rpc/grpc + RPC_REQUEST_STANDARD_NAME: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + - cometbft/rpc/grpc + RPC_RESPONSE_STANDARD_NAME: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + - cometbft/rpc/grpc + SERVICE_SUFFIX: + - cometbft/abci/v1beta1 + - cometbft/abci/v1beta2 + - cometbft/abci/v1beta3 + - cometbft/rpc/grpc + enum_zero_value_suffix: _UNKNOWN diff --git a/proto/cometbft/abci/v1/service.proto b/proto/cometbft/abci/v1/service.proto new file mode 100644 index 00000000000..535dbc83586 --- /dev/null +++ b/proto/cometbft/abci/v1/service.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package cometbft.abci.v1; + +import "cometbft/abci/v1/types.proto"; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/abci/v1"; + +// ABCIService is a service for an ABCI application. +service ABCIService { + // Echo returns back the same message it is sent. + rpc Echo(EchoRequest) returns (EchoResponse); + // Flush flushes the write buffer. 
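A hedged sketch of calling the new `ABCIService` over gRPC. The client constructor and method signatures follow standard protoc-gen-go-grpc conventions and are assumptions, since the generated code is not part of this diff; the address is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Illustrative address of a gRPC-exposed ABCI application.
	conn, err := grpc.Dial("127.0.0.1:26658",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Assumed generated constructor for the ABCIService defined above.
	client := abciv1.NewABCIServiceClient(conn)
	res, err := client.Echo(context.Background(), &abciv1.EchoRequest{Message: "ping"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Message) // the service echoes the message back
}
```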
+ rpc Flush(FlushRequest) returns (FlushResponse); + // Info returns information about the application state. + rpc Info(InfoRequest) returns (InfoResponse); + // CheckTx validates a transaction. + rpc CheckTx(CheckTxRequest) returns (CheckTxResponse); + // Query queries the application state. + rpc Query(QueryRequest) returns (QueryResponse); + // Commit commits a block of transactions. + rpc Commit(CommitRequest) returns (CommitResponse); + // InitChain initializes the blockchain. + rpc InitChain(InitChainRequest) returns (InitChainResponse); + // ListSnapshots lists all the available snapshots. + rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse); + // OfferSnapshot sends a snapshot offer. + rpc OfferSnapshot(OfferSnapshotRequest) returns (OfferSnapshotResponse); + // LoadSnapshotChunk returns a chunk of snapshot. + rpc LoadSnapshotChunk(LoadSnapshotChunkRequest) returns (LoadSnapshotChunkResponse); + // ApplySnapshotChunk applies a chunk of snapshot. + rpc ApplySnapshotChunk(ApplySnapshotChunkRequest) returns (ApplySnapshotChunkResponse); + // PrepareProposal returns a proposal for the next block. + rpc PrepareProposal(PrepareProposalRequest) returns (PrepareProposalResponse); + // ProcessProposal validates a proposal. + rpc ProcessProposal(ProcessProposalRequest) returns (ProcessProposalResponse); + // ExtendVote extends a vote with application-injected data (vote extensions). + rpc ExtendVote(ExtendVoteRequest) returns (ExtendVoteResponse); + // VerifyVoteExtension verifies a vote extension. + rpc VerifyVoteExtension(VerifyVoteExtensionRequest) returns (VerifyVoteExtensionResponse); + // FinalizeBlock finalizes a block. + rpc FinalizeBlock(FinalizeBlockRequest) returns (FinalizeBlockResponse); +} diff --git a/proto/cometbft/abci/v1/types.proto b/proto/cometbft/abci/v1/types.proto new file mode 100644 index 00000000000..0ef78d0091b --- /dev/null +++ b/proto/cometbft/abci/v1/types.proto @@ -0,0 +1,601 @@ +syntax = "proto3"; +package cometbft.abci.v1; + +import "cometbft/crypto/v1/proof.proto"; +import "cometbft/types/v1/params.proto"; +import "cometbft/types/v1/validator.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/abci/v1"; + +// ---------------------------------------- +// Request types + +// Request represents a request to the ABCI application. +message Request { + // Sum of all possible messages. + oneof value { + EchoRequest echo = 1; + FlushRequest flush = 2; + InfoRequest info = 3; + InitChainRequest init_chain = 5; + QueryRequest query = 6; + CheckTxRequest check_tx = 8; + CommitRequest commit = 11; + ListSnapshotsRequest list_snapshots = 12; + OfferSnapshotRequest offer_snapshot = 13; + LoadSnapshotChunkRequest load_snapshot_chunk = 14; + ApplySnapshotChunkRequest apply_snapshot_chunk = 15; + PrepareProposalRequest prepare_proposal = 16; + ProcessProposalRequest process_proposal = 17; + ExtendVoteRequest extend_vote = 18; + VerifyVoteExtensionRequest verify_vote_extension = 19; + FinalizeBlockRequest finalize_block = 20; + } + reserved 4, 7, 9, 10; // SetOption, BeginBlock, DeliverTx, EndBlock +} + +// EchoRequest is a request to "echo" the given string. +message EchoRequest { + string message = 1; +} + +// FlushRequest is a request to flush the write buffer. +message FlushRequest {} + +// InfoRequest is a request for the ABCI application version. 
+message InfoRequest { + string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; + string abci_version = 4; +} + +// InitChainRequest is a request to initialize the blockchain. +message InitChainRequest { + google.protobuf.Timestamp time = 1 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + string chain_id = 2; + cometbft.types.v1.ConsensusParams consensus_params = 3; + repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; + bytes app_state_bytes = 5; + int64 initial_height = 6; +} + +// QueryRequest is a request to query the application state. +message QueryRequest { + bytes data = 1; + string path = 2; + int64 height = 3; + bool prove = 4; +} + +// Type of the transaction check request. +// +// This enumeration is incompatible with the CheckTxType definition in +// cometbft.abci.v1beta1 and therefore shall not be used in encoding with the same +// field number. +enum CheckTxType { + option (gogoproto.goproto_enum_prefix) = false; + + // Unknown + CHECK_TX_TYPE_UNKNOWN = 0; + // Recheck (2nd, 3rd, etc.) + CHECK_TX_TYPE_RECHECK = 1; + // Check (1st time) + CHECK_TX_TYPE_CHECK = 2; +} + +// CheckTxRequest is a request to check that the transaction is valid. +message CheckTxRequest { + bytes tx = 1; + CheckTxType type = 3; + reserved 2; // v1beta1.CheckTxType type +} + +// CommitRequest is a request to commit the pending application state. +message CommitRequest {} + +// Request to list available snapshots. +message ListSnapshotsRequest {} + +// Request offering a snapshot to the application. +message OfferSnapshotRequest { + Snapshot snapshot = 1; // snapshot offered by peers + bytes app_hash = 2; // light client-verified app hash for snapshot height +} + +// Request to load a snapshot chunk. +message LoadSnapshotChunkRequest { + uint64 height = 1; + uint32 format = 2; + uint32 chunk = 3; +} + +// Request to apply a snapshot chunk. +message ApplySnapshotChunkRequest { + uint32 index = 1; + bytes chunk = 2; + string sender = 3; +} + +// PrepareProposalRequest is a request for the ABCI application to prepare a new +// block proposal. +message PrepareProposalRequest { + // the modified transactions cannot exceed this size. + int64 max_tx_bytes = 1; + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + repeated bytes txs = 2; + ExtendedCommitInfo local_last_commit = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 4 [(gogoproto.nullable) = false]; + int64 height = 5; + google.protobuf.Timestamp time = 6 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + bytes next_validators_hash = 7; + // address of the public key of the validator proposing the block. + bytes proposer_address = 8; +} + +// ProcessProposalRequest is a request for the ABCI application to process a proposal +// received from another validator. +message ProcessProposalRequest { + repeated bytes txs = 1; + CommitInfo proposed_last_commit = 2 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; + // Merkle root hash of the fields of the proposed block. + bytes hash = 4; + int64 height = 5; + google.protobuf.Timestamp time = 6 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + bytes next_validators_hash = 7; + // address of the public key of the original proposer of the block. + bytes proposer_address = 8; +} + +// ExtendVoteRequest extends a precommit vote with application-injected data. 
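Since `CheckTxType` is generated with `goproto_enum_prefix` disabled, the Go constants carry the full proto names (`CHECK_TX_TYPE_CHECK`, `CHECK_TX_TYPE_RECHECK`), and the renumbering relative to v1beta1 is why `CheckTxRequest` reserves field 2 and uses field 3. A sketch of an application-side handler that runs expensive stateless checks only on first-time checks; the handler shape and the checks themselves are illustrative:

```go
package main

import (
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
)

// checkTx sketches an application-side CheckTx that skips stateless
// validation on rechecks, which only re-run transactions already
// admitted to the mempool.
func checkTx(req *abciv1.CheckTxRequest) *abciv1.CheckTxResponse {
	if req.Type == abciv1.CHECK_TX_TYPE_CHECK {
		if len(req.Tx) == 0 { // placeholder stateless check
			return &abciv1.CheckTxResponse{Code: 1, Log: "empty tx"}
		}
		// ... full format/signature validation would go here ...
	}
	// CHECK_TX_TYPE_RECHECK falls through to state-dependent checks only.
	return &abciv1.CheckTxResponse{Code: 0}
}

func main() {
	res := checkTx(&abciv1.CheckTxRequest{Tx: []byte{0x01}, Type: abciv1.CHECK_TX_TYPE_CHECK})
	fmt.Println(res.Code)
}
```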
+message ExtendVoteRequest { + // the hash of the block that this vote may be referring to + bytes hash = 1; + // the height of the extended vote + int64 height = 2; + // info of the block that this vote may be referring to + google.protobuf.Timestamp time = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + repeated bytes txs = 4; + CommitInfo proposed_last_commit = 5 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 6 [(gogoproto.nullable) = false]; + bytes next_validators_hash = 7; + // address of the public key of the original proposer of the block. + bytes proposer_address = 8; +} + +// VerifyVoteExtensionRequest is a request for the application to verify a vote extension +// produced by a different validator. +message VerifyVoteExtensionRequest { + // the hash of the block that this received vote corresponds to + bytes hash = 1; + // the validator that signed the vote extension + bytes validator_address = 2; + int64 height = 3; + bytes vote_extension = 4; +} + +// FinalizeBlockRequest is a request to finalize the block. +message FinalizeBlockRequest { + repeated bytes txs = 1; + CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; + // Merkle root hash of the fields of the decided block. + bytes hash = 4; + int64 height = 5; + google.protobuf.Timestamp time = 6 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + bytes next_validators_hash = 7; + // address of the public key of the original proposer of the block. + bytes proposer_address = 8; + // If the node is syncing/replaying blocks - target height. If not, syncing_to == height. + int64 syncing_to_height = 9; +} + +// ---------------------------------------- +// Response types + +// Response represents a response from the ABCI application. +message Response { + // Sum of all possible messages. + oneof value { + ExceptionResponse exception = 1; + EchoResponse echo = 2; + FlushResponse flush = 3; + InfoResponse info = 4; + InitChainResponse init_chain = 6; + QueryResponse query = 7; + CheckTxResponse check_tx = 9; + CommitResponse commit = 12; + ListSnapshotsResponse list_snapshots = 13; + OfferSnapshotResponse offer_snapshot = 14; + LoadSnapshotChunkResponse load_snapshot_chunk = 15; + ApplySnapshotChunkResponse apply_snapshot_chunk = 16; + PrepareProposalResponse prepare_proposal = 17; + ProcessProposalResponse process_proposal = 18; + ExtendVoteResponse extend_vote = 19; + VerifyVoteExtensionResponse verify_vote_extension = 20; + FinalizeBlockResponse finalize_block = 21; + } + reserved 5, 8, 10, 11; // SetOption, BeginBlock, DeliverTx, EndBlock +} + +// nondeterministic +message ExceptionResponse { + string error = 1; +} + +// EchoResponse indicates that the connection is still alive. +message EchoResponse { + string message = 1; +} + +// FlushResponse indicates that the write buffer was flushed. +message FlushResponse {} + +// InfoResponse contains the ABCI application version information. +message InfoResponse { + string data = 1; + + string version = 2; + uint64 app_version = 3; + + int64 last_block_height = 4; + bytes last_block_app_hash = 5; + + map lane_priorities = 6; + string default_lane = 7; +} + +// InitChainResponse contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. 
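A hedged sketch of serving `VerifyVoteExtension`, using the status constants defined further down in this file; the expected-extension computation is a placeholder. Note the warning attached to `VERIFY_VOTE_EXTENSION_STATUS_REJECT` below: rejecting an extension rejects the sender's entire precommit, so honest extensions must never be rejected.

```go
package main

import (
	"bytes"
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
)

// verifyVoteExtension accepts an extension only if it matches what this
// node's application would itself have produced (here passed in as
// `expected`, a placeholder for the real recomputation).
func verifyVoteExtension(req *abciv1.VerifyVoteExtensionRequest, expected []byte) *abciv1.VerifyVoteExtensionResponse {
	if bytes.Equal(req.VoteExtension, expected) {
		return &abciv1.VerifyVoteExtensionResponse{Status: abciv1.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT}
	}
	return &abciv1.VerifyVoteExtensionResponse{Status: abciv1.VERIFY_VOTE_EXTENSION_STATUS_REJECT}
}

func main() {
	res := verifyVoteExtension(&abciv1.VerifyVoteExtensionRequest{VoteExtension: []byte("x")}, []byte("x"))
	fmt.Println(res.Status)
}
```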
+message InitChainResponse { + cometbft.types.v1.ConsensusParams consensus_params = 1; + repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; + bytes app_hash = 3; +} + +// QueryResponse contains the ABCI application data along with a proof. +message QueryResponse { + uint32 code = 1; + // bytes data = 2; // use "value" instead. + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 index = 5; + bytes key = 6; + bytes value = 7; + cometbft.crypto.v1.ProofOps proof_ops = 8; + int64 height = 9; + string codespace = 10; +} + +// CheckTxResponse shows if the transaction was deemed valid by the ABCI +// application. +message CheckTxResponse { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; // nondeterministic + string codespace = 8; + + // These reserved fields were used till v0.37 by the priority mempool (now + // removed). + reserved 9 to 11; + reserved "sender", "priority", "mempool_error"; + + string lane_id = 12; +} + +// CommitResponse indicates how much blocks should CometBFT retain. +message CommitResponse { + reserved 1, 2; // data was previously returned here + int64 retain_height = 3; +} + +// ListSnapshotsResponse contains the list of snapshots. +message ListSnapshotsResponse { + repeated Snapshot snapshots = 1; +} + +// OfferSnapshotResponse indicates the ABCI application decision whenever to +// provide a snapshot to the requester or not. +message OfferSnapshotResponse { + OfferSnapshotResult result = 1; +} + +// The result of offering a snapshot. +enum OfferSnapshotResult { + option (gogoproto.goproto_enum_prefix) = false; + + // Unknown result, abort all snapshot restoration + OFFER_SNAPSHOT_RESULT_UNKNOWN = 0; + // Snapshot accepted, apply chunks + OFFER_SNAPSHOT_RESULT_ACCEPT = 1; + // Abort all snapshot restoration + OFFER_SNAPSHOT_RESULT_ABORT = 2; + // Reject this specific snapshot, try others + OFFER_SNAPSHOT_RESULT_REJECT = 3; + // Reject all snapshots of this format, try others + OFFER_SNAPSHOT_RESULT_REJECT_FORMAT = 4; + // Reject all snapshots from the sender(s), try others + OFFER_SNAPSHOT_RESULT_REJECT_SENDER = 5; +} + +// LoadSnapshotChunkResponse returns a snapshot's chunk. +message LoadSnapshotChunkResponse { + bytes chunk = 1; +} + +// ApplySnapshotChunkResponse returns a result of applying the specified chunk. +message ApplySnapshotChunkResponse { + ApplySnapshotChunkResult result = 1; + repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply + repeated string reject_senders = 3; // Chunk senders to reject and ban +} + +// The result of applying a snapshot chunk. 
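The graduated `OfferSnapshotResult` values let an application steer state sync, declining a single snapshot, a whole format, or a sender, without aborting restoration outright. A sketch of the decision logic; the supported format is a placeholder:

```go
package main

import (
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
)

// Illustrative: the only snapshot format this application can restore.
const supportedFormat = 1

func offerSnapshot(req *abciv1.OfferSnapshotRequest) *abciv1.OfferSnapshotResponse {
	switch {
	case req.Snapshot == nil:
		// Nothing usable offered; reject this offer, try others.
		return &abciv1.OfferSnapshotResponse{Result: abciv1.OFFER_SNAPSHOT_RESULT_REJECT}
	case req.Snapshot.Format != supportedFormat:
		// Unknown format: tell CometBFT not to offer this format again.
		return &abciv1.OfferSnapshotResponse{Result: abciv1.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT}
	default:
		return &abciv1.OfferSnapshotResponse{Result: abciv1.OFFER_SNAPSHOT_RESULT_ACCEPT}
	}
}

func main() {
	res := offerSnapshot(&abciv1.OfferSnapshotRequest{Snapshot: &abciv1.Snapshot{Format: 1}})
	fmt.Println(res.Result)
}
```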
+enum ApplySnapshotChunkResult { + option (gogoproto.goproto_enum_prefix) = false; + + // Unknown result, abort all snapshot restoration + APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN = 0; + // Chunk successfully accepted + APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT = 1; + // Abort all snapshot restoration + APPLY_SNAPSHOT_CHUNK_RESULT_ABORT = 2; + // Retry chunk (combine with refetch and reject) + APPLY_SNAPSHOT_CHUNK_RESULT_RETRY = 3; + // Retry snapshot (combine with refetch and reject) + APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT = 4; + // Reject this snapshot, try others + APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT = 5; +} + +// PrepareProposalResponse contains a list of transactions, which will form a block. +message PrepareProposalResponse { + repeated bytes txs = 1; +} + +// ProcessProposalResponse indicates the ABCI application's decision whenever +// the given proposal should be accepted or not. +message ProcessProposalResponse { + ProcessProposalStatus status = 1; +} + +// ProcessProposalStatus is the status of the proposal processing. +enum ProcessProposalStatus { + option (gogoproto.goproto_enum_prefix) = false; + + // Unknown + PROCESS_PROPOSAL_STATUS_UNKNOWN = 0; + // Accepted + PROCESS_PROPOSAL_STATUS_ACCEPT = 1; + // Rejected + PROCESS_PROPOSAL_STATUS_REJECT = 2; +} + +// ExtendVoteResponse contains the vote extension that the application would like to +// attach to its next precommit vote. +message ExtendVoteResponse { + bytes vote_extension = 1; +} + +// VerifyVoteExtensionResponse indicates the ABCI application's decision +// whenever the vote extension should be accepted or not. +message VerifyVoteExtensionResponse { + VerifyVoteExtensionStatus status = 1; +} + +// VerifyVoteExtensionStatus is the status of the vote extension verification. +enum VerifyVoteExtensionStatus { + option (gogoproto.goproto_enum_prefix) = false; + + // Unknown + VERIFY_VOTE_EXTENSION_STATUS_UNKNOWN = 0; + // Accepted + VERIFY_VOTE_EXTENSION_STATUS_ACCEPT = 1; + // Rejecting the vote extension will reject the entire precommit by the sender. + // Incorrectly implementing this thus has liveness implications as it may affect + // CometBFT's ability to receive 2/3+ valid votes to finalize the block. + // Honest nodes should never be rejected. + VERIFY_VOTE_EXTENSION_STATUS_REJECT = 2; +} + +// FinalizeBlockResponse contains the result of executing the block. +message FinalizeBlockResponse { + // set of block events emitted as part of executing the block + repeated Event events = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; // nondeterministic + // the result of executing each transaction including the events + // the particular transaction emitted. This should match the order + // of the transactions delivered in the block itself + repeated ExecTxResult tx_results = 2; + // a list of updates to the validator set. These will reflect the validator set at current height + 2. + repeated ValidatorUpdate validator_updates = 3 [(gogoproto.nullable) = false]; + // updates to the consensus params, if any. + cometbft.types.v1.ConsensusParams consensus_param_updates = 4; + // app_hash is the hash of the applications' state which is used to confirm + // that execution of the transactions was deterministic. + // It is up to the application to decide which algorithm to use. + bytes app_hash = 5; + // delay between the time when this block is committed and the next height is started. 
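`ProcessProposalResponse` boils down to a single accept/reject status. A minimal sketch of a proposal validator; the per-transaction predicate is a placeholder:

```go
package main

import (
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
)

// processProposal rejects the proposal if any transaction fails the
// application's (here trivial) validity predicate; otherwise accepts.
func processProposal(req *abciv1.ProcessProposalRequest) *abciv1.ProcessProposalResponse {
	for _, tx := range req.Txs {
		if len(tx) == 0 { // placeholder validity check
			return &abciv1.ProcessProposalResponse{Status: abciv1.PROCESS_PROPOSAL_STATUS_REJECT}
		}
	}
	return &abciv1.ProcessProposalResponse{Status: abciv1.PROCESS_PROPOSAL_STATUS_ACCEPT}
}

func main() {
	res := processProposal(&abciv1.ProcessProposalRequest{Txs: [][]byte{{0x01}}})
	fmt.Println(res.Status)
}
```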
+ // previously `timeout_commit` in config.toml + google.protobuf.Duration next_block_delay = 6 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; +} + +// ---------------------------------------- +// Misc. + +// CommitInfo contains votes for the particular round. +message CommitInfo { + int32 round = 1; + repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// ExtendedCommitInfo is similar to CommitInfo except that it is only used in +// the PrepareProposal request such that Tendermint can provide vote extensions +// to the application. +message ExtendedCommitInfo { + // The round at which the block proposer decided in the previous height. + int32 round = 1; + // List of validators' addresses in the last validator set with their voting + // information, including vote extensions. + repeated ExtendedVoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// Event allows application developers to attach additional information to +// ResponseFinalizeBlock and ResponseCheckTx. +// Up to 0.37, this could also be used in ResponseBeginBlock, ResponseEndBlock, +// and ResponseDeliverTx. +// Later, transactions may be queried using these events. +message Event { + string type = 1; + repeated EventAttribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes,omitempty" + ]; +} + +// EventAttribute is a single key-value pair, associated with an event. +message EventAttribute { + string key = 1; + string value = 2; + bool index = 3; // nondeterministic +} + +// ExecTxResult contains results of executing one individual transaction. +// +// * Its structure is equivalent to #ResponseDeliverTx which will be deprecated/deleted +message ExecTxResult { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic + string codespace = 8; +} + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. +message TxResult { + int64 height = 1; + uint32 index = 2; + bytes tx = 3; + ExecTxResult result = 4 [(gogoproto.nullable) = false]; +} + +// ---------------------------------------- +// Blockchain Types + +// Validator in the validator set. +message Validator { + bytes address = 1; // The first 20 bytes of SHA256(public key) + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; + int64 power = 3; // The voting power +} + +// ValidatorUpdate is a singular update to a validator set. +message ValidatorUpdate { + int64 power = 2; + bytes pub_key_bytes = 3; + string pub_key_type = 4; + + reserved 1; // pub_key +} + +// VoteInfo contains the information about the vote. +message VoteInfo { + Validator validator = 1 [(gogoproto.nullable) = false]; + cometbft.types.v1.BlockIDFlag block_id_flag = 3; + + reserved 2; // signed_last_block +} + +// ExtendedVoteInfo extends VoteInfo with the vote extensions (non-deterministic). +message ExtendedVoteInfo { + // The validator that sent the vote. + Validator validator = 1 [(gogoproto.nullable) = false]; + // Non-deterministic extension provided by the sending validator's application. 
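Putting `Event`, `EventAttribute`, and `ExecTxResult` together: a sketch of how an application might report an indexed event for one transaction, as it would appear inside `FinalizeBlockResponse.TxResults`. Event type, keys, and values are illustrative:

```go
package main

import (
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
)

func main() {
	// A transfer event with two indexed attributes; per the Event comment
	// above, such events are what later makes transactions queryable.
	txResult := &abciv1.ExecTxResult{
		Code: 0,
		Events: []abciv1.Event{{
			Type: "transfer", // illustrative event type
			Attributes: []abciv1.EventAttribute{
				{Key: "sender", Value: "addr1...", Index: true},
				{Key: "amount", Value: "100", Index: true},
			},
		}},
	}
	fmt.Println(txResult.Events[0].Type)
}
```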
+ bytes vote_extension = 3; + // Vote extension signature created by CometBFT + bytes extension_signature = 4; + // block_id_flag indicates whether the validator voted for a block, nil, or did not vote at all + cometbft.types.v1.BlockIDFlag block_id_flag = 5; + + reserved 2; // signed_last_block +} + +// The type of misbehavior committed by a validator. +enum MisbehaviorType { + option (gogoproto.goproto_enum_prefix) = false; + + // Unknown + MISBEHAVIOR_TYPE_UNKNOWN = 0; + // Duplicate vote + MISBEHAVIOR_TYPE_DUPLICATE_VOTE = 1; + // Light client attack + MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK = 2; +} + +// Misbehavior is a type of misbehavior committed by a validator. +message Misbehavior { + MisbehaviorType type = 1; + // The offending validator + Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} + +// ---------------------------------------- +// State Sync Types + +// Snapshot of the ABCI application state. +message Snapshot { + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + uint32 chunks = 3; // Number of chunks in the snapshot + bytes hash = 4; // Arbitrary snapshot hash, equal only if identical + bytes metadata = 5; // Arbitrary application metadata +} diff --git a/proto/cometbft/abci/v1beta1/types.proto b/proto/cometbft/abci/v1beta1/types.proto new file mode 100644 index 00000000000..cd3dd37cafd --- /dev/null +++ b/proto/cometbft/abci/v1beta1/types.proto @@ -0,0 +1,492 @@ +syntax = "proto3"; +package cometbft.abci.v1beta1; + +import "cometbft/crypto/v1/keys.proto"; +// For more information on gogo.proto, see: +// https://github.com/cosmos/gogoproto/blob/master/extensions.md +import "cometbft/crypto/v1/proof.proto"; +import "cometbft/types/v1beta1/params.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1"; + +// NOTE: When using custom types, mind the warnings. +// https://github.com/cosmos/gogoproto/blob/master/custom_types.md#warnings-and-issues + +// ---------------------------------------- +// Request types + +// Request represents a request to the ABCI application. +message Request { + // Sum of all possible messages. + oneof value { + RequestEcho echo = 1; + RequestFlush flush = 2; + RequestInfo info = 3; + RequestSetOption set_option = 4; + RequestInitChain init_chain = 5; + RequestQuery query = 6; + RequestBeginBlock begin_block = 7; + RequestCheckTx check_tx = 8; + RequestDeliverTx deliver_tx = 9; + RequestEndBlock end_block = 10; + RequestCommit commit = 11; + RequestListSnapshots list_snapshots = 12; + RequestOfferSnapshot offer_snapshot = 13; + RequestLoadSnapshotChunk load_snapshot_chunk = 14; + RequestApplySnapshotChunk apply_snapshot_chunk = 15; + } +} + +// RequestEcho is a request to "echo" the given string. +message RequestEcho { + string message = 1; +} + +// RequestFlush is a request to flush the write buffer. +message RequestFlush {} + +// RequestInfo is a request for the ABCI application version. 
+message RequestInfo { + string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; +} + +// nondeterministic +message RequestSetOption { + string key = 1; + string value = 2; +} + +// RequestInitChain is a request to initialize the blockchain. +message RequestInitChain { + google.protobuf.Timestamp time = 1 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + string chain_id = 2; + ConsensusParams consensus_params = 3; + repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; + bytes app_state_bytes = 5; + int64 initial_height = 6; +} + +// RequestQuery is a request to query the application state. +message RequestQuery { + bytes data = 1; + string path = 2; + int64 height = 3; + bool prove = 4; +} + +// RequestBeginBlock indicates the beginning of committing the block. +message RequestBeginBlock { + bytes hash = 1; + cometbft.types.v1beta1.Header header = 2 [(gogoproto.nullable) = false]; + LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; +} + +// Type of the transaction check request. +enum CheckTxType { + // New + NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; + // Recheck (2nd, 3rd, etc.) + RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; +} + +// RequestCheckTx is a request to check the transaction. +message RequestCheckTx { + bytes tx = 1; + CheckTxType type = 2; +} + +// RequestDeliverTx is a request to apply the transaction. +message RequestDeliverTx { + bytes tx = 1; +} + +// RequestEndBlock indicates the end of committing the block. +message RequestEndBlock { + int64 height = 1; +} + +// RequestCommit is a request to commit the pending application state. +message RequestCommit {} + +// lists available snapshots +message RequestListSnapshots {} + +// offers a snapshot to the application +message RequestOfferSnapshot { + Snapshot snapshot = 1; // snapshot offered by peers + bytes app_hash = 2; // light client-verified app hash for snapshot height +} + +// loads a snapshot chunk +message RequestLoadSnapshotChunk { + uint64 height = 1; + uint32 format = 2; + uint32 chunk = 3; +} + +// Applies a snapshot chunk +message RequestApplySnapshotChunk { + uint32 index = 1; + bytes chunk = 2; + string sender = 3; +} + +// ---------------------------------------- +// Response types + +// Response represents a response from the ABCI application. +message Response { + // Sum of all possible messages. + oneof value { + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseSetOption set_option = 5; + ResponseInitChain init_chain = 6; + ResponseQuery query = 7; + ResponseBeginBlock begin_block = 8; + ResponseCheckTx check_tx = 9; + ResponseDeliverTx deliver_tx = 10; + ResponseEndBlock end_block = 11; + ResponseCommit commit = 12; + ResponseListSnapshots list_snapshots = 13; + ResponseOfferSnapshot offer_snapshot = 14; + ResponseLoadSnapshotChunk load_snapshot_chunk = 15; + ResponseApplySnapshotChunk apply_snapshot_chunk = 16; + } +} + +// nondeterministic +message ResponseException { + string error = 1; +} + +// ResponseEcho indicates that the connection is still alive. +message ResponseEcho { + string message = 1; +} + +// ResponseFlush indicates that the ABCI application state was flushed? +message ResponseFlush {} + +// ResponseInfo contains the ABCI application version information. 
+message ResponseInfo { + string data = 1; + + string version = 2; + uint64 app_version = 3; + + int64 last_block_height = 4; + bytes last_block_app_hash = 5; +} + +// nondeterministic +message ResponseSetOption { + uint32 code = 1; + // bytes data = 2; + string log = 3; + string info = 4; +} + +// ResponseInitChain contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. +message ResponseInitChain { + ConsensusParams consensus_params = 1; + repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; + bytes app_hash = 3; +} + +// ResponseQuery contains the ABCI application data along with a proof. +message ResponseQuery { + uint32 code = 1; + // bytes data = 2; // use "value" instead. + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 index = 5; + bytes key = 6; + bytes value = 7; + cometbft.crypto.v1.ProofOps proof_ops = 8; + int64 height = 9; + string codespace = 10; +} + +// ResponseBeginBlock contains a list of block-level events. +message ResponseBeginBlock { + repeated Event events = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; +} + +// ResponseCheckTx shows if the transaction was deemed valid by the ABCI +// application. +message ResponseCheckTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; + string codespace = 8; + string sender = 9; + int64 priority = 10; + + // mempool_error is set by CometBFT. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. + string mempool_error = 11; +} + +// ResponseDeliverTx contains a result of committing the given transaction and a +// list of events. +message ResponseDeliverTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; // nondeterministic + string codespace = 8; +} + +// ResponseEndBlock contains updates to consensus params and/or validator set changes, if any. +message ResponseEndBlock { + repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; + ConsensusParams consensus_param_updates = 2; + repeated Event events = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; +} + +// ResponseCommit indicates how much blocks should CometBFT retain. +message ResponseCommit { + // reserve 1 + bytes data = 2; + int64 retain_height = 3; +} + +// ResponseListSnapshots contains the list of snapshots. +message ResponseListSnapshots { + repeated Snapshot snapshots = 1; +} + +// ResponseOfferSnapshot indicates the ABCI application decision whenever to +// provide a snapshot to the requester or not. +message ResponseOfferSnapshot { + Result result = 1; + + // The status code. 
+ enum Result { + // Unknown result, abort all snapshot restoration + UNKNOWN = 0; + // Snapshot accepted, apply chunks + ACCEPT = 1; + // Abort all snapshot restoration + ABORT = 2; + // Reject this specific snapshot, try others + REJECT = 3; + // Reject all snapshots of this format, try others + REJECT_FORMAT = 4; + // Reject all snapshots from the sender(s), try others + REJECT_SENDER = 5; + } +} + +// ResponseLoadSnapshotChunk returns a snapshot's chunk. +message ResponseLoadSnapshotChunk { + bytes chunk = 1; +} + +// ResponseApplySnapshotChunk returns a result of applying the specified chunk. +message ResponseApplySnapshotChunk { + Result result = 1; + repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply + repeated string reject_senders = 3; // Chunk senders to reject and ban + + // The status code. + enum Result { + // Unknown result, abort all snapshot restoration + UNKNOWN = 0; + // Chunk successfully accepted + ACCEPT = 1; + // Abort all snapshot restoration + ABORT = 2; + // Retry chunk (combine with refetch and reject) + RETRY = 3; + // Retry snapshot (combine with refetch and reject) + RETRY_SNAPSHOT = 4; + // Reject this snapshot, try others + REJECT_SNAPSHOT = 5; + } +} + +// ---------------------------------------- +// Misc. + +// ConsensusParams contains all consensus-relevant parameters +// that can be adjusted by the abci app +message ConsensusParams { + BlockParams block = 1; + cometbft.types.v1beta1.EvidenceParams evidence = 2; + cometbft.types.v1beta1.ValidatorParams validator = 3; + cometbft.types.v1beta1.VersionParams version = 4; +} + +// BlockParams contains limits on the block size. +message BlockParams { + // Note: must be greater than 0 + int64 max_bytes = 1; + // Note: must be greater or equal to -1 + int64 max_gas = 2; +} + +// LastCommitInfo contains votes for the particular round. +message LastCommitInfo { + int32 round = 1; + repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// Event allows application developers to attach additional information to +// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events. +message Event { + string type = 1; + repeated EventAttribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes,omitempty" + ]; +} + +// EventAttribute is a single key-value pair, associated with an event. +message EventAttribute { + bytes key = 1; + bytes value = 2; + bool index = 3; // nondeterministic +} + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. +message TxResult { + int64 height = 1; + uint32 index = 2; + bytes tx = 3; + ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; +} + +// ---------------------------------------- +// Blockchain Types + +// Validator in the validator set. +message Validator { + bytes address = 1; // The first 20 bytes of SHA256(public key) + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; + int64 power = 3; // The voting power +} + +// ValidatorUpdate is a singular update to a validator set. +message ValidatorUpdate { + cometbft.crypto.v1.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + int64 power = 2; +} + +// VoteInfo contains the information about the vote. +message VoteInfo { + Validator validator = 1 [(gogoproto.nullable) = false]; + bool signed_last_block = 2; +} + +// The type of evidence. 
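Worth noting for migrations: v1beta1's `ValidatorUpdate` embeds a typed `PublicKey`, whereas the v1 message shown earlier flattens it into `pub_key_bytes` plus `pub_key_type` and reserves field 1. A hedged conversion sketch; the `GetEd25519` accessor follows the gogoproto oneof convention and the `"ed25519"` key-type string is an assumption, not confirmed by this diff:

```go
package main

import (
	"fmt"

	abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
	abciv1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1"
)

// toV1 converts a legacy validator update into the flattened v1 shape.
// Assumption: the crypto PublicKey oneof exposes GetEd25519 (gogoproto
// convention) and "ed25519" is the matching key-type string.
func toV1(u abciv1beta1.ValidatorUpdate) abciv1.ValidatorUpdate {
	return abciv1.ValidatorUpdate{
		Power:       u.Power,
		PubKeyBytes: u.PubKey.GetEd25519(),
		PubKeyType:  "ed25519",
	}
}

func main() {
	v1u := toV1(abciv1beta1.ValidatorUpdate{Power: 10})
	fmt.Println(v1u.Power, v1u.PubKeyType)
}
```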
+enum EvidenceType { + // Unknown + UNKNOWN = 0; + // Duplicate vote + DUPLICATE_VOTE = 1; + // Light client attack + LIGHT_CLIENT_ATTACK = 2; +} + +// Evidence of a misbehavior committed by a validator. +message Evidence { + EvidenceType type = 1; + // The offending validator + Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} + +// ---------------------------------------- +// State Sync Types + +// Snapshot of the ABCI application state. +message Snapshot { + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + uint32 chunks = 3; // Number of chunks in the snapshot + bytes hash = 4; // Arbitrary snapshot hash, equal only if identical + bytes metadata = 5; // Arbitrary application metadata +} + +// ---------------------------------------- +// Service Definition + +// ABCIApplication is a service for an ABCI application. +service ABCIApplication { + // Echo returns back the same message it is sent. + rpc Echo(RequestEcho) returns (ResponseEcho); + // Flush flushes the write buffer. + rpc Flush(RequestFlush) returns (ResponseFlush); + // Info returns information about the application state. + rpc Info(RequestInfo) returns (ResponseInfo); + // SetOption sets a parameter in the application. + rpc SetOption(RequestSetOption) returns (ResponseSetOption); + // DeliverTx applies a transaction. + rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); + // CheckTx validates a transaction. + rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); + // Query queries the application state. + rpc Query(RequestQuery) returns (ResponseQuery); + // Commit commits a block of transactions. + rpc Commit(RequestCommit) returns (ResponseCommit); + // InitChain initializes the blockchain. + rpc InitChain(RequestInitChain) returns (ResponseInitChain); + // BeginBlock signals the beginning of a block. + rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); + // EndBlock signals the end of a block, returns changes to the validator set. + rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); + // ListSnapshots lists all the available snapshots. + rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); + // OfferSnapshot sends a snapshot offer. + rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); + // LoadSnapshotChunk returns a chunk of snapshot. + rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); + // ApplySnapshotChunk applies a chunk of snapshot. 
+ rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); +} diff --git a/proto/cometbft/abci/v1beta2/types.proto b/proto/cometbft/abci/v1beta2/types.proto new file mode 100644 index 00000000000..db6db7d6da0 --- /dev/null +++ b/proto/cometbft/abci/v1beta2/types.proto @@ -0,0 +1,326 @@ +syntax = "proto3"; +package cometbft.abci.v1beta2; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/abci/v1beta2"; + +// For more information on gogo.proto, see: +// https://github.com/cosmos/gogoproto/blob/master/extensions.md +import "gogoproto/gogo.proto"; +import "cometbft/abci/v1beta1/types.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "cometbft/types/v1beta2/params.proto"; +import "google/protobuf/timestamp.proto"; + +// NOTE: When using custom types, mind the warnings. +// https://github.com/cosmos/gogoproto/blob/master/custom_types.md#warnings-and-issues + +// ---------------------------------------- +// Request types + +// Request represents a request to the ABCI application. +message Request { + // Sum of all possible messages. + oneof value { + v1beta1.RequestEcho echo = 1; + v1beta1.RequestFlush flush = 2; + RequestInfo info = 3; + RequestInitChain init_chain = 5; + v1beta1.RequestQuery query = 6; + RequestBeginBlock begin_block = 7; + v1beta1.RequestCheckTx check_tx = 8; + v1beta1.RequestDeliverTx deliver_tx = 9; + v1beta1.RequestEndBlock end_block = 10; + v1beta1.RequestCommit commit = 11; + v1beta1.RequestListSnapshots list_snapshots = 12; + v1beta1.RequestOfferSnapshot offer_snapshot = 13; + v1beta1.RequestLoadSnapshotChunk load_snapshot_chunk = 14; + v1beta1.RequestApplySnapshotChunk apply_snapshot_chunk = 15; + RequestPrepareProposal prepare_proposal = 16; + RequestProcessProposal process_proposal = 17; + } + reserved 4; +} + +// RequestInfo is a request for the ABCI application version. +message RequestInfo { + string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; + string abci_version = 4; +} + +// RequestInitChain is a request to initialize the blockchain. +message RequestInitChain { + google.protobuf.Timestamp time = 1 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 2; + cometbft.types.v1beta2.ConsensusParams consensus_params = 3; + repeated v1beta1.ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; + bytes app_state_bytes = 5; + int64 initial_height = 6; +} + +// RequestBeginBlock indicates the beginning of committing the block. +message RequestBeginBlock { + bytes hash = 1; + cometbft.types.v1beta1.Header header = 2 [(gogoproto.nullable) = false]; + CommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; +} + +// RequestPrepareProposal is a request for the ABCI application to prepare a new +// block proposal. +message RequestPrepareProposal { + // the modified transactions cannot exceed this size. + int64 max_tx_bytes = 1; + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + repeated bytes txs = 2; + ExtendedCommitInfo local_last_commit = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 4 [(gogoproto.nullable) = false]; + int64 height = 5; + google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes next_validators_hash = 7; + // address of the public key of the validator proposing the block. 
+ bytes proposer_address = 8; +} + +// RequestProcessProposal is a request for the ABCI application to process proposal. +message RequestProcessProposal { + repeated bytes txs = 1; + CommitInfo proposed_last_commit = 2 [(gogoproto.nullable) = false]; + repeated Misbehavior misbehavior = 3 [(gogoproto.nullable) = false]; + // hash is the merkle root hash of the fields of the proposed block. + bytes hash = 4; + int64 height = 5; + google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes next_validators_hash = 7; + // address of the public key of the original proposer of the block. + bytes proposer_address = 8; +} + +// ---------------------------------------- +// Response types + +// Response represents a response from the ABCI application. +message Response { + // Sum of all possible messages. + oneof value { + v1beta1.ResponseException exception = 1; + v1beta1.ResponseEcho echo = 2; + v1beta1.ResponseFlush flush = 3; + v1beta1.ResponseInfo info = 4; + ResponseInitChain init_chain = 6; + v1beta1.ResponseQuery query = 7; + ResponseBeginBlock begin_block = 8; + ResponseCheckTx check_tx = 9; + ResponseDeliverTx deliver_tx = 10; + ResponseEndBlock end_block = 11; + v1beta1.ResponseCommit commit = 12; + v1beta1.ResponseListSnapshots list_snapshots = 13; + v1beta1.ResponseOfferSnapshot offer_snapshot = 14; + v1beta1.ResponseLoadSnapshotChunk load_snapshot_chunk = 15; + v1beta1.ResponseApplySnapshotChunk apply_snapshot_chunk = 16; + ResponsePrepareProposal prepare_proposal = 17; + ResponseProcessProposal process_proposal = 18; + } + reserved 5; +} + +// ResponseInitChain contains the ABCI application's hash and updates to the +// validator set and/or the consensus params, if any. +message ResponseInitChain { + cometbft.types.v1beta2.ConsensusParams consensus_params = 1; + repeated v1beta1.ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; + bytes app_hash = 3; +} + +// ResponseBeginBlock contains a list of block-level events. +message ResponseBeginBlock { + repeated Event events = 1 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +// ResponseCheckTx shows if the transaction was deemed valid by the ABCI +// application. +message ResponseCheckTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + string codespace = 8; + string sender = 9; + int64 priority = 10; + + // mempool_error is set by CometBFT. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. + string mempool_error = 11; +} + +// ResponseDeliverTx contains a result of committing the given transaction and a +// list of events. +message ResponseDeliverTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "events,omitempty" + ]; // nondeterministic + string codespace = 8; +} + +// ResponseEndBlock contains updates to consensus params and/or validator set changes, if any. 
+message ResponseEndBlock { + repeated v1beta1.ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; + cometbft.types.v1beta2.ConsensusParams consensus_param_updates = 2; + repeated Event events = 3 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +// ResponsePrepareProposal contains the list of transactions that will be included in the proposal. +message ResponsePrepareProposal { + repeated bytes txs = 1; +} + +// ResponseProcessProposal contains the result of processing a proposal. +message ResponseProcessProposal { + ProposalStatus status = 1; + + // The status. + enum ProposalStatus { + // Unknown + UNKNOWN = 0; + // Accepted + ACCEPT = 1; + // Rejected + REJECT = 2; + } +} + +// ---------------------------------------- +// Misc. + +// CommitInfo contains votes for the particular round. +message CommitInfo { + int32 round = 1; + repeated v1beta1.VoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// ExtendedCommitInfo is similar to CommitInfo except that it is only used in +// the PrepareProposal request such that Tendermint can provide vote extensions +// to the application. +message ExtendedCommitInfo { + // The round at which the block proposer decided in the previous height. + int32 round = 1; + // List of validators' addresses in the last validator set with their voting + // information, including vote extensions. + repeated ExtendedVoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// Event allows application developers to attach additional information to +// ResponseFinalizeBlock (defined in .v1beta3) and ResponseCheckTx. +// Up to 0.37, this could also be used in ResponseBeginBlock, ResponseEndBlock, +// and ResponseDeliverTx. +// Later, transactions may be queried using these events. +message Event { + string type = 1; + repeated EventAttribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes,omitempty" + ]; +} + +// EventAttribute is a single key-value pair, associated with an event. +message EventAttribute { + string key = 1; + string value = 2; + bool index = 3; // nondeterministic +} + +// ---------------------------------------- +// Blockchain Types + +// ExtendedVoteInfo extends VoteInfo with the vote extensions (non-deterministic). +message ExtendedVoteInfo { + // The validator that sent the vote. + v1beta1.Validator validator = 1 [(gogoproto.nullable) = false]; + // Indicates whether the validator signed the last block, allowing for rewards based on validator availability. + bool signed_last_block = 2; + // Non-deterministic extension provided by the sending validator's application. + bytes vote_extension = 3; +} + +// The type of misbehavior committed by a validator. +enum MisbehaviorType { + // Unknown + UNKNOWN = 0; + // Duplicate vote + DUPLICATE_VOTE = 1; + // Light client attack + LIGHT_CLIENT_ATTACK = 2; +} + +// Misbehavior is a type of misbehavior committed by a validator. +message Misbehavior { + MisbehaviorType type = 1; + // The offending validator + v1beta1.Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. 
+ // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} + +// ---------------------------------------- +// Service Definition + +// ABCIApplication is a service for an ABCI application. +service ABCIApplication { + // Echo returns back the same message it is sent. + rpc Echo(v1beta1.RequestEcho) returns (v1beta1.ResponseEcho); + // Flush flushes the write buffer. + rpc Flush(v1beta1.RequestFlush) returns (v1beta1.ResponseFlush); + // Info returns information about the application state. + rpc Info(RequestInfo) returns (v1beta1.ResponseInfo); + // DeliverTx applies a transaction. + rpc DeliverTx(v1beta1.RequestDeliverTx) returns (ResponseDeliverTx); + // CheckTx validates a transaction. + rpc CheckTx(v1beta1.RequestCheckTx) returns (ResponseCheckTx); + // Query queries the application state. + rpc Query(v1beta1.RequestQuery) returns (v1beta1.ResponseQuery); + // Commit commits a block of transactions. + rpc Commit(v1beta1.RequestCommit) returns (v1beta1.ResponseCommit); + // InitChain initializes the blockchain. + rpc InitChain(RequestInitChain) returns (ResponseInitChain); + // BeginBlock signals the beginning of a block. + rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); + // EndBlock signals the end of a block, returns changes to the validator set. + rpc EndBlock(v1beta1.RequestEndBlock) returns (ResponseEndBlock); + // ListSnapshots lists all the available snapshots. + rpc ListSnapshots(v1beta1.RequestListSnapshots) returns (v1beta1.ResponseListSnapshots); + // OfferSnapshot sends a snapshot offer. + rpc OfferSnapshot(v1beta1.RequestOfferSnapshot) returns (v1beta1.ResponseOfferSnapshot); + // LoadSnapshotChunk returns a chunk of snapshot. + rpc LoadSnapshotChunk(v1beta1.RequestLoadSnapshotChunk) + returns (v1beta1.ResponseLoadSnapshotChunk); + // ApplySnapshotChunk applies a chunk of snapshot. + rpc ApplySnapshotChunk(v1beta1.RequestApplySnapshotChunk) + returns (v1beta1.ResponseApplySnapshotChunk); + // PrepareProposal returns a proposal for the next block. + rpc PrepareProposal(RequestPrepareProposal) returns (ResponsePrepareProposal); + // ProcessProposal validates a proposal. + rpc ProcessProposal(RequestProcessProposal) returns (ResponseProcessProposal); +} diff --git a/proto/cometbft/abci/v1beta3/types.proto b/proto/cometbft/abci/v1beta3/types.proto new file mode 100644 index 00000000000..2b3c7f744df --- /dev/null +++ b/proto/cometbft/abci/v1beta3/types.proto @@ -0,0 +1,336 @@ +syntax = "proto3"; +package cometbft.abci.v1beta3; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/abci/v1beta3"; + +// For more information on gogo.proto, see: +// https://github.com/cosmos/gogoproto/blob/master/extensions.md +import "cometbft/abci/v1beta1/types.proto"; +import "cometbft/abci/v1beta2/types.proto"; +import "cometbft/types/v1/params.proto"; +import "cometbft/types/v1beta1/validator.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +// NOTE: When using custom types, mind the warnings. +// https://github.com/cosmos/gogoproto/blob/master/custom_types.md#warnings-and-issues + +// ABCIService is a service for an ABCI application. +service ABCI { + // Echo returns back the same message it is sent. + rpc Echo(v1beta1.RequestEcho) returns (v1beta1.ResponseEcho); + // Flush flushes the write buffer. + rpc Flush(v1beta1.RequestFlush) returns (v1beta1.ResponseFlush); + // Info returns information about the application state. 
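+  // On startup, CometBFT calls Info to learn the application's last persisted
+  // height and app hash, and replays any missing blocks (the handshake).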
+  rpc Info(v1beta2.RequestInfo) returns (v1beta1.ResponseInfo);
+  // CheckTx validates a transaction.
+  rpc CheckTx(v1beta1.RequestCheckTx) returns (ResponseCheckTx);
+  // Query queries the application state.
+  rpc Query(v1beta1.RequestQuery) returns (v1beta1.ResponseQuery);
+  // Commit commits a block of transactions.
+  rpc Commit(v1beta1.RequestCommit) returns (ResponseCommit);
+  // InitChain initializes the blockchain.
+  rpc InitChain(RequestInitChain) returns (ResponseInitChain);
+  // ListSnapshots lists all the available snapshots.
+  rpc ListSnapshots(v1beta1.RequestListSnapshots) returns (v1beta1.ResponseListSnapshots);
+  // OfferSnapshot sends a snapshot offer.
+  rpc OfferSnapshot(v1beta1.RequestOfferSnapshot) returns (v1beta1.ResponseOfferSnapshot);
+  // LoadSnapshotChunk returns a chunk of snapshot.
+  rpc LoadSnapshotChunk(v1beta1.RequestLoadSnapshotChunk)
+      returns (v1beta1.ResponseLoadSnapshotChunk);
+  // ApplySnapshotChunk applies a chunk of snapshot.
+  rpc ApplySnapshotChunk(v1beta1.RequestApplySnapshotChunk)
+      returns (v1beta1.ResponseApplySnapshotChunk);
+  // PrepareProposal returns a proposal for the next block.
+  rpc PrepareProposal(RequestPrepareProposal) returns (v1beta2.ResponsePrepareProposal);
+  // ProcessProposal validates a proposal.
+  rpc ProcessProposal(RequestProcessProposal) returns (v1beta2.ResponseProcessProposal);
+  // ExtendVote extends a vote with application-injected data (vote extensions).
+  rpc ExtendVote(RequestExtendVote) returns (ResponseExtendVote);
+  // VerifyVoteExtension verifies a vote extension.
+  rpc VerifyVoteExtension(RequestVerifyVoteExtension) returns (ResponseVerifyVoteExtension);
+  // FinalizeBlock finalizes a block.
+  rpc FinalizeBlock(RequestFinalizeBlock) returns (ResponseFinalizeBlock);
+}
+
+// ----------------------------------------
+// Request types
+
+// Request represents a request to the ABCI application.
+message Request {
+  // Sum of all possible messages.
+  oneof value {
+    v1beta1.RequestEcho echo = 1;
+    v1beta1.RequestFlush flush = 2;
+    v1beta2.RequestInfo info = 3;
+    RequestInitChain init_chain = 5;
+    v1beta1.RequestQuery query = 6;
+    v1beta1.RequestCheckTx check_tx = 8;
+    v1beta1.RequestCommit commit = 11;
+    v1beta1.RequestListSnapshots list_snapshots = 12;
+    v1beta1.RequestOfferSnapshot offer_snapshot = 13;
+    v1beta1.RequestLoadSnapshotChunk load_snapshot_chunk = 14;
+    v1beta1.RequestApplySnapshotChunk apply_snapshot_chunk = 15;
+    RequestPrepareProposal prepare_proposal = 16;
+    RequestProcessProposal process_proposal = 17;
+    RequestExtendVote extend_vote = 18;
+    RequestVerifyVoteExtension verify_vote_extension = 19;
+    RequestFinalizeBlock finalize_block = 20;
+  }
+  reserved 4, 7, 9, 10; // SetOption, BeginBlock, DeliverTx, EndBlock
+}
+
+// RequestInitChain is a request to initialize the blockchain.
+message RequestInitChain {
+  google.protobuf.Timestamp time = 1
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  string chain_id = 2;
+  cometbft.types.v1.ConsensusParams consensus_params = 3;
+  repeated v1beta1.ValidatorUpdate validators = 4 [(gogoproto.nullable) = false];
+  bytes app_state_bytes = 5;
+  int64 initial_height = 6;
+}
+
+// RequestPrepareProposal is a request for the ABCI application to prepare a new
+// block proposal.
+message RequestPrepareProposal {
+  // the modified transactions cannot exceed this size.
+  int64 max_tx_bytes = 1;
+  // txs is an array of transactions that will be included in a block,
+  // sent to the app for possible modifications.
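+  // The application may reorder, replace, or drop transactions, as long as the
+  // total size of the list it returns does not exceed max_tx_bytes.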
+  repeated bytes txs = 2;
+  ExtendedCommitInfo local_last_commit = 3 [(gogoproto.nullable) = false];
+  repeated v1beta2.Misbehavior misbehavior = 4 [(gogoproto.nullable) = false];
+  int64 height = 5;
+  google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  bytes next_validators_hash = 7;
+  // address of the public key of the validator proposing the block.
+  bytes proposer_address = 8;
+}
+
+// RequestProcessProposal is a request for the ABCI application to process a proposal.
+message RequestProcessProposal {
+  repeated bytes txs = 1;
+  CommitInfo proposed_last_commit = 2 [(gogoproto.nullable) = false];
+  repeated v1beta2.Misbehavior misbehavior = 3 [(gogoproto.nullable) = false];
+  // hash is the merkle root hash of the fields of the proposed block.
+  bytes hash = 4;
+  int64 height = 5;
+  google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  bytes next_validators_hash = 7;
+  // address of the public key of the original proposer of the block.
+  bytes proposer_address = 8;
+}
+
+// RequestExtendVote extends a vote with application-injected data (vote extensions).
+message RequestExtendVote {
+  // the hash of the block that this vote may be referring to
+  bytes hash = 1;
+  // the height of the extended vote
+  int64 height = 2;
+  // info of the block that this vote may be referring to
+  google.protobuf.Timestamp time = 3 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  repeated bytes txs = 4;
+  CommitInfo proposed_last_commit = 5 [(gogoproto.nullable) = false];
+  repeated v1beta2.Misbehavior misbehavior = 6 [(gogoproto.nullable) = false];
+  bytes next_validators_hash = 7;
+  // address of the public key of the original proposer of the block.
+  bytes proposer_address = 8;
+}
+
+// RequestVerifyVoteExtension verifies the vote extension.
+message RequestVerifyVoteExtension {
+  // the hash of the block that this received vote corresponds to
+  bytes hash = 1;
+  // the validator that signed the vote extension
+  bytes validator_address = 2;
+  int64 height = 3;
+  bytes vote_extension = 4;
+}
+
+// RequestFinalizeBlock is a request to finalize the block.
+message RequestFinalizeBlock {
+  repeated bytes txs = 1;
+  CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false];
+  repeated v1beta2.Misbehavior misbehavior = 3 [(gogoproto.nullable) = false];
+  // hash is the merkle root hash of the fields of the decided block.
+  bytes hash = 4;
+  int64 height = 5;
+  google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  bytes next_validators_hash = 7;
+  // proposer_address is the address of the public key of the original proposer of the block.
+  bytes proposer_address = 8;
+}
+
+// ----------------------------------------
+// Response types
+
+// Response represents a response from the ABCI application.
+message Response {
+  // Sum of all possible messages.
+  oneof value {
+    v1beta1.ResponseException exception = 1;
+    v1beta1.ResponseEcho echo = 2;
+    v1beta1.ResponseFlush flush = 3;
+    v1beta1.ResponseInfo info = 4;
+    ResponseInitChain init_chain = 6;
+    v1beta1.ResponseQuery query = 7;
+    ResponseCheckTx check_tx = 9;
+    ResponseCommit commit = 12;
+    v1beta1.ResponseListSnapshots list_snapshots = 13;
+    v1beta1.ResponseOfferSnapshot offer_snapshot = 14;
+    v1beta1.ResponseLoadSnapshotChunk load_snapshot_chunk = 15;
+    v1beta1.ResponseApplySnapshotChunk apply_snapshot_chunk = 16;
+    v1beta2.ResponsePrepareProposal prepare_proposal = 17;
+    v1beta2.ResponseProcessProposal process_proposal = 18;
+    ResponseExtendVote extend_vote = 19;
+    ResponseVerifyVoteExtension verify_vote_extension = 20;
+    ResponseFinalizeBlock finalize_block = 21;
+  }
+  reserved 5, 8, 10, 11; // SetOption, BeginBlock, DeliverTx, EndBlock
+}
+
+// ResponseInitChain contains the ABCI application's hash and updates to the
+// validator set and/or the consensus params, if any.
+message ResponseInitChain {
+  cometbft.types.v1.ConsensusParams consensus_params = 1;
+  repeated v1beta1.ValidatorUpdate validators = 2 [(gogoproto.nullable) = false];
+  bytes app_hash = 3;
+}
+
+// ResponseCheckTx shows if the transaction was deemed valid by the ABCI
+// application.
+message ResponseCheckTx {
+  uint32 code = 1;
+  bytes data = 2;
+  string log = 3; // nondeterministic
+  string info = 4; // nondeterministic
+  int64 gas_wanted = 5 [json_name = "gas_wanted"];
+  int64 gas_used = 6 [json_name = "gas_used"];
+  repeated v1beta2.Event events = 7
+      [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
+  string codespace = 8;
+
+  // These reserved fields were used till v0.37 by the priority mempool (now
+  // removed).
+  reserved 9 to 11;
+  reserved "sender", "priority", "mempool_error";
+}
+
+// ResponseCommit indicates how many blocks CometBFT should retain.
+message ResponseCommit {
+  reserved 1, 2; // data was previously returned here
+  int64 retain_height = 3;
+}
+
+// ResponseExtendVote is the result of extending a vote with application-injected data.
+message ResponseExtendVote {
+  bytes vote_extension = 1;
+}
+
+// ResponseVerifyVoteExtension is the result of verifying a vote extension.
+message ResponseVerifyVoteExtension {
+  VerifyStatus status = 1;
+
+  // Verification status.
+  enum VerifyStatus {
+    // Unknown
+    UNKNOWN = 0;
+    // Accepted
+    ACCEPT = 1;
+    // Rejecting the vote extension will reject the entire precommit by the sender.
+    // Incorrectly implementing this thus has liveness implications as it may affect
+    // CometBFT's ability to receive 2/3+ valid votes to finalize the block.
+    // Honest nodes should never be rejected.
+    REJECT = 2;
+  }
+}
+
+// ResponseFinalizeBlock contains the result of executing the block.
+message ResponseFinalizeBlock {
+  // set of block events emitted as part of executing the block
+  repeated v1beta2.Event events = 1
+      [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
+  // the result of executing each transaction including the events
+  // the particular transaction emitted. This should match the order
+  // of the transactions delivered in the block itself
+  repeated ExecTxResult tx_results = 2;
+  // a list of updates to the validator set. These will reflect the validator set at current height + 2.
+  repeated v1beta1.ValidatorUpdate validator_updates = 3 [(gogoproto.nullable) = false];
+  // updates to the consensus params, if any.
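+  // A nil value leaves all consensus parameters unchanged.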
+  cometbft.types.v1.ConsensusParams consensus_param_updates = 4;
+  // app_hash is the hash of the application's state which is used to confirm
+  // that execution of the transactions was deterministic.
+  // It is up to the application to decide which algorithm to use.
+  bytes app_hash = 5;
+}
+
+// ----------------------------------------
+// Blockchain Types
+
+// VoteInfo contains the information about the vote.
+message VoteInfo {
+  v1beta1.Validator validator = 1 [(gogoproto.nullable) = false];
+  cometbft.types.v1beta1.BlockIDFlag block_id_flag = 3;
+
+  reserved 2; // signed_last_block
+}
+
+// ExtendedVoteInfo extends VoteInfo with the vote extensions (non-deterministic).
+message ExtendedVoteInfo {
+  // The validator that sent the vote.
+  v1beta1.Validator validator = 1 [(gogoproto.nullable) = false];
+  // Non-deterministic extension provided by the sending validator's application.
+  bytes vote_extension = 3;
+  // Vote extension signature created by CometBFT
+  bytes extension_signature = 4;
+  // block_id_flag indicates whether the validator voted for a block, nil, or did not vote at all
+  cometbft.types.v1beta1.BlockIDFlag block_id_flag = 5;
+
+  reserved 2; // signed_last_block
+}
+
+// ----------------------------------------
+// Misc.
+
+// CommitInfo contains votes for the particular round.
+message CommitInfo {
+  int32 round = 1;
+  repeated VoteInfo votes = 2 [(gogoproto.nullable) = false];
+}
+
+// ExtendedCommitInfo is similar to CommitInfo except that it is only used in
+// the PrepareProposal request such that Tendermint can provide vote extensions
+// to the application.
+message ExtendedCommitInfo {
+  // The round at which the block proposer decided in the previous height.
+  int32 round = 1;
+  // List of validators' addresses in the last validator set with their voting
+  // information, including vote extensions.
+  repeated ExtendedVoteInfo votes = 2 [(gogoproto.nullable) = false];
+}
+
+// ExecTxResult contains results of executing one individual transaction.
+//
+// * Its structure is equivalent to #ResponseDeliverTx which will be deprecated/deleted
+message ExecTxResult {
+  uint32 code = 1;
+  bytes data = 2;
+  string log = 3; // nondeterministic
+  string info = 4; // nondeterministic
+  int64 gas_wanted = 5 [json_name = "gas_wanted"];
+  int64 gas_used = 6 [json_name = "gas_used"];
+  repeated v1beta2.Event events = 7
+      [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic
+  string codespace = 8;
+}
+
+// TxResult contains results of executing the transaction.
+//
+// One usage is indexing transaction results.
+message TxResult {
+  int64 height = 1;
+  uint32 index = 2;
+  bytes tx = 3;
+  ExecTxResult result = 4 [(gogoproto.nullable) = false];
+}
diff --git a/proto/cometbft/blocksync/v1/types.proto b/proto/cometbft/blocksync/v1/types.proto
new file mode 100644
index 00000000000..b806ed6f76b
--- /dev/null
+++ b/proto/cometbft/blocksync/v1/types.proto
@@ -0,0 +1,45 @@
+syntax = "proto3";
+package cometbft.blocksync.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/blocksync/v1";
+
+import "cometbft/types/v1/block.proto";
+import "cometbft/types/v1/types.proto";
+
+// BlockRequest requests a block for a specific height
+message BlockRequest {
+  int64 height = 1;
+}
+
+// NoBlockResponse informs the node that the peer does not have a block at the requested height
+message NoBlockResponse {
+  int64 height = 1;
+}
+
+// StatusRequest requests the status of a peer.
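+// The peer answers with a StatusResponse reporting its base (the lowest block
+// height it retains) and its latest height, i.e. the range it can serve.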
+message StatusRequest {
+}
+
+// StatusResponse is a peer's response informing of its status.
+message StatusResponse {
+  int64 height = 1;
+  int64 base = 2;
+}
+
+// BlockResponse returns a block to the requester.
+message BlockResponse {
+  cometbft.types.v1.Block block = 1;
+  cometbft.types.v1.ExtendedCommit ext_commit = 2;
+}
+
+// Message is an abstract blocksync message.
+message Message {
+  // Sum of all possible messages.
+  oneof sum {
+    BlockRequest block_request = 1;
+    NoBlockResponse no_block_response = 2;
+    BlockResponse block_response = 3;
+    StatusRequest status_request = 4;
+    StatusResponse status_response = 5;
+  }
+}
diff --git a/proto/cometbft/blocksync/v1beta1/types.proto b/proto/cometbft/blocksync/v1beta1/types.proto
new file mode 100644
index 00000000000..a857914cc1d
--- /dev/null
+++ b/proto/cometbft/blocksync/v1beta1/types.proto
@@ -0,0 +1,43 @@
+syntax = "proto3";
+package cometbft.blocksync.v1beta1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/blocksync/v1beta1";
+
+import "cometbft/types/v1beta1/block.proto";
+
+// BlockRequest requests a block for a specific height
+message BlockRequest {
+  int64 height = 1;
+}
+
+// NoBlockResponse informs the node that the peer does not have a block at the requested height
+message NoBlockResponse {
+  int64 height = 1;
+}
+
+// BlockResponse returns a block to the requester.
+message BlockResponse {
+  cometbft.types.v1beta1.Block block = 1;
+}
+
+// StatusRequest requests the status of a peer.
+message StatusRequest {
+}
+
+// StatusResponse is a peer's response informing of its status.
+message StatusResponse {
+  int64 height = 1;
+  int64 base = 2;
+}
+
+// Message is an abstract blocksync message.
+message Message {
+  // Sum of all possible messages.
+  oneof sum {
+    BlockRequest block_request = 1;
+    NoBlockResponse no_block_response = 2;
+    BlockResponse block_response = 3;
+    StatusRequest status_request = 4;
+    StatusResponse status_response = 5;
+  }
+}
diff --git a/proto/cometbft/consensus/v1/types.proto b/proto/cometbft/consensus/v1/types.proto
new file mode 100644
index 00000000000..3be71e621e2
--- /dev/null
+++ b/proto/cometbft/consensus/v1/types.proto
@@ -0,0 +1,102 @@
+syntax = "proto3";
+package cometbft.consensus.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/consensus/v1";
+
+import "gogoproto/gogo.proto";
+import "cometbft/libs/bits/v1/types.proto";
+import "cometbft/types/v1/types.proto";
+
+// NewRoundStep is sent for every step taken in the ConsensusState,
+// i.e., for every height/round/step transition.
+message NewRoundStep {
+  int64 height = 1;
+  int32 round = 2;
+  uint32 step = 3;
+  int64 seconds_since_start_time = 4;
+  int32 last_commit_round = 5;
+}
+
+// NewValidBlock is sent when a validator observes a valid block B in some round r,
+// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
+// In case the block is also committed, then the IsCommit flag is set to true.
+message NewValidBlock {
+  int64 height = 1;
+  int32 round = 2;
+  cometbft.types.v1.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false];
+  cometbft.libs.bits.v1.BitArray block_parts = 4;
+  bool is_commit = 5;
+}
+
+// Proposal is sent when a new block is proposed.
+message Proposal {
+  cometbft.types.v1.Proposal proposal = 1 [(gogoproto.nullable) = false];
+}
+
+// ProposalPOL is sent when a previous proposal is re-proposed.
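+// It carries the POL (proof-of-lock) round and a bit-array marking which
+// validators' prevotes make up that proof-of-lock.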
+message ProposalPOL { + int64 height = 1; + int32 proposal_pol_round = 2; + cometbft.libs.bits.v1.BitArray proposal_pol = 3 [(gogoproto.nullable) = false]; +} + +// BlockPart is sent when gossipping a piece of the proposed block. +message BlockPart { + int64 height = 1; + int32 round = 2; + cometbft.types.v1.Part part = 3 [(gogoproto.nullable) = false]; +} + +// Vote is sent when voting for a proposal (or lack thereof). +message Vote { + cometbft.types.v1.Vote vote = 1; +} + +// HasVote is sent to indicate that a particular vote has been received. +message HasVote { + int64 height = 1; + int32 round = 2; + cometbft.types.v1.SignedMsgType type = 3; + int32 index = 4; +} + +// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. +message VoteSetMaj23 { + int64 height = 1; + int32 round = 2; + cometbft.types.v1.SignedMsgType type = 3; + cometbft.types.v1.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; +} + +// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. +message VoteSetBits { + int64 height = 1; + int32 round = 2; + cometbft.types.v1.SignedMsgType type = 3; + cometbft.types.v1.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + cometbft.libs.bits.v1.BitArray votes = 5 [(gogoproto.nullable) = false]; +} + +// HasProposalBlockPart is sent to indicate that a particular proposal block part has been received. +message HasProposalBlockPart { + int64 height = 1; + int32 round = 2; + int32 index = 3; +} + +// Message is an abstract consensus message. +message Message { + // Sum of all possible messages. + oneof sum { + NewRoundStep new_round_step = 1; + NewValidBlock new_valid_block = 2; + Proposal proposal = 3; + ProposalPOL proposal_pol = 4; + BlockPart block_part = 5; + Vote vote = 6; + HasVote has_vote = 7; + VoteSetMaj23 vote_set_maj23 = 8; + VoteSetBits vote_set_bits = 9; + HasProposalBlockPart has_proposal_block_part = 10; + } +} diff --git a/proto/cometbft/consensus/v1/wal.proto b/proto/cometbft/consensus/v1/wal.proto new file mode 100644 index 00000000000..ae7f2619667 --- /dev/null +++ b/proto/cometbft/consensus/v1/wal.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package cometbft.consensus.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/consensus/v1"; + +import "cometbft/consensus/v1/types.proto"; +import "cometbft/types/v1/events.proto"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +// MsgInfo are msgs from the reactor which may update the state +message MsgInfo { + Message msg = 1 [(gogoproto.nullable) = false]; + string peer_id = 2 [(gogoproto.customname) = "PeerID"]; + google.protobuf.Timestamp receive_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; +} + +// TimeoutInfo internally generated messages which may update the state +message TimeoutInfo { + google.protobuf.Duration duration = 1 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + int64 height = 2; + int32 round = 3; + uint32 step = 4; +} + +// EndHeight marks the end of the given height inside WAL. +// @internal used by scripts/wal2json util. +message EndHeight { + int64 height = 1; +} + +// WALMessage describes a consensus WAL (Write Ahead Log) entry. +message WALMessage { + // Sum of all possible messages. 
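+  // An entry is either a consensus state event, a received message, an
+  // internally generated timeout, or an end-of-height marker.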
+ oneof sum { + cometbft.types.v1.EventDataRoundState event_data_round_state = 1; + MsgInfo msg_info = 2; + TimeoutInfo timeout_info = 3; + EndHeight end_height = 4; + } +} + +// TimedWALMessage wraps WALMessage and adds Time for debugging purposes. +message TimedWALMessage { + google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + WALMessage msg = 2; +} diff --git a/proto/cometbft/consensus/v1beta1/types.proto b/proto/cometbft/consensus/v1beta1/types.proto new file mode 100644 index 00000000000..be8dc22b669 --- /dev/null +++ b/proto/cometbft/consensus/v1beta1/types.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; +package cometbft.consensus.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/consensus/v1beta1"; + +import "gogoproto/gogo.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "cometbft/libs/bits/v1/types.proto"; + +// NewRoundStep is sent for every step taken in the ConsensusState. +// For every height/round/step transition +message NewRoundStep { + int64 height = 1; + int32 round = 2; + uint32 step = 3; + int64 seconds_since_start_time = 4; + int32 last_commit_round = 5; +} + +// NewValidBlock is sent when a validator observes a valid block B in some round r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// In case the block is also committed, then IsCommit flag is set to true. +message NewValidBlock { + int64 height = 1; + int32 round = 2; + cometbft.types.v1beta1.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false]; + cometbft.libs.bits.v1.BitArray block_parts = 4; + bool is_commit = 5; +} + +// Proposal is sent when a new block is proposed. +message Proposal { + cometbft.types.v1beta1.Proposal proposal = 1 [(gogoproto.nullable) = false]; +} + +// ProposalPOL is sent when a previous proposal is re-proposed. +message ProposalPOL { + int64 height = 1; + int32 proposal_pol_round = 2; + cometbft.libs.bits.v1.BitArray proposal_pol = 3 [(gogoproto.nullable) = false]; +} + +// BlockPart is sent when gossipping a piece of the proposed block. +message BlockPart { + int64 height = 1; + int32 round = 2; + cometbft.types.v1beta1.Part part = 3 [(gogoproto.nullable) = false]; +} + +// Vote is sent when voting for a proposal (or lack thereof). +message Vote { + cometbft.types.v1beta1.Vote vote = 1; +} + +// HasVote is sent to indicate that a particular vote has been received. +message HasVote { + int64 height = 1; + int32 round = 2; + cometbft.types.v1beta1.SignedMsgType type = 3; + int32 index = 4; +} + +// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. +message VoteSetMaj23 { + int64 height = 1; + int32 round = 2; + cometbft.types.v1beta1.SignedMsgType type = 3; + cometbft.types.v1beta1.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; +} + +// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. +message VoteSetBits { + int64 height = 1; + int32 round = 2; + cometbft.types.v1beta1.SignedMsgType type = 3; + cometbft.types.v1beta1.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + cometbft.libs.bits.v1.BitArray votes = 5 [(gogoproto.nullable) = false]; +} + +// Message is an abstract consensus message. +message Message { + // Sum of all possible messages. 
+ oneof sum { + NewRoundStep new_round_step = 1; + NewValidBlock new_valid_block = 2; + Proposal proposal = 3; + ProposalPOL proposal_pol = 4; + BlockPart block_part = 5; + Vote vote = 6; + HasVote has_vote = 7; + VoteSetMaj23 vote_set_maj23 = 8; + VoteSetBits vote_set_bits = 9; + } +} diff --git a/proto/cometbft/consensus/v1beta1/wal.proto b/proto/cometbft/consensus/v1beta1/wal.proto new file mode 100644 index 00000000000..50a83b67d9b --- /dev/null +++ b/proto/cometbft/consensus/v1beta1/wal.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; +package cometbft.consensus.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/consensus/v1beta1"; + +import "gogoproto/gogo.proto"; +import "cometbft/consensus/v1beta1/types.proto"; +import "cometbft/types/v1beta1/events.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +// MsgInfo are msgs from the reactor which may update the state +message MsgInfo { + Message msg = 1 [(gogoproto.nullable) = false]; + string peer_id = 2 [(gogoproto.customname) = "PeerID"]; +} + +// TimeoutInfo internally generated messages which may update the state +message TimeoutInfo { + google.protobuf.Duration duration = 1 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + int64 height = 2; + int32 round = 3; + uint32 step = 4; +} + +// EndHeight marks the end of the given height inside WAL. +// @internal used by scripts/wal2json util. +message EndHeight { + int64 height = 1; +} + +// WALMessage describes a consensus WAL (Write Ahead Log) entry. +message WALMessage { + // Sum of all possible messages. + oneof sum { + cometbft.types.v1beta1.EventDataRoundState event_data_round_state = 1; + MsgInfo msg_info = 2; + TimeoutInfo timeout_info = 3; + EndHeight end_height = 4; + } +} + +// TimedWALMessage wraps WALMessage and adds Time for debugging purposes. +message TimedWALMessage { + google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + WALMessage msg = 2; +} diff --git a/proto/cometbft/crypto/v1/keys.proto b/proto/cometbft/crypto/v1/keys.proto new file mode 100644 index 00000000000..83479b73f98 --- /dev/null +++ b/proto/cometbft/crypto/v1/keys.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package cometbft.crypto.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/crypto/v1"; + +import "gogoproto/gogo.proto"; + +// PublicKey is a ED25519 or a secp256k1 public key. +message PublicKey { + option (gogoproto.compare) = true; + option (gogoproto.equal) = true; + + // The type of key. + oneof sum { + bytes ed25519 = 1; + bytes secp256k1 = 2; + bytes bls12381 = 3; + } +} diff --git a/proto/cometbft/crypto/v1/proof.proto b/proto/cometbft/crypto/v1/proof.proto new file mode 100644 index 00000000000..2b462166c83 --- /dev/null +++ b/proto/cometbft/crypto/v1/proof.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; +package cometbft.crypto.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/crypto/v1"; + +import "gogoproto/gogo.proto"; + +// Proof is a Merkle proof. +message Proof { + int64 total = 1; + int64 index = 2; + bytes leaf_hash = 3; + repeated bytes aunts = 4; +} + +// ValueOp is a Merkle proof for a single key. +message ValueOp { + // Encoded in ProofOp.Key. + bytes key = 1; + + // To encode in ProofOp.Data + Proof proof = 2; +} + +// DominoOp always returns the given output. 
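+// It is mainly used to test chains of proof operators: it consumes a fixed
+// input and produces a fixed output, regardless of the proof data.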
+message DominoOp { + string key = 1; + string input = 2; + string output = 3; +} + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing necessary data +// for example neighbouring node hash +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} + +// ProofOps is Merkle proof defined by the list of ProofOps +message ProofOps { + repeated ProofOp ops = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/cometbft/libs/bits/v1/types.proto b/proto/cometbft/libs/bits/v1/types.proto new file mode 100644 index 00000000000..b735bf88347 --- /dev/null +++ b/proto/cometbft/libs/bits/v1/types.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package cometbft.libs.bits.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/libs/bits/v1"; + +// BitArray is an array of bits. +message BitArray { + int64 bits = 1; + repeated uint64 elems = 2; +} diff --git a/proto/cometbft/mempool/v1/types.proto b/proto/cometbft/mempool/v1/types.proto new file mode 100644 index 00000000000..1ab4e74543c --- /dev/null +++ b/proto/cometbft/mempool/v1/types.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package cometbft.mempool.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/mempool/v1"; + +// Txs contains a list of transaction from the mempool. +message Txs { + repeated bytes txs = 1; +} + +// Message is an abstract mempool message. +message Message { + // Sum of all possible messages. + oneof sum { + Txs txs = 1; + } +} diff --git a/proto/cometbft/p2p/v1/conn.proto b/proto/cometbft/p2p/v1/conn.proto new file mode 100644 index 00000000000..c69b0349303 --- /dev/null +++ b/proto/cometbft/p2p/v1/conn.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; +package cometbft.p2p.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/p2p/v1"; + +import "gogoproto/gogo.proto"; +import "cometbft/crypto/v1/keys.proto"; + +// PacketPing is a request to confirm that the connection is alive. +message PacketPing {} + +// PacketPong is a response to confirm that the connection is alive. +message PacketPong {} + +// PacketMsg contains data for the specified channel ID. EOF means the message +// is fully received. +message PacketMsg { + int32 channel_id = 1 [(gogoproto.customname) = "ChannelID"]; + bool eof = 2 [(gogoproto.customname) = "EOF"]; + bytes data = 3; +} + +// Packet is an abstract p2p message. +message Packet { + // Sum of all possible messages. + oneof sum { + PacketPing packet_ping = 1; + PacketPong packet_pong = 2; + PacketMsg packet_msg = 3; + } +} + +// AuthSigMessage is sent during the authentication and contains our/remote's +// signature along with the public key. +message AuthSigMessage { + cometbft.crypto.v1.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + bytes sig = 2; +} diff --git a/proto/cometbft/p2p/v1/pex.proto b/proto/cometbft/p2p/v1/pex.proto new file mode 100644 index 00000000000..c7a9144cf52 --- /dev/null +++ b/proto/cometbft/p2p/v1/pex.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package cometbft.p2p.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/p2p/v1"; + +import "cometbft/p2p/v1/types.proto"; +import "gogoproto/gogo.proto"; + +// PexRequest is a request for peer addresses. +message PexRequest {} + +// PexAddrs is a response with peer addresses. +message PexAddrs { + repeated NetAddress addrs = 1 [(gogoproto.nullable) = false]; +} + +// Message is an abstract PEX message. +message Message { + // Sum of all possible messages. 
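+  // Peers exchange these messages to discover and share known node addresses
+  // (peer exchange, or PEX).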
+  oneof sum {
+    PexRequest pex_request = 1;
+    PexAddrs pex_addrs = 2;
+  }
+}
diff --git a/proto/cometbft/p2p/v1/types.proto b/proto/cometbft/p2p/v1/types.proto
new file mode 100644
index 00000000000..147752aa912
--- /dev/null
+++ b/proto/cometbft/p2p/v1/types.proto
@@ -0,0 +1,39 @@
+syntax = "proto3";
+package cometbft.p2p.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/p2p/v1";
+
+import "gogoproto/gogo.proto";
+
+// NetAddress represents a peer's network address.
+message NetAddress {
+  string id = 1 [(gogoproto.customname) = "ID"];
+  string ip = 2 [(gogoproto.customname) = "IP"];
+  uint32 port = 3;
+}
+
+// ProtocolVersion represents the current p2p protocol version.
+message ProtocolVersion {
+  uint64 p2p = 1 [(gogoproto.customname) = "P2P"];
+  uint64 block = 2;
+  uint64 app = 3;
+}
+
+// DefaultNodeInfo is a basic node's information sent to other peers during the
+// p2p handshake.
+message DefaultNodeInfo {
+  ProtocolVersion protocol_version = 1 [(gogoproto.nullable) = false];
+  string default_node_id = 2 [(gogoproto.customname) = "DefaultNodeID"];
+  string listen_addr = 3;
+  string network = 4;
+  string version = 5;
+  bytes channels = 6;
+  string moniker = 7;
+  DefaultNodeInfoOther other = 8 [(gogoproto.nullable) = false];
+}
+
+// DefaultNodeInfoOther is the misc. application specific data.
+message DefaultNodeInfoOther {
+  string tx_index = 1;
+  string rpc_address = 2 [(gogoproto.customname) = "RPCAddress"];
+}
diff --git a/proto/cometbft/privval/v1/types.proto b/proto/cometbft/privval/v1/types.proto
new file mode 100644
index 00000000000..dd170e872d0
--- /dev/null
+++ b/proto/cometbft/privval/v1/types.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+package cometbft.privval.v1;
+
+import "cometbft/types/v1/types.proto";
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/privval/v1";
+
+// RemoteSignerError is returned when the remote signer fails.
+message RemoteSignerError {
+  int32 code = 1;
+  string description = 2;
+}
+
+// PubKeyRequest requests the consensus public key from the remote signer.
+message PubKeyRequest {
+  string chain_id = 1;
+}
+
+// PubKeyResponse is a response message containing the public key.
+message PubKeyResponse {
+  RemoteSignerError error = 2;
+  bytes pub_key_bytes = 3;
+  string pub_key_type = 4;
+
+  reserved 1; // pub_key
+}
+
+// SignVoteRequest is a request to sign a vote
+message SignVoteRequest {
+  cometbft.types.v1.Vote vote = 1;
+  string chain_id = 2;
+  bool skip_extension_signing = 3; // if true, the signer may skip signing the extension bytes.
+}
+
+// SignedVoteResponse is a response containing a signed vote or an error
+message SignedVoteResponse {
+  cometbft.types.v1.Vote vote = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignProposalRequest is a request to sign a proposal
+message SignProposalRequest {
+  cometbft.types.v1.Proposal proposal = 1;
+  string chain_id = 2;
+}
+
+// SignedProposalResponse is a response containing a signed proposal or an error
+message SignedProposalResponse {
+  cometbft.types.v1.Proposal proposal = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignBytesRequest is a request to sign arbitrary bytes
+message SignBytesRequest {
+  bytes value = 1;
+}
+
+// SignBytesResponse is a response containing a signature or an error
+message SignBytesResponse {
+  bytes signature = 1;
+  RemoteSignerError error = 2;
+}
+
+// PingRequest is a request to confirm that the connection is alive.
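+// It is sent periodically to detect a dead connection to the remote signer.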
+message PingRequest {}
+
+// PingResponse is a response to confirm that the connection is alive.
+message PingResponse {}
+
+// Message is an abstract message to/from the remote signer.
+message Message {
+  // Sum of all possible messages.
+  oneof sum {
+    PubKeyRequest pub_key_request = 1;
+    PubKeyResponse pub_key_response = 2;
+    SignVoteRequest sign_vote_request = 3;
+    SignedVoteResponse signed_vote_response = 4;
+    SignProposalRequest sign_proposal_request = 5;
+    SignedProposalResponse signed_proposal_response = 6;
+    PingRequest ping_request = 7;
+    PingResponse ping_response = 8;
+    SignBytesRequest sign_bytes_request = 9;
+    SignBytesResponse sign_bytes_response = 10;
+  }
+}
diff --git a/proto/cometbft/privval/v1beta1/types.proto b/proto/cometbft/privval/v1beta1/types.proto
new file mode 100644
index 00000000000..fffde69db96
--- /dev/null
+++ b/proto/cometbft/privval/v1beta1/types.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+package cometbft.privval.v1beta1;
+
+import "cometbft/crypto/v1/keys.proto";
+import "cometbft/types/v1beta1/types.proto";
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/privval/v1beta1";
+
+// Errors is a list of error codes that can be returned by the remote signer.
+enum Errors {
+  // Unknown error
+  ERRORS_UNKNOWN = 0;
+  // Unexpected response
+  ERRORS_UNEXPECTED_RESPONSE = 1;
+  // Connection lost
+  ERRORS_NO_CONNECTION = 2;
+  // Connection timeout
+  ERRORS_CONNECTION_TIMEOUT = 3;
+  // Read timeout
+  ERRORS_READ_TIMEOUT = 4;
+  // Write timeout
+  ERRORS_WRITE_TIMEOUT = 5;
+}
+
+// RemoteSignerError is returned when the remote signer fails.
+message RemoteSignerError {
+  int32 code = 1;
+  string description = 2;
+}
+
+// PubKeyRequest requests the consensus public key from the remote signer.
+message PubKeyRequest {
+  string chain_id = 1;
+}
+
+// PubKeyResponse is a response message containing the public key.
+message PubKeyResponse {
+  cometbft.crypto.v1.PublicKey pub_key = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignVoteRequest is a request to sign a vote
+message SignVoteRequest {
+  cometbft.types.v1beta1.Vote vote = 1;
+  string chain_id = 2;
+}
+
+// SignedVoteResponse is a response containing a signed vote or an error
+message SignedVoteResponse {
+  cometbft.types.v1beta1.Vote vote = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignProposalRequest is a request to sign a proposal
+message SignProposalRequest {
+  cometbft.types.v1beta1.Proposal proposal = 1;
+  string chain_id = 2;
+}
+
+// SignedProposalResponse is a response containing a signed proposal or an error
+message SignedProposalResponse {
+  cometbft.types.v1beta1.Proposal proposal = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// PingRequest is a request to confirm that the connection is alive.
+message PingRequest {}
+
+// PingResponse is a response to confirm that the connection is alive.
+message PingResponse {}
+
+// Message is an abstract message to/from the remote signer.
+message Message {
+  // Sum of all possible messages.
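+  // Requests are sent by the node; the remote signer replies with the
+  // corresponding *Response message, carrying either a result or an error.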
+  oneof sum {
+    PubKeyRequest pub_key_request = 1;
+    PubKeyResponse pub_key_response = 2;
+    SignVoteRequest sign_vote_request = 3;
+    SignedVoteResponse signed_vote_response = 4;
+    SignProposalRequest sign_proposal_request = 5;
+    SignedProposalResponse signed_proposal_response = 6;
+    PingRequest ping_request = 7;
+    PingResponse ping_response = 8;
+  }
+}
diff --git a/proto/cometbft/privval/v1beta2/types.proto b/proto/cometbft/privval/v1beta2/types.proto
new file mode 100644
index 00000000000..b2496da545f
--- /dev/null
+++ b/proto/cometbft/privval/v1beta2/types.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+package cometbft.privval.v1beta2;
+
+import "cometbft/crypto/v1/keys.proto";
+import "cometbft/types/v1/types.proto";
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/privval/v1beta2";
+
+// Errors is a list of error codes that can be returned by the remote signer.
+enum Errors {
+  // Unknown error
+  ERRORS_UNKNOWN = 0;
+  // Unexpected response
+  ERRORS_UNEXPECTED_RESPONSE = 1;
+  // Connection lost
+  ERRORS_NO_CONNECTION = 2;
+  // Connection timeout
+  ERRORS_CONNECTION_TIMEOUT = 3;
+  // Read timeout
+  ERRORS_READ_TIMEOUT = 4;
+  // Write timeout
+  ERRORS_WRITE_TIMEOUT = 5;
+}
+
+// RemoteSignerError is returned when the remote signer fails.
+message RemoteSignerError {
+  int32 code = 1;
+  string description = 2;
+}
+
+// PubKeyRequest requests the consensus public key from the remote signer.
+message PubKeyRequest {
+  string chain_id = 1;
+}
+
+// PubKeyResponse is a response message containing the public key.
+message PubKeyResponse {
+  cometbft.crypto.v1.PublicKey pub_key = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignVoteRequest is a request to sign a vote
+message SignVoteRequest {
+  cometbft.types.v1.Vote vote = 1;
+  string chain_id = 2;
+}
+
+// SignedVoteResponse is a response containing a signed vote or an error
+message SignedVoteResponse {
+  cometbft.types.v1.Vote vote = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignProposalRequest is a request to sign a proposal
+message SignProposalRequest {
+  cometbft.types.v1.Proposal proposal = 1;
+  string chain_id = 2;
+}
+
+// SignedProposalResponse is a response containing a signed proposal or an error
+message SignedProposalResponse {
+  cometbft.types.v1.Proposal proposal = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// PingRequest is a request to confirm that the connection is alive.
+message PingRequest {}
+
+// PingResponse is a response to confirm that the connection is alive.
+message PingResponse {}
+
+// Message is an abstract message to/from the remote signer.
+message Message {
+  // Sum of all possible messages.
+ oneof sum { + PubKeyRequest pub_key_request = 1; + PubKeyResponse pub_key_response = 2; + SignVoteRequest sign_vote_request = 3; + SignedVoteResponse signed_vote_response = 4; + SignProposalRequest sign_proposal_request = 5; + SignedProposalResponse signed_proposal_response = 6; + PingRequest ping_request = 7; + PingResponse ping_response = 8; + } +} diff --git a/proto/cometbft/rpc/grpc/v1beta1/types.proto b/proto/cometbft/rpc/grpc/v1beta1/types.proto new file mode 100644 index 00000000000..0abaaf4eacc --- /dev/null +++ b/proto/cometbft/rpc/grpc/v1beta1/types.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package cometbft.rpc.grpc.v1beta1; +option go_package = "github.com/cometbft/cometbft/api/cometbft/rpc/grpc/v1beta1"; + +import "cometbft/abci/v1beta1/types.proto"; + +// RequestPing is a request to confirm that the connection is alive. +message RequestPing {} + +// RequestBroadcastTx is a request to broadcast the transaction. +message RequestBroadcastTx { + bytes tx = 1; +} + +// ResponsePing is a response to confirm that the connection is alive. +message ResponsePing {} + +// ResponseBroadcastTx is a response of broadcasting the transaction. +message ResponseBroadcastTx { + cometbft.abci.v1beta1.ResponseCheckTx check_tx = 1; + cometbft.abci.v1beta1.ResponseDeliverTx deliver_tx = 2; +} + +// BroadcastAPI is an API for broadcasting transactions. +service BroadcastAPI { + // Ping the connection. + rpc Ping(RequestPing) returns (ResponsePing); + // BroadcastTx broadcasts the transaction. + rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); +} diff --git a/proto/cometbft/rpc/grpc/v1beta2/types.proto b/proto/cometbft/rpc/grpc/v1beta2/types.proto new file mode 100644 index 00000000000..87b780934d2 --- /dev/null +++ b/proto/cometbft/rpc/grpc/v1beta2/types.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; +package cometbft.rpc.grpc.v1beta2; +option go_package = "github.com/cometbft/cometbft/api/cometbft/rpc/grpc/v1beta2"; + +import "cometbft/rpc/grpc/v1beta1/types.proto"; +import "cometbft/abci/v1beta2/types.proto"; + +// ResponseBroadcastTx is a response of broadcasting the transaction. +message ResponseBroadcastTx { + cometbft.abci.v1beta2.ResponseCheckTx check_tx = 1; + cometbft.abci.v1beta2.ResponseDeliverTx deliver_tx = 2; +} + +// BroadcastAPI is an API for broadcasting transactions. +service BroadcastAPI { + // Ping the connection. + rpc Ping(v1beta1.RequestPing) returns (v1beta1.ResponsePing); + // BroadcastTx broadcasts the transaction. + rpc BroadcastTx(v1beta1.RequestBroadcastTx) returns (ResponseBroadcastTx); +} diff --git a/proto/cometbft/rpc/grpc/v1beta3/types.proto b/proto/cometbft/rpc/grpc/v1beta3/types.proto new file mode 100644 index 00000000000..2d25cace57a --- /dev/null +++ b/proto/cometbft/rpc/grpc/v1beta3/types.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package cometbft.rpc.grpc.v1beta3; +option go_package = "github.com/cometbft/cometbft/api/cometbft/rpc/grpc/v1beta3"; + +import "cometbft/rpc/grpc/v1beta1/types.proto"; + +import "cometbft/abci/v1beta3/types.proto"; + +// ResponseBroadcastTx is a response of broadcasting the transaction. +message ResponseBroadcastTx { + cometbft.abci.v1beta3.ResponseCheckTx check_tx = 1; + cometbft.abci.v1beta3.ExecTxResult tx_result = 2; +} + +// BroadcastAPI is an API for broadcasting transactions. +// +// Deprecated: This API will be superseded by a more comprehensive gRPC-based +// broadcast API, and is scheduled for removal after v0.38. +service BroadcastAPI { + // Ping the connection. 
+  rpc Ping(v1beta1.RequestPing) returns (v1beta1.ResponsePing);
+  // BroadcastTx broadcasts a transaction.
+  rpc BroadcastTx(v1beta1.RequestBroadcastTx) returns (ResponseBroadcastTx);
+}
diff --git a/proto/cometbft/services/block/v1/block.proto b/proto/cometbft/services/block/v1/block.proto
new file mode 100644
index 00000000000..aeac0397c94
--- /dev/null
+++ b/proto/cometbft/services/block/v1/block.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+package cometbft.services.block.v1;
+
+import "cometbft/types/v1/types.proto";
+import "cometbft/types/v1/block.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/services/block/v1";
+
+// GetByHeightRequest is a request for a block at the specified height.
+message GetByHeightRequest {
+  // The height of the block requested.
+  int64 height = 1;
+}
+
+// GetByHeightResponse contains the block ID and the block at the specified height.
+message GetByHeightResponse {
+  cometbft.types.v1.BlockID block_id = 1;
+  cometbft.types.v1.Block block = 2;
+}
+
+// GetLatestHeightRequest - empty message since no parameter is required
+message GetLatestHeightRequest {}
+
+// GetLatestHeightResponse provides the height of the latest committed block.
+message GetLatestHeightResponse {
+  // The height of the latest committed block. Will be 0 if no data has been
+  // committed yet.
+  int64 height = 1;
+}
diff --git a/proto/cometbft/services/block/v1/block_service.proto b/proto/cometbft/services/block/v1/block_service.proto
new file mode 100644
index 00000000000..4d067e3a36a
--- /dev/null
+++ b/proto/cometbft/services/block/v1/block_service.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+package cometbft.services.block.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/services/block/v1";
+
+import "cometbft/services/block/v1/block.proto";
+
+// BlockService provides information about blocks
+service BlockService {
+  // GetByHeight retrieves the block information at a particular height.
+  rpc GetByHeight(GetByHeightRequest) returns (GetByHeightResponse);
+
+  // GetLatestHeight returns a stream of the latest block heights committed by
+  // the network. This is a long-lived stream that is only terminated by the
+  // server if an error occurs. The caller is expected to handle such
+  // disconnections and automatically reconnect.
+  rpc GetLatestHeight(GetLatestHeightRequest) returns (stream GetLatestHeightResponse);
+}
diff --git a/proto/cometbft/services/block_results/v1/block_results.proto b/proto/cometbft/services/block_results/v1/block_results.proto
new file mode 100644
index 00000000000..9bcc0244ffb
--- /dev/null
+++ b/proto/cometbft/services/block_results/v1/block_results.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+package cometbft.services.block_results.v1;
+
+import "cometbft/abci/v1/types.proto";
+import "cometbft/types/v1/params.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/services/block_results/v1";
+
+// GetBlockResultsRequest is a request for the BlockResults of a given height.
+message GetBlockResultsRequest {
+  int64 height = 1;
+}
+
+// GetBlockResultsResponse contains the block results for the given height.
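+// Its fields mirror what the application returned from FinalizeBlock when the
+// block at this height was executed.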
+message GetBlockResultsResponse {
+  int64 height = 1;
+  repeated cometbft.abci.v1.ExecTxResult tx_results = 2;
+  repeated cometbft.abci.v1.Event finalize_block_events = 3;
+  repeated cometbft.abci.v1.ValidatorUpdate validator_updates = 4;
+  cometbft.types.v1.ConsensusParams consensus_param_updates = 5;
+  bytes app_hash = 6;
+}
diff --git a/proto/cometbft/services/block_results/v1/block_results_service.proto b/proto/cometbft/services/block_results/v1/block_results_service.proto
new file mode 100644
index 00000000000..e8829eb97a7
--- /dev/null
+++ b/proto/cometbft/services/block_results/v1/block_results_service.proto
@@ -0,0 +1,14 @@
+syntax = "proto3";
+package cometbft.services.block_results.v1;
+
+import "cometbft/services/block_results/v1/block_results.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/services/block_results/v1";
+
+/*
+  BlockResultsService provides the block results of a given, or the latest, height.
+*/
+service BlockResultsService {
+  // GetBlockResults returns the BlockResults of the requested height.
+  rpc GetBlockResults(GetBlockResultsRequest) returns (GetBlockResultsResponse);
+}
diff --git a/proto/cometbft/services/pruning/v1/pruning.proto b/proto/cometbft/services/pruning/v1/pruning.proto
new file mode 100644
index 00000000000..24391b60e9f
--- /dev/null
+++ b/proto/cometbft/services/pruning/v1/pruning.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+
+package cometbft.services.pruning.v1;
+
+// SetBlockRetainHeightRequest sets the retain height for blocks.
+message SetBlockRetainHeightRequest {
+  uint64 height = 1;
+}
+
+// SetBlockRetainHeightResponse is empty.
+message SetBlockRetainHeightResponse {}
+
+// GetBlockRetainHeightRequest is a request for the retain height.
+message GetBlockRetainHeightRequest {}
+
+// GetBlockRetainHeightResponse returns the retain height for blocks.
+message GetBlockRetainHeightResponse {
+  // The retain height set by the application.
+  uint64 app_retain_height = 1;
+
+  // The retain height set via the pruning service (e.g. by the data
+  // companion) specifically for blocks.
+  uint64 pruning_service_retain_height = 2;
+}
+
+// SetBlockResultsRetainHeightRequest sets the retain height for block results.
+message SetBlockResultsRetainHeightRequest {
+  uint64 height = 1;
+}
+
+// SetBlockResultsRetainHeightResponse is empty.
+message SetBlockResultsRetainHeightResponse {}
+
+// GetBlockResultsRetainHeightRequest is a request for the retain height.
+message GetBlockResultsRetainHeightRequest {}
+
+// GetBlockResultsRetainHeightResponse returns the retain height for block results.
+message GetBlockResultsRetainHeightResponse {
+  // The retain height set by the pruning service (e.g. by the data
+  // companion) specifically for block results.
+  uint64 pruning_service_retain_height = 1;
+}
+
+// SetTxIndexerRetainHeightRequest sets the retain height for the tx indexer.
+message SetTxIndexerRetainHeightRequest {
+  uint64 height = 1;
+}
+
+// SetTxIndexerRetainHeightResponse is empty.
+message SetTxIndexerRetainHeightResponse {}
+
+// GetTxIndexerRetainHeightRequest is a request for the retain height.
+message GetTxIndexerRetainHeightRequest {}
+
+// GetTxIndexerRetainHeightResponse returns the retain height for the tx indexer.
+message GetTxIndexerRetainHeightResponse {
+  uint64 height = 1;
+}
+
+// SetBlockIndexerRetainHeightRequest sets the retain height for the block indexer.
+message SetBlockIndexerRetainHeightRequest {
+  uint64 height = 1;
+}
+
+// SetBlockIndexerRetainHeightResponse is empty.
+message SetBlockIndexerRetainHeightResponse {} + +// GetBlockIndexerRetainHeightRequest is a request for the retain height. +message GetBlockIndexerRetainHeightRequest {} + +// GetBlockIndexerRetainHeightResponse returns the retain height for the block indexer. +message GetBlockIndexerRetainHeightResponse { + uint64 height = 1; +} diff --git a/proto/cometbft/services/pruning/v1/service.proto b/proto/cometbft/services/pruning/v1/service.proto new file mode 100644 index 00000000000..f82e3f1a98a --- /dev/null +++ b/proto/cometbft/services/pruning/v1/service.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package cometbft.services.pruning.v1; + +import "cometbft/services/pruning/v1/pruning.proto"; + +// PruningService provides privileged access to specialized pruning +// functionality on the CometBFT node to help control node storage. +service PruningService { + // SetBlockRetainHeightRequest indicates to the node that it can safely + // prune all block data up to the specified retain height. + // + // The lower of this retain height and that set by the application in its + // Commit response will be used by the node to determine which heights' data + // can be pruned. + rpc SetBlockRetainHeight(SetBlockRetainHeightRequest) returns (SetBlockRetainHeightResponse); + + // GetBlockRetainHeight returns information about the retain height + // parameters used by the node to influence block retention/pruning. + rpc GetBlockRetainHeight(GetBlockRetainHeightRequest) returns (GetBlockRetainHeightResponse); + + // SetBlockResultsRetainHeightRequest indicates to the node that it can + // safely prune all block results data up to the specified height. + // + // The node will always store the block results for the latest height to + // help facilitate crash recovery. + rpc SetBlockResultsRetainHeight(SetBlockResultsRetainHeightRequest) returns (SetBlockResultsRetainHeightResponse); + + // GetBlockResultsRetainHeight returns information about the retain height + // parameters used by the node to influence block results retention/pruning. + rpc GetBlockResultsRetainHeight(GetBlockResultsRetainHeightRequest) returns (GetBlockResultsRetainHeightResponse); + + // SetTxIndexerRetainHeightRequest indicates to the node that it can safely + // prune all tx indices up to the specified retain height. + rpc SetTxIndexerRetainHeight(SetTxIndexerRetainHeightRequest) returns (SetTxIndexerRetainHeightResponse); + + // GetTxIndexerRetainHeight returns information about the retain height + // parameters used by the node to influence TxIndexer pruning + rpc GetTxIndexerRetainHeight(GetTxIndexerRetainHeightRequest) returns (GetTxIndexerRetainHeightResponse); + + // SetBlockIndexerRetainHeightRequest indicates to the node that it can safely + // prune all block indices up to the specified retain height. 
+  rpc SetBlockIndexerRetainHeight(SetBlockIndexerRetainHeightRequest) returns (SetBlockIndexerRetainHeightResponse);
+
+  // GetBlockIndexerRetainHeight returns information about the retain height
+  // parameters used by the node to influence BlockIndexer pruning.
+  rpc GetBlockIndexerRetainHeight(GetBlockIndexerRetainHeightRequest) returns (GetBlockIndexerRetainHeightResponse);
+}
diff --git a/proto/cometbft/services/version/v1/version.proto b/proto/cometbft/services/version/v1/version.proto
new file mode 100644
index 00000000000..ec967527627
--- /dev/null
+++ b/proto/cometbft/services/version/v1/version.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package cometbft.services.version.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/services/version/v1";
+
+// GetVersionRequest is the request for the ABCI version.
+message GetVersionRequest {}
+
+// GetVersionResponse contains the ABCI application version info.
+message GetVersionResponse {
+  string node = 1; // The semantic version of the node software.
+  string abci = 2; // The version of ABCI used by the node.
+  uint64 p2p = 3; // The version of the P2P protocol.
+  uint64 block = 4; // The version of the block protocol.
+}
diff --git a/proto/cometbft/services/version/v1/version_service.proto b/proto/cometbft/services/version/v1/version_service.proto
new file mode 100644
index 00000000000..dbb7c9d40b6
--- /dev/null
+++ b/proto/cometbft/services/version/v1/version_service.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+package cometbft.services.version.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/services/version/v1";
+
+import "cometbft/services/version/v1/version.proto";
+
+// VersionService simply provides version information about the node and the
+// protocols it uses.
+//
+// The intention with this service is to offer a stable interface through which
+// clients can access version information. This means that the version of the
+// service should be kept stable at v1, with GetVersionResponse evolving only
+// in non-breaking ways.
+service VersionService {
+  // GetVersion retrieves version information about the node and the protocols
+  // it implements.
+  rpc GetVersion(GetVersionRequest) returns (GetVersionResponse);
+}
diff --git a/proto/cometbft/state/v1/types.proto b/proto/cometbft/state/v1/types.proto
new file mode 100644
index 00000000000..6264275cd58
--- /dev/null
+++ b/proto/cometbft/state/v1/types.proto
@@ -0,0 +1,116 @@
+syntax = "proto3";
+package cometbft.state.v1;
+
+import "cometbft/abci/v1/types.proto";
+import "cometbft/types/v1/params.proto";
+import "cometbft/types/v1/types.proto";
+import "cometbft/types/v1/validator.proto";
+import "cometbft/version/v1/types.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/state/v1";
+
+// LegacyABCIResponses retains the responses
+// of the legacy ABCI calls during block processing.
+// Note: ResponseDeliverTx is renamed to ExecTxResult, but they are semantically the same.
+// Kept for backwards compatibility for versions prior to v0.38.
+message LegacyABCIResponses {
+  repeated cometbft.abci.v1.ExecTxResult deliver_txs = 1;
+  ResponseEndBlock end_block = 2;
+  ResponseBeginBlock begin_block = 3;
+}
+
+// ResponseBeginBlock is kept for backward compatibility for versions prior to v0.38,
+// as it was then defined in the cometbft.abci packages.
+message ResponseBeginBlock { + repeated cometbft.abci.v1.Event events = 1 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +// ResponseEndBlock is kept for backward compatibility for versions prior to v0.38, +// its earlier revisions were defined in the cometbft.abci packages. +// It uses an updated definition for the consensus_param_updates field to keep the +// generated data types interoperable with the latest protocol. +message ResponseEndBlock { + repeated cometbft.abci.v1.ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; + cometbft.types.v1.ConsensusParams consensus_param_updates = 2; + repeated cometbft.abci.v1.Event events = 3 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +// ValidatorsInfo represents the latest validator set, or the last height it changed +message ValidatorsInfo { + cometbft.types.v1.ValidatorSet validator_set = 1; + int64 last_height_changed = 2; +} + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +message ConsensusParamsInfo { + cometbft.types.v1.ConsensusParams consensus_params = 1 [(gogoproto.nullable) = false]; + int64 last_height_changed = 2; +} + +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. +message ABCIResponsesInfo { + // Retains the responses of the legacy ABCI calls during block processing. + LegacyABCIResponses legacy_abci_responses = 1; + int64 height = 2; + cometbft.abci.v1.FinalizeBlockResponse finalize_block = 3; +} + +// Version is a message for storing versioning information. +message Version { + cometbft.version.v1.Consensus consensus = 1 [(gogoproto.nullable) = false]; + string software = 2; +} + +// State represents the state of the blockchain. +message State { + Version version = 1 [(gogoproto.nullable) = false]; + + // immutable + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 initial_height = 14; + + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + int64 last_block_height = 3; + cometbft.types.v1.BlockID last_block_id = 4 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "LastBlockID" + ]; + google.protobuf.Timestamp last_block_time = 5 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + cometbft.types.v1.ValidatorSet next_validators = 6; + cometbft.types.v1.ValidatorSet validators = 7; + cometbft.types.v1.ValidatorSet last_validators = 8; + int64 last_height_validators_changed = 9; + + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. + cometbft.types.v1.ConsensusParams consensus_params = 10 [(gogoproto.nullable) = false]; + int64 last_height_consensus_params_changed = 11; + + // Merkle root of the results from executing prev block + bytes last_results_hash = 12; + + // the latest AppHash we've received from calling abci.Commit() + bytes app_hash = 13; + + // delay between the time when this block is committed and the next height is started. 
+ // previously `timeout_commit` in config.toml + google.protobuf.Duration next_block_delay = 15 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; +} diff --git a/proto/cometbft/state/v1beta1/types.proto b/proto/cometbft/state/v1beta1/types.proto new file mode 100644 index 00000000000..51c86b0414d --- /dev/null +++ b/proto/cometbft/state/v1beta1/types.proto @@ -0,0 +1,83 @@ +syntax = "proto3"; +package cometbft.state.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/state/v1beta1"; + +import "gogoproto/gogo.proto"; +import "cometbft/abci/v1beta1/types.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "cometbft/types/v1beta1/validator.proto"; +import "cometbft/types/v1beta1/params.proto"; +import "cometbft/version/v1/types.proto"; +import "google/protobuf/timestamp.proto"; + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. +message ABCIResponses { + repeated cometbft.abci.v1beta1.ResponseDeliverTx deliver_txs = 1; + cometbft.abci.v1beta1.ResponseEndBlock end_block = 2; + cometbft.abci.v1beta1.ResponseBeginBlock begin_block = 3; +} + +// ValidatorsInfo represents the latest validator set, or the last height it changed +message ValidatorsInfo { + cometbft.types.v1beta1.ValidatorSet validator_set = 1; + int64 last_height_changed = 2; +} + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +message ConsensusParamsInfo { + cometbft.types.v1beta1.ConsensusParams consensus_params = 1 [(gogoproto.nullable) = false]; + int64 last_height_changed = 2; +} + +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. +message ABCIResponsesInfo { + ABCIResponses abci_responses = 1; + int64 height = 2; +} + +// Version is a message for storing versioning information. +message Version { + cometbft.version.v1.Consensus consensus = 1 [(gogoproto.nullable) = false]; + string software = 2; +} + +// State represents the state of the blockchain. +message State { + Version version = 1 [(gogoproto.nullable) = false]; + + // immutable + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 initial_height = 14; + + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + int64 last_block_height = 3; + cometbft.types.v1beta1.BlockID last_block_id = 4 + [(gogoproto.nullable) = false, (gogoproto.customname) = "LastBlockID"]; + google.protobuf.Timestamp last_block_time = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + cometbft.types.v1beta1.ValidatorSet next_validators = 6; + cometbft.types.v1beta1.ValidatorSet validators = 7; + cometbft.types.v1beta1.ValidatorSet last_validators = 8; + int64 last_height_validators_changed = 9; + + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. 
+ cometbft.types.v1beta1.ConsensusParams consensus_params = 10 [(gogoproto.nullable) = false]; + int64 last_height_consensus_params_changed = 11; + + // Merkle root of the results from executing prev block + bytes last_results_hash = 12; + + // the latest AppHash we've received from calling abci.Commit() + bytes app_hash = 13; +} diff --git a/proto/cometbft/state/v1beta2/types.proto b/proto/cometbft/state/v1beta2/types.proto new file mode 100644 index 00000000000..fba19839616 --- /dev/null +++ b/proto/cometbft/state/v1beta2/types.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package cometbft.state.v1beta2; + +import "cometbft/abci/v1beta2/types.proto"; +import "cometbft/state/v1beta1/types.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "cometbft/types/v1beta1/validator.proto"; +import "cometbft/types/v1beta2/params.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/state/v1beta2"; + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. +message ABCIResponses { + repeated cometbft.abci.v1beta2.ResponseDeliverTx deliver_txs = 1; + cometbft.abci.v1beta2.ResponseEndBlock end_block = 2; + cometbft.abci.v1beta2.ResponseBeginBlock begin_block = 3; +} + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +message ConsensusParamsInfo { + cometbft.types.v1beta2.ConsensusParams consensus_params = 1 [(gogoproto.nullable) = false]; + int64 last_height_changed = 2; +} + +// ABCIResponsesInfo retains the responses of the ABCI calls during block processing. +message ABCIResponsesInfo { + ABCIResponses abci_responses = 1; + int64 height = 2; +} + +// State represents the state of the blockchain. +message State { + v1beta1.Version version = 1 [(gogoproto.nullable) = false]; + + // immutable + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 initial_height = 14; + + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + int64 last_block_height = 3; + cometbft.types.v1beta1.BlockID last_block_id = 4 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "LastBlockID" + ]; + google.protobuf.Timestamp last_block_time = 5 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + cometbft.types.v1beta1.ValidatorSet next_validators = 6; + cometbft.types.v1beta1.ValidatorSet validators = 7; + cometbft.types.v1beta1.ValidatorSet last_validators = 8; + int64 last_height_validators_changed = 9; + + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. 
+  cometbft.types.v1beta2.ConsensusParams consensus_params = 10 [(gogoproto.nullable) = false];
+  int64 last_height_consensus_params_changed = 11;
+
+  // Merkle root of the results from executing prev block
+  bytes last_results_hash = 12;
+
+  // the latest AppHash we've received from calling abci.Commit()
+  bytes app_hash = 13;
+}
diff --git a/proto/cometbft/state/v1beta3/types.proto b/proto/cometbft/state/v1beta3/types.proto
new file mode 100644
index 00000000000..a5d3ff14ba9
--- /dev/null
+++ b/proto/cometbft/state/v1beta3/types.proto
@@ -0,0 +1,99 @@
+syntax = "proto3";
+package cometbft.state.v1beta3;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/state/v1beta3";
+
+import "cometbft/state/v1beta1/types.proto";
+
+import "cometbft/abci/v1beta1/types.proto";
+import "cometbft/abci/v1beta2/types.proto";
+import "cometbft/abci/v1beta3/types.proto";
+import "cometbft/types/v1beta1/types.proto";
+import "cometbft/types/v1beta1/validator.proto";
+import "cometbft/types/v1/params.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+// LegacyABCIResponses retains the responses
+// of the legacy ABCI calls during block processing.
+// Note: ResponseDeliverTx was renamed to ExecTxResult, but they are semantically the same.
+// Kept for backwards compatibility for versions prior to v0.38.
+message LegacyABCIResponses {
+  repeated cometbft.abci.v1beta3.ExecTxResult deliver_txs = 1;
+  ResponseEndBlock end_block = 2;
+  ResponseBeginBlock begin_block = 3;
+}
+
+// ResponseBeginBlock is kept for backward compatibility for versions prior to v0.38,
+// as it was then defined in the cometbft.abci packages.
+message ResponseBeginBlock {
+  repeated cometbft.abci.v1beta2.Event events = 1
+      [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
+}
+
+// ResponseEndBlock is kept for backward compatibility for versions prior to v0.38,
+// its earlier revisions were defined in the cometbft.abci packages.
+// It uses an updated definition for the consensus_param_updates field to keep the
+// generated data types interoperable with the latest protocol.
+message ResponseEndBlock {
+  repeated cometbft.abci.v1beta1.ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
+  cometbft.types.v1.ConsensusParams consensus_param_updates = 2;
+  repeated cometbft.abci.v1beta2.Event events = 3
+      [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
+}
+
+// ConsensusParamsInfo represents the latest consensus params, or the last height it changed
+message ConsensusParamsInfo {
+  cometbft.types.v1.ConsensusParams consensus_params = 1 [(gogoproto.nullable) = false];
+  int64 last_height_changed = 2;
+}
+
+// ABCIResponsesInfo retains the responses of the ABCI calls during block processing.
+message ABCIResponsesInfo {
+  // Retains the responses of the legacy ABCI calls during block processing.
+  LegacyABCIResponses legacy_abci_responses = 1;
+  int64 height = 2;
+  cometbft.abci.v1beta3.ResponseFinalizeBlock response_finalize_block = 3;
+}
+
+// State represents the state of the blockchain.
+message State {
+  v1beta1.Version version = 1 [(gogoproto.nullable) = false];
+
+  // immutable
+  string chain_id = 2 [(gogoproto.customname) = "ChainID"];
+  int64 initial_height = 14;
+
+  // LastBlockHeight=0 at genesis (ie.
block(H=0) does not exist) + int64 last_block_height = 3; + cometbft.types.v1beta1.BlockID last_block_id = 4 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "LastBlockID" + ]; + google.protobuf.Timestamp last_block_time = 5 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + cometbft.types.v1beta1.ValidatorSet next_validators = 6; + cometbft.types.v1beta1.ValidatorSet validators = 7; + cometbft.types.v1beta1.ValidatorSet last_validators = 8; + int64 last_height_validators_changed = 9; + + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. + cometbft.types.v1.ConsensusParams consensus_params = 10 [(gogoproto.nullable) = false]; + int64 last_height_consensus_params_changed = 11; + + // Merkle root of the results from executing prev block + bytes last_results_hash = 12; + + // the latest AppHash we've received from calling abci.Commit() + bytes app_hash = 13; +} diff --git a/proto/cometbft/statesync/v1/types.proto b/proto/cometbft/statesync/v1/types.proto new file mode 100644 index 00000000000..02a301dcbb4 --- /dev/null +++ b/proto/cometbft/statesync/v1/types.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; +package cometbft.statesync.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/statesync/v1"; + +// Message is the top-level message type for the statesync service. +message Message { + // The message type. + oneof sum { + SnapshotsRequest snapshots_request = 1; + SnapshotsResponse snapshots_response = 2; + ChunkRequest chunk_request = 3; + ChunkResponse chunk_response = 4; + } +} + +// SnapshotsRequest is sent to request a snapshot. +message SnapshotsRequest {} + +// SnapshotsResponse contains the snapshot metadata. +message SnapshotsResponse { + uint64 height = 1; + uint32 format = 2; + uint32 chunks = 3; + bytes hash = 4; + bytes metadata = 5; +} + +// ChunkRequest is sent to request a chunk. +message ChunkRequest { + uint64 height = 1; + uint32 format = 2; + uint32 index = 3; +} + +// ChunkResponse contains a chunk of the snapshot. +message ChunkResponse { + uint64 height = 1; + uint32 format = 2; + uint32 index = 3; + bytes chunk = 4; + bool missing = 5; +} diff --git a/proto/cometbft/store/v1/types.proto b/proto/cometbft/store/v1/types.proto new file mode 100644 index 00000000000..98f1df43f4b --- /dev/null +++ b/proto/cometbft/store/v1/types.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package cometbft.store.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/store/v1"; + +// BlockStoreState represents the state of the block store. 
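
To illustrate how the statesync Message envelope above is used on the wire: each concrete message is wrapped in the `sum` oneof before being sent to a peer. A hedged sketch of building and encoding a ChunkRequest with gogoproto-generated types (the import path follows the go_package above; the oneof wrapper type name and the Marshal method assume standard gogo codegen):

```go
package main

import (
	"fmt"
	"log"

	statesyncv1 "github.com/cometbft/cometbft/api/cometbft/statesync/v1"
)

func main() {
	// Ask a peer for chunk 3 of the snapshot taken at height 5000, format 1.
	msg := statesyncv1.Message{
		Sum: &statesyncv1.Message_ChunkRequest{
			ChunkRequest: &statesyncv1.ChunkRequest{
				Height: 5000,
				Format: 1,
				Index:  3,
			},
		},
	}

	// gogoproto-generated messages carry a Marshal method; hedged assumption.
	bz, err := msg.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded ChunkRequest envelope: %d bytes\n", len(bz))
}
```
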
+message BlockStoreState { + int64 base = 1; + int64 height = 2; +} diff --git a/proto/cometbft/types/v1/block.proto b/proto/cometbft/types/v1/block.proto new file mode 100644 index 00000000000..6748263fd01 --- /dev/null +++ b/proto/cometbft/types/v1/block.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package cometbft.types.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1"; + +import "cometbft/types/v1/types.proto"; +import "cometbft/types/v1/evidence.proto"; +import "gogoproto/gogo.proto"; + +// Block defines the structure of a block in the CometBFT blockchain. +message Block { + Header header = 1 [(gogoproto.nullable) = false]; + Data data = 2 [(gogoproto.nullable) = false]; + EvidenceList evidence = 3 [(gogoproto.nullable) = false]; + Commit last_commit = 4; +} diff --git a/proto/cometbft/types/v1/canonical.proto b/proto/cometbft/types/v1/canonical.proto new file mode 100644 index 00000000000..26a07c812b5 --- /dev/null +++ b/proto/cometbft/types/v1/canonical.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; +package cometbft.types.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1"; + +import "gogoproto/gogo.proto"; +import "cometbft/types/v1/types.proto"; +import "google/protobuf/timestamp.proto"; + +// CanonicalBlockID is a canonical representation of a BlockID, which gets +// serialized and signed. +message CanonicalBlockID { + bytes hash = 1; + CanonicalPartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; +} + +// CanonicalPartSetHeader is a canonical representation of a PartSetHeader, +// which gets serialized and signed. +message CanonicalPartSetHeader { + uint32 total = 1; + bytes hash = 2; +} + +// CanonicalProposal is a canonical representation of a Proposal, which gets +// serialized and signed. +message CanonicalProposal { + SignedMsgType type = 1; // type alias for byte + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + int64 pol_round = 4 [(gogoproto.customname) = "POLRound"]; + CanonicalBlockID block_id = 5 [(gogoproto.customname) = "BlockID"]; + google.protobuf.Timestamp timestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 7 [(gogoproto.customname) = "ChainID"]; +} + +// CanonicalVote is a canonical representation of a Vote, which gets +// serialized and signed. +message CanonicalVote { + SignedMsgType type = 1; // type alias for byte + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + CanonicalBlockID block_id = 4 [(gogoproto.customname) = "BlockID"]; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 6 [(gogoproto.customname) = "ChainID"]; +} + +// CanonicalVoteExtension provides us a way to serialize a vote extension from +// a particular validator such that we can sign over those serialized bytes. 
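
The point of the fixed-width `sfixed64` fields in the canonical messages above is that each message has exactly one serialized form, so every validator signs byte-identical payloads. A rough sketch of deriving vote sign bytes from a CanonicalVote (CometBFT proper additionally length-prefixes the encoding via its protoio helpers; the import path and generated field names here follow the go_package and gogoproto options above, and should be treated as assumptions):

```go
package main

import (
	"fmt"
	"log"

	"github.com/cosmos/gogoproto/proto"

	typesv1 "github.com/cometbft/cometbft/api/cometbft/types/v1"
)

func main() {
	vote := &typesv1.CanonicalVote{
		Type:    typesv1.PrecommitType, // enum constant per the customname options above
		Height:  100,                   // sfixed64: fixed-size encoding
		Round:   0,                     // sfixed64: fixed-size encoding
		ChainID: "test-chain",
	}

	// Because height and round use fixed-size encoding, this byte string is
	// deterministic across implementations; these bytes (length-prefixed in
	// CometBFT proper) are what a validator signs.
	bz, err := proto.Marshal(vote)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sign bytes: %X\n", bz)
}
```
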
+message CanonicalVoteExtension {
+  bytes extension = 1;
+  sfixed64 height = 2;
+  sfixed64 round = 3;
+  string chain_id = 4;
+}
diff --git a/proto/cometbft/types/v1/events.proto b/proto/cometbft/types/v1/events.proto
new file mode 100644
index 00000000000..21e265664b8
--- /dev/null
+++ b/proto/cometbft/types/v1/events.proto
@@ -0,0 +1,11 @@
+syntax = "proto3";
+package cometbft.types.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1";
+
+// EventDataRoundState is emitted with each new round step.
+message EventDataRoundState {
+  int64 height = 1;
+  int32 round = 2;
+  string step = 3;
+}
diff --git a/proto/cometbft/types/v1/evidence.proto b/proto/cometbft/types/v1/evidence.proto
new file mode 100644
index 00000000000..a77cea8d8f8
--- /dev/null
+++ b/proto/cometbft/types/v1/evidence.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+package cometbft.types.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1";
+
+import "cometbft/types/v1/types.proto";
+import "cometbft/types/v1/validator.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+// Evidence is a generic type for wrapping evidence of misbehavior by a validator.
+message Evidence {
+  // The type of evidence.
+  oneof sum {
+    DuplicateVoteEvidence duplicate_vote_evidence = 1;
+    LightClientAttackEvidence light_client_attack_evidence = 2;
+  }
+}
+
+// DuplicateVoteEvidence contains evidence that a validator signed two conflicting votes.
+message DuplicateVoteEvidence {
+  Vote vote_a = 1;
+  Vote vote_b = 2;
+  int64 total_voting_power = 3;
+  int64 validator_power = 4;
+  google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+}
+
+// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client.
+message LightClientAttackEvidence {
+  LightBlock conflicting_block = 1;
+  int64 common_height = 2;
+  repeated Validator byzantine_validators = 3;
+  int64 total_voting_power = 4;
+  google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+}
+
+// EvidenceList is a list of evidence.
+message EvidenceList {
+  repeated Evidence evidence = 1 [(gogoproto.nullable) = false];
+}
diff --git a/proto/cometbft/types/v1/params.proto b/proto/cometbft/types/v1/params.proto
new file mode 100644
index 00000000000..2f6194ba7b9
--- /dev/null
+++ b/proto/cometbft/types/v1/params.proto
@@ -0,0 +1,149 @@
+syntax = "proto3";
+package cometbft.types.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/wrappers.proto";
+
+option (gogoproto.equal_all) = true;
+
+// ConsensusParams contains consensus critical parameters that determine the
+// validity of blocks.
+message ConsensusParams {
+  BlockParams block = 1;
+  EvidenceParams evidence = 2;
+  ValidatorParams validator = 3;
+  VersionParams version = 4;
+  ABCIParams abci = 5 [deprecated = true];  // Use FeatureParams.vote_extensions_enable_height instead
+  SynchronyParams synchrony = 6;
+  FeatureParams feature = 7;
+}
+
+// BlockParams define limits on the block size and gas.
+message BlockParams {
+  // Maximum size of a block, in bytes.
+  //
+  // Must be greater or equal to -1 and cannot be greater than the hard-coded
+  // maximum block size, which is 100MB.
+  //
+  // If set to -1, the limit is the hard-coded maximum block size.
+  int64 max_bytes = 1;
+  // Maximum gas wanted by transactions included in a block.
+  //
+  // Must be greater or equal to -1. If set to -1, no limit is enforced.
+  int64 max_gas = 2;
+
+  reserved 3;  // was TimeIotaMs see https://github.com/tendermint/tendermint/pull/5792
+}
+
+// EvidenceParams determine the validity of evidence of Byzantine behavior.
+message EvidenceParams {
+  // Maximum age of evidence, in blocks.
+  //
+  // The recommended formula for calculating it is max_age_duration / {average
+  // block time}.
+  int64 max_age_num_blocks = 1;
+
+  // Maximum age of evidence, in time.
+  //
+  // The recommended value should correspond to the application's
+  // "unbonding period" or other similar mechanism for handling
+  // Nothing-At-Stake attacks.
+  // See: https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed.
+  google.protobuf.Duration max_age_duration = 2
+      [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+
+  // Maximum size in bytes of evidence allowed to be included in a block.
+  //
+  // It should fall comfortably under the maximum size of a block.
+  int64 max_bytes = 3;
+}
+
+// ValidatorParams restrict the public key types validators can use.
+//
+// NOTE: uses ABCI public key naming, not Amino names.
+message ValidatorParams {
+  option (gogoproto.populate) = true;
+  option (gogoproto.equal) = true;
+
+  repeated string pub_key_types = 1;
+}
+
+// VersionParams contain the version of specific components of CometBFT.
+message VersionParams {
+  option (gogoproto.populate) = true;
+  option (gogoproto.equal) = true;
+
+  // The ABCI application version.
+  //
+  // It was named app_version in CometBFT 0.34.
+  uint64 app = 1;
+}
+
+// HashedParams is a subset of ConsensusParams.
+//
+// It is hashed into the Header.ConsensusHash.
+message HashedParams {
+  int64 block_max_bytes = 1;
+  int64 block_max_gas = 2;
+}
+
+// SynchronyParams determine the validity of block timestamps.
+//
+// These parameters are part of the Proposer-Based Timestamps (PBTS) algorithm.
+// For more information on the relationship of the synchrony parameters to
+// block timestamp validity, refer to the PBTS specification:
+// https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/README.md
+message SynchronyParams {
+  // Bound for how skewed a proposer's clock may be from any validator on the
+  // network while still producing valid proposals.
+  google.protobuf.Duration precision = 1
+      [(gogoproto.stdduration) = true];
+  // Bound for how long a proposal message may take to reach all validators on
+  // a network and still be considered valid.
+  google.protobuf.Duration message_delay = 2
+      [(gogoproto.stdduration) = true];
+}
+
+// FeatureParams configure the height from which features of CometBFT are enabled.
+message FeatureParams {
+  // Height at which vote extensions will be enabled.
+  //
+  // A value of 0 means vote extensions are disabled. A value > 0 denotes
+  // the height at which vote extensions will be (or have been) enabled.
+  //
+  // During the specified height, and for all subsequent heights, precommit
+  // messages that do not contain valid extension data will be considered
+  // invalid. Prior to this height, or when this height is set to 0, vote
+  // extensions will not be used or accepted by validators on the network.
+  //
+  // Once enabled, vote extensions will be created by the application in
+  // ExtendVote, validated by the application in VerifyVoteExtension, and
+  // used by the application in PrepareProposal, when proposing the next block.
+  //
+  // Cannot be set to a height lower than or equal to the current blockchain height.
+  google.protobuf.Int64Value vote_extensions_enable_height = 1 [(gogoproto.nullable) = true];
+
+  // Height at which Proposer-Based Timestamps (PBTS) will be enabled.
+  //
+  // A value of 0 means PBTS is disabled. A value > 0 denotes the height at
+  // which PBTS will be (or has been) enabled.
+  //
+  // From the specified height, and for all subsequent heights, the PBTS
+  // algorithm will be used to produce and validate block timestamps. Prior to
+  // this height, or when this height is set to 0, the legacy BFT Time
+  // algorithm is used to produce and validate timestamps.
+  //
+  // Cannot be set to a height lower than or equal to the current blockchain height.
+  google.protobuf.Int64Value pbts_enable_height = 2 [(gogoproto.nullable) = true];
+}
+
+// ABCIParams is deprecated; its contents have moved to FeatureParams.
+message ABCIParams {
+  option deprecated = true;
+  // vote_extensions_enable_height has been deprecated.
+  // Instead, use FeatureParams.vote_extensions_enable_height.
+  int64 vote_extensions_enable_height = 1;
+}
diff --git a/proto/cometbft/types/v1/types.proto b/proto/cometbft/types/v1/types.proto
new file mode 100644
index 00000000000..0a9555ad2a0
--- /dev/null
+++ b/proto/cometbft/types/v1/types.proto
@@ -0,0 +1,184 @@
+syntax = "proto3";
+package cometbft.types.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1";
+
+import "cometbft/crypto/v1/proof.proto";
+import "cometbft/types/v1/validator.proto";
+import "cometbft/version/v1/types.proto";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+// SignedMsgType is a type of signed message in the consensus.
+enum SignedMsgType {
+  option (gogoproto.goproto_enum_stringer) = true;
+  option (gogoproto.goproto_enum_prefix) = false;
+
+  // Unknown
+  SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"];
+  // Prevote
+  SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"];
+  // Precommit
+  SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"];
+  // Proposal
+  SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"];
+}
+
+// Header of the parts set for a block.
+message PartSetHeader {
+  uint32 total = 1;
+  bytes hash = 2;
+}
+
+// Part of the block.
+message Part {
+  uint32 index = 1;
+  bytes bytes = 2;
+  cometbft.crypto.v1.Proof proof = 3 [(gogoproto.nullable) = false];
+}
+
+// BlockID defines the unique ID of a block as its hash and its `PartSetHeader`.
+message BlockID {
+  bytes hash = 1;
+  PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false];
+}
+
+// Header defines the structure of a block header.
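
The enable-height convention used by FeatureParams above (0 = disabled, any positive value = active from that height onward) reduces to a two-line check. A minimal sketch with hypothetical names:

```go
package main

import "fmt"

// featureEnabledAt reports whether a height-gated feature (vote extensions
// or PBTS) is active at height h: 0 means disabled, and any positive
// enableHeight means enabled from that height on.
func featureEnabledAt(enableHeight, h int64) bool {
	return enableHeight > 0 && h >= enableHeight
}

func main() {
	fmt.Println(featureEnabledAt(0, 50))    // false: feature disabled
	fmt.Println(featureEnabledAt(100, 99))  // false: before the enable height
	fmt.Println(featureEnabledAt(100, 100)) // true: at and after the enable height
}
```
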
+message Header { + // basic block info + cometbft.version.v1.Consensus version = 1 [(gogoproto.nullable) = false]; + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 height = 3; + google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // prev block info + BlockID last_block_id = 5 [(gogoproto.nullable) = false]; + + // hashes of block data + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions + + // hashes from the app output from the prev block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = 12; // root hash of all results from the txs from the previous block + + // consensus info + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block +} + +// Data contains the set of transactions included in the block +message Data { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + repeated bytes txs = 1; +} + +// Vote represents a prevote or precommit vote from validators for +// consensus. +message Vote { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + BlockID block_id = 4 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes validator_address = 6; + int32 validator_index = 7; + // Vote signature by the validator if they participated in consensus for the + // associated block. + bytes signature = 8; + // Vote extension provided by the application. Only valid for precommit + // messages. + bytes extension = 9; + // Vote extension signature by the validator if they participated in + // consensus for the associated block. + // Only valid for precommit messages. + bytes extension_signature = 10; +} + +// Commit contains the evidence that a block was committed by a set of validators. +message Commit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; +} + +// CommitSig is a part of the Vote included in a Commit. +message CommitSig { + BlockIDFlag block_id_flag = 1; + bytes validator_address = 2; + google.protobuf.Timestamp timestamp = 3 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 4; +} + +// ExtendedCommit is a Commit with ExtendedCommitSig. +message ExtendedCommit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated ExtendedCommitSig extended_signatures = 4 [(gogoproto.nullable) = false]; +} + +// ExtendedCommitSig retains all the same fields as CommitSig but adds vote +// extension-related fields. We use two signatures to ensure backwards compatibility. 
+// That is, the digest of the original signature remains the same as in prior versions.
+message ExtendedCommitSig {
+  BlockIDFlag block_id_flag = 1;
+  bytes validator_address = 2;
+  google.protobuf.Timestamp timestamp = 3
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  bytes signature = 4;
+  // Vote extension data
+  bytes extension = 5;
+  // Vote extension signature
+  bytes extension_signature = 6;
+}
+
+// Block proposal.
+message Proposal {
+  SignedMsgType type = 1;
+  int64 height = 2;
+  int32 round = 3;
+  int32 pol_round = 4;
+  BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false];
+  google.protobuf.Timestamp timestamp = 6
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  bytes signature = 7;
+}
+
+// SignedHeader contains a Header(H) and Commit(H+1) with signatures of validators who signed it.
+message SignedHeader {
+  Header header = 1;
+  Commit commit = 2;
+}
+
+// LightBlock is a combination of SignedHeader and ValidatorSet. It is used by light clients.
+message LightBlock {
+  SignedHeader signed_header = 1;
+  ValidatorSet validator_set = 2;
+}
+
+// BlockMeta contains meta information about a block.
+message BlockMeta {
+  BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false];
+  int64 block_size = 2;
+  Header header = 3 [(gogoproto.nullable) = false];
+  int64 num_txs = 4;
+}
+
+// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
+message TxProof {
+  bytes root_hash = 1;
+  bytes data = 2;
+  cometbft.crypto.v1.Proof proof = 3;
+}
diff --git a/proto/cometbft/types/v1/validator.proto b/proto/cometbft/types/v1/validator.proto
new file mode 100644
index 00000000000..e262f00d572
--- /dev/null
+++ b/proto/cometbft/types/v1/validator.proto
@@ -0,0 +1,47 @@
+syntax = "proto3";
+package cometbft.types.v1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1";
+
+import "cometbft/crypto/v1/keys.proto";
+import "gogoproto/gogo.proto";
+
+// BlockIdFlag indicates which BlockID the signature is for
+enum BlockIDFlag {
+  option (gogoproto.goproto_enum_stringer) = true;
+  option (gogoproto.goproto_enum_prefix) = false;
+
+  // Indicates an error condition
+  BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"];
+  // The vote was not received
+  BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"];
+  // Voted for the block that received the majority
+  BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"];
+  // Voted for nil
+  BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"];
+}
+
+// ValidatorSet defines a set of validators.
+message ValidatorSet {
+  repeated Validator validators = 1;
+  Validator proposer = 2;
+  int64 total_voting_power = 3;
+}
+
+// Validator represents a node participating in the consensus protocol.
+message Validator {
+  bytes address = 1;
+  cometbft.crypto.v1.PublicKey pub_key = 2 [deprecated = true];
+  int64 voting_power = 3;
+  int64 proposer_priority = 4;
+  bytes pub_key_bytes = 5;
+  string pub_key_type = 6;
+}
+
+// SimpleValidator is a Validator, which is serialized and hashed in consensus.
+// Address is removed because it's redundant with the pubkey.
+// Proposer priority is removed because it changes every round.
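
To show how Commit, CommitSig, and BlockIDFlag above fit together: a block counts as committed once the validators whose signatures are flagged BLOCK_ID_FLAG_COMMIT hold more than two-thirds of the total voting power. A hedged tally sketch (generated field names follow standard gogo codegen for the messages above; the index alignment of signatures with the validator set matches how CometBFT uses these types, but treat the details as assumptions):

```go
package main

import (
	"fmt"

	typesv1 "github.com/cometbft/cometbft/api/cometbft/types/v1"
)

// committedPower sums the voting power behind a Commit. Only signatures
// flagged BLOCK_ID_FLAG_COMMIT count toward the committed block; the
// signatures slice is index-aligned with the validator set, which is why
// CommitSig does not need to repeat each validator's power.
func committedPower(commit *typesv1.Commit, vals *typesv1.ValidatorSet) int64 {
	var power int64
	for i, sig := range commit.Signatures {
		if i < len(vals.Validators) && sig.BlockIdFlag == typesv1.BlockIDFlagCommit {
			power += vals.Validators[i].VotingPower
		}
	}
	return power
}

func main() {
	// A block is considered committed once committedPower exceeds 2/3 of
	// ValidatorSet.TotalVotingPower.
	fmt.Println(committedPower(&typesv1.Commit{}, &typesv1.ValidatorSet{})) // 0 for empty inputs
}
```
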
+message SimpleValidator { + cometbft.crypto.v1.PublicKey pub_key = 1; + int64 voting_power = 2; +} diff --git a/proto/cometbft/types/v1beta1/block.proto b/proto/cometbft/types/v1beta1/block.proto new file mode 100644 index 00000000000..8ef9154d394 --- /dev/null +++ b/proto/cometbft/types/v1beta1/block.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package cometbft.types.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1"; + +import "gogoproto/gogo.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "cometbft/types/v1beta1/evidence.proto"; + +// Block defines the structure of a block in the CometBFT blockchain. +message Block { + Header header = 1 [(gogoproto.nullable) = false]; + Data data = 2 [(gogoproto.nullable) = false]; + cometbft.types.v1beta1.EvidenceList evidence = 3 [(gogoproto.nullable) = false]; + Commit last_commit = 4; +} diff --git a/proto/cometbft/types/v1beta1/canonical.proto b/proto/cometbft/types/v1beta1/canonical.proto new file mode 100644 index 00000000000..0176cc5febd --- /dev/null +++ b/proto/cometbft/types/v1beta1/canonical.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; +package cometbft.types.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1"; + +import "gogoproto/gogo.proto"; +import "cometbft/types/v1beta1/types.proto"; +import "google/protobuf/timestamp.proto"; + +// CanonicalBlockID is a canonical representation of a BlockID, which gets +// serialized and signed. +message CanonicalBlockID { + bytes hash = 1; + CanonicalPartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; +} + +// CanonicalPartSetHeader is a canonical representation of a PartSetHeader, +// which gets serialized and signed. +message CanonicalPartSetHeader { + uint32 total = 1; + bytes hash = 2; +} + +// CanonicalProposal is a canonical representation of a Proposal, which gets +// serialized and signed. +message CanonicalProposal { + SignedMsgType type = 1; // type alias for byte + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + int64 pol_round = 4 [(gogoproto.customname) = "POLRound"]; + CanonicalBlockID block_id = 5 [(gogoproto.customname) = "BlockID"]; + google.protobuf.Timestamp timestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 7 [(gogoproto.customname) = "ChainID"]; +} + +// CanonicalVote is a canonical representation of a Vote, which gets +// serialized and signed. +message CanonicalVote { + SignedMsgType type = 1; // type alias for byte + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + CanonicalBlockID block_id = 4 [(gogoproto.customname) = "BlockID"]; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 6 [(gogoproto.customname) = "ChainID"]; +} diff --git a/proto/cometbft/types/v1beta1/events.proto b/proto/cometbft/types/v1beta1/events.proto new file mode 100644 index 00000000000..9f09c65ee18 --- /dev/null +++ b/proto/cometbft/types/v1beta1/events.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package cometbft.types.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1"; + +// EventDataRoundState is emitted with each new round step. 
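
On the note above that the validator address is redundant with the pubkey: for ed25519 keys, CometBFT derives the address as the first 20 bytes of the SHA-256 hash of the raw public key, so anyone holding the key can recompute it. A standalone sketch of that derivation:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"log"
)

// addressFromPubKey derives a validator address from an ed25519 public key:
// the first 20 bytes of SHA-256(pubkey). This is why SimpleValidator can
// drop the address field without losing any information.
func addressFromPubKey(pub ed25519.PublicKey) []byte {
	h := sha256.Sum256(pub)
	return h[:20]
}

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("address: %X\n", addressFromPubKey(pub))
}
```
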
+message EventDataRoundState {
+  int64 height = 1;
+  int32 round = 2;
+  string step = 3;
+}
diff --git a/proto/cometbft/types/v1beta1/evidence.proto b/proto/cometbft/types/v1beta1/evidence.proto
new file mode 100644
index 00000000000..d4573f74e87
--- /dev/null
+++ b/proto/cometbft/types/v1beta1/evidence.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+package cometbft.types.v1beta1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "cometbft/types/v1beta1/types.proto";
+import "cometbft/types/v1beta1/validator.proto";
+
+// Evidence is a generic type for wrapping evidence of misbehavior by a validator.
+message Evidence {
+  // The type of evidence.
+  oneof sum {
+    DuplicateVoteEvidence duplicate_vote_evidence = 1;
+    LightClientAttackEvidence light_client_attack_evidence = 2;
+  }
+}
+
+// DuplicateVoteEvidence contains evidence that a validator signed two conflicting votes.
+message DuplicateVoteEvidence {
+  cometbft.types.v1beta1.Vote vote_a = 1;
+  cometbft.types.v1beta1.Vote vote_b = 2;
+  int64 total_voting_power = 3;
+  int64 validator_power = 4;
+  google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+}
+
+// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client.
+message LightClientAttackEvidence {
+  cometbft.types.v1beta1.LightBlock conflicting_block = 1;
+  int64 common_height = 2;
+  repeated cometbft.types.v1beta1.Validator byzantine_validators = 3;
+  int64 total_voting_power = 4;
+  google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+}
+
+// EvidenceList is a list of evidence.
+message EvidenceList {
+  repeated Evidence evidence = 1 [(gogoproto.nullable) = false];
+}
diff --git a/proto/cometbft/types/v1beta1/params.proto b/proto/cometbft/types/v1beta1/params.proto
new file mode 100644
index 00000000000..a2545fa082e
--- /dev/null
+++ b/proto/cometbft/types/v1beta1/params.proto
@@ -0,0 +1,81 @@
+syntax = "proto3";
+package cometbft.types.v1beta1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/duration.proto";
+
+option (gogoproto.equal_all) = true;
+
+// ConsensusParams contains consensus critical parameters that determine the
+// validity of blocks.
+message ConsensusParams {
+  BlockParams block = 1 [(gogoproto.nullable) = false];
+  EvidenceParams evidence = 2 [(gogoproto.nullable) = false];
+  ValidatorParams validator = 3 [(gogoproto.nullable) = false];
+  VersionParams version = 4 [(gogoproto.nullable) = false];
+}
+
+// BlockParams contains limits on the block size.
+message BlockParams {
+  // Max block size, in bytes.
+  // Note: must be greater than 0
+  int64 max_bytes = 1;
+  // Max gas per block.
+  // Note: must be greater or equal to -1
+  int64 max_gas = 2;
+  // Minimum time increment between consecutive blocks (in milliseconds). If the
+  // block header timestamp is ahead of the system clock, decrease this value.
+  //
+  // Not exposed to the application.
+  int64 time_iota_ms = 3;
+}
+
+// EvidenceParams determine how we handle evidence of malfeasance.
+message EvidenceParams {
+  // Max age of evidence, in blocks.
+  //
+  // The basic formula for calculating this is: MaxAgeDuration / {average block
+  // time}.
+  int64 max_age_num_blocks = 1;
+
+  // Max age of evidence, in time.
+  //
+  // It should correspond with an app's "unbonding period" or other similar
+  // mechanism for handling [Nothing-At-Stake
+  // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed).
+  google.protobuf.Duration max_age_duration = 2
+      [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+
+  // This sets the maximum size of total evidence in bytes that can be committed in a single block,
+  // and should fall comfortably under the max block bytes.
+  // Default is 1048576 (1 MB).
+  int64 max_bytes = 3;
+}
+
+// ValidatorParams restrict the public key types validators can use.
+// NOTE: uses ABCI pubkey naming, not Amino names.
+message ValidatorParams {
+  option (gogoproto.populate) = true;
+  option (gogoproto.equal) = true;
+
+  repeated string pub_key_types = 1;
+}
+
+// VersionParams contains the ABCI application version.
+message VersionParams {
+  option (gogoproto.populate) = true;
+  option (gogoproto.equal) = true;
+
+  // Was named app_version in Tendermint 0.34
+  uint64 app = 1;
+}
+
+// HashedParams is a subset of ConsensusParams.
+//
+// It is hashed into the Header.ConsensusHash.
+message HashedParams {
+  int64 block_max_bytes = 1;
+  int64 block_max_gas = 2;
+}
diff --git a/proto/cometbft/types/v1beta1/types.proto b/proto/cometbft/types/v1beta1/types.proto
new file mode 100644
index 00000000000..6b578612402
--- /dev/null
+++ b/proto/cometbft/types/v1beta1/types.proto
@@ -0,0 +1,152 @@
+syntax = "proto3";
+package cometbft.types.v1beta1;
+
+option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "cometbft/crypto/v1/proof.proto";
+import "cometbft/version/v1/types.proto";
+import "cometbft/types/v1beta1/validator.proto";
+
+// SignedMsgType is a type of signed message in the consensus.
+enum SignedMsgType {
+  option (gogoproto.goproto_enum_stringer) = true;
+  option (gogoproto.goproto_enum_prefix) = false;
+
+  // Unknown
+  SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"];
+  // Prevote
+  SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"];
+  // Precommit
+  SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"];
+  // Proposal
+  SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"];
+}
+
+// Header of the parts set for a block.
+message PartSetHeader {
+  uint32 total = 1;
+  bytes hash = 2;
+}
+
+// Part of the block.
+message Part {
+  uint32 index = 1;
+  bytes bytes = 2;
+  cometbft.crypto.v1.Proof proof = 3 [(gogoproto.nullable) = false];
+}
+
+// BlockID defines the unique ID of a block as its hash and its `PartSetHeader`.
+message BlockID {
+  bytes hash = 1;
+  PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false];
+}
+
+// --------------------------------
+
+// Header defines the structure of a block header.
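
A quick worked instance of the MaxAgeDuration / {average block time} formula from EvidenceParams above (the 21-day unbonding period and 6-second block time are illustrative numbers, not defaults):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical application parameters.
	maxAgeDuration := 21 * 24 * time.Hour // e.g. a 21-day unbonding period
	avgBlockTime := 6 * time.Second

	// max_age_num_blocks should roughly equal max_age_duration divided by the
	// average block time, so evidence expires at about the same point whether
	// its age is measured in blocks or in time.
	maxAgeNumBlocks := int64(maxAgeDuration / avgBlockTime)
	fmt.Println(maxAgeNumBlocks) // 302400
}
```
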
+message Header { + // basic block info + cometbft.version.v1.Consensus version = 1 [(gogoproto.nullable) = false]; + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 height = 3; + google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // prev block info + BlockID last_block_id = 5 [(gogoproto.nullable) = false]; + + // hashes of block data + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions + + // hashes from the app output from the prev block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = 12; // root hash of all results from the txs from the previous block + + // consensus info + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block +} + +// Data contains the set of transactions included in the block +message Data { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + repeated bytes txs = 1; +} + +// Vote represents a prevote or precommit vote from validators for +// consensus. +message Vote { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + BlockID block_id = 4 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes validator_address = 6; + int32 validator_index = 7; + bytes signature = 8; +} + +// Commit contains the evidence that a block was committed by a set of validators. +message Commit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; +} + +// CommitSig is a part of the Vote included in a Commit. +message CommitSig { + BlockIDFlag block_id_flag = 1; + bytes validator_address = 2; + google.protobuf.Timestamp timestamp = 3 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 4; +} + +// Block proposal. +message Proposal { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + int32 pol_round = 4; + BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 6 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 7; +} + +// SignedHeader contains a Header(H) and Commit(H+1) with signatures of validators who signed it. +message SignedHeader { + Header header = 1; + Commit commit = 2; +} + +// LightBlock is a combination of SignedHeader and ValidatorSet. It is used by light clients. +message LightBlock { + SignedHeader signed_header = 1; + cometbft.types.v1beta1.ValidatorSet validator_set = 2; +} + +// BlockMeta contains meta information about a block. +message BlockMeta { + BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + int64 block_size = 2; + Header header = 3 [(gogoproto.nullable) = false]; + int64 num_txs = 4; +} + +// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. 
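
A hedged sketch of how the TxProof message below would be checked against a block header: the proof's root must match the header's data_hash, and the Merkle audit path must connect the raw transaction bytes to that root. The merkle package and Verify signature follow CometBFT's crypto/merkle, but treat the exact API as an assumption:

```go
package txproof

import (
	"bytes"
	"errors"

	"github.com/cometbft/cometbft/crypto/merkle"
)

// VerifyTxInclusion sketches TxProof verification: the supplied root hash
// must equal the header's data_hash, and the Merkle proof must link the raw
// transaction bytes to that root.
func VerifyTxInclusion(headerDataHash, rootHash, tx []byte, proof *merkle.Proof) error {
	if !bytes.Equal(rootHash, headerDataHash) {
		return errors.New("proof root does not match header data_hash")
	}
	// Verify hashes the leaf and walks the audit path up to the root;
	// signature per CometBFT's crypto/merkle package (assumed).
	return proof.Verify(rootHash, tx)
}
```
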
+message TxProof { + bytes root_hash = 1; + bytes data = 2; + cometbft.crypto.v1.Proof proof = 3; +} diff --git a/proto/cometbft/types/v1beta1/validator.proto b/proto/cometbft/types/v1beta1/validator.proto new file mode 100644 index 00000000000..90bead1725c --- /dev/null +++ b/proto/cometbft/types/v1beta1/validator.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; +package cometbft.types.v1beta1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta1"; + +import "gogoproto/gogo.proto"; +import "cometbft/crypto/v1/keys.proto"; + +// BlockIdFlag indicates which BlockID the signature is for +enum BlockIDFlag { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + // Indicates an error condition + BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; + // The vote was not received + BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; + // Voted for the block that received the majority + BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; + // Voted for nil + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; +} + +// ValidatorSet defines a set of validators. +message ValidatorSet { + repeated Validator validators = 1; + Validator proposer = 2; + int64 total_voting_power = 3; +} + +// Validator represents a node participating in the consensus protocol. +message Validator { + bytes address = 1; + cometbft.crypto.v1.PublicKey pub_key = 2 [(gogoproto.nullable) = false]; + int64 voting_power = 3; + int64 proposer_priority = 4; +} + +// SimpleValidator is a Validator, which is serialized and hashed in consensus. +// Address is removed because it's redundant with the pubkey. +// Proposer priority is removed because it changes every round. +message SimpleValidator { + cometbft.crypto.v1.PublicKey pub_key = 1; + int64 voting_power = 2; +} diff --git a/proto/cometbft/types/v1beta2/params.proto b/proto/cometbft/types/v1beta2/params.proto new file mode 100644 index 00000000000..c624eb2f4d3 --- /dev/null +++ b/proto/cometbft/types/v1beta2/params.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package cometbft.types.v1beta2; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/types/v1beta2"; + +import "gogoproto/gogo.proto"; +import "cometbft/types/v1beta1/params.proto"; + +option (gogoproto.equal_all) = true; + +// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. +message ConsensusParams { + BlockParams block = 1; + v1beta1.EvidenceParams evidence = 2; + v1beta1.ValidatorParams validator = 3; + v1beta1.VersionParams version = 4; +} + +// BlockParams contains limits on the block size. +message BlockParams { + // Max block size, in bytes. + // Note: must be greater than 0 + int64 max_bytes = 1; + // Max gas per block. + // Note: must be greater or equal to -1 + int64 max_gas = 2; + + reserved 3; // was TimeIotaMs see https://github.com/tendermint/tendermint/pull/5792 +} diff --git a/proto/cometbft/version/v1/types.proto b/proto/cometbft/version/v1/types.proto new file mode 100644 index 00000000000..243675cdbb8 --- /dev/null +++ b/proto/cometbft/version/v1/types.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package cometbft.version.v1; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/version/v1"; + +import "gogoproto/gogo.proto"; + +// App includes the protocol and software version for the application. +// This information is included in ResponseInfo. 
The App.Protocol can be +// updated in ResponseEndBlock. +message App { + uint64 protocol = 1; + string software = 2; +} + +// Consensus captures the consensus rules for processing a block in the blockchain, +// including all blockchain data structures and the rules of the application's +// state transition machine. +message Consensus { + option (gogoproto.equal) = true; + + uint64 block = 1; + uint64 app = 2; +} diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 906576b6fba..5affcc30c5e 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.abci; -option go_package = "github.com/cometbft/cometbft/abci/types"; - // For more information on gogo.proto, see: // https://github.com/cosmos/gogoproto/blob/master/extensions.md import "tendermint/crypto/proof.proto"; @@ -36,7 +34,7 @@ service ABCI { rpc FinalizeBlock(RequestFinalizeBlock) returns (ResponseFinalizeBlock); } -//---------------------------------------- +// ---------------------------------------- // Request types message Request { @@ -193,7 +191,7 @@ message RequestFinalizeBlock { bytes proposer_address = 8; } -//---------------------------------------- +// ---------------------------------------- // Response types message Response { @@ -350,7 +348,7 @@ message ResponseVerifyVoteExtension { } message ResponseFinalizeBlock { - // set of block events emmitted as part of executing the block + // set of block events emitted as part of executing the block repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // the result of executing each transaction including the events @@ -365,7 +363,7 @@ message ResponseFinalizeBlock { bytes app_hash = 5; } -//---------------------------------------- +// ---------------------------------------- // Misc. 
message CommitInfo { @@ -427,7 +425,7 @@ message TxResult { ExecTxResult result = 4 [(gogoproto.nullable) = false]; } -//---------------------------------------- +// ---------------------------------------- // Blockchain Types message Validator { @@ -482,7 +480,7 @@ message Misbehavior { int64 total_voting_power = 5; } -//---------------------------------------- +// ---------------------------------------- // State Sync Types message Snapshot { diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index 11c39a713bb..e394fb3d675 100644 --- a/proto/tendermint/blocksync/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.blocksync; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/blocksync"; - import "tendermint/types/block.proto"; import "tendermint/types/types.proto"; diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto index 2ca6eb37798..d2a51e2f366 100644 --- a/proto/tendermint/consensus/types.proto +++ b/proto/tendermint/consensus/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.consensus; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/consensus"; - import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "tendermint/libs/bits/types.proto"; diff --git a/proto/tendermint/consensus/wal.proto b/proto/tendermint/consensus/wal.proto index fafcf11fa92..22531e0d0cf 100644 --- a/proto/tendermint/consensus/wal.proto +++ b/proto/tendermint/consensus/wal.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.consensus; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/consensus"; - import "gogoproto/gogo.proto"; import "tendermint/consensus/types.proto"; import "tendermint/types/events.proto"; diff --git a/proto/tendermint/crypto/keys.proto b/proto/tendermint/crypto/keys.proto index 8fa192fa4bc..b7b50da996d 100644 --- a/proto/tendermint/crypto/keys.proto +++ b/proto/tendermint/crypto/keys.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.crypto; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/crypto"; - import "gogoproto/gogo.proto"; // PublicKey defines the keys available for use with Validators diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto index 7f22a0052e1..4f342c988c0 100644 --- a/proto/tendermint/crypto/proof.proto +++ b/proto/tendermint/crypto/proof.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.crypto; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/crypto"; - import "gogoproto/gogo.proto"; message Proof { diff --git a/proto/tendermint/libs/bits/types.proto b/proto/tendermint/libs/bits/types.proto index e6afc5e8ec2..1ea81d33f80 100644 --- a/proto/tendermint/libs/bits/types.proto +++ b/proto/tendermint/libs/bits/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.libs.bits; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/libs/bits"; - message BitArray { int64 bits = 1; repeated uint64 elems = 2; diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto index 60bafff03d1..7fa53ef79d8 100644 --- a/proto/tendermint/mempool/types.proto +++ b/proto/tendermint/mempool/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.mempool; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/mempool"; - message Txs { repeated bytes txs = 1; } diff --git 
a/proto/tendermint/p2p/conn.proto b/proto/tendermint/p2p/conn.proto index a7de695ac8d..62abd4f5f1e 100644 --- a/proto/tendermint/p2p/conn.proto +++ b/proto/tendermint/p2p/conn.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.p2p; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/p2p"; - import "gogoproto/gogo.proto"; import "tendermint/crypto/keys.proto"; diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto index 2191866609b..397efbfb42a 100644 --- a/proto/tendermint/p2p/pex.proto +++ b/proto/tendermint/p2p/pex.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.p2p; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/p2p"; - import "tendermint/p2p/types.proto"; import "gogoproto/gogo.proto"; diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index 157d8ba1ca1..5b49c658393 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.p2p; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/p2p"; - import "gogoproto/gogo.proto"; message NetAddress { diff --git a/proto/tendermint/privval/types.proto b/proto/tendermint/privval/types.proto index 13190ca42fa..ddce7682046 100644 --- a/proto/tendermint/privval/types.proto +++ b/proto/tendermint/privval/types.proto @@ -5,17 +5,6 @@ import "tendermint/crypto/keys.proto"; import "tendermint/types/types.proto"; import "gogoproto/gogo.proto"; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/privval"; - -enum Errors { - ERRORS_UNKNOWN = 0; - ERRORS_UNEXPECTED_RESPONSE = 1; - ERRORS_NO_CONNECTION = 2; - ERRORS_CONNECTION_TIMEOUT = 3; - ERRORS_READ_TIMEOUT = 4; - ERRORS_WRITE_TIMEOUT = 5; -} - message RemoteSignerError { int32 code = 1; string description = 2; @@ -34,8 +23,9 @@ message PubKeyResponse { // SignVoteRequest is a request to sign a vote message SignVoteRequest { - tendermint.types.Vote vote = 1; - string chain_id = 2; + tendermint.types.Vote vote = 1; + string chain_id = 2; + bool skip_extension_signing = 3; // if true, the signer may skip signing the extension bytes. } // SignedVoteResponse is a response containing a signed vote or an error diff --git a/proto/tendermint/services/block/v1/block.proto b/proto/tendermint/services/block/v1/block.proto index 934e0863be6..78d2c71b92f 100644 --- a/proto/tendermint/services/block/v1/block.proto +++ b/proto/tendermint/services/block/v1/block.proto @@ -4,10 +4,8 @@ package tendermint.services.block.v1; import "tendermint/types/block.proto"; import "tendermint/types/types.proto"; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/services/block/v1"; - message GetByHeightRequest { - // The height of the block requested. If set to 0, the latest height will be returned. + // The height of the block requested. 
int64 height = 1; } @@ -16,13 +14,6 @@ message GetByHeightResponse { tendermint.types.Block block = 2; } -message GetLatestRequest {} - -message GetLatestResponse { - tendermint.types.BlockID block_id = 1; - tendermint.types.Block block = 2; -} - // GetLatestHeightRequest - empty message since no parameter is required message GetLatestHeightRequest {} diff --git a/proto/tendermint/services/block/v1/block_service.proto b/proto/tendermint/services/block/v1/block_service.proto index dd9f8ea3b51..11bb421998a 100644 --- a/proto/tendermint/services/block/v1/block_service.proto +++ b/proto/tendermint/services/block/v1/block_service.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.services.block.v1; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/services/block/v1"; - import "tendermint/services/block/v1/block.proto"; // BlockService provides information about blocks @@ -10,9 +8,6 @@ service BlockService { // GetBlock retrieves the block information at a particular height. rpc GetByHeight(GetByHeightRequest) returns (GetByHeightResponse); - // GetLatest retrieves the latest block. - rpc GetLatest(GetLatestRequest) returns (GetLatestResponse); - // GetLatestHeight returns a stream of the latest block heights committed by // the network. This is a long-lived stream that is only terminated by the // server if an error occurs. The caller is expected to handle such diff --git a/proto/tendermint/services/block_results/v1/block_results.proto b/proto/tendermint/services/block_results/v1/block_results.proto index 87e94dc980c..005ec7a9be4 100644 --- a/proto/tendermint/services/block_results/v1/block_results.proto +++ b/proto/tendermint/services/block_results/v1/block_results.proto @@ -4,15 +4,10 @@ package tendermint.services.block_results.v1; import "tendermint/abci/types.proto"; import "tendermint/types/params.proto"; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/services/block_results/v1"; - message GetBlockResultsRequest { int64 height = 1; } -message GetLatestBlockResultsRequest { -} - message GetBlockResultsResponse { int64 height = 1; repeated tendermint.abci.ExecTxResult txs_results = 2; diff --git a/proto/tendermint/services/block_results/v1/block_results_service.pb.go b/proto/tendermint/services/block_results/v1/block_results_service.pb.go deleted file mode 100644 index 2cb4c03221f..00000000000 --- a/proto/tendermint/services/block_results/v1/block_results_service.pb.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/services/block_results/v1/block_results_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
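Since `GetLatest` is removed here, the remaining way to follow the chain tip is the `GetLatestHeight` stream combined with `GetByHeight`. A client-loop sketch, assuming the generated Go client follows standard gRPC naming and lives under the `api/cometbft` layout used elsewhere in this diff (both are assumptions, not confirmed by this PR):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	// Assumed import path, mirroring the api/cometbft layout of the abci mocks.
	blockv1 "github.com/cometbft/cometbft/api/cometbft/services/block/v1"
)

// watchHeights follows the long-lived GetLatestHeight stream and fetches each
// newly committed block, replacing the removed GetLatest RPC.
func watchHeights(ctx context.Context, conn *grpc.ClientConn) error {
	client := blockv1.NewBlockServiceClient(conn)
	stream, err := client.GetLatestHeight(ctx, &blockv1.GetLatestHeightRequest{})
	if err != nil {
		return err
	}
	for {
		latest, err := stream.Recv() // assumes the response carries a Height field
		if err != nil {
			return err // the server only terminates this stream on error
		}
		resp, err := client.GetByHeight(ctx, &blockv1.GetByHeightRequest{Height: latest.Height})
		if err != nil {
			return err
		}
		log.Printf("height %d, block ID %v", latest.Height, resp.BlockId)
	}
}
```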
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("tendermint/services/block_results/v1/block_results_service.proto", fileDescriptor_4d6550c091adc944) -} - -var fileDescriptor_4d6550c091adc944 = []byte{ - // 228 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, - 0x4f, 0xca, 0xc9, 0x4f, 0xce, 0x8e, 0x2f, 0x4a, 0x2d, 0x2e, 0xcd, 0x29, 0x29, 0xd6, 0x2f, 0x33, - 0x44, 0x15, 0x88, 0x87, 0xaa, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x41, 0x98, 0xa0, - 0x07, 0x33, 0x41, 0x0f, 0x45, 0x83, 0x5e, 0x99, 0xa1, 0x94, 0x05, 0xe9, 0xf6, 0x40, 0xcc, 0x37, - 0x3a, 0xcc, 0xc4, 0x25, 0xec, 0x04, 0x12, 0x0f, 0x82, 0x08, 0x07, 0x43, 0x74, 0x0b, 0xf5, 0x31, - 0x72, 0xf1, 0xbb, 0xa7, 0x96, 0x20, 0x4b, 0x09, 0xd9, 0xe8, 0x11, 0xe3, 0x18, 0x3d, 0x34, 0x6d, - 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x52, 0xb6, 0x64, 0xea, 0x2e, 0x2e, 0xc8, 0xcf, 0x2b, - 0x4e, 0x15, 0x9a, 0xc5, 0xc8, 0x25, 0xea, 0x9e, 0x5a, 0xe2, 0x93, 0x58, 0x92, 0x5a, 0x8c, 0xea, - 0x2c, 0x27, 0xa2, 0x0d, 0xc6, 0xd4, 0x4c, 0x1d, 0xc7, 0x39, 0x25, 0x9e, 0x78, 0x24, 0xc7, 0x78, - 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, - 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x7b, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, - 0xae, 0x7e, 0x72, 0x7e, 0x6e, 0x6a, 0x49, 0x52, 0x5a, 0x09, 0x82, 0x01, 0x8e, 0x03, 0x7d, 0x62, - 0x22, 0x2f, 0x89, 0x0d, 0xac, 0xd6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x91, 0xbc, 0x2a, 0x23, - 0x53, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BlockResultsServiceClient is the client API for BlockResultsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type BlockResultsServiceClient interface { - // GetBlockResults returns the BlockResults of the requested height. - GetBlockResults(ctx context.Context, in *GetBlockResultsRequest, opts ...grpc.CallOption) (*GetBlockResultsResponse, error) - // GetLatestBlockResults returns the BlockResults of the latest committed height. - GetLatestBlockResults(ctx context.Context, in *GetLatestBlockResultsRequest, opts ...grpc.CallOption) (*GetBlockResultsResponse, error) -} - -type blockResultsServiceClient struct { - cc grpc1.ClientConn -} - -func NewBlockResultsServiceClient(cc grpc1.ClientConn) BlockResultsServiceClient { - return &blockResultsServiceClient{cc} -} - -func (c *blockResultsServiceClient) GetBlockResults(ctx context.Context, in *GetBlockResultsRequest, opts ...grpc.CallOption) (*GetBlockResultsResponse, error) { - out := new(GetBlockResultsResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.block_results.v1.BlockResultsService/GetBlockResults", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *blockResultsServiceClient) GetLatestBlockResults(ctx context.Context, in *GetLatestBlockResultsRequest, opts ...grpc.CallOption) (*GetBlockResultsResponse, error) { - out := new(GetBlockResultsResponse) - err := c.cc.Invoke(ctx, "/tendermint.services.block_results.v1.BlockResultsService/GetLatestBlockResults", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BlockResultsServiceServer is the server API for BlockResultsService service. -type BlockResultsServiceServer interface { - // GetBlockResults returns the BlockResults of the requested height. - GetBlockResults(context.Context, *GetBlockResultsRequest) (*GetBlockResultsResponse, error) - // GetLatestBlockResults returns the BlockResults of the latest committed height. - GetLatestBlockResults(context.Context, *GetLatestBlockResultsRequest) (*GetBlockResultsResponse, error) -} - -// UnimplementedBlockResultsServiceServer can be embedded to have forward compatible implementations. -type UnimplementedBlockResultsServiceServer struct { -} - -func (*UnimplementedBlockResultsServiceServer) GetBlockResults(ctx context.Context, req *GetBlockResultsRequest) (*GetBlockResultsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetBlockResults not implemented") -} -func (*UnimplementedBlockResultsServiceServer) GetLatestBlockResults(ctx context.Context, req *GetLatestBlockResultsRequest) (*GetBlockResultsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetLatestBlockResults not implemented") -} - -func RegisterBlockResultsServiceServer(s grpc1.Server, srv BlockResultsServiceServer) { - s.RegisterService(&_BlockResultsService_serviceDesc, srv) -} - -func _BlockResultsService_GetBlockResults_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetBlockResultsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlockResultsServiceServer).GetBlockResults(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.services.block_results.v1.BlockResultsService/GetBlockResults", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlockResultsServiceServer).GetBlockResults(ctx, req.(*GetBlockResultsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BlockResultsService_GetLatestBlockResults_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetLatestBlockResultsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlockResultsServiceServer).GetLatestBlockResults(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.services.block_results.v1.BlockResultsService/GetLatestBlockResults", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlockResultsServiceServer).GetLatestBlockResults(ctx, req.(*GetLatestBlockResultsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _BlockResultsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.services.block_results.v1.BlockResultsService", - HandlerType: (*BlockResultsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetBlockResults", - Handler: 
_BlockResultsService_GetBlockResults_Handler, - }, - { - MethodName: "GetLatestBlockResults", - Handler: _BlockResultsService_GetLatestBlockResults_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/services/block_results/v1/block_results_service.proto", -} diff --git a/proto/tendermint/services/block_results/v1/block_results_service.proto b/proto/tendermint/services/block_results/v1/block_results_service.proto index d256b3d1fb1..a0b7c9e1905 100644 --- a/proto/tendermint/services/block_results/v1/block_results_service.proto +++ b/proto/tendermint/services/block_results/v1/block_results_service.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.services.block_results.v1; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/services/block_results/v1"; - import "tendermint/services/block_results/v1/block_results.proto"; /* @@ -11,7 +9,4 @@ import "tendermint/services/block_results/v1/block_results.proto"; service BlockResultsService { // GetBlockResults returns the BlockResults of the requested height. rpc GetBlockResults(GetBlockResultsRequest) returns (GetBlockResultsResponse); - - // GetLatestBlockResults returns the BlockResults of the latest committed height. - rpc GetLatestBlockResults(GetLatestBlockResultsRequest) returns (GetBlockResultsResponse); } diff --git a/proto/tendermint/services/pruning/v1/pruning.proto b/proto/tendermint/services/pruning/v1/pruning.proto index 9f73ab64442..16ce3ebdfc8 100644 --- a/proto/tendermint/services/pruning/v1/pruning.proto +++ b/proto/tendermint/services/pruning/v1/pruning.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package tendermint.services.pruning.v1; message SetBlockRetainHeightRequest { - uint64 height = 1; + uint64 height = 1; } message SetBlockRetainHeightResponse {} @@ -11,16 +11,16 @@ message SetBlockRetainHeightResponse {} message GetBlockRetainHeightRequest {} message GetBlockRetainHeightResponse { - // The retain height set by the application. - uint64 app_retain_height = 1; + // The retain height set by the application. + uint64 app_retain_height = 1; - // The retain height set via the pruning service (e.g. by the data - // companion) specifically for blocks. - uint64 pruning_service_retain_height = 2; + // The retain height set via the pruning service (e.g. by the data + // companion) specifically for blocks. + uint64 pruning_service_retain_height = 2; } message SetBlockResultsRetainHeightRequest { - uint64 height = 1; + uint64 height = 1; } message SetBlockResultsRetainHeightResponse {} @@ -28,13 +28,13 @@ message SetBlockResultsRetainHeightResponse {} message GetBlockResultsRetainHeightRequest {} message GetBlockResultsRetainHeightResponse { - // The retain height set by the pruning service (e.g. by the data - // companion) specifically for block results. - uint64 pruning_service_retain_height = 1; + // The retain height set by the pruning service (e.g. by the data + // companion) specifically for block results. 
+ uint64 pruning_service_retain_height = 1; } message SetTxIndexerRetainHeightRequest { - uint64 height = 1; + uint64 height = 1; } message SetTxIndexerRetainHeightResponse {} @@ -42,11 +42,11 @@ message SetTxIndexerRetainHeightResponse {} message GetTxIndexerRetainHeightRequest {} message GetTxIndexerRetainHeightResponse { - uint64 height = 1; + uint64 height = 1; } message SetBlockIndexerRetainHeightRequest { - uint64 height = 1; + uint64 height = 1; } message SetBlockIndexerRetainHeightResponse {} @@ -54,5 +54,5 @@ message SetBlockIndexerRetainHeightResponse {} message GetBlockIndexerRetainHeightRequest {} message GetBlockIndexerRetainHeightResponse { - uint64 height = 1; + uint64 height = 1; } diff --git a/proto/tendermint/services/pruning/v1/service.proto b/proto/tendermint/services/pruning/v1/service.proto index 526c8bf1960..2f80d7b4f43 100644 --- a/proto/tendermint/services/pruning/v1/service.proto +++ b/proto/tendermint/services/pruning/v1/service.proto @@ -7,42 +7,42 @@ import "tendermint/services/pruning/v1/pruning.proto"; // PruningService provides privileged access to specialized pruning // functionality on the CometBFT node to help control node storage. service PruningService { - // SetBlockRetainHeightRequest indicates to the node that it can safely - // prune all block data up to the specified retain height. - // - // The lower of this retain height and that set by the application in its - // Commit response will be used by the node to determine which heights' data - // can be pruned. - rpc SetBlockRetainHeight(SetBlockRetainHeightRequest) returns (SetBlockRetainHeightResponse); - - // GetBlockRetainHeight returns information about the retain height - // parameters used by the node to influence block retention/pruning. - rpc GetBlockRetainHeight(GetBlockRetainHeightRequest) returns (GetBlockRetainHeightResponse); - - // SetBlockResultsRetainHeightRequest indicates to the node that it can - // safely prune all block results data up to the specified height. - // - // The node will always store the block results for the latest height to - // help facilitate crash recovery. - rpc SetBlockResultsRetainHeight(SetBlockResultsRetainHeightRequest) returns (SetBlockResultsRetainHeightResponse); - - // GetBlockResultsRetainHeight returns information about the retain height - // parameters used by the node to influence block results retention/pruning. - rpc GetBlockResultsRetainHeight(GetBlockResultsRetainHeightRequest) returns (GetBlockResultsRetainHeightResponse); - - // SetTxIndexerRetainHeightRequest indicates to the node that it can safely - // prune all tx indices up to the specified retain height. - rpc SetTxIndexerRetainHeight(SetTxIndexerRetainHeightRequest) returns (SetTxIndexerRetainHeightResponse); - - // GetTxIndexerRetainHeight returns information about the retain height - // parameters used by the node to influence TxIndexer pruning - rpc GetTxIndexerRetainHeight(GetTxIndexerRetainHeightRequest) returns (GetTxIndexerRetainHeightResponse); - - // SetBlockIndexerRetainHeightRequest indicates to the node that it can safely - // prune all block indices up to the specified retain height. 
- rpc SetBlockIndexerRetainHeight(SetBlockIndexerRetainHeightRequest) returns (SetBlockIndexerRetainHeightResponse); - - // GetBlockIndexerRetainHeight returns information about the retain height - // parameters used by the node to influence BlockIndexer pruning - rpc GetBlockIndexerRetainHeight(GetBlockIndexerRetainHeightRequest) returns (GetBlockIndexerRetainHeightResponse); + // SetBlockRetainHeightRequest indicates to the node that it can safely + // prune all block data up to the specified retain height. + // + // The lower of this retain height and that set by the application in its + // Commit response will be used by the node to determine which heights' data + // can be pruned. + rpc SetBlockRetainHeight(SetBlockRetainHeightRequest) returns (SetBlockRetainHeightResponse); + + // GetBlockRetainHeight returns information about the retain height + // parameters used by the node to influence block retention/pruning. + rpc GetBlockRetainHeight(GetBlockRetainHeightRequest) returns (GetBlockRetainHeightResponse); + + // SetBlockResultsRetainHeightRequest indicates to the node that it can + // safely prune all block results data up to the specified height. + // + // The node will always store the block results for the latest height to + // help facilitate crash recovery. + rpc SetBlockResultsRetainHeight(SetBlockResultsRetainHeightRequest) returns (SetBlockResultsRetainHeightResponse); + + // GetBlockResultsRetainHeight returns information about the retain height + // parameters used by the node to influence block results retention/pruning. + rpc GetBlockResultsRetainHeight(GetBlockResultsRetainHeightRequest) returns (GetBlockResultsRetainHeightResponse); + + // SetTxIndexerRetainHeightRequest indicates to the node that it can safely + // prune all tx indices up to the specified retain height. + rpc SetTxIndexerRetainHeight(SetTxIndexerRetainHeightRequest) returns (SetTxIndexerRetainHeightResponse); + + // GetTxIndexerRetainHeight returns information about the retain height + // parameters used by the node to influence TxIndexer pruning + rpc GetTxIndexerRetainHeight(GetTxIndexerRetainHeightRequest) returns (GetTxIndexerRetainHeightResponse); + + // SetBlockIndexerRetainHeightRequest indicates to the node that it can safely + // prune all block indices up to the specified retain height. 
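The retention semantics these comments describe are worth restating: for block data, the node prunes up to the lower of the application's retain height (from its Commit response) and the height set here by the data companion. A hedged sketch of driving the service from Go, again assuming standard generated client names and an api/cometbft import path that this diff does not itself confirm:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	// Assumed import path and generated client names.
	pruningv1 "github.com/cometbft/cometbft/api/cometbft/services/pruning/v1"
)

// setCompanionRetainHeight tells the node the data companion no longer needs
// blocks below h, then reads back both retain heights; the node prunes up to
// the lower of the two.
func setCompanionRetainHeight(ctx context.Context, conn *grpc.ClientConn, h uint64) error {
	client := pruningv1.NewPruningServiceClient(conn)
	if _, err := client.SetBlockRetainHeight(ctx, &pruningv1.SetBlockRetainHeightRequest{Height: h}); err != nil {
		return err
	}
	resp, err := client.GetBlockRetainHeight(ctx, &pruningv1.GetBlockRetainHeightRequest{})
	if err != nil {
		return err
	}
	fmt.Printf("app: %d, pruning service: %d\n", resp.AppRetainHeight, resp.PruningServiceRetainHeight)
	return nil
}
```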
+ rpc SetBlockIndexerRetainHeight(SetBlockIndexerRetainHeightRequest) returns (SetBlockIndexerRetainHeightResponse); + + // GetBlockIndexerRetainHeight returns information about the retain height + // parameters used by the node to influence BlockIndexer pruning + rpc GetBlockIndexerRetainHeight(GetBlockIndexerRetainHeightRequest) returns (GetBlockIndexerRetainHeightResponse); } diff --git a/proto/tendermint/services/version/v1/version.proto b/proto/tendermint/services/version/v1/version.proto index 4ffee2352ca..0f82e556d28 100644 --- a/proto/tendermint/services/version/v1/version.proto +++ b/proto/tendermint/services/version/v1/version.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.services.version.v1; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/services/version/v1"; - message GetVersionRequest {} message GetVersionResponse { diff --git a/proto/tendermint/services/version/v1/version_service.proto b/proto/tendermint/services/version/v1/version_service.proto index 98142786ccb..e0757e618db 100644 --- a/proto/tendermint/services/version/v1/version_service.proto +++ b/proto/tendermint/services/version/v1/version_service.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.services.version.v1; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/services/version/v1"; - import "tendermint/services/version/v1/version.proto"; // VersionService simply provides version information about the node and the diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index c76c25fa852..57e7083b2df 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.state; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/state"; - import "gogoproto/gogo.proto"; import "tendermint/abci/types.proto"; import "tendermint/types/types.proto"; diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto index eac36b3dec3..ffca70dc03c 100644 --- a/proto/tendermint/statesync/types.proto +++ b/proto/tendermint/statesync/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.statesync; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/statesync"; - message Message { oneof sum { SnapshotsRequest snapshots_request = 1; diff --git a/proto/tendermint/store/types.proto b/proto/tendermint/store/types.proto index b510169a4c0..36ac35276a4 100644 --- a/proto/tendermint/store/types.proto +++ b/proto/tendermint/store/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.store; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/store"; - message BlockStoreState { int64 base = 1; int64 height = 2; diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto index d531c06a005..8a713b7dcf9 100644 --- a/proto/tendermint/types/block.proto +++ b/proto/tendermint/types/block.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "tendermint/types/evidence.proto"; diff --git a/proto/tendermint/types/canonical.proto b/proto/tendermint/types/canonical.proto index bbff09b6605..da5b8e83949 100644 --- a/proto/tendermint/types/canonical.proto +++ b/proto/tendermint/types/canonical.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = 
"github.com/cometbft/cometbft/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "google/protobuf/timestamp.proto"; diff --git a/proto/tendermint/types/events.proto b/proto/tendermint/types/events.proto index 98ce811be7a..1ef71587283 100644 --- a/proto/tendermint/types/events.proto +++ b/proto/tendermint/types/events.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/types"; - message EventDataRoundState { int64 height = 1; int32 round = 2; diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto index 1f35049bdc4..c373b1d8ff6 100644 --- a/proto/tendermint/types/evidence.proto +++ b/proto/tendermint/types/evidence.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "tendermint/types/types.proto"; diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index f96a2e2f572..ad25dde7184 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index a527e2ffb21..344127cb443 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "tendermint/crypto/proof.proto"; @@ -14,12 +12,13 @@ enum SignedMsgType { option (gogoproto.goproto_enum_stringer) = true; option (gogoproto.goproto_enum_prefix) = false; + // Unknown SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; - // Votes - SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; + // Prevote + SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; + // Precommit SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; - - // Proposals + // Proposal SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; } diff --git a/proto/tendermint/types/validator.proto b/proto/tendermint/types/validator.proto index a47e677a2fc..b9bd512a107 100644 --- a/proto/tendermint/types/validator.proto +++ b/proto/tendermint/types/validator.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.types; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/types"; - import "gogoproto/gogo.proto"; import "tendermint/crypto/keys.proto"; @@ -11,10 +9,14 @@ enum BlockIDFlag { option (gogoproto.goproto_enum_stringer) = true; option (gogoproto.goproto_enum_prefix) = false; - BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; // indicates an error condition - BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; // the vote was not received - BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; // voted for the block that received the majority - BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = 
"BlockIDFlagNil"]; // voted for nil + // Indicates an error condition + BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; + // The vote was not received + BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; + // Voted for the block that received the majority + BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; + // Voted for nil + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; } message ValidatorSet { diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto index 3b6ef45479e..ea403ec2e84 100644 --- a/proto/tendermint/version/types.proto +++ b/proto/tendermint/version/types.proto @@ -1,8 +1,6 @@ syntax = "proto3"; package tendermint.version; -option go_package = "github.com/cometbft/cometbft/proto/tendermint/version"; - import "gogoproto/gogo.proto"; // App includes the protocol and software version for the application. diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 064f32891ff..82617ac3a13 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -4,55 +4,54 @@ import ( "context" "time" - "github.com/go-kit/kit/metrics" - abcicli "github.com/cometbft/cometbft/abci/client" - "github.com/cometbft/cometbft/abci/types" + abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/metrics" ) //go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot -//---------------------------------------------------------------------------------------- +// ---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level type AppConnConsensus interface { Error() error - InitChain(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error) - PrepareProposal(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) - ProcessProposal(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) - ExtendVote(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error) - VerifyVoteExtension(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) - FinalizeBlock(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) - Commit(context.Context) (*types.ResponseCommit, error) + InitChain(ctx context.Context, req *abcitypes.InitChainRequest) (*abcitypes.InitChainResponse, error) + PrepareProposal(ctx context.Context, req *abcitypes.PrepareProposalRequest) (*abcitypes.PrepareProposalResponse, error) + ProcessProposal(ctx context.Context, req *abcitypes.ProcessProposalRequest) (*abcitypes.ProcessProposalResponse, error) + ExtendVote(ctx context.Context, req *abcitypes.ExtendVoteRequest) (*abcitypes.ExtendVoteResponse, error) + VerifyVoteExtension(ctx context.Context, req *abcitypes.VerifyVoteExtensionRequest) (*abcitypes.VerifyVoteExtensionResponse, error) + FinalizeBlock(ctx context.Context, req *abcitypes.FinalizeBlockRequest) (*abcitypes.FinalizeBlockResponse, error) + Commit(ctx context.Context) (*abcitypes.CommitResponse, error) } type AppConnMempool interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(cb abcicli.Callback) Error() error - CheckTx(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error) - CheckTxAsync(context.Context, *types.RequestCheckTx) (*abcicli.ReqRes, error) - 
Flush(context.Context) error + CheckTx(ctx context.Context, req *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) + CheckTxAsync(ctx context.Context, req *abcitypes.CheckTxRequest) (*abcicli.ReqRes, error) + Flush(ctx context.Context) error } type AppConnQuery interface { Error() error - Echo(context.Context, string) (*types.ResponseEcho, error) - Info(context.Context, *types.RequestInfo) (*types.ResponseInfo, error) - Query(context.Context, *types.RequestQuery) (*types.ResponseQuery, error) + Echo(ctx context.Context, echo string) (*abcitypes.EchoResponse, error) + Info(ctx context.Context, req *abcitypes.InfoRequest) (*abcitypes.InfoResponse, error) + Query(ctx context.Context, req *abcitypes.QueryRequest) (*abcitypes.QueryResponse, error) } type AppConnSnapshot interface { Error() error - ListSnapshots(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshot(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunk(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunk(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + ListSnapshots(ctx context.Context, req *abcitypes.ListSnapshotsRequest) (*abcitypes.ListSnapshotsResponse, error) + OfferSnapshot(ctx context.Context, req *abcitypes.OfferSnapshotRequest) (*abcitypes.OfferSnapshotResponse, error) + LoadSnapshotChunk(ctx context.Context, req *abcitypes.LoadSnapshotChunkRequest) (*abcitypes.LoadSnapshotChunkResponse, error) + ApplySnapshotChunk(ctx context.Context, req *abcitypes.ApplySnapshotChunkRequest) (*abcitypes.ApplySnapshotChunkResponse, error) } -//----------------------------------------------------------------------------------------- +// ----------------------------------------------------------------------------------------- // Implements AppConnConsensus (subset of abcicli.Client) type appConnConsensus struct { @@ -73,43 +72,44 @@ func (app *appConnConsensus) Error() error { return app.appConn.Error() } -func (app *appConnConsensus) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { +func (app *appConnConsensus) InitChain(ctx context.Context, req *abcitypes.InitChainRequest) (*abcitypes.InitChainResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "init_chain", "type", "sync"))() return app.appConn.InitChain(ctx, req) } func (app *appConnConsensus) PrepareProposal(ctx context.Context, - req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + req *abcitypes.PrepareProposalRequest, +) (*abcitypes.PrepareProposalResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "prepare_proposal", "type", "sync"))() return app.appConn.PrepareProposal(ctx, req) } -func (app *appConnConsensus) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { +func (app *appConnConsensus) ProcessProposal(ctx context.Context, req *abcitypes.ProcessProposalRequest) (*abcitypes.ProcessProposalResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "process_proposal", "type", "sync"))() return app.appConn.ProcessProposal(ctx, req) } -func (app *appConnConsensus) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { +func (app *appConnConsensus) ExtendVote(ctx context.Context, req 
*abcitypes.ExtendVoteRequest) (*abcitypes.ExtendVoteResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "extend_vote", "type", "sync"))() return app.appConn.ExtendVote(ctx, req) } -func (app *appConnConsensus) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { +func (app *appConnConsensus) VerifyVoteExtension(ctx context.Context, req *abcitypes.VerifyVoteExtensionRequest) (*abcitypes.VerifyVoteExtensionResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "verify_vote_extension", "type", "sync"))() return app.appConn.VerifyVoteExtension(ctx, req) } -func (app *appConnConsensus) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { +func (app *appConnConsensus) FinalizeBlock(ctx context.Context, req *abcitypes.FinalizeBlockRequest) (*abcitypes.FinalizeBlockResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "finalize_block", "type", "sync"))() return app.appConn.FinalizeBlock(ctx, req) } -func (app *appConnConsensus) Commit(ctx context.Context) (*types.ResponseCommit, error) { +func (app *appConnConsensus) Commit(ctx context.Context) (*abcitypes.CommitResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "commit", "type", "sync"))() - return app.appConn.Commit(ctx, &types.RequestCommit{}) + return app.appConn.Commit(ctx, &abcitypes.CommitRequest{}) } -//------------------------------------------------ +// ------------------------------------------------ // Implements AppConnMempool (subset of abcicli.Client) type appConnMempool struct { @@ -124,6 +124,7 @@ func NewAppConnMempool(appConn abcicli.Client, metrics *Metrics) AppConnMempool } } +// Deprecated: Do not use. 
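These renames ripple into every caller: request and response types are now `*Request`/`*Response` rather than `Request*`/`Response*`, and with `SetResponseCallback` deprecated, mempool submissions go through the synchronous path with an explicit check type, as the `app_conn_test.go` hunk below shows. A minimal sketch against the `AppConnMempool` interface from this file:

```go
package main

import (
	"context"
	"fmt"

	abcitypes "github.com/cometbft/cometbft/abci/types"
	"github.com/cometbft/cometbft/proxy"
)

// submitTx checks a transaction through the mempool connection: build a
// CheckTxRequest with an explicit Type and inspect the response code directly.
func submitTx(ctx context.Context, conn proxy.AppConnMempool, tx []byte) error {
	resp, err := conn.CheckTx(ctx, &abcitypes.CheckTxRequest{
		Tx:   tx,
		Type: abcitypes.CHECK_TX_TYPE_CHECK, // a fresh check, not a recheck
	})
	if err != nil {
		return err
	}
	if resp.Code != 0 {
		return fmt.Errorf("tx rejected with code %d", resp.Code)
	}
	return nil
}
```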
func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) { app.appConn.SetResponseCallback(cb) } @@ -137,17 +138,17 @@ func (app *appConnMempool) Flush(ctx context.Context) error { return app.appConn.Flush(ctx) } -func (app *appConnMempool) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (app *appConnMempool) CheckTx(ctx context.Context, req *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "check_tx", "type", "sync"))() return app.appConn.CheckTx(ctx, req) } -func (app *appConnMempool) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (app *appConnMempool) CheckTxAsync(ctx context.Context, req *abcitypes.CheckTxRequest) (*abcicli.ReqRes, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "check_tx", "type", "async"))() return app.appConn.CheckTxAsync(ctx, req) } -//------------------------------------------------ +// ------------------------------------------------ // Implements AppConnQuery (subset of abcicli.Client) type appConnQuery struct { @@ -166,22 +167,22 @@ func (app *appConnQuery) Error() error { return app.appConn.Error() } -func (app *appConnQuery) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { +func (app *appConnQuery) Echo(ctx context.Context, msg string) (*abcitypes.EchoResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "echo", "type", "sync"))() return app.appConn.Echo(ctx, msg) } -func (app *appConnQuery) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { +func (app *appConnQuery) Info(ctx context.Context, req *abcitypes.InfoRequest) (*abcitypes.InfoResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "info", "type", "sync"))() return app.appConn.Info(ctx, req) } -func (app *appConnQuery) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { +func (app *appConnQuery) Query(ctx context.Context, req *abcitypes.QueryRequest) (*abcitypes.QueryResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "query", "type", "sync"))() return app.appConn.Query(ctx, req) } -//------------------------------------------------ +// ------------------------------------------------ // Implements AppConnSnapshot (subset of abcicli.Client) type appConnSnapshot struct { @@ -200,28 +201,28 @@ func (app *appConnSnapshot) Error() error { return app.appConn.Error() } -func (app *appConnSnapshot) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +func (app *appConnSnapshot) ListSnapshots(ctx context.Context, req *abcitypes.ListSnapshotsRequest) (*abcitypes.ListSnapshotsResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "list_snapshots", "type", "sync"))() return app.appConn.ListSnapshots(ctx, req) } -func (app *appConnSnapshot) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +func (app *appConnSnapshot) OfferSnapshot(ctx context.Context, req *abcitypes.OfferSnapshotRequest) (*abcitypes.OfferSnapshotResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "offer_snapshot", "type", "sync"))() return app.appConn.OfferSnapshot(ctx, req) } -func (app *appConnSnapshot) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) 
(*types.ResponseLoadSnapshotChunk, error) { +func (app *appConnSnapshot) LoadSnapshotChunk(ctx context.Context, req *abcitypes.LoadSnapshotChunkRequest) (*abcitypes.LoadSnapshotChunkResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "load_snapshot_chunk", "type", "sync"))() return app.appConn.LoadSnapshotChunk(ctx, req) } -func (app *appConnSnapshot) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { +func (app *appConnSnapshot) ApplySnapshotChunk(ctx context.Context, req *abcitypes.ApplySnapshotChunkRequest) (*abcitypes.ApplySnapshotChunkResponse, error) { defer addTimeSample(app.metrics.MethodTimingSeconds.With("method", "apply_snapshot_chunk", "type", "sync"))() return app.appConn.ApplySnapshotChunk(ctx, req) } // addTimeSample returns a function that, when called, adds an observation to m. -// The observation added to m is the number of seconds ellapsed since addTimeSample +// The observation added to m is the number of seconds elapsed since addTimeSample // was initially called. addTimeSample is meant to be called in a defer to calculate // the amount of time a function takes to complete. func addTimeSample(m metrics.Histogram) func() { diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index bb762dd9c4f..6674afe140f 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -8,8 +8,8 @@ import ( "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/abci/server" abci "github.com/cometbft/cometbft/abci/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" ) var SOCKET = "socket" @@ -44,7 +44,10 @@ func TestEcho(t *testing.T) { t.Log("Connected") for i := 0; i < 1000; i++ { - _, err = proxy.CheckTx(context.Background(), &abci.RequestCheckTx{Tx: []byte(fmt.Sprintf("echo-%v", i))}) + _, err = proxy.CheckTx(context.Background(), &abci.CheckTxRequest{ + Tx: []byte(fmt.Sprintf("echo-%v", i)), + Type: abci.CHECK_TX_TYPE_CHECK, + }) if err != nil { t.Fatal(err) } @@ -86,7 +89,10 @@ func BenchmarkEcho(b *testing.B) { b.StartTimer() // Start benchmarking tests for i := 0; i < b.N; i++ { - _, err = proxy.CheckTx(context.Background(), &abci.RequestCheckTx{Tx: []byte("hello")}) + _, err = proxy.CheckTx(context.Background(), &abci.CheckTxRequest{ + Tx: []byte("hello"), + Type: abci.CHECK_TX_TYPE_CHECK, + }) if err != nil { b.Error(err) } diff --git a/proxy/client.go b/proxy/client.go index 4993f6e1868..6b55ce06a47 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -1,7 +1,7 @@ package proxy import ( - "fmt" + "time" abcicli "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/example/kvstore" @@ -28,7 +28,7 @@ type ClientCreator interface { NewABCISnapshotClient() (abcicli.Client, error) } -//---------------------------------------------------- +// ---------------------------------------------------- // local proxy uses a mutex on an in-proc app type localClientCreator struct { @@ -71,7 +71,7 @@ func (l *localClientCreator) newABCIClient() (abcicli.Client, error) { return abcicli.NewLocalClient(l.mtx, l.app), nil } -//------------------------------------------------------------------------- +// ------------------------------------------------------------------------- // connection-synchronized local client uses a mutex per "connection" on an // in-process app @@ -116,7 +116,7 @@ func (c *connSyncLocalClientCreator) 
newABCIClient() (abcicli.Client, error) { return abcicli.NewLocalClient(nil, c.app), nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // advanced local client creator with a more complex concurrency model than the // other local client creators @@ -166,7 +166,7 @@ func (c *consensusSyncLocalClientCreator) NewABCISnapshotClient() (abcicli.Clien return abcicli.NewUnsyncLocalClient(c.app), nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // most advanced local client creator with a more complex concurrency model // than the other local client creators - all concurrency is assumed to be // handled by the application @@ -206,7 +206,7 @@ func (c *unsyncLocalClientCreator) NewABCISnapshotClient() (abcicli.Client, erro return abcicli.NewUnsyncLocalClient(c.app), nil } -//--------------------------------------------------------------- +// --------------------------------------------------------------- // remote proxy opens new connections to an external app process type remoteClientCreator struct { @@ -249,7 +249,7 @@ func (r *remoteClientCreator) NewABCISnapshotClient() (abcicli.Client, error) { func (r *remoteClientCreator) newABCIClient() (abcicli.Client, error) { remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) if err != nil { - return nil, fmt.Errorf("failed to connect to proxy: %w", err) + return nil, ErrUnreachableProxy{Err: err} } return remoteApp, nil @@ -265,19 +265,37 @@ func (r *remoteClientCreator) newABCIClient() (abcicli.Client, error) { // "_connsync" variant (i.e. "kvstore_connsync", etc.), which attempts to // replicate the same concurrency model as the remote client. func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { + // Default is zero for kvstore and persistent_kvstore. + // Replaces deprecated `timeout_commit` parameter. + // Set to 1s to mimic the real world app. + delay := 1 * time.Second + + // Don't forget to change BaseConfig#ValidateBasic if you add new options here. 
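Every kvstore variant in the switch below now sets this delay before the app is wrapped in a client creator. The same wiring for a custom in-process test app, condensed, using only identifiers that appear in this diff:

```go
package main

import (
	"time"

	"github.com/cometbft/cometbft/abci/example/kvstore"
	"github.com/cometbft/cometbft/proxy"
)

// newTestClientCreator mirrors DefaultClientCreator's kvstore branch: set a
// per-block delay on the app (the replacement for the deprecated
// timeout_commit consensus parameter), then wrap it in a local client creator.
func newTestClientCreator() proxy.ClientCreator {
	app := kvstore.NewInMemoryApplication()
	app.SetNextBlockDelay(1 * time.Second) // zero by default; 1s mimics real-world block times
	return proxy.NewLocalClientCreator(app)
}
```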
switch addr { case "kvstore": - return NewLocalClientCreator(kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + app.SetNextBlockDelay(delay) + return NewLocalClientCreator(app) case "kvstore_connsync": - return NewConnSyncLocalClientCreator(kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + app.SetNextBlockDelay(delay) + return NewConnSyncLocalClientCreator(app) case "kvstore_unsync": - return NewUnsyncLocalClientCreator(kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + app.SetNextBlockDelay(delay) + return NewUnsyncLocalClientCreator(app) case "persistent_kvstore": - return NewLocalClientCreator(kvstore.NewPersistentApplication(dbDir)) + app := kvstore.NewPersistentApplication(dbDir) + app.SetNextBlockDelay(delay) + return NewLocalClientCreator(app) case "persistent_kvstore_connsync": - return NewConnSyncLocalClientCreator(kvstore.NewPersistentApplication(dbDir)) + app := kvstore.NewPersistentApplication(dbDir) + app.SetNextBlockDelay(delay) + return NewConnSyncLocalClientCreator(app) case "persistent_kvstore_unsync": - return NewUnsyncLocalClientCreator(kvstore.NewPersistentApplication(dbDir)) + app := kvstore.NewPersistentApplication(dbDir) + app.SetNextBlockDelay(delay) + return NewUnsyncLocalClientCreator(app) case "e2e": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { diff --git a/proxy/errors.go b/proxy/errors.go new file mode 100644 index 00000000000..85e5da85b0d --- /dev/null +++ b/proxy/errors.go @@ -0,0 +1,43 @@ +package proxy + +import ( + "fmt" +) + +type ErrUnreachableProxy struct { + Err error +} + +func (e ErrUnreachableProxy) Error() string { + return fmt.Sprintf("failed to connect to proxy: %v", e.Err) +} + +func (e ErrUnreachableProxy) Unwrap() error { + return e.Err +} + +type ErrABCIClientCreate struct { + ClientName string + Err error +} + +func (e ErrABCIClientCreate) Error() string { + return fmt.Sprintf("error creating ABCI client (%s client): %v", e.ClientName, e.Err) +} + +func (e ErrABCIClientCreate) Unwrap() error { + return e.Err +} + +type ErrABCIClientStart struct { + CliType string + Err error +} + +func (e ErrABCIClientStart) Error() string { + return fmt.Sprintf("error starting ABCI client (%s client): %v", e.CliType, e.Err) +} + +func (e ErrABCIClientStart) Unwrap() error { + return e.Err +} diff --git a/proxy/metrics.gen.go b/proxy/metrics.gen.go index f4387ca9c37..5c6b81caf5d 100644 --- a/proxy/metrics.gen.go +++ b/proxy/metrics.gen.go @@ -3,8 +3,8 @@ package proxy import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) diff --git a/proxy/metrics.go b/proxy/metrics.go index 7858c3bc587..9a8f51bf481 100644 --- a/proxy/metrics.go +++ b/proxy/metrics.go @@ -1,7 +1,7 @@ package proxy import ( - "github.com/go-kit/kit/metrics" + "github.com/cometbft/cometbft/libs/metrics" ) const ( diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index 3ae1625e025..2ae515c9bab 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -7,7 +7,7 @@ import ( mock "github.com/stretchr/testify/mock" - types "github.com/cometbft/cometbft/abci/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // AppConnConsensus is an autogenerated mock type for the 
AppConnConsensus type @@ -15,25 +15,29 @@ type AppConnConsensus struct { mock.Mock } -// Commit provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, error) { - ret := _m.Called(_a0) +// Commit provides a mock function with given fields: ctx +func (_m *AppConnConsensus) Commit(ctx context.Context) (*v1.CommitResponse, error) { + ret := _m.Called(ctx) - var r0 *types.ResponseCommit + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 *v1.CommitResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*types.ResponseCommit, error)); ok { - return rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context) (*v1.CommitResponse, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context) *v1.CommitResponse); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCommit) + r0 = ret.Get(0).(*v1.CommitResponse) } } if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -45,6 +49,10 @@ func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, func (_m *AppConnConsensus) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -55,25 +63,29 @@ func (_m *AppConnConsensus) Error() error { return r0 } -// ExtendVote provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { - ret := _m.Called(_a0, _a1) +// ExtendVote provides a mock function with given fields: ctx, req +func (_m *AppConnConsensus) ExtendVote(ctx context.Context, req *v1.ExtendVoteRequest) (*v1.ExtendVoteResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseExtendVote + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } + + var r0 *v1.ExtendVoteResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ExtendVoteRequest) (*v1.ExtendVoteResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ExtendVoteRequest) *v1.ExtendVoteResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseExtendVote) + r0 = ret.Get(0).(*v1.ExtendVoteResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ExtendVoteRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -81,25 +93,29 @@ func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 *types.RequestEx return r0, r1 } -// FinalizeBlock provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { - ret := _m.Called(_a0, _a1) +// FinalizeBlock provides a mock function with given fields: ctx, req +func (_m *AppConnConsensus) FinalizeBlock(ctx 
context.Context, req *v1.FinalizeBlockRequest) (*v1.FinalizeBlockResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } - var r0 *types.ResponseFinalizeBlock + var r0 *v1.FinalizeBlockResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.FinalizeBlockRequest) (*v1.FinalizeBlockResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.FinalizeBlockRequest) *v1.FinalizeBlockResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseFinalizeBlock) + r0 = ret.Get(0).(*v1.FinalizeBlockResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.FinalizeBlockRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -107,25 +123,29 @@ func (_m *AppConnConsensus) FinalizeBlock(_a0 context.Context, _a1 *types.Reques return r0, r1 } -// InitChain provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { - ret := _m.Called(_a0, _a1) +// InitChain provides a mock function with given fields: ctx, req +func (_m *AppConnConsensus) InitChain(ctx context.Context, req *v1.InitChainRequest) (*v1.InitChainResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseInitChain + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + + var r0 *v1.InitChainResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InitChainRequest) (*v1.InitChainResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InitChainRequest) *v1.InitChainResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInitChain) + r0 = ret.Get(0).(*v1.InitChainResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.InitChainRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -133,25 +153,29 @@ func (_m *AppConnConsensus) InitChain(_a0 context.Context, _a1 *types.RequestIni return r0, r1 } -// PrepareProposal provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - ret := _m.Called(_a0, _a1) +// PrepareProposal provides a mock function with given fields: ctx, req +func (_m *AppConnConsensus) PrepareProposal(ctx context.Context, req *v1.PrepareProposalRequest) (*v1.PrepareProposalResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } - var r0 *types.ResponsePrepareProposal + var r0 
*v1.PrepareProposalResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.PrepareProposalRequest) (*v1.PrepareProposalResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.PrepareProposalRequest) *v1.PrepareProposalResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponsePrepareProposal) + r0 = ret.Get(0).(*v1.PrepareProposalResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.PrepareProposalRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -159,25 +183,29 @@ func (_m *AppConnConsensus) PrepareProposal(_a0 context.Context, _a1 *types.Requ return r0, r1 } -// ProcessProposal provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - ret := _m.Called(_a0, _a1) +// ProcessProposal provides a mock function with given fields: ctx, req +func (_m *AppConnConsensus) ProcessProposal(ctx context.Context, req *v1.ProcessProposalRequest) (*v1.ProcessProposalResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseProcessProposal + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + + var r0 *v1.ProcessProposalResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ProcessProposalRequest) (*v1.ProcessProposalResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ProcessProposalRequest) *v1.ProcessProposalResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseProcessProposal) + r0 = ret.Get(0).(*v1.ProcessProposalResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ProcessProposalRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -185,25 +213,29 @@ func (_m *AppConnConsensus) ProcessProposal(_a0 context.Context, _a1 *types.Requ return r0, r1 } -// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { - ret := _m.Called(_a0, _a1) +// VerifyVoteExtension provides a mock function with given fields: ctx, req +func (_m *AppConnConsensus) VerifyVoteExtension(ctx context.Context, req *v1.VerifyVoteExtensionRequest) (*v1.VerifyVoteExtensionResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } - var r0 *types.ResponseVerifyVoteExtension + var r0 *v1.VerifyVoteExtensionResponse var r1 error - if rf, ok 
:= ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.VerifyVoteExtensionRequest) (*v1.VerifyVoteExtensionResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.VerifyVoteExtensionRequest) *v1.VerifyVoteExtensionResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension) + r0 = ret.Get(0).(*v1.VerifyVoteExtensionResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.VerifyVoteExtensionRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index 92f1e40be4e..224cfd296bb 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -9,7 +9,7 @@ import ( mock "github.com/stretchr/testify/mock" - types "github.com/cometbft/cometbft/abci/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // AppConnMempool is an autogenerated mock type for the AppConnMempool type @@ -17,25 +17,29 @@ type AppConnMempool struct { mock.Mock } -// CheckTx provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { - ret := _m.Called(_a0, _a1) +// CheckTx provides a mock function with given fields: ctx, req +func (_m *AppConnMempool) CheckTx(ctx context.Context, req *v1.CheckTxRequest) (*v1.CheckTxResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseCheckTx + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + + var r0 *v1.CheckTxResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) (*v1.CheckTxResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) *v1.CheckTxResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCheckTx) + r0 = ret.Get(0).(*v1.CheckTxResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.CheckTxRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -43,25 +47,29 @@ func (_m *AppConnMempool) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx return r0, r1 } -// CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (*abcicli.ReqRes, error) { - ret := _m.Called(_a0, _a1) +// CheckTxAsync provides a mock function with given fields: ctx, req +func (_m *AppConnMempool) CheckTxAsync(ctx context.Context, req *v1.CheckTxRequest) (*abcicli.ReqRes, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for CheckTxAsync") + } var r0 
*abcicli.ReqRes var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*abcicli.ReqRes, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) (*abcicli.ReqRes, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *abcicli.ReqRes); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.CheckTxRequest) *abcicli.ReqRes); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.CheckTxRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -73,6 +81,10 @@ func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCh func (_m *AppConnMempool) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -83,13 +95,17 @@ func (_m *AppConnMempool) Error() error { return r0 } -// Flush provides a mock function with given fields: _a0 -func (_m *AppConnMempool) Flush(_a0 context.Context) error { - ret := _m.Called(_a0) +// Flush provides a mock function with given fields: ctx +func (_m *AppConnMempool) Flush(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Flush") + } var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(_a0) + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -97,9 +113,9 @@ func (_m *AppConnMempool) Flush(_a0 context.Context) error { return r0 } -// SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { - _m.Called(_a0) +// SetResponseCallback provides a mock function with given fields: cb +func (_m *AppConnMempool) SetResponseCallback(cb abcicli.Callback) { + _m.Called(cb) } // NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
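The recurring change across these regenerated mocks is the new "if len(ret) == 0 { panic(...) }" guard: a mocked method with no stubbed return values now panics loudly instead of silently returning zero values. A minimal sketch of what that implies for callers, assuming the usual testify On/Return stubbing; the test name, transaction bytes, and response code below are illustrative, not part of this diff:

package proxy_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1"
	"github.com/cometbft/cometbft/proxy/mocks"
)

func TestCheckTxStubRequired(t *testing.T) {
	// NewAppConnMempool registers a cleanup that asserts the expectations set below.
	m := mocks.NewAppConnMempool(t)

	// Without this stub, calling CheckTx on the regenerated mock would hit the
	// len(ret) == 0 guard and panic with "no return value specified for CheckTx".
	m.On("CheckTx", mock.Anything, mock.Anything).
		Return(&v1.CheckTxResponse{Code: 0}, nil)

	resp, err := m.CheckTx(context.Background(), &v1.CheckTxRequest{Tx: []byte("key=value")})
	require.NoError(t, err)
	require.Equal(t, uint32(0), resp.Code)
}

The design choice is fail-fast: an unstubbed call used to decay into nil returns that surfaced as confusing downstream failures, whereas the panic points directly at the missing expectation.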
diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index 4abe27e7172..fdc2a50920a 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -7,7 +7,7 @@ import ( mock "github.com/stretchr/testify/mock" - types "github.com/cometbft/cometbft/abci/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // AppConnQuery is an autogenerated mock type for the AppConnQuery type @@ -15,25 +15,29 @@ type AppConnQuery struct { mock.Mock } -// Echo provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { - ret := _m.Called(_a0, _a1) +// Echo provides a mock function with given fields: ctx, echo +func (_m *AppConnQuery) Echo(ctx context.Context, echo string) (*v1.EchoResponse, error) { + ret := _m.Called(ctx, echo) - var r0 *types.ResponseEcho + if len(ret) == 0 { + panic("no return value specified for Echo") + } + + var r0 *v1.EchoResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*types.ResponseEcho, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, string) (*v1.EchoResponse, error)); ok { + return rf(ctx, echo) } - if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, string) *v1.EchoResponse); ok { + r0 = rf(ctx, echo) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEcho) + r0 = ret.Get(0).(*v1.EchoResponse) } } if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(_a0, _a1) + r1 = rf(ctx, echo) } else { r1 = ret.Error(1) } @@ -45,6 +49,10 @@ func (_m *AppConnQuery) Echo(_a0 context.Context, _a1 string) (*types.ResponseEc func (_m *AppConnQuery) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -55,25 +63,29 @@ func (_m *AppConnQuery) Error() error { return r0 } -// Info provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { - ret := _m.Called(_a0, _a1) +// Info provides a mock function with given fields: ctx, req +func (_m *AppConnQuery) Info(ctx context.Context, req *v1.InfoRequest) (*v1.InfoResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseInfo + if len(ret) == 0 { + panic("no return value specified for Info") + } + + var r0 *v1.InfoResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InfoRequest) (*v1.InfoResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.InfoRequest) *v1.InfoResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInfo) + r0 = ret.Get(0).(*v1.InfoResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.InfoRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -81,25 +93,29 @@ func (_m *AppConnQuery) Info(_a0 context.Context, _a1 *types.RequestInfo) (*type return r0, r1 } -// Query provides a mock 
function with given fields: _a0, _a1 -func (_m *AppConnQuery) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { - ret := _m.Called(_a0, _a1) +// Query provides a mock function with given fields: ctx, req +func (_m *AppConnQuery) Query(ctx context.Context, req *v1.QueryRequest) (*v1.QueryResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for Query") + } - var r0 *types.ResponseQuery + var r0 *v1.QueryResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.QueryRequest) (*v1.QueryResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.QueryRequest) *v1.QueryResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseQuery) + r0 = ret.Get(0).(*v1.QueryResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.QueryRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index ab4450cfd82..75094e34b26 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -7,7 +7,7 @@ import ( mock "github.com/stretchr/testify/mock" - types "github.com/cometbft/cometbft/abci/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // AppConnSnapshot is an autogenerated mock type for the AppConnSnapshot type @@ -15,25 +15,29 @@ type AppConnSnapshot struct { mock.Mock } -// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - ret := _m.Called(_a0, _a1) +// ApplySnapshotChunk provides a mock function with given fields: ctx, req +func (_m *AppConnSnapshot) ApplySnapshotChunk(ctx context.Context, req *v1.ApplySnapshotChunkRequest) (*v1.ApplySnapshotChunkResponse, error) { + ret := _m.Called(ctx, req) - var r0 *types.ResponseApplySnapshotChunk + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + + var r0 *v1.ApplySnapshotChunkResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ApplySnapshotChunkRequest) (*v1.ApplySnapshotChunkResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ApplySnapshotChunkRequest) *v1.ApplySnapshotChunkResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + r0 = ret.Get(0).(*v1.ApplySnapshotChunkResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ApplySnapshotChunkRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -45,6 +49,10 @@ 
func (_m *AppConnSnapshot) ApplySnapshotChunk(_a0 context.Context, _a1 *types.Re func (_m *AppConnSnapshot) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -55,25 +63,29 @@ func (_m *AppConnSnapshot) Error() error { return r0 } -// ListSnapshots provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - ret := _m.Called(_a0, _a1) +// ListSnapshots provides a mock function with given fields: ctx, req +func (_m *AppConnSnapshot) ListSnapshots(ctx context.Context, req *v1.ListSnapshotsRequest) (*v1.ListSnapshotsResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } - var r0 *types.ResponseListSnapshots + var r0 *v1.ListSnapshotsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ListSnapshotsRequest) (*v1.ListSnapshotsResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.ListSnapshotsRequest) *v1.ListSnapshotsResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseListSnapshots) + r0 = ret.Get(0).(*v1.ListSnapshotsResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.ListSnapshotsRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -81,25 +93,29 @@ func (_m *AppConnSnapshot) ListSnapshots(_a0 context.Context, _a1 *types.Request return r0, r1 } -// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - ret := _m.Called(_a0, _a1) +// LoadSnapshotChunk provides a mock function with given fields: ctx, req +func (_m *AppConnSnapshot) LoadSnapshotChunk(ctx context.Context, req *v1.LoadSnapshotChunkRequest) (*v1.LoadSnapshotChunkResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } - var r0 *types.ResponseLoadSnapshotChunk + var r0 *v1.LoadSnapshotChunkResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.LoadSnapshotChunkRequest) (*v1.LoadSnapshotChunkResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.LoadSnapshotChunkRequest) *v1.LoadSnapshotChunkResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + r0 = ret.Get(0).(*v1.LoadSnapshotChunkResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok { - r1 = rf(_a0, _a1) + if 
rf, ok := ret.Get(1).(func(context.Context, *v1.LoadSnapshotChunkRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } @@ -107,25 +123,29 @@ func (_m *AppConnSnapshot) LoadSnapshotChunk(_a0 context.Context, _a1 *types.Req return r0, r1 } -// OfferSnapshot provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - ret := _m.Called(_a0, _a1) +// OfferSnapshot provides a mock function with given fields: ctx, req +func (_m *AppConnSnapshot) OfferSnapshot(ctx context.Context, req *v1.OfferSnapshotRequest) (*v1.OfferSnapshotResponse, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } - var r0 *types.ResponseOfferSnapshot + var r0 *v1.OfferSnapshotResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.OfferSnapshotRequest) (*v1.OfferSnapshotResponse, error)); ok { + return rf(ctx, req) } - if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, *v1.OfferSnapshotRequest) *v1.OfferSnapshotResponse); ok { + r0 = rf(ctx, req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + r0 = ret.Get(0).(*v1.OfferSnapshotResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, *v1.OfferSnapshotRequest) error); ok { + r1 = rf(ctx, req) } else { r1 = ret.Error(1) } diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go index 62ee977990c..bc9f7eac86e 100644 --- a/proxy/mocks/client_creator.go +++ b/proxy/mocks/client_creator.go @@ -16,6 +16,10 @@ type ClientCreator struct { func (_m *ClientCreator) NewABCIConsensusClient() (abcicli.Client, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewABCIConsensusClient") + } + var r0 abcicli.Client var r1 error if rf, ok := ret.Get(0).(func() (abcicli.Client, error)); ok { @@ -42,6 +46,10 @@ func (_m *ClientCreator) NewABCIConsensusClient() (abcicli.Client, error) { func (_m *ClientCreator) NewABCIMempoolClient() (abcicli.Client, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewABCIMempoolClient") + } + var r0 abcicli.Client var r1 error if rf, ok := ret.Get(0).(func() (abcicli.Client, error)); ok { @@ -68,6 +76,10 @@ func (_m *ClientCreator) NewABCIMempoolClient() (abcicli.Client, error) { func (_m *ClientCreator) NewABCIQueryClient() (abcicli.Client, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewABCIQueryClient") + } + var r0 abcicli.Client var r1 error if rf, ok := ret.Get(0).(func() (abcicli.Client, error)); ok { @@ -94,6 +106,10 @@ func (_m *ClientCreator) NewABCIQueryClient() (abcicli.Client, error) { func (_m *ClientCreator) NewABCISnapshotClient() (abcicli.Client, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewABCISnapshotClient") + } + var r0 abcicli.Client var r1 error if rf, ok := ret.Get(0).(func() (abcicli.Client, error)); ok { diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index f067ea2899d..15abcf6a17a 100644 --- 
a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -1,11 +1,9 @@ package proxy import ( - "fmt" - abcicli "github.com/cometbft/cometbft/abci/client" + cmtos "github.com/cometbft/cometbft/internal/os" cmtlog "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/libs/service" ) @@ -40,7 +38,7 @@ func NewAppConns(clientCreator ClientCreator, metrics *Metrics) AppConns { // // A multiAppConn is made of a few appConns and manages their underlying abci // clients. -// TODO: on app restart, clients must reboot together +// TODO: on app restart, clients must reboot together. type multiAppConn struct { service.BaseService @@ -110,7 +108,7 @@ func (app *multiAppConn) OnStart() error { func (app *multiAppConn) startQueryClient() error { c, err := app.clientCreator.NewABCIQueryClient() if err != nil { - return fmt.Errorf("error creating ABCI client (query client): %w", err) + return ErrABCIClientCreate{ClientName: "query", Err: err} } app.queryConnClient = c app.queryConn = NewAppConnQuery(c, app.metrics) @@ -120,7 +118,7 @@ func (app *multiAppConn) startQueryClient() error { func (app *multiAppConn) startSnapshotClient() error { c, err := app.clientCreator.NewABCISnapshotClient() if err != nil { - return fmt.Errorf("error creating ABCI client (snapshot client): %w", err) + return ErrABCIClientCreate{ClientName: "snapshot", Err: err} } app.snapshotConnClient = c app.snapshotConn = NewAppConnSnapshot(c, app.metrics) @@ -130,7 +128,7 @@ func (app *multiAppConn) startSnapshotClient() error { func (app *multiAppConn) startMempoolClient() error { c, err := app.clientCreator.NewABCIMempoolClient() if err != nil { - return fmt.Errorf("error creating ABCI client (mempool client): %w", err) + return ErrABCIClientCreate{ClientName: "mempool", Err: err} } app.mempoolConnClient = c app.mempoolConn = NewAppConnMempool(c, app.metrics) @@ -141,7 +139,7 @@ func (app *multiAppConn) startConsensusClient() error { c, err := app.clientCreator.NewABCIConsensusClient() if err != nil { app.stopAllClients() - return fmt.Errorf("error creating ABCI client (consensus client): %w", err) + return ErrABCIClientCreate{ClientName: "consensus", Err: err} } app.consensusConnClient = c app.consensusConn = NewAppConnConsensus(c, app.metrics) @@ -151,7 +149,7 @@ func (app *multiAppConn) startConsensusClient() error { func (app *multiAppConn) startClient(c abcicli.Client, conn string) error { c.SetLogger(app.Logger.With("module", "abci-client", "connection", conn)) if err := c.Start(); err != nil { - return fmt.Errorf("error starting ABCI client (%s client): %w", conn, err) + return ErrABCIClientStart{CliType: conn, Err: err} } return nil } @@ -163,7 +161,7 @@ func (app *multiAppConn) OnStop() { func (app *multiAppConn) killTMOnClientError() { killFn := func(conn string, err error, logger cmtlog.Logger) { logger.Error( - fmt.Sprintf("%s connection terminated. Did the application crash? Please restart CometBFT", conn), + conn+" connection terminated. Did the application crash? Please restart CometBFT", "err", err) killErr := cmtos.Kill() if killErr != nil { diff --git a/proxy/multi_app_conn_test.go b/proxy/multi_app_conn_test.go index 6acf6f26fa4..a3c5a1cd055 100644 --- a/proxy/multi_app_conn_test.go +++ b/proxy/multi_app_conn_test.go @@ -44,7 +44,7 @@ func TestAppConns_Start_Stop(t *testing.T) { clientMock.AssertExpectations(t) } -// Upon failure, we call cmtos.Kill +// Upon failure, we call cmtos.Kill. 
func TestAppConns_Failure(t *testing.T) { ok := make(chan struct{}) c := make(chan os.Signal, 1) diff --git a/proxy/version.go b/proxy/version.go index 134dfc4aea0..4681bfe1125 100644 --- a/proxy/version.go +++ b/proxy/version.go @@ -5,11 +5,11 @@ import ( "github.com/cometbft/cometbft/version" ) -// RequestInfo contains all the information for sending -// the abci.RequestInfo message during handshake with the app. +// InfoRequest contains all the information for sending +// the abci.InfoRequest message during handshake with the app. // It contains only compile-time version information. -var RequestInfo = &abci.RequestInfo{ - Version: version.TMCoreSemVer, +var InfoRequest = &abci.InfoRequest{ + Version: version.CMTSemVer, BlockVersion: version.BlockProtocol, P2PVersion: version.P2PProtocol, AbciVersion: version.ABCIVersion, diff --git a/rpc/client/errors.go b/rpc/client/errors.go new file mode 100644 index 00000000000..987a8a11970 --- /dev/null +++ b/rpc/client/errors.go @@ -0,0 +1,29 @@ +package client + +import ( + "errors" + "fmt" +) + +var ErrEventTimeout = errors.New("event timeout") + +type ErrWaitThreshold struct { + Got int64 + Expected int64 +} + +func (e ErrWaitThreshold) Error() string { + return fmt.Sprintf("waiting for %d blocks exceeded the threshold %d", e.Got, e.Expected) +} + +type ErrSubscribe struct { + Source error +} + +func (e ErrSubscribe) Error() string { + return fmt.Sprintf("failed to subscribe: %v", e.Source) +} + +func (e ErrSubscribe) Unwrap() error { + return e.Source +} diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index ca38b8e0e46..63e1468145c 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -2,7 +2,6 @@ package client_test import ( "context" - "fmt" "reflect" "testing" "time" @@ -11,7 +10,7 @@ import ( "github.com/stretchr/testify/require" abci "github.com/cometbft/cometbft/abci/types" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/rpc/client" ctypes "github.com/cometbft/cometbft/rpc/core/types" "github.com/cometbft/cometbft/types" @@ -19,7 +18,7 @@ import ( var waitForEventTimeout = 8 * time.Second -// MakeTxKV returns a text transaction, allong with expected key, value pair +// MakeTxKV returns a text transaction, along with expected key, value pair. func MakeTxKV() ([]byte, []byte, []byte) { k := []byte(cmtrand.Str(8)) v := []byte(cmtrand.Str(8)) @@ -28,13 +27,12 @@ func TestHeaderEvents(t *testing.T) { for i, c := range GetClients() { - i, c := i, c t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) t.Cleanup(func() { if err := c.Stop(); err != nil { t.Error(err) @@ -44,7 +42,7 @@ func TestHeaderEvents(t *testing.T) { evtTyp := types.EventNewBlockHeader evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) _, ok := evt.(types.EventDataNewBlockHeader) require.True(t, ok, "%d: %#v", i, evt) // TODO: more checks... @@ -52,17 +50,15 @@ } } -// subscribe to new blocks and make sure height increments by 1 +// subscribe to new blocks and make sure height increments by 1.
func TestBlockEvents(t *testing.T) { for _, c := range GetClients() { - c := c t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. err := c.Start() - require.Nil(t, err) + require.NoError(t, err) t.Cleanup(func() { if err := c.Stop(); err != nil { t.Error(err) @@ -102,15 +98,15 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "a func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } func testTxEventsSent(t *testing.T, broadcastMethod string) { + t.Helper() for _, c := range GetClients() { - c := c + c := c //nolint:copyloopvar t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. err := c.Start() - require.Nil(t, err) + require.NoError(t, err) t.Cleanup(func() { if err := c.Stop(); err != nil { t.Error(err) @@ -134,16 +130,16 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) { case "sync": txres, err = c.BroadcastTxSync(ctx, tx) default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) + panic("Unknown broadcastMethod " + broadcastMethod) } - if assert.NoError(t, err) { - assert.Equal(t, txres.Code, abci.CodeTypeOK) + if assert.NoError(t, err) { //nolint:testifylint // require.NoError doesn't work with the conditional here + require.Equal(t, abci.CodeTypeOK, txres.Code) } }() // and wait for confirmation evt, err := client.WaitForOneEvent(c, types.EventTx, waitForEventTimeout) - require.Nil(t, err) + require.NoError(t, err) // and make sure it has the proper info txe, ok := evt.(types.EventDataTx) @@ -162,14 +158,14 @@ func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { // on Subscribe _, err := c.Subscribe(context.Background(), "TestHeaderEvents", types.QueryForEvent(types.EventNewBlockHeader).String()) - assert.Error(t, err) + require.Error(t, err) // on Unsubscribe err = c.Unsubscribe(context.Background(), "TestHeaderEvents", types.QueryForEvent(types.EventNewBlockHeader).String()) - assert.Error(t, err) + require.Error(t, err) // on UnsubscribeAll err = c.UnsubscribeAll(context.Background(), "TestHeaderEvents") - assert.Error(t, err) + require.Error(t, err) } diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 432f0868844..ab7c025c130 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -11,27 +11,21 @@ import ( abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto/ed25519" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/crypto/tmhash" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" - cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/rpc/client" rpctest "github.com/cometbft/cometbft/rpc/test" "github.com/cometbft/cometbft/types" ) -// For some reason the empty node used in tests has a time of -// 2018-10-10 08:20:13.695936996 +0000 UTC -// this is because the test genesis time is set here -// so in order to validate evidence we need evidence to be the same time -var defaultTestTime = time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC) - func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, -
chainID string) *types.DuplicateVoteEvidence { - + chainID string, + timestamp time.Time, +) *types.DuplicateVoteEvidence { + t.Helper() var err error v := vote.ToProto() @@ -46,7 +40,7 @@ func newEvidence(t *testing.T, val *privval.FilePV, validator := types.NewValidator(val.Key.PubKey, 10) valSet := types.NewValidatorSet([]*types.Validator{validator}) - ev, err := types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) + ev, err := types.NewDuplicateVoteEvidence(vote, vote2, timestamp, valSet) require.NoError(t, err) return ev } @@ -55,14 +49,16 @@ func makeEvidences( t *testing.T, val *privval.FilePV, chainID string, + timestamp time.Time, ) (correct *types.DuplicateVoteEvidence, fakes []*types.DuplicateVoteEvidence) { + t.Helper() vote := types.Vote{ ValidatorAddress: val.Key.Address, ValidatorIndex: 0, Height: 1, Round: 0, - Type: cmtproto.PrevoteType, - Timestamp: defaultTestTime, + Type: types.PrevoteType, + Timestamp: timestamp, BlockID: types.BlockID{ Hash: tmhash.Sum(cmtrand.Bytes(tmhash.Size)), PartSetHeader: types.PartSetHeader{ @@ -74,7 +70,7 @@ func makeEvidences( vote2 := vote vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2")) - correct = newEvidence(t, val, &vote, &vote2, chainID) + correct = newEvidence(t, val, &vote, &vote2, chainID, timestamp) fakes = make([]*types.DuplicateVoteEvidence, 0) @@ -82,48 +78,53 @@ func makeEvidences( { v := vote2 v.ValidatorAddress = []byte("some_address") - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // different height { v := vote2 v.Height = vote.Height + 1 - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // different round { v := vote2 v.Round = vote.Round + 1 - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // different type { v := vote2 - v.Type = cmtproto.PrecommitType - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + v.Type = types.PrecommitType + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // exactly same vote { v := vote - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } return correct, fakes } func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { - var ( - config = rpctest.GetConfig() - chainID = test.DefaultTestChainID - pv = privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - ) + config := rpctest.GetConfig() + chainID := test.DefaultTestChainID + pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), nil) + require.NoError(t, err) for i, c := range GetClients() { - correct, fakes := makeEvidences(t, pv, chainID) + evidenceHeight := int64(1) + err := client.WaitForHeight(c, evidenceHeight, nil) + require.NoError(t, err) + block, err := c.Block(ctx, &evidenceHeight) + require.NoError(t, err) + ts := block.Block.Time + correct, fakes := makeEvidences(t, pv, chainID, ts) t.Logf("client %d", i) result, err := c.BroadcastEvidence(context.Background(), correct) @@ -146,10 +147,8 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) require.NoError(t, err, "Error reading query result, value %v", qres.Value) - pk, err := 
cryptoenc.PubKeyFromProto(v.PubKey) - require.NoError(t, err) - - require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) + require.EqualValues(t, rawpub, v.PubKeyBytes, "Stored PubKey not equal with expected, value %v", string(qres.Value)) + require.EqualValues(t, ed25519.KeyType, v.PubKeyType, "Stored PubKeyType not equal with expected, value %v", string(qres.Value)) require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) for _, fake := range fakes { @@ -162,6 +161,6 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { func TestBroadcastEmptyEvidence(t *testing.T) { for _, c := range GetClients() { _, err := c.BroadcastEvidence(context.Background(), nil) - assert.Error(t, err) + require.Error(t, err) } } diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 8f2f0628807..3d87d22866f 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -5,10 +5,12 @@ import ( "context" "fmt" "log" + "strings" "github.com/cometbft/cometbft/abci/example/kvstore" rpchttp "github.com/cometbft/cometbft/rpc/client/http" ctypes "github.com/cometbft/cometbft/rpc/core/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" rpctest "github.com/cometbft/cometbft/rpc/test" ) @@ -22,7 +24,7 @@ func ExampleHTTP_simple() { rpcAddr := rpctest.GetConfig().RPC.ListenAddress c, err := rpchttp.New(rpcAddr) if err != nil { - log.Fatal(err) //nolint:gocritic + log.Fatal(err) } // Create a transaction @@ -98,7 +100,7 @@ func ExampleHTTP_batching() { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil { - log.Fatal(err) //nolint:gocritic + log.Fatal(err) } } @@ -135,3 +137,55 @@ func ExampleHTTP_batching() { // firstName = satoshi // lastName = nakamoto } + +// Test the maximum batch request size middleware. 
+func ExampleHTTP_maxBatchSize() { + // Start a CometBFT node (and kvstore) in the background to test against + app := kvstore.NewInMemoryApplication() + node := rpctest.StartCometBFT(app, rpctest.RecreateConfig, rpctest.SuppressStdout, rpctest.MaxReqBatchSize) + + // Change the max_request_batch_size + node.Config().RPC.MaxRequestBatchSize = 2 + + // Create our RPC client + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + c, err := rpchttp.New(rpcAddr) + if err != nil { + log.Fatal(err) + } + + defer rpctest.StopCometBFT(node) + + // Create a new batch + batch := c.NewBatch() + + for i := 1; i <= 5; i++ { + if _, err := batch.Health(context.Background()); err != nil { + log.Fatal(err) + } + } + + // Send the requests + results, err := batch.Send(context.Background()) + if err != nil { + log.Fatal(err) + } + + // Each result in the returned list is the deserialized result of each + // respective health response + for _, result := range results { + rpcError, ok := result.(*types.RPCError) + if !ok { + log.Fatal("invalid result type") + } + if !strings.Contains(rpcError.Data, "batch request exceeds maximum") { + fmt.Println("Error message does not contain 'Max Request Batch Exceeded'") + } else { + // The max request batch size rpcError has been returned + fmt.Println("Max Request Batch Exceeded") + } + } + + // Output: + // Max Request Batch Exceeded +} diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index fe34d0480b1..9b115a10c00 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -2,21 +2,21 @@ package client import ( "context" - "errors" - "fmt" "time" "github.com/cometbft/cometbft/types" ) -// Waiter is informed of current height, decided whether to quit early +const WaitThreshold = 10 + +// Waiter is informed of current height, and decides whether to quit early. type Waiter func(delta int64) (abort error) // DefaultWaitStrategy is the standard backoff algorithm, -// but you can plug in another one +// but you can plug in another one. func DefaultWaitStrategy(delta int64) (abort error) { - if delta > 10 { - return fmt.Errorf("waiting for %d blocks... aborting", delta) + if delta > WaitThreshold { + return ErrWaitThreshold{Got: delta, Expected: WaitThreshold} } else if delta > 0 { // estimate of wait time.... // wait half a second for the next block (in progress) @@ -31,7 +31,7 @@ // the block at the given height is available. // // If waiter is nil, we use DefaultWaitStrategy, but you can also -// provide your own implementation +// provide your own implementation. func WaitForHeight(c StatusClient, h int64, waiter Waiter) error { if waiter == nil { waiter = DefaultWaitStrategy @@ -42,6 +42,9 @@ if err != nil { return err } + // delta might be negative (if h is less than LatestBlockHeight), + // but this should not cause an error when calling the waiter with + // a negative value. delta = h - s.SyncInfo.LatestBlockHeight // wait for the time, or abort early if err := waiter(delta); err != nil { @@ -56,7 +59,7 @@ // event time and returns upon receiving it one time, or // when the timeout duration has expired. // -// This handles subscribing and unsubscribing under the hood +// This handles subscribing and unsubscribing under the hood.
func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (types.TMEventData, error) { const subscriber = "helpers" ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -65,7 +68,7 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type // register for the next event of this type eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(evtTyp).String()) if err != nil { - return nil, fmt.Errorf("failed to subscribe: %w", err) + return nil, ErrSubscribe{Source: err} } // make sure to unregister after the test is over defer func() { @@ -78,6 +81,6 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type case event := <-eventCh: return event.Data, nil case <-ctx.Done(): - return nil, errors.New("timed out waiting for event") + return nil, ErrEventTimeout } } diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 65f55a4c32a..11c7910450d 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -2,7 +2,6 @@ package client_test import ( "errors" - "strings" "testing" "github.com/stretchr/testify/assert" @@ -26,10 +25,10 @@ func TestWaitForHeight(t *testing.T) { // connection failure always leads to error err := client.WaitForHeight(r, 8, nil) - require.NotNil(err) + require.Error(err) require.Equal("bye", err.Error()) // we called status once to check - require.Equal(1, len(r.Calls)) + require.Len(r.Calls, 1) // now set current block height to 10 m.Call = mock.Call{ @@ -38,16 +37,17 @@ func TestWaitForHeight(t *testing.T) { // we will not wait for more than 10 blocks err = client.WaitForHeight(r, 40, nil) - require.NotNil(err) - require.True(strings.Contains(err.Error(), "aborting")) + require.Error(err) + require.ErrorAs(err, &client.ErrWaitThreshold{}) + // we called status once more to check - require.Equal(2, len(r.Calls)) + require.Len(r.Calls, 2) // waiting for the past returns immediately err = client.WaitForHeight(r, 5, nil) - require.Nil(err) + require.NoError(err) // we called status once more to check - require.Equal(3, len(r.Calls)) + require.Len(r.Calls, 3) // since we can't update in a background goroutine (test --race) // we use the callback to update the status height @@ -59,18 +59,18 @@ func TestWaitForHeight(t *testing.T) { // we wait for a few blocks err = client.WaitForHeight(r, 12, myWaiter) - require.Nil(err) + require.NoError(err) // we called status once to check - require.Equal(5, len(r.Calls)) + require.Len(r.Calls, 5) pre := r.Calls[3] - require.Nil(pre.Error) + require.NoError(pre.Error) prer, ok := pre.Response.(*ctypes.ResultStatus) require.True(ok) assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) post := r.Calls[4] - require.Nil(post.Error) + require.NoError(post.Error) postr, ok := post.Response.(*ctypes.ResultStatus) require.True(ok) assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index e43b493bbe5..b60d4ebdec1 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -104,7 +104,7 @@ var ( _ rpcClient = (*baseRPCClient)(nil) ) -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // HTTP // New takes a remote endpoint in the form ://:. An error @@ -117,7 +117,7 @@ func New(remote string) (*HTTP, error) { return NewWithClient(remote, httpClient) } -// Create timeout enabled http client +// Create timeout enabled http client. 
func NewWithTimeout(remote string, timeout uint) (*HTTP, error) { httpClient, err := jsonrpcclient.DefaultHTTPClient(remote) if err != nil { @@ -177,14 +177,14 @@ func (c *HTTP) NewBatch() *BatchHTTP { } } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // BatchHTTP // Send is a convenience function for an HTTP batch that will trigger the // compilation of the batched requests and send them off using the client as a // single request. On success, this returns a list of the deserialized results // from each request in the sent batch. -func (b *BatchHTTP) Send(ctx context.Context) ([]interface{}, error) { +func (b *BatchHTTP) Send(ctx context.Context) ([]any, error) { return b.rpcBatch.Send(ctx) } @@ -199,12 +199,12 @@ func (b *BatchHTTP) Count() int { return b.rpcBatch.Count() } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // baseRPCClient func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) - _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "status", map[string]any{}, result) if err != nil { return nil, err } @@ -214,7 +214,7 @@ func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { result := new(ctypes.ResultABCIInfo) - _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "abci_info", map[string]any{}, result) if err != nil { return nil, err } @@ -238,7 +238,7 @@ func (c *baseRPCClient) ABCIQueryWithOptions( ) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.caller.Call(ctx, "abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, + map[string]any{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { return nil, err @@ -252,7 +252,7 @@ func (c *baseRPCClient) BroadcastTxCommit( tx types.Tx, ) (*ctypes.ResultBroadcastTxCommit, error) { result := new(ctypes.ResultBroadcastTxCommit) - _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]any{"tx": tx}, result) if err != nil { return nil, err } @@ -279,7 +279,20 @@ func (c *baseRPCClient) broadcastTX( tx types.Tx, ) (*ctypes.ResultBroadcastTx, error) { result := new(ctypes.ResultBroadcastTx) - _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(ctx, route, map[string]any{"tx": tx}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) UnconfirmedTx( + ctx context.Context, + hash []byte, +) (*ctypes.ResultUnconfirmedTx, error) { + result := new(ctypes.ResultUnconfirmedTx) + params := map[string]any{"hash": hash} + _, err := c.caller.Call(ctx, "unconfirmed_tx", params, result) if err != nil { return nil, err } @@ -291,7 +304,7 @@ func (c *baseRPCClient) UnconfirmedTxs( limit *int, ) (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - params := make(map[string]interface{}) + params := make(map[string]any) if limit != nil { params["limit"] = limit } @@ -304,7 +317,7 @@ 
func (c *baseRPCClient) UnconfirmedTxs( func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]any{}, result) if err != nil { return nil, err } @@ -313,7 +326,7 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUn func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { result := new(ctypes.ResultCheckTx) - _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(ctx, "check_tx", map[string]any{"tx": tx}, result) if err != nil { return nil, err } @@ -322,7 +335,7 @@ func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.Resul func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { result := new(ctypes.ResultNetInfo) - _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "net_info", map[string]any{}, result) if err != nil { return nil, err } @@ -331,7 +344,7 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, err func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { result := new(ctypes.ResultDumpConsensusState) - _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]any{}, result) if err != nil { return nil, err } @@ -340,7 +353,7 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultD func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { result := new(ctypes.ResultConsensusState) - _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "consensus_state", map[string]any{}, result) if err != nil { return nil, err } @@ -352,7 +365,7 @@ func (c *baseRPCClient) ConsensusParams( height *int64, ) (*ctypes.ResultConsensusParams, error) { result := new(ctypes.ResultConsensusParams) - params := make(map[string]interface{}) + params := make(map[string]any) if height != nil { params["height"] = height } @@ -365,7 +378,7 @@ func (c *baseRPCClient) ConsensusParams( func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) { result := new(ctypes.ResultHealth) - _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "health", map[string]any{}, result) if err != nil { return nil, err } @@ -379,7 +392,7 @@ func (c *baseRPCClient) BlockchainInfo( ) (*ctypes.ResultBlockchainInfo, error) { result := new(ctypes.ResultBlockchainInfo) _, err := c.caller.Call(ctx, "blockchain", - map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, + map[string]any{"minHeight": minHeight, "maxHeight": maxHeight}, result) if err != nil { return nil, err @@ -389,7 +402,7 @@ func (c *baseRPCClient) BlockchainInfo( func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { result := new(ctypes.ResultGenesis) - _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "genesis", map[string]any{}, result) if err != nil { return nil, err } @@ -398,7 +411,7 @@ func (c *baseRPCClient) Genesis(ctx 
context.Context) (*ctypes.ResultGenesis, err func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { result := new(ctypes.ResultGenesisChunk) - _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) + _, err := c.caller.Call(ctx, "genesis_chunked", map[string]any{"chunk": id}, result) if err != nil { return nil, err } @@ -407,7 +420,7 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.Re func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) - params := make(map[string]interface{}) + params := make(map[string]any) if height != nil { params["height"] = height } @@ -420,7 +433,7 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul func (c *baseRPCClient) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) - params := map[string]interface{}{ + params := map[string]any{ "hash": hash, } _, err := c.caller.Call(ctx, "block_by_hash", params, result) @@ -435,7 +448,7 @@ func (c *baseRPCClient) BlockResults( height *int64, ) (*ctypes.ResultBlockResults, error) { result := new(ctypes.ResultBlockResults) - params := make(map[string]interface{}) + params := make(map[string]any) if height != nil { params["height"] = height } @@ -448,7 +461,7 @@ func (c *baseRPCClient) BlockResults( func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { result := new(ctypes.ResultHeader) - params := make(map[string]interface{}) + params := make(map[string]any) if height != nil { params["height"] = height } @@ -461,7 +474,7 @@ func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*ctypes.Resu func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { result := new(ctypes.ResultHeader) - params := map[string]interface{}{ + params := map[string]any{ "hash": hash, } _, err := c.caller.Call(ctx, "header_by_hash", params, result) @@ -473,7 +486,7 @@ func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) ( func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { result := new(ctypes.ResultCommit) - params := make(map[string]interface{}) + params := make(map[string]any) if height != nil { params["height"] = height } @@ -486,7 +499,7 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { result := new(ctypes.ResultTx) - params := map[string]interface{}{ + params := map[string]any{ "hash": hash, "prove": prove, } @@ -506,7 +519,7 @@ func (c *baseRPCClient) TxSearch( orderBy string, ) (*ctypes.ResultTxSearch, error) { result := new(ctypes.ResultTxSearch) - params := map[string]interface{}{ + params := map[string]any{ "query": query, "prove": prove, "order_by": orderBy, @@ -534,7 +547,7 @@ func (c *baseRPCClient) BlockSearch( orderBy string, ) (*ctypes.ResultBlockSearch, error) { result := new(ctypes.ResultBlockSearch) - params := map[string]interface{}{ + params := map[string]any{ "query": query, "order_by": orderBy, } @@ -561,7 +574,7 @@ func (c *baseRPCClient) Validators( perPage *int, ) (*ctypes.ResultValidators, error) { result := new(ctypes.ResultValidators) - params := make(map[string]interface{}) + params := make(map[string]any) if page != nil 
{ params["page"] = page } @@ -583,14 +596,14 @@ func (c *baseRPCClient) BroadcastEvidence( ev types.Evidence, ) (*ctypes.ResultBroadcastEvidence, error) { result := new(ctypes.ResultBroadcastEvidence) - _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) + _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]any{"evidence": ev}, result) if err != nil { return nil, err } return result, nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // WSEvents var errNotRunning = errors.New("client is not running. Use .Start() method to start") diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 285ac74e533..b06d8b5dc80 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -50,15 +50,15 @@ type Client interface { // is easier to mock. type ABCIClient interface { // Reading from abci app - ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) + ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) - BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) - BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) } // SignClient groups together the functionality needed to get valid signatures @@ -95,28 +95,28 @@ type SignClient interface { // HistoryClient provides access to data from genesis to now in large chunks. type HistoryClient interface { - Genesis(context.Context) (*ctypes.ResultGenesis, error) - GenesisChunked(context.Context, uint) (*ctypes.ResultGenesisChunk, error) + Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) + GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) } // StatusClient provides access to general chain info. type StatusClient interface { - Status(context.Context) (*ctypes.ResultStatus, error) + Status(ctx context.Context) (*ctypes.ResultStatus, error) } // NetworkClient is general info about the network state. May not be needed // usually. 
type NetworkClient interface { - NetInfo(context.Context) (*ctypes.ResultNetInfo, error) - DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error) - ConsensusState(context.Context) (*ctypes.ResultConsensusState, error) + NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) + DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) + ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) - Health(context.Context) (*ctypes.ResultHealth, error) + Health(ctx context.Context) (*ctypes.ResultHealth, error) } // EventsClient is reactive, you can subscribe to any message, given the proper -// string. see cometbft/types/events.go +// string. see cometbft/types/events.go. type EventsClient interface { // Subscribe subscribes given subscriber to query. Returns a channel with // cap=1 onto which events are published. An error is returned if it fails to @@ -134,15 +134,16 @@ type EventsClient interface { // MempoolClient shows us data about current mempool state. type MempoolClient interface { + UnconfirmedTx(ctx context.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) - NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) - CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error) + NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) + CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) } // EvidenceClient is used for submitting an evidence of the malicious // behavior. type EvidenceClient interface { - BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error) + BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) } // RemoteClient is a Client, which can also return the remote network address. diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 783711499fc..eaef9c126e9 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -60,6 +60,18 @@ func New(node *nm.Node) *Local { var _ rpcclient.Client = (*Local)(nil) +type ErrParseQuery struct { + Source error +} + +func (e ErrParseQuery) Error() string { + return fmt.Sprintf("failed to parse query: %v", e.Source) +} + +func (e ErrParseQuery) Unwrap() error { + return e.Source +} + // SetLogger allows to set a logger on the client. 
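The typed ErrParseQuery defined above replaces a bare fmt.Errorf wrap, so callers can branch on the failure class with the standard errors helpers instead of matching strings. A minimal caller-side sketch, not part of the diff; the client construction is elided, and the subscriber name and malformed query are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"

	rpclocal "github.com/cometbft/cometbft/rpc/client/local"
)

func subscribeOrExplain(c *rpclocal.Local) {
	// A syntactically invalid query surfaces as ErrParseQuery.
	_, err := c.Subscribe(context.Background(), "example-subscriber", "tm.event =")

	var parseErr rpclocal.ErrParseQuery
	if errors.As(err, &parseErr) {
		// The Source field (also reachable via errors.Unwrap) carries the
		// original error from cmtquery.New.
		fmt.Println("invalid query:", parseErr.Source)
	}
}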
func (c *Local) SetLogger(l log.Logger) { c.Logger = l @@ -98,6 +110,10 @@ func (c *Local) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultB return c.env.BroadcastTxSync(c.ctx, tx) } +func (c *Local) UnconfirmedTx(_ context.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return c.env.UnconfirmedTx(c.ctx, hash) +} + func (c *Local) UnconfirmedTxs(_ context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return c.env.UnconfirmedTxs(c.ctx, limit) } @@ -220,7 +236,7 @@ func (c *Local) Subscribe( ) (out <-chan ctypes.ResultEvent, err error) { q, err := cmtquery.New(query) if err != nil { - return nil, fmt.Errorf("failed to parse query: %w", err) + return nil, ErrParseQuery{Source: err} } outCap := 1 @@ -235,7 +251,7 @@ func (c *Local) Subscribe( sub, err = c.EventBus.SubscribeUnbuffered(ctx, subscriber, q) } if err != nil { - return nil, fmt.Errorf("failed to subscribe: %w", err) + return nil, rpcclient.ErrSubscribe{Source: err} } outc := make(chan ctypes.ResultEvent, outCap) @@ -300,7 +316,7 @@ func (c *Local) resubscribe(subscriber string, q cmtpubsub.Query) types.Subscrip func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error { q, err := cmtquery.New(query) if err != nil { - return fmt.Errorf("failed to parse query: %w", err) + return ErrParseQuery{Source: err} } return c.EventBus.Unsubscribe(ctx, subscriber, q) } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 0607954251f..cb3064446b0 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -53,18 +53,18 @@ var _ client.Client = Client{} // It can also be used to configure mock responses. type Call struct { Name string - Args interface{} - Response interface{} + Args any + Response any Error error } -// GetResponse will generate the apporiate response for us, when +// GetResponse will generate the appropriate response for us, when // using the Call struct to configure a Mock handler. // // When configuring a response, if only one of Response or Error is // set then that will always be returned. If both are set, then // we return Response if the Args match the set args, Error otherwise. -func (c Call) GetResponse(args interface{}) (interface{}, error) { +func (c Call) GetResponse(args any) (any, error) { // handle the case with no response if c.Response == nil { if c.Error == nil { diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index 69b60674778..9d57e54ed8a 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -7,7 +7,7 @@ import ( ctypes "github.com/cometbft/cometbft/rpc/core/types" ) -// StatusMock returns the result specified by the Call +// StatusMock returns the result specified by the Call. type StatusMock struct { Call } @@ -26,7 +26,7 @@ func (m *StatusMock) Status(context.Context) (*ctypes.ResultStatus, error) { } // StatusRecorder can wrap another type (StatusMock, full client) -// and record the status calls +// and record the status calls. 
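Given the Call semantics documented above (only one of Response or Error set: that one is always returned; both set: Response on an Args match, Error otherwise), wiring up a canned response looks roughly as follows. A hedged sketch: the values are placeholders, and the args comparison is whatever GetResponse implements.

package main

import (
	"errors"
	"fmt"

	"github.com/cometbft/cometbft/rpc/client/mock"
	ctypes "github.com/cometbft/cometbft/rpc/core/types"
)

func main() {
	call := mock.Call{
		Name:     "status",
		Args:     "expected-args",
		Response: &ctypes.ResultStatus{},
		Error:    errors.New("unexpected args"),
	}

	// Args equal the configured Args, so the Response is returned.
	res, err := call.GetResponse("expected-args")
	fmt.Println(res, err)

	// Args differ, so the configured Error is returned instead.
	_, err = call.GetResponse("other-args")
	fmt.Println(err)
}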
type StatusRecorder struct { Client client.StatusClient Calls []Call diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 26bb33caa80..5bc00177eef 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -23,24 +23,25 @@ func TestStatus(t *testing.T) { LatestAppHash: bytes.HexBytes("app"), LatestBlockHeight: 10, }, - }}, + }, + }, } r := mock.NewStatusRecorder(m) - require.Equal(0, len(r.Calls)) + require.Empty(r.Calls) // make sure response works proper status, err := r.Status(context.Background()) - require.Nil(err, "%+v", err) + require.NoError(err, "%+v", err) assert.EqualValues("block", status.SyncInfo.LatestBlockHash) assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) // make sure recorder works properly - require.Equal(1, len(r.Calls)) + require.Len(r.Calls, 1) rs := r.Calls[0] assert.Equal("status", rs.Name) assert.Nil(rs.Args) - assert.Nil(rs.Error) + require.NoError(rs.Error) require.NotNil(rs.Response) st, ok := rs.Response.(*ctypes.ResultStatus) require.True(ok) diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index aa612af26cb..595305d32d6 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -723,11 +723,11 @@ func (_m *Client) String() string { // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { - _va := make([]interface{}, len(outCapacity)) + _va := make([]any, len(outCapacity)) for _i := range outCapacity { _va[_i] = outCapacity[_i] } - var _ca []interface{} + var _ca []any _ca = append(_ca, ctx, subscriber, query) _ca = append(_ca, _va...) ret := _m.Called(_ca...) @@ -797,6 +797,29 @@ func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page * return r0, r1 } +// UnconfirmedTx provides a mock function with given fields: ctx, hash +func (_m *Client) UnconfirmedTx(ctx context.Context, hash []byte) (*coretypes.ResultUnconfirmedTx, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultUnconfirmedTx + if rf, ok := ret.Get(0).(func(context.Context, []byte) *coretypes.ResultUnconfirmedTx); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // UnconfirmedTxs provides a mock function with given fields: ctx, limit func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(ctx, limit) diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 9c35f546880..49cdd64fe91 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -53,7 +53,7 @@ func getLocalClient() *rpclocal.Local { return rpclocal.New(node) } -// GetClients returns a slice of clients for table-driven tests +// GetClients returns a slice of clients for table-driven tests. 
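The generated mock above follows the usual mockery layout (_m.Called plus typed getters), so the new UnconfirmedTx can be stubbed the same way as the existing methods. A sketch that assumes mocks.Client embeds testify's mock.Mock, which the _m.Called calls imply; the hash and result values are placeholders:

package client_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/cometbft/cometbft/rpc/client/mocks"
	coretypes "github.com/cometbft/cometbft/rpc/core/types"
)

func TestUnconfirmedTxMock(t *testing.T) {
	m := new(mocks.Client)
	txHash := []byte{0xde, 0xad, 0xbe, 0xef}
	want := &coretypes.ResultUnconfirmedTx{}

	// Accept any context; require the exact hash.
	m.On("UnconfirmedTx", mock.Anything, txHash).Return(want, nil)

	got, err := m.UnconfirmedTx(context.Background(), txHash)
	require.NoError(t, err)
	require.Same(t, want, got)
	m.AssertExpectations(t)
}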
func GetClients() []client.Client { return []client.Client{ getHTTPClient(), @@ -73,7 +73,7 @@ func TestNilCustomHTTPClient(t *testing.T) { func TestCustomHTTPClient(t *testing.T) { remote := rpctest.GetConfig().RPC.ListenAddress c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) + require.NoError(t, err) status, err := c.Status(context.Background()) require.NoError(t, err) require.NotNil(t, status) @@ -83,34 +83,34 @@ func TestCorsEnabled(t *testing.T) { origin := rpctest.GetConfig().RPC.CORSAllowedOrigins[0] remote := strings.ReplaceAll(rpctest.GetConfig().RPC.ListenAddress, "tcp", "http") - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) + req, err := http.NewRequest(http.MethodGet, remote, nil) + require.NoError(t, err, "%+v", err) req.Header.Set("Origin", origin) c := &http.Client{} resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) defer resp.Body.Close() assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) } -// Make sure status is correct (we connect properly) +// Make sure status is correct (we connect properly). func TestStatus(t *testing.T) { for i, c := range GetClients() { moniker := rpctest.GetConfig().Moniker status, err := c.Status(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.Equal(t, moniker, status.NodeInfo.Moniker) } } -// Make sure info is correct (we connect properly) +// Make sure info is correct (we connect properly). func TestInfo(t *testing.T) { for i, c := range GetClients() { // status, err := c.Status() // require.Nil(t, err, "%+v", err) info, err := c.ABCIInfo(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) // TODO: this is not correct - fix merkleeyes! 
// assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) assert.True(t, strings.Contains(info.Response.Data, "size")) @@ -122,9 +122,9 @@ func TestNetInfo(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) netinfo, err := nc.NetInfo(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) + assert.Empty(t, netinfo.Peers) } } @@ -134,7 +134,7 @@ func TestDumpConsensusState(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) cons, err := nc.DumpConsensusState(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.NotEmpty(t, cons.RoundState) assert.Empty(t, cons.Peers) } @@ -146,7 +146,7 @@ func TestConsensusState(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) cons, err := nc.ConsensusState(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.NotEmpty(t, cons.RoundState) } } @@ -156,25 +156,24 @@ func TestHealth(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) _, err := nc.Health(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) } } func TestGenesisAndValidators(t *testing.T) { for i, c := range GetClients() { - // make sure this is the right genesis file gen, err := c.Genesis(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) + require.Len(t, gen.Genesis.Validators, 1) gval := gen.Genesis.Validators[0] // get the current validators h := int64(1) vals, err := c.Validators(context.Background(), &h, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) + require.NoError(t, err, "%d: %+v", i, err) + require.Len(t, vals.Validators, 1) require.Equal(t, 1, vals.Count) require.Equal(t, 1, vals.Total) val := vals.Validators[0] @@ -200,7 +199,6 @@ func TestGenesisChunked(t *testing.T) { data, err := base64.StdEncoding.DecodeString(chunk.Data) require.NoError(t, err) decoded = append(decoded, string(data)) - } doc := []byte(strings.Join(decoded, "")) @@ -215,7 +213,7 @@ func TestABCIQuery(t *testing.T) { // write something k, v, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(context.Background(), tx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) apph := bres.Height + 1 // this is where the tx will be applied to the state // wait before querying @@ -223,17 +221,16 @@ func TestABCIQuery(t *testing.T) { require.NoError(t, err) res, err := c.ABCIQuery(context.Background(), "/key", k) qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { + if assert.NoError(t, err) && assert.True(t, qres.IsOK()) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.EqualValues(t, v, qres.Value) } } } -// Make some app checks +// Make some app checks. 
func TestAppCalls(t *testing.T) { assert, require := assert.New(t), require.New(t) for i, c := range GetClients() { - // get an offset of height to avoid racing and guessing s, err := c.Status(context.Background()) require.NoError(err) @@ -275,7 +272,7 @@ func TestAppCalls(t *testing.T) { block, err := c.Block(context.Background(), &apph) require.NoError(err) appHash := block.Block.Header.AppHash - assert.True(len(appHash) > 0) + assert.NotEmpty(appHash) assert.EqualValues(apph, block.Block.Header.Height) blockByHash, err := c.BlockByHash(context.Background(), block.BlockID.Hash) @@ -293,18 +290,18 @@ func TestAppCalls(t *testing.T) { // now check the results blockResults, err := c.BlockResults(context.Background(), &txh) - require.Nil(err, "%d: %+v", i, err) + require.NoError(err, "%d: %+v", i, err) assert.Equal(txh, blockResults.Height) - if assert.Equal(1, len(blockResults.TxsResults)) { + if assert.Len(blockResults.TxResults, 1) { // check success code - assert.EqualValues(0, blockResults.TxsResults[0].Code) + assert.EqualValues(0, blockResults.TxResults[0].Code) } // check blockchain info, now that we know there is info info, err := c.BlockchainInfo(context.Background(), apph, apph) require.NoError(err) - assert.True(info.LastHeight >= apph) - if assert.Equal(1, len(info.BlockMetas)) { + assert.GreaterOrEqual(info.LastHeight, apph) + if assert.Len(info.BlockMetas, 1) { lastMeta := info.BlockMetas[0] assert.EqualValues(apph, lastMeta.Header.Height) blockData := block.Block @@ -326,10 +323,10 @@ func TestAppCalls(t *testing.T) { assert.Equal(block.Block.LastCommitHash, commit2.Commit.Hash()) // and we got a proof that works! - _pres, err := c.ABCIQueryWithOptions(context.Background(), "/key", k, client.ABCIQueryOptions{Prove: true}) + _proofResp, err := c.ABCIQueryWithOptions(context.Background(), "/key", k, client.ABCIQueryOptions{Prove: true}) require.NoError(err) - pres := _pres.Response - assert.True(pres.IsOK()) + proofResp := _proofResp.Response + assert.True(proofResp.IsOK()) // XXX Test proof } @@ -345,8 +342,8 @@ func TestBroadcastTxSync(t *testing.T) { for i, c := range GetClients() { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxSync(context.Background(), tx) - require.Nil(err, "%d: %+v", i, err) - require.Equal(bres.Code, abci.CodeTypeOK) // FIXME + require.NoError(err, "%d: %+v", i, err) + require.Equal(abci.CodeTypeOK, bres.Code) // FIXME require.Equal(initMempoolSize+1, mempool.Size()) @@ -363,7 +360,7 @@ func TestBroadcastTxCommit(t *testing.T) { for i, c := range GetClients() { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(context.Background(), tx) - require.Nil(err, "%d: %+v", i, err) + require.NoError(err, "%d: %+v", i, err) require.True(bres.CheckTx.IsOK()) require.True(bres.TxResult.IsOK()) @@ -371,12 +368,38 @@ func TestBroadcastTxCommit(t *testing.T) { } } +func TestUnconfirmedTx(t *testing.T) { + _, _, tx := MakeTxKV() + + ch := make(chan *abci.CheckTxResponse, 1) + mempool := node.Mempool() + reqRes, err := mempool.CheckTx(tx, "") + require.NoError(t, err) + ch <- reqRes.Response.GetCheckTx() + + // wait for tx to arrive in mempool.
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + target := types.Tx(tx) + for _, c := range GetClients() { + mc := c.(client.MempoolClient) + res, err := mc.UnconfirmedTx(context.Background(), target.Hash()) + require.NoError(t, err) + assert.Exactly(t, target, res.Tx) + } + + mempool.Flush() +} + func TestUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() - ch := make(chan *abci.ResponseCheckTx, 1) + ch := make(chan *abci.CheckTxResponse, 1) mempool := node.Mempool() - reqRes, err := mempool.CheckTx(tx) + reqRes, err := mempool.CheckTx(tx, "") require.NoError(t, err) ch <- reqRes.Response.GetCheckTx() @@ -405,9 +428,9 @@ func TestUnconfirmedTxs(t *testing.T) { func TestNumUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() - ch := make(chan *abci.ResponseCheckTx, 1) + ch := make(chan *abci.CheckTxResponse, 1) mempool := node.Mempool() - reqRes, err := mempool.CheckTx(tx) + reqRes, err := mempool.CheckTx(tx, "") require.NoError(t, err) ch <- reqRes.Response.GetCheckTx() @@ -423,7 +446,7 @@ func TestNumUnconfirmedTxs(t *testing.T) { mc, ok := c.(client.MempoolClient) require.True(t, ok, "%d", i) res, err := mc.NumUnconfirmedTxs(context.Background()) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.Equal(t, mempoolSize, res.Count) assert.Equal(t, mempoolSize, res.Total) @@ -452,7 +475,7 @@ func TestTx(t *testing.T) { c := getHTTPClient() _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(context.Background(), tx) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) txHeight := bres.Height txHash := bres.Hash @@ -482,9 +505,9 @@ func TestTx(t *testing.T) { ptx, err := c.Tx(context.Background(), tc.hash, tc.prove) if !tc.valid { - require.NotNil(t, err) + require.Error(t, err) } else { - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) assert.EqualValues(t, txHeight, ptx.Height) assert.EqualValues(t, tx, ptx.Tx) assert.Zero(t, ptx.Index) @@ -494,7 +517,7 @@ func TestTx(t *testing.T) { // time to verify the proof proof := ptx.Proof if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + require.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } } } @@ -511,8 +534,8 @@ func TestTxSearchWithTimeout(t *testing.T) { // query using a compositeKey (see kvstore application) result, err := timeoutClient.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + require.NoError(t, err) + require.NotEmpty(t, result.Txs, "expected a lot of transactions") } // This test does nothing if we do not call app.SetGenBlockEvents() within main_test.go @@ -537,7 +560,7 @@ func TestBlockSearch(t *testing.T) { // require.GreaterOrEqual(t, blockCount, 5) // otherwise it is 0 - require.Equal(t, blockCount, 0) + require.Equal(t, 0, blockCount) } func TestTxSearch(t *testing.T) { @@ -561,10 +584,9 @@ func TestTxSearch(t *testing.T) { anotherTxHash := types.Tx("a different tx").Hash() for _, c := range GetClients() { - // now we query for the tx. 
result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Len(t, result.Txs, 1) require.Equal(t, find.Hash, result.Txs[0].Hash) @@ -577,56 +599,56 @@ func TestTxSearch(t *testing.T) { // time to verify the proof if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + require.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) } // query by height result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Len(t, result.Txs, 1) // query for non existing tx result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) + require.NoError(t, err) + require.Empty(t, result.Txs) // query using a compositeKey (see kvstore application) result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + require.NoError(t, err) + require.NotEmpty(t, result.Txs, "expected a lot of transactions") // query using an index key result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + require.NoError(t, err) + require.NotEmpty(t, result.Txs, "expected a lot of transactions") // query using an noindex key result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") + require.NoError(t, err) + require.Empty(t, result.Txs) // query using a compositeKey (see kvstore application) and height result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + require.NoError(t, err) + require.NotEmpty(t, result.Txs, "expected a lot of transactions") // query a non existing tx with page 1 and txsPerPage 1 perPage := 1 result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) + require.NoError(t, err) + require.Empty(t, result.Txs) // check sorting result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) for k := 0; k < len(result.Txs)-1; k++ { require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) } result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) + require.NoError(t, err) for k := 0; k < len(result.Txs)-1; k++ { require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) @@ -641,7 +663,6 @@ func TestTxSearch(t *testing.T) { totalTx := 0 for page := 1; page <= pages; page++ { - page := page result, err := c.TxSearch(context.Background(), "tx.height >= 1", true, &page, &perPage, "asc") require.NoError(t, err) if page < pages { @@
-670,6 +691,7 @@ func TestBatchedJSONRPCCalls(t *testing.T) { } func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) { + t.Helper() k1, v1, tx1 := MakeTxKV() k2, v2, tx2 := MakeTxKV() diff --git a/rpc/core/abci.go b/rpc/core/abci.go index cdf7fad22c1..84e1d35a9f2 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -19,7 +19,7 @@ func (env *Environment) ABCIQuery( height int64, prove bool, ) (*ctypes.ResultABCIQuery, error) { - resQuery, err := env.ProxyAppQuery.Query(context.TODO(), &abci.RequestQuery{ + resQuery, err := env.ProxyAppQuery.Query(context.TODO(), &abci.QueryRequest{ Path: path, Data: data, Height: height, @@ -35,7 +35,7 @@ func (env *Environment) ABCIQuery( // ABCIInfo gets some info about the application. // More: https://docs.cometbft.com/main/rpc/#/ABCI/abci_info func (env *Environment) ABCIInfo(_ *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - resInfo, err := env.ProxyAppQuery.Info(context.TODO(), proxy.RequestInfo) + resInfo, err := env.ProxyAppQuery.Info(context.TODO(), proxy.InfoRequest) if err != nil { return nil, err } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a29680b798b..d2ef636ebde 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -1,8 +1,6 @@ package core import ( - "errors" - "fmt" "sort" "github.com/cometbft/cometbft/libs/bytes" @@ -56,10 +54,10 @@ func (env *Environment) BlockchainInfo( // error if either min or max are negative or min > max // if 0, use blockstore base for min, latest block height for max // enforce limit. -func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { +func filterMinMax(base, height, min, max, limit int64) (minHeight, maxHeight int64, err error) { // filter negatives if min < 0 || max < 0 { - return min, max, fmt.Errorf("heights must be non-negative") + return min, max, ErrNegativeHeight } // adjust for default values @@ -81,7 +79,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { min = cmtmath.MaxInt64(min, max-limit+1) if min > max { - return min, max, fmt.Errorf("min height %d can't be greater than max height %d", min, max) + return min, max, ErrHeightMinGTMax{Min: min, Max: max} } return min, max, nil } @@ -127,8 +125,7 @@ func (env *Environment) Block(_ *rpctypes.Context, heightPtr *int64) (*ctypes.Re return nil, err } - block := env.BlockStore.LoadBlock(height) - blockMeta := env.BlockStore.LoadBlockMeta(height) + block, blockMeta := env.BlockStore.LoadBlock(height) if blockMeta == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: block}, nil } @@ -138,12 +135,10 @@ func (env *Environment) Block(_ *rpctypes.Context, heightPtr *int64) (*ctypes.Re // BlockByHash gets block by hash. // More: https://docs.cometbft.com/main/rpc/#/Info/block_by_hash func (env *Environment) BlockByHash(_ *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { - block := env.BlockStore.LoadBlockByHash(hash) - if block == nil { + block, blockMeta := env.BlockStore.LoadBlockByHash(hash) + if blockMeta == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } - // If block is not nil, then blockMeta can't be nil. 
- blockMeta := env.BlockStore.LoadBlockMeta(block.Height) return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } @@ -194,10 +189,11 @@ func (env *Environment) BlockResults(_ *rpctypes.Context, heightPtr *int64) (*ct return &ctypes.ResultBlockResults{ Height: height, - TxsResults: results.TxResults, + TxResults: results.TxResults, FinalizeBlockEvents: results.Events, ValidatorUpdates: results.ValidatorUpdates, ConsensusParamUpdates: results.ConsensusParamUpdates, + AppHash: results.AppHash, }, nil } @@ -211,7 +207,7 @@ func (env *Environment) BlockSearch( ) (*ctypes.ResultBlockSearch, error) { // skip if block indexing is disabled if _, ok := env.BlockIndexer.(*blockidxnull.BlockerIndexer); ok { - return nil, errors.New("block indexing is disabled") + return nil, ErrBlockIndexing } q, err := cmtquery.New(query) @@ -226,14 +222,14 @@ func (env *Environment) BlockSearch( // sort results (must be done before pagination) switch orderBy { - case "desc", "": + case Descending, "": sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) - case "asc": + case Ascending: sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) default: - return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") + return nil, ErrInvalidOrderBy{orderBy} } // paginate results @@ -250,15 +246,12 @@ func (env *Environment) BlockSearch( apiResults := make([]*ctypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { - block := env.BlockStore.LoadBlock(results[i]) - if block != nil { - blockMeta := env.BlockStore.LoadBlockMeta(block.Height) - if blockMeta != nil { - apiResults = append(apiResults, &ctypes.ResultBlock{ - Block: block, - BlockID: blockMeta.BlockID, - }) - } + block, blockMeta := env.BlockStore.LoadBlock(results[i]) + if blockMeta != nil { + apiResults = append(apiResults, &ctypes.ResultBlock{ + Block: block, + BlockID: blockMeta.BlockID, + }) } } diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 88bf57ceb4d..ffc480c8886 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" @@ -68,12 +67,13 @@ func TestBlockchainInfo(t *testing.T) { } func TestBlockResults(t *testing.T) { - results := &abci.ResponseFinalizeBlock{ + results := &abci.FinalizeBlockResponse{ TxResults: []*abci.ExecTxResult{ {Code: 0, Data: []byte{0x01}, Log: "ok"}, {Code: 0, Data: []byte{0x02}, Log: "ok"}, {Code: 1, Log: "not ok"}, }, + AppHash: make([]byte, 1), } env := &Environment{} @@ -97,19 +97,20 @@ func TestBlockResults(t *testing.T) { {101, true, nil}, {100, false, &ctypes.ResultBlockResults{ Height: 100, - TxsResults: results.TxResults, + TxResults: results.TxResults, FinalizeBlockEvents: results.Events, ValidatorUpdates: results.ValidatorUpdates, ConsensusParamUpdates: results.ConsensusParamUpdates, + AppHash: make([]byte, 1), }}, } for _, tc := range testCases { res, err := env.BlockResults(&rpctypes.Context{}, &tc.height) if tc.wantErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, tc.wantRes, res) } } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index f2e3df0d24c..a33a4919c74 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -1,8 +1,11 
@@ package core import ( - cm "github.com/cometbft/cometbft/consensus" + "fmt" + + cm "github.com/cometbft/cometbft/internal/consensus" cmtmath "github.com/cometbft/cometbft/libs/math" + "github.com/cometbft/cometbft/p2p" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" "github.com/cometbft/cometbft/types" @@ -55,24 +58,29 @@ func (env *Environment) Validators( // More: https://docs.cometbft.com/main/rpc/#/Info/dump_consensus_state func (env *Environment) DumpConsensusState(*rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. - peers := env.P2PPeers.Peers().List() - peerStates := make([]ctypes.PeerStateInfo, len(peers)) - for i, peer := range peers { + peerStates := make([]ctypes.PeerStateInfo, 0) + var err error + env.P2PPeers.Peers().ForEach(func(peer p2p.Peer) { peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) if !ok { // peer does not have a state yet - continue + return } - peerStateJSON, err := peerState.MarshalJSON() - if err != nil { - return nil, err + peerStateJSON, marshalErr := peerState.MarshalJSON() + if marshalErr != nil { + err = fmt.Errorf("failed to marshal peer %v state: %w", peer.ID(), marshalErr) + return } - peerStates[i] = ctypes.PeerStateInfo{ + peerStates = append(peerStates, ctypes.PeerStateInfo{ // Peer basic info. NodeAddress: peer.SocketAddr().String(), // Peer consensus state. PeerState: peerStateJSON, - } + }) + }) + if err != nil { + return nil, err } + // Get self round state. roundState, err := env.ConsensusState.GetRoundStateJSON() if err != nil { diff --git a/rpc/core/env.go b/rpc/core/env.go index 88d27bebd1c..275edc12056 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -1,16 +1,22 @@ package core import ( - "encoding/base64" + "errors" "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strconv" "time" + abcicli "github.com/cometbft/cometbft/abci/client" cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto" - cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/indexer" @@ -19,20 +25,21 @@ import ( ) const ( - // see README + // see README. defaultPerPage = 30 maxPerPage = 100 // SubscribeTimeout is the maximum time we wait to subscribe for an event. - // must be less than the server's write timeout (see rpcserver.DefaultConfig) + // must be less than the server's write timeout (see rpcserver.DefaultConfig). SubscribeTimeout = 5 * time.Second // genesisChunkSize is the maximum size, in bytes, of each - // chunk in the genesis structure for the chunked API - genesisChunkSize = 16 * 1024 * 1024 // 16 + // chunk in the genesis structure for the chunked API. 
+ genesisChunkSize = 2 * 1024 * 1024 // 2 MB + + _chunksDir = "genesis-chunks" ) -//---------------------------------------------- // These interfaces are used by RPC and must be thread safe type Consensus interface { @@ -46,14 +53,14 @@ type Consensus interface { type transport interface { Listeners() []string IsListening() bool - NodeInfo() p2p.NodeInfo + NodeInfo() ni.NodeInfo } type peers interface { - AddPersistentPeers([]string) error - AddUnconditionalPeerIDs([]string) error - AddPrivatePeerIDs([]string) error - DialPeersAsync([]string) error + AddPersistentPeers(peers []string) error + AddUnconditionalPeerIDs(peerIDs []string) error + AddPrivatePeerIDs(peerIDs []string) error + DialPeersAsync(peers []string) error Peers() p2p.IPeerSet } @@ -62,9 +69,20 @@ type syncReactor interface { WaitSync() bool } -// ---------------------------------------------- -// Environment contains objects and interfaces used by the RPC. It is expected -// to be setup once during startup. +type mempoolReactor interface { + syncReactor + TryAddTx(tx types.Tx, sender p2p.Peer) (*abcicli.ReqRes, error) +} + +// Environment contains the objects and interfaces used to serve the RPC APIs. +// A Node creates an object of this type at startup. +// An Environment should not be created directly, and it is recommended that +// only one instance of Environment be created at runtime. +// For this reason, callers should create an Environment object using +// the ConfigureRPC() method of the Node type, because the Environment object it +// returns is a singleton. +// Note: The Environment type was exported in the initial RPC API design; therefore, +// unexporting it now could potentially break users. type Environment struct { // external, thread safe interfaces ProxyAppQuery proxy.AppConnQuery @@ -76,13 +94,12 @@ type Environment struct { EvidencePool sm.EvidencePool ConsensusState Consensus ConsensusReactor syncReactor - MempoolReactor syncReactor + MempoolReactor mempoolReactor P2PPeers peers P2PTransport transport // objects PubKey crypto.PubKey - GenDoc *types.GenesisDoc // cache the genesis structure TxIndexer txindex.TxIndexer BlockIndexer indexer.BlockIndexer EventBus *types.EventBus // thread safe @@ -92,11 +109,85 @@ type Environment struct { Config cfg.RPCConfig - // cache of chunked genesis data. - genChunks []string + GenesisFilePath string // the genesis file's full path on disk + + // genesisChunksFiles is a map of chunk ID to its full path on disk. + // If the genesis file is smaller than genesisChunkSize, then this map will be + // nil, because there will be no chunks on disk. + // This map is convenient for the `/genesis_chunked` API to quickly find a chunk + // by its ID, instead of having to reconstruct its path each time, which would + // involve multiple string operations. + genesisChunksFiles map[int]string +} + +// InitGenesisChunks checks whether it makes sense to split the genesis file into +// small chunks to be stored on disk. +// It is called on Node startup and should be called only once. +// Rules of chunking: +// - if the genesis file's size is <= genesisChunkSize, this function returns +// without doing anything. The `/genesis` RPC API endpoint will fetch the genesis +// file from disk to serve requests. +// - if the genesis file's size is > genesisChunkSize, then use chunking. The +// function splits the genesis file into chunks of genesisChunkSize and stores +// each chunk on disk. The `/genesis_chunked` RPC API endpoint will fetch the +// genesis file chunks from disk to serve requests.
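Under the chunking rules just described, the number of chunks for a large genesis file is a ceiling division of its size by genesisChunkSize; the tests later in this diff use the same arithmetic. A small illustrative calculation with a hypothetical 5 MB genesis file:

package main

import "fmt"

func main() {
	const genesisChunkSize = 2 * 1024 * 1024 // mirrors the constant above

	gFileSize := 5 * 1024 * 1024 // hypothetical 5 MB genesis file
	wantChunks := (gFileSize + genesisChunkSize - 1) / genesisChunkSize

	fmt.Println(wantChunks) // 3: chunks 0 and 1 hold 2 MB each, chunk 2 holds 1 MB
}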
+func (env *Environment) InitGenesisChunks() error { + if len(env.genesisChunksFiles) > 0 { + // we already computed the chunks, return. + return nil + } + + gFilePath := env.GenesisFilePath + if len(gFilePath) == 0 { + // chunks not computed yet, but no genesis available. + // This should not happen. + return errors.New("missing genesis file path on disk") + } + + gFileSize, err := fileSize(gFilePath) + if err != nil { + return fmt.Errorf("gauging genesis file size: %w", err) + } + + if gFileSize <= genesisChunkSize { + // no chunking required + return nil + } + + gChunksDir, err := mkChunksDir(gFilePath, _chunksDir) + if err != nil { + return fmt.Errorf("preparing chunks directory: %w", err) + } + + // chunking required + chunkIDToPath, err := writeChunks(gFilePath, gChunksDir, genesisChunkSize) + if err != nil { + return fmt.Errorf("chunking large genesis file: %w", err) + } + + env.genesisChunksFiles = chunkIDToPath + + return nil } -//---------------------------------------------- +// Cleanup deletes the directory storing the genesis file chunks on disk +// if it exists. If the directory does not exist, the function is a no-op. +// The chunks' directory is a sub-directory of the `config/` directory of the +// running node (i.e., where the genesis.json file is stored). +// We call the function: +// - before creating new genesis file chunks, to make sure we start with a clean +// directory. +// - when a Node shuts down, to clean up the file system. +func (env *Environment) Cleanup() error { + gFileDir := filepath.Dir(env.GenesisFilePath) + chunksDir := filepath.Join(gFileDir, _chunksDir) + + if err := os.RemoveAll(chunksDir); err != nil { + return fmt.Errorf("deleting genesis file chunks' folder: %w", err) + } + + return nil +} func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { if perPage < 1 { @@ -119,7 +210,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { return page, nil } -func (env *Environment) validatePerPage(perPagePtr *int) int { +func (*Environment) validatePerPage(perPagePtr *int) int { if perPagePtr == nil { // no per_page parameter return defaultPerPage } @@ -133,35 +224,6 @@ func (env *Environment) validatePerPage(perPagePtr *int) int { return perPage } -// InitGenesisChunks configures the environment and should be called on service -// startup. -func (env *Environment) InitGenesisChunks() error { - if env.genChunks != nil { - return nil - } - - if env.GenDoc == nil { - return nil - } - - data, err := cmtjson.Marshal(env.GenDoc) - if err != nil { - return err - } - - for i := 0; i < len(data); i += genesisChunkSize { - end := i + genesisChunkSize - - if end > len(data) { - end = len(data) - } - - env.genChunks = append(env.genChunks, base64.StdEncoding.EncodeToString(data[i:end])) - } - - return nil -} - func validateSkipCount(page, perPage int) int { skipCount := (page - 1) * perPage if skipCount < 0 { @@ -199,3 +261,109 @@ func (env *Environment) latestUncommittedHeight() int64 { } return env.BlockStore.Height() + 1 } + +// fileSize returns the size of the file at the given path. +func fileSize(fPath string) (int, error) { + // we use os.Stat here instead of os.ReadFile, because we don't want to load + // the entire file into memory just to compute its size from the resulting + // []byte slice. 
+ fInfo, err := os.Stat(fPath) + if errors.Is(err, fs.ErrNotExist) { + return 0, fmt.Errorf("the file is unavailable at %s", fPath) + } else if err != nil { + return 0, fmt.Errorf("accessing file: %w", err) + } + return int(fInfo.Size()), nil +} + +// mkChunksDir creates a new directory to store the genesis file's chunks. +// gFilePath is the genesis file's full path on disk. +// dirName is the name of the directory to be created, not its path on disk. +// mkChunksDir will create a directory named 'dirName' as a sub-directory of the +// genesis file's directory (gFileDir). +// It returns the new directory's full path or an empty string if there is an +// error. +func mkChunksDir(gFilePath string, dirName string) (string, error) { + var ( + gFileDir = filepath.Dir(gFilePath) + dirPath = filepath.Join(gFileDir, dirName) + ) + if _, err := os.Stat(dirPath); err == nil { + // directory already exists; this might happen if the node crashed and + // could not do cleanup. Delete it to start from scratch. + if err := os.RemoveAll(dirPath); err != nil { + return "", fmt.Errorf("deleting existing chunks directory: %w", err) + } + } else if !os.IsNotExist(err) { + return "", fmt.Errorf("accessing directory: %w", err) + } + + if err := os.Mkdir(dirPath, 0o700); err != nil { + return "", fmt.Errorf("creating chunks directory: %w", err) + } + + return dirPath, nil +} + +// writeChunk writes a chunk of the genesis file to disk, saving it to dir. +// Each chunk file name's format will be: chunk_[chunkID].part, e.g., chunk_42.part. +func writeChunk(chunk []byte, dir string, chunkID int) (string, error) { + var ( + chunkName = "chunk_" + strconv.Itoa(chunkID) + ".part" + chunkPath = filepath.Join(dir, chunkName) + ) + if err := os.WriteFile(chunkPath, chunk, 0o600); err != nil { + return "", fmt.Errorf("writing chunk to disk: %w", err) + } + + return chunkPath, nil +} + +// writeChunks reads the genesis file in chunks of size chunkSize, and writes them +// to disk. +// gFilePath is the genesis file's full path on disk. +// gChunksDir is the directory where the chunks will be stored on disk. +// chunkSize is the size of a chunk, that is, writeChunks will read the genesis file +// in chunks of size chunkSize. +// It returns a map where the keys are the chunk IDs, and the values are the chunks' +// path on disk. E.g.,: +// map[0] = $HOME/.cometbft/config/genesis-chunks/chunk_0.part +// map[1] = $HOME/.cometbft/config/genesis-chunks/chunk_1.part +// and so on for all chunks. +// The map will be useful for the `/genesis_chunked` RPC endpoint to quickly find +// a chunk on disk given its ID.
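With the map produced by writeChunks, serving a /genesis_chunked request reduces to a map lookup plus one small file read. The following is a hedged sketch of such a handler, not the implementation in this diff; it assumes the existing ctypes.ResultGenesisChunk field names and base64-encoded chunk data, which is what the base64 DecodeString call in rpc_test.go above expects (imports such as encoding/base64, os, fmt, and ctypes are elided):

// Hypothetical helper; id is the requested chunk ID.
func (env *Environment) genesisChunk(id int) (*ctypes.ResultGenesisChunk, error) {
	chunkPath, ok := env.genesisChunksFiles[id]
	if !ok {
		return nil, fmt.Errorf("no chunk with ID %d", id)
	}

	// Chunks are at most genesisChunkSize bytes, so buffering one is cheap.
	chunk, err := os.ReadFile(chunkPath)
	if err != nil {
		return nil, fmt.Errorf("reading chunk %d: %w", id, err)
	}

	return &ctypes.ResultGenesisChunk{
		ChunkNumber: id,
		TotalChunks: len(env.genesisChunksFiles),
		Data:        base64.StdEncoding.EncodeToString(chunk),
	}, nil
}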
+func writeChunks( + gFilePath, gChunksDir string, + chunkSize int, +) (map[int]string, error) { + gFile, err := os.Open(gFilePath) + if err != nil { + return nil, fmt.Errorf("opening genesis file: %s", err) + } + defer gFile.Close() + + var ( + buf = make([]byte, chunkSize) + chunkIDToPath = make(map[int]string) + ) + for chunkID := 0; ; chunkID++ { + n, err := gFile.Read(buf) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + formatStr := "chunk %d: reading genesis file: %w" + return nil, fmt.Errorf(formatStr, chunkID, err) + } + + chunkPath, err := writeChunk(buf[:n], gChunksDir, chunkID) + if err != nil { + return nil, fmt.Errorf("chunk %d: %w", chunkID, err) + } + + chunkIDToPath[chunkID] = chunkPath + } + + return chunkIDToPath, nil +} diff --git a/rpc/core/env_genesis_test.go b/rpc/core/env_genesis_test.go new file mode 100644 index 00000000000..57824a88ff2 --- /dev/null +++ b/rpc/core/env_genesis_test.go @@ -0,0 +1,54 @@ +package core + +// _testGenesis is a GenesisDoc used in the `Environment.InitGenesisChunks` tests. +// It is the genesis that the ci.toml e2e tests uses. +const _testGenesis = ` +{ + "genesis_time": "2024-10-02T11:53:14.181969Z", + "chain_id": "ci", + "initial_height": "1000", + "consensus_params": { + "block": { + "max_bytes": "4194304", + "max_gas": "10000000" + }, + "evidence": { + "max_age_num_blocks": "14", + "max_age_duration": "1500000000", + "max_bytes": "1048576" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + }, + "version": { + "app": "1" + }, + "synchrony": { + "precision": "500000000", + "message_delay": "2000000000" + }, + "feature": { + "vote_extensions_enable_height": "0", + "pbts_enable_height": "0" + } + }, + "validators": [ + { + "address": "75C02D9AC4DB1A1F802CECF9EADB4CC4CB952AE6", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "01E+NeFFiH8D2uQHJ+X45wePtfVPs9pncpBnv/g9DQs=" + }, + "power": "100", + "name": "validator01" + } + ], + "app_hash": "", + "app_state": { + "initial01": "a", + "initial02": "b", + "initial03": "c" + } +}` diff --git a/rpc/core/env_test.go b/rpc/core/env_test.go index dc64db1d6ae..6fd2b366878 100644 --- a/rpc/core/env_test.go +++ b/rpc/core/env_test.go @@ -1,12 +1,162 @@ package core import ( + "bytes" + "encoding/json" + "errors" "fmt" + "io" + "io/fs" + "maps" + "os" + "path/filepath" + "slices" + "strconv" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmtjson "github.com/cometbft/cometbft/libs/json" + "github.com/cometbft/cometbft/types" ) +func TestInitGenesisChunks(t *testing.T) { + t.Run("ErrNoGenesisFilePath", func(t *testing.T) { + env := &Environment{} + + err := env.InitGenesisChunks() + if err == nil { + t.Error("expected error but got nil") + } + + wantErrStr := "missing genesis file path on disk" + if err.Error() != wantErrStr { + t.Errorf("\nwantErr: %q\ngot: %q\n", wantErrStr, err.Error()) + } + }) + + // Calling InitGenesisChunks with an existing slice of chunks will return without + // doing anything. + t.Run("NoOp", func(t *testing.T) { + testChunks := map[int]string{ + 0: "chunk1", + 1: "chunk2", + } + env := &Environment{genesisChunksFiles: testChunks} + + err := env.InitGenesisChunks() + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + // check that the function really was a no-op: the map of chunks should be + // unchanged. 
+ if !maps.Equal(testChunks, env.genesisChunksFiles) { + formatStr := "\nexpected chunks: %v\ngot: %v" + t.Fatalf(formatStr, testChunks, env.genesisChunksFiles) + } + }) + + // Tests with a genesis file <= genesisChunkSize, i.e., no chunking: the + // genesis file is served from disk without being split into chunks. + // The test genesis is the genesis that the ci.toml e2e test uses. + t.Run("NoChunking", func(t *testing.T) { + gFile, err := os.CreateTemp("", "genesis.json") + if err != nil { + t.Fatalf("creating genesis file for testing: %s", err) + } + + defer os.Remove(gFile.Name()) + + if _, err := gFile.WriteString(_testGenesis); err != nil { + t.Fatalf("writing genesis file for testing: %s", err) + } + gFile.Close() + + env := &Environment{GenesisFilePath: gFile.Name()} + + if err := env.InitGenesisChunks(); err != nil { + t.Errorf("unexpected error: %s", err) + } + + // Because the genesis file is <= genesisChunkSize, there should be no + // chunking. Therefore, the map of chunk IDs to their paths on disk should + // be empty. + if len(env.genesisChunksFiles) > 0 { + formatStr := "chunks map should be empty, but it's %v" + t.Fatalf(formatStr, env.genesisChunksFiles) + } + }) + + // Tests with a genesis file > genesisChunkSize. + // The test genesis file has an app_state of key-value string pairs + // automatically generated (~42MB). + t.Run("Chunking", func(t *testing.T) { + genDoc := &types.GenesisDoc{} + if err := cmtjson.Unmarshal([]byte(_testGenesis), genDoc); err != nil { + t.Fatalf("test genesis de-serialization: %s", err) + } + + appState, err := genAppState() + if err != nil { + t.Fatalf("generating dummy app_state for testing: %s", err) + } + + genDoc.AppState = appState + + genDocJSON, err := cmtjson.Marshal(genDoc) + if err != nil { + t.Fatalf("test genesis serialization: %s", err) + } + + gFile, err := os.CreateTemp("", "genesis.json") + if err != nil { + t.Fatalf("creating genesis file for testing: %s", err) + } + + if _, err := gFile.Write(genDocJSON); err != nil { + t.Fatalf("writing genesis file for testing: %s", err) + } + gFile.Close() + + var ( + gFilePath = gFile.Name() + chunksDirPath = filepath.Join(filepath.Dir(gFilePath), _chunksDir) + env = &Environment{GenesisFilePath: gFilePath} + ) + defer os.RemoveAll(chunksDirPath) + + if err = env.InitGenesisChunks(); err != nil { + t.Errorf("unexpected error: %s", err) + } + + gSize, err := fileSize(gFilePath) + if err != nil { + t.Fatalf("estimating test genesis file size: %s", err) + } + + // Because the genesis file is > genesisChunkSize, we expect chunks. + // genesisChunkSize is a global const defined in env.go. + wantChunks := (gSize + genesisChunkSize - 1) / genesisChunkSize + if len(env.genesisChunksFiles) != wantChunks { + formatStr := "expected number of chunks: %d, but got: %d" + t.Errorf(formatStr, wantChunks, len(env.genesisChunksFiles)) + } + + // We now check if the original genesis doc and the genesis doc + // reassembled from the chunks match.
+ err = reassembleAndCompare( + gFilePath, + env.genesisChunksFiles, + genesisChunkSize, + ) + if err != nil { + t.Errorf("reassembling genesis file: %s", err) + } + }) +} + func TestPaginationPage(t *testing.T) { cases := []struct { totalCount int @@ -42,7 +192,7 @@ func TestPaginationPage(t *testing.T) { for _, c := range cases { p, err := validatePage(&c.page, c.perPage, c.totalCount) if c.expErr { - assert.Error(t, err) + require.Error(t, err) continue } @@ -51,7 +201,7 @@ func TestPaginationPage(t *testing.T) { // nil case p, err := validatePage(nil, 1, 1) - if assert.NoError(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, 1, p) } } @@ -80,3 +230,396 @@ func TestPaginationPerPage(t *testing.T) { p := env.validatePerPage(nil) assert.Equal(t, defaultPerPage, p) } + +func TestCleanup(t *testing.T) { + t.Run("NoErrDirNotExist", func(t *testing.T) { + env := &Environment{GenesisFilePath: "/nonexistent/path/to/genesis.json"} + + if err := env.Cleanup(); err != nil { + t.Errorf("unexpected error: %s", err) + } + }) + + t.Run("DirDeleted", func(t *testing.T) { + var ( + gFilePath = "./genesis.json" + chunksDir = "./" + _chunksDir + + env = &Environment{GenesisFilePath: gFilePath} + ) + // the directory we want to delete. + if err := os.MkdirAll(chunksDir, 0o700); err != nil { + t.Fatalf("creating test chunks directory: %s", err) + } + + if err := env.Cleanup(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // verify that chunksDir no longer exists + if _, err := os.Stat(chunksDir); !errors.Is(err, fs.ErrNotExist) { + t.Errorf("expected os.IsNotExist error, but got: %s", err) + } + }) + + t.Run("ErrDeletingDir", func(t *testing.T) { + // To test if the function catches errors returned by os.RemoveAll(), we + // create a directory with read-only permissions, so that os.RemoveAll() will + // fail. + // Usually, the deletion of a file or a directory is controlled by the + // permissions of the *parent* directory. Therefore, in this test we are + // creating a directory and a sub-directory; then we'll set the parent + // directory's permissions to read-only, so that os.RemoveAll() will fail. + + parentDir, err := os.MkdirTemp("", "parentDir") + if err != nil { + t.Fatalf("creating test parent directory: %s", err) + } + defer os.RemoveAll(parentDir) + + var ( + gFilePath = filepath.Join(parentDir, "genesis.json") + chunksDir = filepath.Join(parentDir, _chunksDir) + + env = &Environment{GenesisFilePath: gFilePath} + ) + + // the sub-directory that we want to delete. + if err := os.Mkdir(chunksDir, 0o700); err != nil { + t.Fatalf("creating test chunks directory: %s", err) + } + + // set read-only permissions to trigger deletion error + if err := os.Chmod(parentDir, 0o500); err != nil { + t.Fatalf("changing test parent directory permissions: %s", err) + } + + err = env.Cleanup() + if err == nil { + t.Fatalf("expected an error, got nil") + } + + wantErr := "deleting genesis file chunks' folder: unlinkat " + chunksDir + ": permission denied" + if err.Error() != wantErr { + t.Errorf("\nwant error: %s\ngot: %s\n", wantErr, err.Error()) + } + + // reset permissions of parent folder to allow the deferred os.RemoveAll() + // to work, thus deleting test data. 
+ if err := os.Chmod(parentDir, 0o700); err != nil { + formatStr := "changing test parent directory permissions to cleanup: %s" + t.Fatalf(formatStr, err) + } + }) +} + +func TestFileSize(t *testing.T) { + t.Run("ErrFileNotExist", func(t *testing.T) { + fPath := "non-existent-file" + _, err := fileSize(fPath) + if err == nil { + t.Fatalf("expected an error, got nil") + } + + wantErr := "the file is unavailable at non-existent-file" + if err.Error() != wantErr { + t.Fatalf("\nwant error: %s\ngot: %s\n", wantErr, err.Error()) + } + }) + + t.Run("ErrAccessingPath", func(t *testing.T) { + // To test if the function catches errors returned by os.Stat() that + // aren't fs.ErrNotExist, we create a path that contains an invalid null + // byte, thus forcing os.Stat() to return an error. + fPath := "null/" + string('\x00') + "/file" + + _, err := fileSize(fPath) + if err == nil { + t.Fatalf("expected an error, got nil") + } + + wantErr := "accessing file: stat null/\x00/file: invalid argument" + if err.Error() != wantErr { + t.Errorf("\nwant error: %s\ngot: %s\n", wantErr, err.Error()) + } + }) + + t.Run("FileSizeOk", func(t *testing.T) { + // we'll create a temporary file of 100 bytes to run this test. + const fSize = 100 + + f, err := os.CreateTemp("", "small_test_file") + if err != nil { + t.Fatalf("creating temp file for testing: %s", err) + } + defer os.Remove(f.Name()) + + data := make([]byte, fSize) + for i := 0; i < 100; i++ { + data[i] = 'a' + } + + if _, err := f.Write(data); err != nil { + t.Fatalf("writing to temp file for testing: %s", err) + } + f.Close() + + gotSize, err := fileSize(f.Name()) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if gotSize != fSize { + t.Errorf("want size: %d, got: %d", fSize, gotSize) + } + }) +} + +func TestMkChunksDir(t *testing.T) { + gFile, err := os.CreateTemp("", "dummy_genesis.json") + if err != nil { + t.Fatalf("creating temp file for testing: %s", err) + } + gFile.Close() + defer os.Remove(gFile.Name()) + + t.Run("DirExistCreated", func(t *testing.T) { + var ( + newDirName = "some-dir" + gFileDir = filepath.Dir(gFile.Name()) + newDirPath = filepath.Join(gFileDir, newDirName) + ) + if err := os.Mkdir(newDirPath, 0o700); err != nil { + t.Fatalf("creating chunks directory for testing: %s", err) + } + + dirPath, err := mkChunksDir(gFile.Name(), newDirName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if err := os.RemoveAll(dirPath); err != nil { + t.Error(err) + } + }) + + t.Run("DirNotExistCreated", func(t *testing.T) { + newDir := "some-dir" + dirPath, err := mkChunksDir(gFile.Name(), newDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if err := os.RemoveAll(dirPath); err != nil { + t.Error(err) + } + }) + + t.Run("ErrMkDir", func(t *testing.T) { + var ( + fNotExist = "/some/file/that/does/not/exist.json" + newDir = "some-dir" + ) + _, err := mkChunksDir(fNotExist, newDir) + if err == nil { + t.Fatalf("expected error but got nil") + } + + var ( + fDir = filepath.Dir(fNotExist) + newDirPath = filepath.Join(fDir, newDir) + + wantErr = "creating chunks directory: mkdir " + newDirPath + ": no such file or directory" + ) + if err.Error() != wantErr { + t.Errorf("\nwant error: %s\ngot: %s\n", wantErr, err.Error()) + } + }) +} + +func TestWriteChunk(t *testing.T) { + cDir, err := os.MkdirTemp("", _chunksDir) + if err != nil { + t.Fatalf("creating test chunks directory: %s", err) + } + defer os.RemoveAll(cDir) + + var ( + chunk = []byte("test-chunk") + wantPath = filepath.Join(cDir,
"chunk_42.part") + ) + + t.Run("ErrChunkNotWritten", func(t *testing.T) { + // To test if the function catches errors returned by os.WriteFile(), we + // create a directory with read-only permissions, so that os.WriteFile() will + // fail. + + // set read-only permissions to trigger write error + if err := os.Chmod(cDir, 0o500); err != nil { + t.Fatalf("changing test chunks directory permissions: %s", err) + } + + _, err := writeChunk(chunk, cDir, 42) + if err == nil { + t.Fatalf("expected error but got nil") + } + + // reset permissions of chunks folder to allow the rest of the test code to + // work. + if err := os.Chmod(cDir, 0o700); err != nil { + formatStr := "changing test parent directory permissions to cleanup: %s" + t.Fatalf(formatStr, err) + } + + wantErr := "writing chunk to disk: open " + wantPath + ": permission denied" + if err.Error() != wantErr { + t.Errorf("\nwant error: %s\ngot: %s\n", wantErr, err.Error()) + } + }) + + t.Run("ChunkWritten", func(t *testing.T) { + gotPath, err := writeChunk(chunk, cDir, 42) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if wantPath != gotPath { + t.Errorf("\nwant path: %s\ngot path: %s\n", wantPath, gotPath) + } + }) +} + +func TestWriteChunks(t *testing.T) { + const ( + // we'll create a temporary file of 100 bytes to run this test. + gFileSize = 100 + + // we'll split the temp file into test chunks of 25 bytes. + tChunkSize = 25 + ) + + // create temporary test genesis file + gFile, err := os.CreateTemp("", "dummy_genesis") + if err != nil { + t.Fatalf("creating temp file for testing: %s", err) + } + defer os.Remove(gFile.Name()) + + data := make([]byte, gFileSize) + for i := 0; i < 100; i++ { + data[i] = 'a' + } + + if _, err := gFile.Write(data); err != nil { + t.Fatalf("writing to temp file for testing: %s", err) + } + gFile.Close() + + // create a temporary directory to store the test chunks. + var ( + gFileDir = filepath.Dir(gFile.Name()) + tChunksDir = filepath.Join(gFileDir, "test-chunks") + ) + + if err := os.Mkdir(tChunksDir, 0o700); err != nil { + t.Fatalf("creating test chunks directory: %s", err) + } + defer os.RemoveAll(tChunksDir) + + chunkIDToPath, err := writeChunks(gFile.Name(), tChunksDir, tChunkSize) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + wantMap := map[int]string{ + 0: tChunksDir + "/chunk_0.part", + 1: tChunksDir + "/chunk_1.part", + 2: tChunksDir + "/chunk_2.part", + 3: tChunksDir + "/chunk_3.part", + } + if !maps.Equal(wantMap, chunkIDToPath) { + t.Errorf("\nwant map: %v\ngot: %v\n", wantMap, chunkIDToPath) + } + + err = reassembleAndCompare(gFile.Name(), chunkIDToPath, tChunkSize) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +// reassembleAndCompare is a helper function to reassemble the genesis file from +// its chunks and compare it with the original genesis file. +// The function reads the genesis file as a stream, so it is suitable for larger +// files as well. +// gFilePath is the genesis file's full path on disk. +// chunks is a map where the keys are the chunk IDs, and the values are the chunks' +// path on disk. 
+func reassembleAndCompare( + gFilePath string, + chunks map[int]string, + chunkSize int, +) error { + gFile, err := os.Open(gFilePath) + if err != nil { + return fmt.Errorf("opening genesis file at %s: %s", gFilePath, err) + } + defer gFile.Close() + + // have to collect the IDs and sort them because map traversal isn't guaranteed + // to be in order; but we need it to be in order to compare each chunk + // with the corresponding "piece" of the genesis file. + chunkIDs := make([]int, 0, len(chunks)) + for chunkID := range chunks { + chunkIDs = append(chunkIDs, chunkID) + } + slices.Sort(chunkIDs) + + gBuf := make([]byte, chunkSize) + for _, chunkID := range chunkIDs { + chunkPath := chunks[chunkID] + + // chunks are small, so it's ok to load them in memory. + chunk, err := os.ReadFile(chunkPath) + if err != nil { + return fmt.Errorf("reading chunk file %d: %s", chunkID, err) + } + gN, err := gFile.Read(gBuf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("reading genesis file chunk %d: %s", chunkID, err) + } + + if !bytes.Equal(gBuf[:gN], chunk) { + return fmt.Errorf("chunk %d does not match", chunkID) + } + } + + return nil +} + +// genAppState is a helper function that generates a dummy "app_state" to be used in +// tests. To test the splitting of a genesis into smaller chunks, we need to use a +// big genesis file. Typically, the bulk of a genesis file comes from the app_state +// field. +// It returns the app_state encoded to JSON. +func genAppState() ([]byte, error) { + const ( + // how many KV pairs to put in app_state. + // The current value generates an app_state of ~40MB. + size = 1024 * 1024 * 2 + + // characters used to fill in the KV pairs of app_state. + alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + alphabetSize = len(alphabet) + ) + + appState := make(map[string]string, size) + for i := range size { + appState["initial"+strconv.Itoa(i)] = string(alphabet[i%alphabetSize]) + } + + appStateJSON, err := json.Marshal(appState) + if err != nil { + return nil, fmt.Errorf("serializing test app_state to JSON: %s", err) + } + + return appStateJSON, nil +} diff --git a/rpc/core/errors.go b/rpc/core/errors.go new file mode 100644 index 00000000000..555c9e0429e --- /dev/null +++ b/rpc/core/errors.go @@ -0,0 +1,161 @@ +package core + +import ( + "errors" + "fmt" +) + +var ( + ErrNegativeHeight = errors.New("negative height") + ErrBlockIndexing = errors.New("block indexing is disabled") + ErrTxIndexingDisabled = errors.New("transaction indexing is disabled") + ErrNoEvidence = errors.New("no evidence was provided") + ErrSlowClient = errors.New("slow client") + ErrCometBFTExited = errors.New("CometBFT exited") + ErrConfirmationNotReceived = errors.New("broadcast confirmation not received") + ErrCheckTxFailed = errors.New("transaction failed to pass CheckTx") + ErrTimedOutWaitingForTx = errors.New("timed out waiting for tx to be included in a block") + ErrGenesisRespSize = errors.New("genesis response is too large, please use the genesis_chunked API instead") + ErrChunkNotInitialized = errors.New("genesis chunks are not initialized") + ErrNoChunks = errors.New("genesis file is small, therefore there are no chunks to serve.
Please use the /genesis API instead") +) + +type ErrMaxSubscription struct { + Max int +} + +func (e ErrMaxSubscription) Error() string { + return fmt.Sprintf("maximum number of subscriptions reached: %d", e.Max) +} + +type ErrMaxPerClientSubscription struct { + Max int +} + +func (e ErrMaxPerClientSubscription) Error() string { + return fmt.Sprintf("maximum number of subscriptions per client reached: %d", e.Max) +} + +type ErrHeightMinGTMax struct { + Min int64 + Max int64 +} + +func (e ErrHeightMinGTMax) Error() string { + return fmt.Sprintf("min height %d can't be greater than max height %d", e.Min, e.Max) +} + +type ErrQueryLength struct { + length int + maxLength int +} + +func (e ErrQueryLength) Error() string { + return fmt.Sprintf("maximum query length exceeded: length %d, max_length %d", e.length, e.maxLength) +} + +type ErrValidation struct { + Source error + ValType string +} + +func (e ErrValidation) Error() string { + return fmt.Sprintf("%s validation failed: %s", e.ValType, e.Source) +} + +func (e ErrValidation) Unwrap() error { return e.Source } + +type ErrAddEvidence struct { + Source error +} + +func (e ErrAddEvidence) Error() string { + return fmt.Sprintf("failed to add evidence: %s", e.Source) +} + +func (e ErrAddEvidence) Unwrap() error { + return e.Source +} + +type ErrSubCanceled struct { + Reason string +} + +func (e ErrSubCanceled) Error() string { + return fmt.Sprintf("subscription was canceled (reason: %s)", e.Reason) +} + +type ErrTxSubFailed struct { + Source error + TxHash []byte +} + +func (e ErrTxSubFailed) Error() string { + return fmt.Sprintf("failed to subscribe to tx %X: %s", e.TxHash, e.Source) +} + +func (e ErrTxSubFailed) Unwrap() error { + return e.Source +} + +type ErrTxBroadcast struct { + Source error + ErrReason error +} + +func (e ErrTxBroadcast) Error() string { + return fmt.Sprintf("failed to broadcast tx: %v: %v", e.ErrReason, e.Source) +} + +func (e ErrTxBroadcast) Unwrap() error { + return e.Source +} + +func (e ErrTxBroadcast) Reason() error { + return e.ErrReason +} + +type ErrServiceConfig struct { + Source error +} + +func (e ErrServiceConfig) Error() string { + return fmt.Sprintf("service configuration error: %s", e.Source) +} + +func (e ErrServiceConfig) Unwrap() error { return e.Source } + +type ErrInvalidChunkID struct { + RequestedID int + MaxID int +} + +func (e ErrInvalidChunkID) Error() string { + return fmt.Sprintf("invalid chunk ID: requested %d but maximum available is %d", e.RequestedID, e.MaxID) +} + +type ErrTxNotFound struct { + Hash []byte +} + +func (e ErrTxNotFound) Error() string { + return fmt.Sprintf("tx not found: %X", e.Hash) +} + +type ErrInvalidOrderBy struct { + OrderBy string +} + +func (e ErrInvalidOrderBy) Error() string { + return "invalid order_by: expected either `asc`, `desc`, or an empty value, but got " + e.OrderBy +} + +type ErrInvalidNodeType struct { + PeerID string + Expected string + Actual string +} + +func (e ErrInvalidNodeType) Error() string { + return fmt.Sprintf("peer %s has an invalid node type: expected %s but got %s", e.PeerID, e.Expected, e.Actual) +} diff --git a/rpc/core/events.go b/rpc/core/events.go index a4da11a1b73..7eab69e77b0 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -18,24 +18,37 @@ const ( maxQueryLength = 512 ) +type ErrParseQuery struct { + Source error +} + +func (e ErrParseQuery) Error() string { + return fmt.Sprintf("failed to parse query: %v", e.Source) +} + +func (e ErrParseQuery) Unwrap() error { + return e.Source +} + // Subscribe for events via WebSocket.
// More: https://docs.cometbft.com/main/rpc/#/Websocket/subscribe func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() - if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) - } else if env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) - } else if len(query) > maxQueryLength { - return nil, errors.New("maximum query length exceeded") + switch { + case env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients: + return nil, ErrMaxSubscription{env.Config.MaxSubscriptionClients} + case env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient: + return nil, ErrMaxPerClientSubscription{env.Config.MaxSubscriptionsPerClient} + case len(query) > maxQueryLength: + return nil, ErrQueryLength{len(query), maxQueryLength} } env.Logger.Info("Subscribe to query", "remote", addr, "query", query) q, err := cmtquery.New(query) if err != nil { - return nil, fmt.Errorf("failed to parse query: %w", err) + return nil, ErrParseQuery{Source: err} } subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) @@ -66,7 +79,7 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. if closeIfSlow { var ( - err = errors.New("subscription was canceled (reason: slow client)") + err = ErrSubCanceled{ErrSlowClient.Error()} resp = rpctypes.RPCServerError(subscriptionID, err) ) if !ctx.WSConn.TryWriteRPCResponse(resp) { @@ -77,15 +90,15 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. 
} } case <-sub.Canceled(): - if sub.Err() != cmtpubsub.ErrUnsubscribed { + if !errors.Is(sub.Err(), cmtpubsub.ErrUnsubscribed) { var reason string if sub.Err() == nil { - reason = "CometBFT exited" + reason = ErrCometBFTExited.Error() } else { reason = sub.Err().Error() } var ( - err = fmt.Errorf("subscription was canceled (reason: %s)", reason) + err = ErrSubCanceled{reason} resp = rpctypes.RPCServerError(subscriptionID, err) ) if !ctx.WSConn.TryWriteRPCResponse(resp) { @@ -108,12 +121,14 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctype env.Logger.Info("Unsubscribe from query", "remote", addr, "query", query) q, err := cmtquery.New(query) if err != nil { - return nil, fmt.Errorf("failed to parse query: %w", err) + return nil, ErrParseQuery{Source: err} } + err = env.EventBus.Unsubscribe(context.Background(), addr, q) if err != nil { return nil, err } + return &ctypes.ResultUnsubscribe{}, nil } diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index a71a1f9b1e3..ecf01f98823 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -1,8 +1,7 @@ package core import ( - "errors" - "fmt" + "reflect" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" @@ -16,15 +15,19 @@ func (env *Environment) BroadcastEvidence( ev types.Evidence, ) (*ctypes.ResultBroadcastEvidence, error) { if ev == nil { - return nil, errors.New("no evidence was provided") + return nil, ErrNoEvidence } if err := ev.ValidateBasic(); err != nil { - return nil, fmt.Errorf("evidence.ValidateBasic failed: %w", err) + return nil, ErrValidation{ + Source: err, + ValType: reflect.TypeOf(ev).String(), + } } if err := env.EvidencePool.AddEvidence(ev); err != nil { - return nil, fmt.Errorf("failed to add evidence: %w", err) + return nil, ErrAddEvidence{err} } + return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil } diff --git a/rpc/core/health.go b/rpc/core/health.go index c2918970ce0..628d6ed4dec 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -8,6 +8,6 @@ import ( // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. // More: https://docs.cometbft.com/main/rpc/#/Info/health -func (env *Environment) Health(*rpctypes.Context) (*ctypes.ResultHealth, error) { +func (*Environment) Health(*rpctypes.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index e8d2556d6ff..56a6325c27f 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -3,7 +3,6 @@ package core import ( "context" "errors" - "fmt" "time" abci "github.com/cometbft/cometbft/abci/types" @@ -12,9 +11,12 @@ import ( "github.com/cometbft/cometbft/types" ) -var ErrEndpointClosedCatchingUp = errors.New("endpoint is closed while node is catching up") +var ( + ErrEndpointClosedCatchingUp = errors.New("endpoint is closed while node is catching up") + ErrorEmptyTxHash = errors.New("transaction hash cannot be empty") +) -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // NOTE: tx should be signed, but this is only checked at the app level (not by CometBFT!) // BroadcastTxAsync returns right away, with no response. 
Does not wait for @@ -24,10 +26,13 @@ func (env *Environment) BroadcastTxAsync(_ *rpctypes.Context, tx types.Tx) (*cty if env.MempoolReactor.WaitSync() { return nil, ErrEndpointClosedCatchingUp } - _, err := env.Mempool.CheckTx(tx) + reqRes, err := env.MempoolReactor.TryAddTx(tx, nil) if err != nil { return nil, err } + if reqRes.Error() != nil { + return nil, reqRes.Error() + } return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil } @@ -39,20 +44,32 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct return nil, ErrEndpointClosedCatchingUp } - resCh := make(chan *abci.ResponseCheckTx, 1) - reqRes, err := env.Mempool.CheckTx(tx) + resCh := make(chan *abci.CheckTxResponse, 1) + resErrCh := make(chan error, 1) + reqRes, err := env.MempoolReactor.TryAddTx(tx, nil) if err != nil { return nil, err } - reqRes.SetCallback(func(res *abci.Response) { + go func() { + // Wait for a response. The ABCI client guarantees that it will eventually call + // reqRes.Done(), even in the case of error. + reqRes.Wait() select { case <-ctx.Context().Done(): - case resCh <- reqRes.Response.GetCheckTx(): + default: + if reqRes.Error() != nil { + resErrCh <- reqRes.Error() + } else { + resCh <- reqRes.Response.GetCheckTx() + } } - }) + }() + select { case <-ctx.Context().Done(): - return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Context().Err()) + return nil, ErrTxBroadcast{Source: ctx.Context().Err(), ErrReason: ErrConfirmationNotReceived} + case err := <-resErrCh: + return nil, ErrTxBroadcast{Source: ErrCheckTxFailed, ErrReason: err} case res := <-resCh: return &ctypes.ResultBroadcastTx{ Code: res.Code, @@ -74,9 +91,9 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* subscriber := ctx.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) + return nil, ErrMaxSubscription{env.Config.MaxSubscriptionClients} } else if env.EventBus.NumClientSubscriptions(subscriber) >= env.Config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) + return nil, ErrMaxPerClientSubscription{env.Config.MaxSubscriptionsPerClient} } // Subscribe to tx being committed in block. @@ -85,7 +102,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* q := types.EventQueryTxFor(tx) txSub, err := env.EventBus.Subscribe(subCtx, subscriber, q) if err != nil { - err = fmt.Errorf("failed to subscribe to tx: %w", err) + err = ErrTxSubFailed{Source: err, TxHash: tx.Hash()} env.Logger.Error("Error on broadcast_tx_commit", "err", err) return nil, err } @@ -96,21 +113,33 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* }() // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan *abci.ResponseCheckTx, 1) - reqRes, err := env.Mempool.CheckTx(tx) + checkTxResCh := make(chan *abci.CheckTxResponse, 1) + resErrCh := make(chan error, 1) + reqRes, err := env.MempoolReactor.TryAddTx(tx, nil) if err != nil { env.Logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) + return nil, ErrTxBroadcast{Source: err, ErrReason: ErrCheckTxFailed} } - reqRes.SetCallback(func(res *abci.Response) { + go func() { + // Wait for a response. The ABCI client guarantees that it will eventually call + // reqRes.Done(), even in the case of error. 
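+ // reqRes.Wait() below returns once that call happens, so this goroutine always terminates.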
+ reqRes.Wait() select { case <-ctx.Context().Done(): - case checkTxResCh <- reqRes.Response.GetCheckTx(): + default: + if reqRes.Error() != nil { + resErrCh <- reqRes.Error() + } else { + checkTxResCh <- reqRes.Response.GetCheckTx() + } } - }) + }() + select { case <-ctx.Context().Done(): - return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Context().Err()) + return nil, ErrTxBroadcast{Source: ctx.Context().Err(), ErrReason: ErrConfirmationNotReceived} + case err := <-resErrCh: + return nil, ErrTxBroadcast{Source: ErrCheckTxFailed, ErrReason: err} case checkTxRes := <-checkTxResCh: if checkTxRes.Code != abci.CodeTypeOK { return &ctypes.ResultBroadcastTxCommit{ @@ -133,11 +162,11 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* case <-txSub.Canceled(): var reason string if txSub.Err() == nil { - reason = "CometBFT exited" + reason = ErrCometBFTExited.Error() } else { reason = txSub.Err().Error() } - err = fmt.Errorf("txSub was canceled (reason: %s)", reason) + err = ErrSubCanceled{reason} env.Logger.Error("Error on broadcastTxCommit", "err", err) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -145,7 +174,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* Hash: tx.Hash(), }, err case <-time.After(env.Config.TimeoutBroadcastTxCommit): - err = errors.New("timed out waiting for tx to be included in a block") + err = ErrTimedOutWaitingForTx env.Logger.Error("Error on broadcastTxCommit", "err", err) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -156,6 +185,17 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* } } +// UnconfirmedTx gets unconfirmed transaction by hash. +func (env *Environment) UnconfirmedTx(_ *rpctypes.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + if len(hash) == 0 { + return nil, ErrorEmptyTxHash + } + + return &ctypes.ResultUnconfirmedTx{ + Tx: env.Mempool.GetTxByHash(hash), + }, nil +} + // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. // More: https://docs.cometbft.com/main/rpc/#/Info/unconfirmed_txs @@ -186,9 +226,9 @@ func (env *Environment) NumUnconfirmedTxs(*rpctypes.Context) (*ctypes.ResultUnco // be added to the mempool either. // More: https://docs.cometbft.com/main/rpc/#/Tx/check_tx func (env *Environment) CheckTx(_ *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTx(context.TODO(), &abci.RequestCheckTx{Tx: tx}) + res, err := env.ProxyAppMempool.CheckTx(context.TODO(), &abci.CheckTxRequest{Tx: tx, Type: abci.CHECK_TX_TYPE_CHECK}) if err != nil { return nil, err } - return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil + return &ctypes.ResultCheckTx{CheckTxResponse: *res}, nil } diff --git a/rpc/core/net.go b/rpc/core/net.go index 12ed51a769d..da6549adfcb 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -1,24 +1,35 @@ package core import ( + "encoding/base64" "errors" "fmt" + "os" "strings" + cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/p2p" + na "github.com/cometbft/cometbft/p2p/netaddr" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/types" ) // NetInfo returns network info. 
// More: https://docs.cometbft.com/main/rpc/#/Info/net_info func (env *Environment) NetInfo(*rpctypes.Context) (*ctypes.ResultNetInfo, error) { - peersList := env.P2PPeers.Peers().List() - peers := make([]ctypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - nodeInfo, ok := peer.NodeInfo().(p2p.DefaultNodeInfo) + peers := make([]ctypes.Peer, 0) + var err error + env.P2PPeers.Peers().ForEach(func(peer p2p.Peer) { + nodeInfo, ok := peer.NodeInfo().(ni.Default) if !ok { - return nil, fmt.Errorf("peer.NodeInfo() is not DefaultNodeInfo") + err = ErrInvalidNodeType{ + PeerID: string(peer.ID()), + Expected: fmt.Sprintf("%T", ni.Default{}), + Actual: fmt.Sprintf("%T", peer.NodeInfo()), + } + return } peers = append(peers, ctypes.Peer{ NodeInfo: nodeInfo, @@ -26,6 +37,9 @@ func (env *Environment) NetInfo(*rpctypes.Context) (*ctypes.ResultNetInfo, error ConnectionStatus: peer.Status(), RemoteIP: peer.RemoteIP().String(), }) + }) + if err != nil { + return nil, err + } // TODO: Should we include PersistentPeers and Seeds in here? // PRO: useful info @@ -97,32 +111,70 @@ func (env *Environment) UnsafeDialPeers( // Genesis returns genesis file. // More: https://docs.cometbft.com/main/rpc/#/Info/genesis func (env *Environment) Genesis(*rpctypes.Context) (*ctypes.ResultGenesis, error) { - if len(env.genChunks) > 1 { - return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") + if len(env.genesisChunksFiles) > 0 { + return nil, ErrGenesisRespSize } - return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil -} + fGenesis, err := os.ReadFile(env.GenesisFilePath) + if err != nil { + return nil, fmt.Errorf("retrieving genesis file from disk: %s", err) + } -func (env *Environment) GenesisChunked(_ *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { - if env.genChunks == nil { - return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") + genDoc := types.GenesisDoc{} + if err = cmtjson.Unmarshal(fGenesis, &genDoc); err != nil { + formatStr := "genesis file JSON format is invalid: %s" + return nil, fmt.Errorf(formatStr, err) } - if len(env.genChunks) == 0 { - return nil, fmt.Errorf("service configuration error, there are no chunks") + return &ctypes.ResultGenesis{Genesis: &genDoc}, nil +} + +func (env *Environment) GenesisChunked( + _ *rpctypes.Context, + chunkID uint, +) (*ctypes.ResultGenesisChunk, error) { + if len(env.genesisChunksFiles) == 0 { + // See discussion in the following PR for why we still serve chunk 0 even + // if env.genesisChunksFiles is empty: + // https://github.com/cometbft/cometbft/pull/4235#issuecomment-2389109521 + if chunkID == 0 { + fGenesis, err := os.ReadFile(env.GenesisFilePath) + if err != nil { + return nil, fmt.Errorf("retrieving genesis file from disk: %w", err) + } + + genesisBase64 := base64.StdEncoding.EncodeToString(fGenesis) + + resp := &ctypes.ResultGenesisChunk{ + TotalChunks: 1, + ChunkNumber: 0, + Data: genesisBase64, + } + + return resp, nil + } + + return nil, ErrServiceConfig{ErrNoChunks} } - id := int(chunk) + id := int(chunkID) + + if id > len(env.genesisChunksFiles)-1 { + return nil, ErrInvalidChunkID{id, len(env.genesisChunksFiles) - 1} + } - if id > len(env.genChunks)-1 { - return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id) + chunkPath := env.genesisChunksFiles[id] + chunk, err := os.ReadFile(chunkPath) + if err != nil { + return nil, fmt.Errorf("retrieving chunk %d from disk: %w", id, err) + } + chunkBase64 =
base64.StdEncoding.EncodeToString(chunk) + return &ctypes.ResultGenesisChunk{ - TotalChunks: len(env.genChunks), + TotalChunks: len(env.genesisChunksFiles), ChunkNumber: id, - Data: env.genChunks[id], + Data: chunkBase64, }, nil } @@ -130,13 +182,11 @@ func getIDs(peers []string) ([]string, error) { ids := make([]string, 0, len(peers)) for _, peer := range peers { - spl := strings.Split(peer, "@") if len(spl) != 2 { - return nil, p2p.ErrNetAddressNoID{Addr: peer} + return nil, na.ErrNoID{Addr: peer} } ids = append(ids, spl[0]) - } return ids, nil } diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go index 5791621129e..d1b58b3497f 100644 --- a/rpc/core/net_test.go +++ b/rpc/core/net_test.go @@ -14,7 +14,7 @@ import ( func TestUnsafeDialSeeds(t *testing.T) { sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + func(_ int, sw *p2p.Switch) *p2p.Switch { return sw }) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -39,9 +39,9 @@ func TestUnsafeDialSeeds(t *testing.T) { for _, tc := range testCases { res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) if tc.isErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, res) } } @@ -49,7 +49,7 @@ func TestUnsafeDialSeeds(t *testing.T) { func TestUnsafeDialPeers(t *testing.T) { sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + func(_ int, sw *p2p.Switch) *p2p.Switch { return sw }) sw.SetAddrBook(&p2p.AddrBookMock{ Addrs: make(map[string]struct{}), OurAddrs: make(map[string]struct{}), @@ -80,9 +80,9 @@ func TestUnsafeDialPeers(t *testing.T) { for _, tc := range testCases { res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) if tc.isErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, res) } } diff --git a/rpc/core/routes.go b/rpc/core/routes.go index c7c13a52781..827c4f31b14 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -37,6 +37,7 @@ func (env *Environment) GetRoutes() RoutesMap { "dump_consensus_state": rpc.NewRPCFunc(env.DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(env.GetConsensusState, ""), "consensus_params": rpc.NewRPCFunc(env.ConsensusParams, "height", rpc.Cacheable("height")), + "unconfirmed_tx": rpc.NewRPCFunc(env.UnconfirmedTx, "hash"), "unconfirmed_txs": rpc.NewRPCFunc(env.UnconfirmedTxs, "limit"), "num_unconfirmed_txs": rpc.NewRPCFunc(env.NumUnconfirmedTxs, ""), diff --git a/rpc/core/status.go b/rpc/core/status.go index 6f56d1a9cdc..d81eb5cff40 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -4,7 +4,7 @@ import ( "time" cmtbytes "github.com/cometbft/cometbft/libs/bytes" - "github.com/cometbft/cometbft/p2p" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" "github.com/cometbft/cometbft/types" @@ -52,7 +52,7 @@ func (env *Environment) Status(*rpctypes.Context) (*ctypes.ResultStatus, error) } result := &ctypes.ResultStatus{ - NodeInfo: env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo), + NodeInfo: env.P2PTransport.NodeInfo().(ni.Default), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 710cbb46441..d6aeadf6413 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go 
@@ -1,18 +1,19 @@ package core import ( - "errors" - "fmt" - "sort" - - cmtmath "github.com/cometbft/cometbft/libs/math" cmtquery "github.com/cometbft/cometbft/libs/pubsub/query" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/state/txindex/null" "github.com/cometbft/cometbft/types" ) +const ( + Ascending = "asc" + Descending = "desc" +) + // Tx allows you to query the transaction results. `nil` could mean the // transaction is in the mempool, invalidated, or was not sent in the first // place. @@ -20,7 +21,7 @@ import ( func (env *Environment) Tx(_ *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { - return nil, fmt.Errorf("transaction indexing is disabled") + return nil, ErrTxIndexingDisabled } r, err := env.TxIndexer.Get(hash) @@ -29,13 +30,15 @@ func (env *Environment) Tx(_ *rpctypes.Context, hash []byte, prove bool) (*ctype } if r == nil { - return nil, fmt.Errorf("tx (%X) not found", hash) + return nil, ErrTxNotFound{hash} } var proof types.TxProof if prove { - block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) + block, _ := env.BlockStore.LoadBlock(r.Height) + if block != nil { + proof = block.Data.Txs.Proof(int(r.Index)) + } } return &ctypes.ResultTx{ @@ -60,61 +63,49 @@ func (env *Environment) TxSearch( ) (*ctypes.ResultTxSearch, error) { // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { - return nil, errors.New("transaction indexing is disabled") + return nil, ErrTxIndexingDisabled } else if len(query) > maxQueryLength { - return nil, errors.New("maximum query length exceeded") + return nil, ErrQueryLength{len(query), maxQueryLength} } - q, err := cmtquery.New(query) - if err != nil { - return nil, err + // if orderBy is not "asc", "desc", or blank, return error + if orderBy != "" && orderBy != Ascending && orderBy != Descending { + return nil, ErrInvalidOrderBy{orderBy} } - results, err := env.TxIndexer.Search(ctx.Context(), q) + q, err := cmtquery.New(query) if err != nil { return nil, err } - // sort results (must be done before pagination) - switch orderBy { - case "desc": - sort.Slice(results, func(i, j int) bool { - if results[i].Height == results[j].Height { - return results[i].Index > results[j].Index - } - return results[i].Height > results[j].Height - }) - case "asc", "": - sort.Slice(results, func(i, j int) bool { - if results[i].Height == results[j].Height { - return results[i].Index < results[j].Index - } - return results[i].Height < results[j].Height - }) - default: - return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") + // Validate number of results per page + perPage := env.validatePerPage(perPagePtr) + if pagePtr == nil { + // Default to page 1 if not specified + pagePtr = new(int) + *pagePtr = 1 } - // paginate results - totalCount := len(results) - perPage := env.validatePerPage(perPagePtr) + pagSettings := txindex.Pagination{ + OrderDesc: orderBy == Descending, + IsPaginated: true, + Page: *pagePtr, + PerPage: perPage, + } - page, err := validatePage(pagePtr, perPage, totalCount) + results, totalCount, err := env.TxIndexer.Search(ctx.Context(), q, pagSettings) if err != nil { return nil, err } - skipCount := validateSkipCount(page, perPage) - pageSize := cmtmath.MinInt(perPage, totalCount-skipCount) - - apiResults 
:= make([]*ctypes.ResultTx, 0, pageSize) - for i := skipCount; i < skipCount+pageSize; i++ { - r := results[i] - + apiResults := make([]*ctypes.ResultTx, 0, len(results)) + for _, r := range results { var proof types.TxProof if prove { - block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) + block, _ := env.BlockStore.LoadBlock(r.Height) + if block != nil { + proof = block.Data.Txs.Proof(int(r.Index)) + } } apiResults = append(apiResults, &ctypes.ResultTx{ diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 36d3ef87306..4740051c4d7 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -4,21 +4,22 @@ import ( "encoding/json" "time" - abci "github.com/cometbft/cometbft/abci/types" + abcitypes "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/p2p" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" "github.com/cometbft/cometbft/types" ) -// List of blocks +// List of blocks. type ResultBlockchainInfo struct { LastHeight int64 `json:"last_height"` BlockMetas []*types.BlockMeta `json:"block_metas"` } -// Genesis file +// Genesis file. type ResultGenesis struct { Genesis *types.GenesisDoc `json:"genesis"` } @@ -33,38 +34,38 @@ type ResultGenesisChunk struct { Data string `json:"data"` } -// Single block (with meta) +// Single block (with meta). type ResultBlock struct { BlockID types.BlockID `json:"block_id"` Block *types.Block `json:"block"` } -// ResultHeader represents the response for a Header RPC Client query +// ResultHeader represents the response for a Header RPC Client query. type ResultHeader struct { Header *types.Header `json:"header"` } -// Commit and Header +// Commit and Header. type ResultCommit struct { types.SignedHeader `json:"signed_header"` CanonicalCommit bool `json:"canonical"` } -// ABCI results from a block +// ABCI results from a block. type ResultBlockResults struct { - Height int64 `json:"height"` - TxsResults []*abci.ExecTxResult `json:"txs_results"` - FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` - ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` - ConsensusParamUpdates *cmtproto.ConsensusParams `json:"consensus_param_updates"` - AppHash []byte `json:"app_hash"` + Height int64 `json:"height"` + TxResults []*abcitypes.ExecTxResult `json:"txs_results"` + FinalizeBlockEvents []abcitypes.Event `json:"finalize_block_events"` + ValidatorUpdates []abcitypes.ValidatorUpdate `json:"validator_updates"` + ConsensusParamUpdates *cmtproto.ConsensusParams `json:"consensus_param_updates"` + AppHash []byte `json:"app_hash"` } // NewResultCommit is a helper to initialize the ResultCommit with -// the embedded struct +// the embedded struct. func NewResultCommit(header *types.Header, commit *types.Commit, - canonical bool) *ResultCommit { - + canonical bool, +) *ResultCommit { return &ResultCommit{ SignedHeader: types.SignedHeader{ Header: header, @@ -74,7 +75,7 @@ func NewResultCommit(header *types.Header, commit *types.Commit, } } -// Info about the node's syncing state +// Info about the node's syncing state. 
type SyncInfo struct { LatestBlockHash bytes.HexBytes `json:"latest_block_hash"` LatestAppHash bytes.HexBytes `json:"latest_app_hash"` @@ -89,21 +90,21 @@ type SyncInfo struct { CatchingUp bool `json:"catching_up"` } -// Info about the node's validator +// Info about the node's validator. type ValidatorInfo struct { Address bytes.HexBytes `json:"address"` PubKey crypto.PubKey `json:"pub_key"` VotingPower int64 `json:"voting_power"` } -// Node Status +// Node Status. type ResultStatus struct { - NodeInfo p2p.DefaultNodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` + NodeInfo ni.Default `json:"node_info"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` } -// Is TxIndexing enabled +// Is TxIndexing enabled. func (s *ResultStatus) TxIndexEnabled() bool { if s == nil { return false @@ -111,7 +112,7 @@ func (s *ResultStatus) TxIndexEnabled() bool { return s.NodeInfo.Other.TxIndex == "on" } -// Info about peer connections +// Info about peer connections. type ResultNetInfo struct { Listening bool `json:"listening"` Listeners []string `json:"listeners"` @@ -119,19 +120,19 @@ type ResultNetInfo struct { Peers []Peer `json:"peers"` } -// Log from dialing seeds +// Log from dialing seeds. type ResultDialSeeds struct { Log string `json:"log"` } -// Log from dialing peers +// Log from dialing peers. type ResultDialPeers struct { Log string `json:"log"` } -// A peer +// A peer. type Peer struct { - NodeInfo p2p.DefaultNodeInfo `json:"node_info"` + NodeInfo ni.Default `json:"node_info"` IsOutbound bool `json:"is_outbound"` ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` RemoteIP string `json:"remote_ip"` @@ -147,31 +148,31 @@ type ResultValidators struct { Total int `json:"total"` } -// ConsensusParams for given height +// ConsensusParams for given height. type ResultConsensusParams struct { BlockHeight int64 `json:"block_height"` ConsensusParams types.ConsensusParams `json:"consensus_params"` } // Info about the consensus state. -// UNSTABLE +// UNSTABLE. type ResultDumpConsensusState struct { RoundState json.RawMessage `json:"round_state"` Peers []PeerStateInfo `json:"peers"` } -// UNSTABLE +// UNSTABLE. type PeerStateInfo struct { NodeAddress string `json:"node_address"` PeerState json.RawMessage `json:"peer_state"` } -// UNSTABLE +// UNSTABLE. type ResultConsensusState struct { RoundState json.RawMessage `json:"round_state"` } -// CheckTx result +// CheckTx result. type ResultBroadcastTx struct { Code uint32 `json:"code"` Data bytes.HexBytes `json:"data"` @@ -181,30 +182,30 @@ type ResultBroadcastTx struct { Hash bytes.HexBytes `json:"hash"` } -// CheckTx and ExecTx results +// CheckTx and ExecTx results. type ResultBroadcastTxCommit struct { - CheckTx abci.ResponseCheckTx `json:"check_tx"` - TxResult abci.ExecTxResult `json:"tx_result"` - Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height"` + CheckTx abcitypes.CheckTxResponse `json:"check_tx"` + TxResult abcitypes.ExecTxResult `json:"tx_result"` + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height"` } -// ResultCheckTx wraps abci.ResponseCheckTx. +// ResultCheckTx wraps abci.CheckTxResponse. type ResultCheckTx struct { - abci.ResponseCheckTx + abcitypes.CheckTxResponse } -// Result of querying for a tx +// Result of querying for a tx. 
type ResultTx struct { - Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height"` - Index uint32 `json:"index"` - TxResult abci.ExecTxResult `json:"tx_result"` - Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height"` + Index uint32 `json:"index"` + TxResult abcitypes.ExecTxResult `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` } -// Result of searching for txs +// Result of searching for txs. type ResultTxSearch struct { Txs []*ResultTx `json:"txs"` TotalCount int `json:"total_count"` @@ -216,7 +217,12 @@ type ResultBlockSearch struct { TotalCount int `json:"total_count"` } -// List of mempool txs +// Single mempool tx. +type ResultUnconfirmedTx struct { + Tx types.Tx `json:"tx"` +} + +// List of mempool txs. type ResultUnconfirmedTxs struct { Count int `json:"n_txs"` Total int `json:"total"` @@ -224,22 +230,22 @@ type ResultUnconfirmedTxs struct { Txs []types.Tx `json:"txs"` } -// Info abci msg +// Info abci msg. type ResultABCIInfo struct { - Response abci.ResponseInfo `json:"response"` + Response abcitypes.InfoResponse `json:"response"` } -// Query abci msg +// Query abci msg. type ResultABCIQuery struct { - Response abci.ResponseQuery `json:"response"` + Response abcitypes.QueryResponse `json:"response"` } -// Result of broadcasting evidence +// Result of broadcasting evidence. type ResultBroadcastEvidence struct { Hash []byte `json:"hash"` } -// empty results +// empty results. type ( ResultUnsafeFlushMempool struct{} ResultUnsafeProfile struct{} @@ -248,7 +254,7 @@ type ( ResultHealth struct{} ) -// Event data from a subscription +// Event data from a subscription. type ResultEvent struct { Query string `json:"query"` Data types.TMEventData `json:"data"` diff --git a/rpc/core/types/responses_test.go b/rpc/core/types/responses_test.go index 772fb6d6166..218068733b4 100644 --- a/rpc/core/types/responses_test.go +++ b/rpc/core/types/responses_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/cometbft/cometbft/p2p" + ni "github.com/cometbft/cometbft/p2p/nodeinfo" ) func TestStatusIndexer(t *testing.T) { @@ -15,17 +15,17 @@ func TestStatusIndexer(t *testing.T) { status = &ResultStatus{} assert.False(t, status.TxIndexEnabled()) - status.NodeInfo = p2p.DefaultNodeInfo{} + status.NodeInfo = ni.Default{} assert.False(t, status.TxIndexEnabled()) cases := []struct { expected bool - other p2p.DefaultNodeInfoOther + other ni.DefaultOther }{ - {false, p2p.DefaultNodeInfoOther{}}, - {false, p2p.DefaultNodeInfoOther{TxIndex: "aa"}}, - {false, p2p.DefaultNodeInfoOther{TxIndex: "off"}}, - {true, p2p.DefaultNodeInfoOther{TxIndex: "on"}}, + {false, ni.DefaultOther{}}, + {false, ni.DefaultOther{TxIndex: "aa"}}, + {false, ni.DefaultOther{TxIndex: "off"}}, + {true, ni.DefaultOther{TxIndex: "on"}}, } for _, tc := range cases { diff --git a/rpc/grpc/client/block_results_service.go b/rpc/grpc/client/block_results_service.go index be2eb23e0a8..fb88acb00e2 100644 --- a/rpc/grpc/client/block_results_service.go +++ b/rpc/grpc/client/block_results_service.go @@ -2,19 +2,17 @@ package client import ( "context" - "fmt" - - abci "github.com/cometbft/cometbft/abci/types" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cosmos/gogoproto/grpc" - brs "github.com/cometbft/cometbft/proto/tendermint/services/block_results/v1" + abci "github.com/cometbft/cometbft/abci/types" + brs 
"github.com/cometbft/cometbft/api/cometbft/services/block_results/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" ) type BlockResults struct { Height int64 `json:"height"` - TxsResults []*abci.ExecTxResult `json:"txs_results"` + TxResults []*abci.ExecTxResult `json:"txs_results"` FinalizeBlockEvents []*abci.Event `json:"finalize_block_events"` ValidatorUpdates []*abci.ValidatorUpdate `json:"validator_updates"` ConsensusParamUpdates *cmtproto.ConsensusParams `json:"consensus_param_updates"` @@ -24,7 +22,6 @@ type BlockResults struct { // BlockResultsServiceClient provides the block results of a given height (or latest if none provided). type BlockResultsServiceClient interface { GetBlockResults(ctx context.Context, height int64) (*BlockResults, error) - GetLatestBlockResults(ctx context.Context) (*BlockResults, error) } type blockResultServiceClient struct { @@ -34,28 +31,12 @@ type blockResultServiceClient struct { func (b blockResultServiceClient) GetBlockResults(ctx context.Context, height int64) (*BlockResults, error) { res, err := b.client.GetBlockResults(ctx, &brs.GetBlockResultsRequest{Height: height}) if err != nil { - return nil, fmt.Errorf("error fetching BlockResults for height %d:: %s", height, err.Error()) - } - - return &BlockResults{ - Height: res.Height, - TxsResults: res.TxsResults, - FinalizeBlockEvents: res.FinalizeBlockEvents, - ValidatorUpdates: res.ValidatorUpdates, - ConsensusParamUpdates: res.ConsensusParamUpdates, - AppHash: res.AppHash, - }, nil -} - -func (b blockResultServiceClient) GetLatestBlockResults(ctx context.Context) (*BlockResults, error) { - res, err := b.client.GetLatestBlockResults(ctx, &brs.GetLatestBlockResultsRequest{}) - if err != nil { - return nil, fmt.Errorf("error fetching BlockResults for latest height :: %s", err.Error()) + return nil, ErrBlockResults{Height: height, Source: err} } return &BlockResults{ Height: res.Height, - TxsResults: res.TxsResults, + TxResults: res.TxResults, FinalizeBlockEvents: res.FinalizeBlockEvents, ValidatorUpdates: res.ValidatorUpdates, ConsensusParamUpdates: res.ConsensusParamUpdates, @@ -75,12 +56,7 @@ func newDisabledBlockResultsServiceClient() BlockResultsServiceClient { return &disabledBlockResultsServiceClient{} } -// GetBlockResults implements BlockResultsServiceClient +// GetBlockResults implements BlockResultsServiceClient. func (*disabledBlockResultsServiceClient) GetBlockResults(_ context.Context, _ int64) (*BlockResults, error) { panic("block results service client is disabled") } - -// GetLatestBlockResults implements BlockResultsServiceClient -func (*disabledBlockResultsServiceClient) GetLatestBlockResults(_ context.Context) (*BlockResults, error) { - panic("block results service client is disabled") -} diff --git a/rpc/grpc/client/block_service.go b/rpc/grpc/client/block_service.go index 271c1cbac6d..73c4765b35e 100644 --- a/rpc/grpc/client/block_service.go +++ b/rpc/grpc/client/block_service.go @@ -2,12 +2,12 @@ package client import ( "context" - "fmt" - blocksvc "github.com/cometbft/cometbft/proto/tendermint/services/block/v1" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/cometbft/cometbft/types" "github.com/cosmos/gogoproto/grpc" + + blocksvc "github.com/cometbft/cometbft/api/cometbft/services/block/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/types" ) // Block data returned by the CometBFT BlockService gRPC API. 
@@ -34,7 +34,7 @@ func blockFromProto(pblockID *cmtproto.BlockID, pblock *cmtproto.Block) (*Block, } // LatestHeightResult type used in GetLatestResult and send to the client -// via a channel +// via a channel. type LatestHeightResult struct { Height int64 Error error @@ -54,15 +54,12 @@ func GetLatestHeightChannelSize(sz uint) GetLatestHeightOption { } } -// BlockServiceClient provides block information +// BlockServiceClient provides block information. type BlockServiceClient interface { // GetBlockByHeight attempts to retrieve the block associated with the // given height. GetBlockByHeight(ctx context.Context, height int64) (*Block, error) - // GetLatestBlock attempts to retrieve the latest committed block. - GetLatestBlock(ctx context.Context) (*Block, error) - // GetLatestHeight provides sends the latest committed block height to the // resulting output channel as blocks are committed. GetLatestHeight(ctx context.Context, opts ...GetLatestHeightOption) (<-chan LatestHeightResult, error) @@ -78,7 +75,7 @@ func newBlockServiceClient(conn grpc.ClientConn) BlockServiceClient { } } -// GetBlockByHeight implements BlockServiceClient GetBlockByHeight +// GetBlockByHeight implements BlockServiceClient GetBlockByHeight. func (c *blockServiceClient) GetBlockByHeight(ctx context.Context, height int64) (*Block, error) { res, err := c.client.GetByHeight(ctx, &blocksvc.GetByHeightRequest{ Height: height, @@ -90,23 +87,13 @@ func (c *blockServiceClient) GetBlockByHeight(ctx context.Context, height int64) return blockFromProto(res.BlockId, res.Block) } -// GetLatestBlock implements BlockServiceClient. -func (c *blockServiceClient) GetLatestBlock(ctx context.Context) (*Block, error) { - res, err := c.client.GetLatest(ctx, &blocksvc.GetLatestRequest{}) - if err != nil { - return nil, err - } - - return blockFromProto(res.BlockId, res.Block) -} - -// GetLatestHeight implements BlockServiceClient GetLatestHeight +// GetLatestHeight implements BlockServiceClient GetLatestHeight. func (c *blockServiceClient) GetLatestHeight(ctx context.Context, opts ...GetLatestHeightOption) (<-chan LatestHeightResult, error) { req := blocksvc.GetLatestHeightRequest{} latestHeightClient, err := c.client.GetLatestHeight(ctx, &req) if err != nil { - return nil, fmt.Errorf("error getting a stream for the latest height: %w", err) + return nil, ErrStreamSetup{Source: err} } cfg := &getLatestHeightConfig{} @@ -120,7 +107,7 @@ func (c *blockServiceClient) GetLatestHeight(ctx context.Context, opts ...GetLat for { response, err := client.Recv() if err != nil { - res := LatestHeightResult{Error: fmt.Errorf("error receiving the latest height from a stream: %w", err)} + res := LatestHeightResult{Error: ErrStreamReceive{Source: err}} select { case <-ctx.Done(): case resultCh <- res: @@ -136,7 +123,6 @@ func (c *blockServiceClient) GetLatestHeight(ctx context.Context, opts ...GetLat // Skip sending this result because the channel is full - the // client will get the next one once the channel opens up again } - } }(latestHeightClient) @@ -149,7 +135,7 @@ func newDisabledBlockServiceClient() BlockServiceClient { return &disabledBlockServiceClient{} } -// GetBlockByHeight implements BlockServiceClient GetBlockByHeight - disabled client +// GetBlockByHeight implements BlockServiceClient GetBlockByHeight - disabled client. 
func (*disabledBlockServiceClient) GetBlockByHeight(context.Context, int64) (*Block, error) { panic("block service client is disabled") } @@ -159,7 +145,7 @@ func (*disabledBlockServiceClient) GetLatestBlock(context.Context) (*Block, erro panic("block service client is disabled") } -// GetLatestHeight implements BlockServiceClient GetLatestHeight - disabled client +// GetLatestHeight implements BlockServiceClient GetLatestHeight - disabled client. func (*disabledBlockServiceClient) GetLatestHeight(context.Context, ...GetLatestHeightOption) (<-chan LatestHeightResult, error) { panic("block service client is disabled") } diff --git a/rpc/grpc/client/client.go b/rpc/grpc/client/client.go index 2e76ca0ae79..d40a10a72ce 100644 --- a/rpc/grpc/client/client.go +++ b/rpc/grpc/client/client.go @@ -6,13 +6,12 @@ package client import ( "context" - "fmt" "net" ggrpc "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - cmtnet "github.com/cometbft/cometbft/libs/net" + cmtnet "github.com/cometbft/cometbft/internal/net" ) type Option func(*clientBuilder) @@ -95,6 +94,17 @@ func WithBlockServiceEnabled(enabled bool) Option { } } +// WithBlockResultsServiceEnabled allows control of whether or not to create a +// client for interacting with the block results service of a CometBFT node. +// +// If disabled and the client attempts to access the block results service API, the +// client will panic. +func WithBlockResultsServiceEnabled(enabled bool) Option { + return func(b *clientBuilder) { + b.blockResultsServiceEnabled = enabled + } +} + // WithGRPCDialOption allows passing lower-level gRPC dial options through to // the gRPC dialer when creating the client. func WithGRPCDialOption(opt ggrpc.DialOption) Option { @@ -112,15 +122,16 @@ func WithGRPCDialOption(opt ggrpc.DialOption) Option { // To connect to a gRPC server with TLS, use the WithGRPCDialOption option with // the appropriate gRPC credentials configuration. See // https://pkg.go.dev/google.golang.org/grpc#WithTransportCredentials -func New(ctx context.Context, addr string, opts ...Option) (Client, error) { +func New(_ context.Context, addr string, opts ...Option) (Client, error) { builder := newClientBuilder() for _, opt := range opts { opt(builder) } - conn, err := ggrpc.DialContext(ctx, addr, builder.grpcOpts...) + conn, err := ggrpc.NewClient(addr, builder.grpcOpts...) 
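+ // Unlike the DialContext call it replaces, grpc.NewClient does not dial eagerly; the underlying connection is established lazily, on the first RPC.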
if err != nil { - return nil, fmt.Errorf("failed to dial %s: %w", addr, err) + return nil, ErrDial{addr, err} } + versionServiceClient := newDisabledVersionServiceClient() if builder.versionServiceEnabled { versionServiceClient = newVersionServiceClient(conn) diff --git a/rpc/grpc/client/errors.go b/rpc/grpc/client/errors.go new file mode 100644 index 00000000000..f5c4377635d --- /dev/null +++ b/rpc/grpc/client/errors.go @@ -0,0 +1,49 @@ +package client + +import "fmt" + +type ErrBlockResults struct { + Height int64 + Source error +} + +func (e ErrBlockResults) Error() string { + return fmt.Sprintf("error fetching BlockResults for height %d: %s", e.Height, e.Source.Error()) +} + +type ErrStreamSetup struct { + Source error +} + +func (e ErrStreamSetup) Error() string { + return "error getting a stream for the latest height: " + e.Source.Error() +} + +func (e ErrStreamSetup) Unwrap() error { + return e.Source +} + +type ErrStreamReceive struct { + Source error +} + +func (e ErrStreamReceive) Error() string { + return "error receiving the latest height from a stream: " + e.Source.Error() +} + +func (e ErrStreamReceive) Unwrap() error { + return e.Source +} + +type ErrDial struct { + Addr string + Source error +} + +func (e ErrDial) Error() string { + return fmt.Sprintf("failed to dial: address %s: %v", e.Addr, e.Source) +} + +func (e ErrDial) Unwrap() error { + return e.Source +} diff --git a/rpc/grpc/client/privileged/privileged.go b/rpc/grpc/client/privileged/privileged.go index 858082d7d77..fbbb5518c35 100644 --- a/rpc/grpc/client/privileged/privileged.go +++ b/rpc/grpc/client/privileged/privileged.go @@ -2,13 +2,13 @@ package privileged import ( "context" - "fmt" "net" ggrpc "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - cmtnet "github.com/cometbft/cometbft/libs/net" + cmtnet "github.com/cometbft/cometbft/internal/net" + grpcclient "github.com/cometbft/cometbft/rpc/grpc/client" ) type Option func(*clientBuilder) @@ -90,15 +90,16 @@ func WithGRPCDialOption(opt ggrpc.DialOption) Option { // To connect to a gRPC server with TLS, use the WithGRPCDialOption option with // the appropriate gRPC credentials configuration. See // https://pkg.go.dev/google.golang.org/grpc#WithTransportCredentials -func New(ctx context.Context, addr string, opts ...Option) (Client, error) { +func New(_ context.Context, addr string, opts ...Option) (Client, error) { builder := newClientBuilder() for _, opt := range opts { opt(builder) } - conn, err := ggrpc.DialContext(ctx, addr, builder.grpcOpts...) + conn, err := ggrpc.NewClient(addr, builder.grpcOpts...) 
if err != nil { - return nil, fmt.Errorf("failed to dial %s: %w", addr, err) + return nil, grpcclient.ErrDial{Addr: addr, Source: err} } + pruningServiceClient := newDisabledPruningServiceClient() if builder.pruningServiceEnabled { pruningServiceClient = newPruningServiceClient(conn) diff --git a/rpc/grpc/client/privileged/pruning_service.go b/rpc/grpc/client/privileged/pruning_service.go index e63c97f45da..8eb63d048f9 100644 --- a/rpc/grpc/client/privileged/pruning_service.go +++ b/rpc/grpc/client/privileged/pruning_service.go @@ -5,7 +5,7 @@ import ( "github.com/cosmos/gogoproto/grpc" - v1 "github.com/cometbft/cometbft/proto/tendermint/services/pruning/v1" + pbsvc "github.com/cometbft/cometbft/api/cometbft/services/pruning/v1" ) // RetainHeights provides information on which block height limits have been @@ -28,41 +28,41 @@ type PruningServiceClient interface { } type pruningServiceClient struct { - inner v1.PruningServiceClient + inner pbsvc.PruningServiceClient } func newPruningServiceClient(conn grpc.ClientConn) PruningServiceClient { return &pruningServiceClient{ - inner: v1.NewPruningServiceClient(conn), + inner: pbsvc.NewPruningServiceClient(conn), } } func (c *pruningServiceClient) SetBlockIndexerRetainHeight(ctx context.Context, height uint64) error { - _, err := c.inner.SetBlockIndexerRetainHeight(ctx, &v1.SetBlockIndexerRetainHeightRequest{ + _, err := c.inner.SetBlockIndexerRetainHeight(ctx, &pbsvc.SetBlockIndexerRetainHeightRequest{ Height: height, }) return err } func (c *pruningServiceClient) GetBlockIndexerRetainHeight(ctx context.Context) (uint64, error) { - res, err := c.inner.GetBlockIndexerRetainHeight(ctx, &v1.GetBlockIndexerRetainHeightRequest{}) + res, err := c.inner.GetBlockIndexerRetainHeight(ctx, &pbsvc.GetBlockIndexerRetainHeightRequest{}) if err != nil { return 0, err } return res.Height, nil } -// SetTxIndexerRetainHeight implements PruningServiceClient +// SetTxIndexerRetainHeight implements PruningServiceClient. func (c *pruningServiceClient) SetTxIndexerRetainHeight(ctx context.Context, height uint64) error { - _, err := c.inner.SetTxIndexerRetainHeight(ctx, &v1.SetTxIndexerRetainHeightRequest{ + _, err := c.inner.SetTxIndexerRetainHeight(ctx, &pbsvc.SetTxIndexerRetainHeightRequest{ Height: height, }) return err } -// GetTxIndexerRetainHeight implements PruningServiceClient +// GetTxIndexerRetainHeight implements PruningServiceClient. func (c *pruningServiceClient) GetTxIndexerRetainHeight(ctx context.Context) (uint64, error) { - res, err := c.inner.GetTxIndexerRetainHeight(ctx, &v1.GetTxIndexerRetainHeightRequest{}) + res, err := c.inner.GetTxIndexerRetainHeight(ctx, &pbsvc.GetTxIndexerRetainHeightRequest{}) if err != nil { return 0, err } @@ -71,7 +71,7 @@ func (c *pruningServiceClient) GetTxIndexerRetainHeight(ctx context.Context) (ui // SetBlockRetainHeight implements PruningServiceClient. func (c *pruningServiceClient) SetBlockRetainHeight(ctx context.Context, height uint64) error { - _, err := c.inner.SetBlockRetainHeight(ctx, &v1.SetBlockRetainHeightRequest{ + _, err := c.inner.SetBlockRetainHeight(ctx, &pbsvc.SetBlockRetainHeightRequest{ Height: height, }) return err @@ -79,7 +79,7 @@ func (c *pruningServiceClient) SetBlockRetainHeight(ctx context.Context, height // GetBlockRetainHeight implements PruningServiceClient. 
func (c *pruningServiceClient) GetBlockRetainHeight(ctx context.Context) (RetainHeights, error) { - res, err := c.inner.GetBlockRetainHeight(ctx, &v1.GetBlockRetainHeightRequest{}) + res, err := c.inner.GetBlockRetainHeight(ctx, &pbsvc.GetBlockRetainHeightRequest{}) if err != nil { return RetainHeights{}, err } @@ -91,7 +91,7 @@ func (c *pruningServiceClient) GetBlockRetainHeight(ctx context.Context) (Retain // SetBlockResultsRetainHeight implements PruningServiceClient. func (c *pruningServiceClient) SetBlockResultsRetainHeight(ctx context.Context, height uint64) error { - _, err := c.inner.SetBlockResultsRetainHeight(ctx, &v1.SetBlockResultsRetainHeightRequest{ + _, err := c.inner.SetBlockResultsRetainHeight(ctx, &pbsvc.SetBlockResultsRetainHeightRequest{ Height: height, }) return err @@ -99,7 +99,7 @@ func (c *pruningServiceClient) SetBlockResultsRetainHeight(ctx context.Context, // GetBlockResultsRetainHeight implements PruningServiceClient. func (c *pruningServiceClient) GetBlockResultsRetainHeight(ctx context.Context) (uint64, error) { - res, err := c.inner.GetBlockResultsRetainHeight(ctx, &v1.GetBlockResultsRetainHeightRequest{}) + res, err := c.inner.GetBlockResultsRetainHeight(ctx, &pbsvc.GetBlockResultsRetainHeightRequest{}) if err != nil { return 0, err } @@ -132,18 +132,18 @@ func (*disabledPruningServiceClient) GetBlockResultsRetainHeight(context.Context panic("pruning service client is disabled") } -func (c *disabledPruningServiceClient) SetTxIndexerRetainHeight(context.Context, uint64) error { +func (*disabledPruningServiceClient) SetTxIndexerRetainHeight(context.Context, uint64) error { panic("pruning service client is disabled") } -func (c *disabledPruningServiceClient) GetTxIndexerRetainHeight(context.Context) (uint64, error) { +func (*disabledPruningServiceClient) GetTxIndexerRetainHeight(context.Context) (uint64, error) { panic("pruning service client is disabled") } -func (c *disabledPruningServiceClient) SetBlockIndexerRetainHeight(context.Context, uint64) error { +func (*disabledPruningServiceClient) SetBlockIndexerRetainHeight(context.Context, uint64) error { panic("pruning service client is disabled") } -func (c *disabledPruningServiceClient) GetBlockIndexerRetainHeight(context.Context) (uint64, error) { +func (*disabledPruningServiceClient) GetBlockIndexerRetainHeight(context.Context) (uint64, error) { panic("pruning service client is disabled") } diff --git a/rpc/grpc/client/version_service.go b/rpc/grpc/client/version_service.go index c11fd109b36..bef0fa445eb 100644 --- a/rpc/grpc/client/version_service.go +++ b/rpc/grpc/client/version_service.go @@ -5,7 +5,7 @@ import ( "github.com/cosmos/gogoproto/grpc" - v1 "github.com/cometbft/cometbft/proto/tendermint/services/version/v1" + pbsvc "github.com/cometbft/cometbft/api/cometbft/services/version/v1" ) // Version provides version information about a particular CometBFT node. @@ -22,18 +22,18 @@ type VersionServiceClient interface { } type versionServiceClient struct { - client v1.VersionServiceClient + client pbsvc.VersionServiceClient } func newVersionServiceClient(conn grpc.ClientConn) VersionServiceClient { return &versionServiceClient{ - client: v1.NewVersionServiceClient(conn), + client: pbsvc.NewVersionServiceClient(conn), } } -// GetVersion implements VersionServiceClient +// GetVersion implements VersionServiceClient. 
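// Editor's note, recapping the pattern rather than adding to the diff: the
// disabled*ServiceClient types are null objects. The builder wires the
// panicking stub by default and swaps in a live client only when the
// corresponding service is enabled, as New does above:
//
//	pruningServiceClient := newDisabledPruningServiceClient()
//	if builder.pruningServiceEnabled {
//		pruningServiceClient = newPruningServiceClient(conn)
//	}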
func (c *versionServiceClient) GetVersion(ctx context.Context) (*Version, error) { - res, err := c.client.GetVersion(ctx, &v1.GetVersionRequest{}) + res, err := c.client.GetVersion(ctx, &pbsvc.GetVersionRequest{}) if err != nil { return nil, err } @@ -51,7 +51,7 @@ func newDisabledVersionServiceClient() VersionServiceClient { return &disabledVersionServiceClient{} } -// GetVersion implements VersionServiceClient +// GetVersion implements VersionServiceClient. func (*disabledVersionServiceClient) GetVersion(context.Context) (*Version, error) { panic("version service client is disabled") } diff --git a/rpc/grpc/errors/errors.go b/rpc/grpc/errors/errors.go new file mode 100644 index 00000000000..e799c942d11 --- /dev/null +++ b/rpc/grpc/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "fmt" + +type ErrInvalidRemoteAddress struct { + Addr string +} + +func (e ErrInvalidRemoteAddress) Error() string { + return fmt.Sprintf("invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", e.Addr) +} diff --git a/rpc/grpc/server/privileged/privileged.go b/rpc/grpc/server/privileged/privileged.go index 0a3d2d54c98..317430d6aa7 100644 --- a/rpc/grpc/server/privileged/privileged.go +++ b/rpc/grpc/server/privileged/privileged.go @@ -6,8 +6,8 @@ import ( "google.golang.org/grpc" + pbpruningsvc "github.com/cometbft/cometbft/api/cometbft/services/pruning/v1" "github.com/cometbft/cometbft/libs/log" - pbpruningsvc "github.com/cometbft/cometbft/proto/tendermint/services/pruning/v1" "github.com/cometbft/cometbft/rpc/grpc/server/services/pruningservice" sm "github.com/cometbft/cometbft/state" ) @@ -31,7 +31,7 @@ func newServerBuilder(listener net.Listener) *serverBuilder { } } -// WithVersionService enables the version service on the CometBFT server. +// WithPruningService enables the pruning service on the CometBFT server. 
func WithPruningService(pruner *sm.Pruner, logger log.Logger) Option { return func(b *serverBuilder) { b.pruningService = pruningservice.New(pruner, logger) diff --git a/rpc/grpc/server/server.go b/rpc/grpc/server/server.go index 217e37d480b..69f948c784f 100644 --- a/rpc/grpc/server/server.go +++ b/rpc/grpc/server/server.go @@ -5,19 +5,18 @@ import ( "net" "strings" - sm "github.com/cometbft/cometbft/state" - "github.com/cometbft/cometbft/store" - - brs "github.com/cometbft/cometbft/proto/tendermint/services/block_results/v1" - "github.com/cometbft/cometbft/rpc/grpc/server/services/blockresultservice" - "google.golang.org/grpc" + pbblocksvc "github.com/cometbft/cometbft/api/cometbft/services/block/v1" + brs "github.com/cometbft/cometbft/api/cometbft/services/block_results/v1" + pbversionsvc "github.com/cometbft/cometbft/api/cometbft/services/version/v1" "github.com/cometbft/cometbft/libs/log" - pbblocksvc "github.com/cometbft/cometbft/proto/tendermint/services/block/v1" - pbversionsvc "github.com/cometbft/cometbft/proto/tendermint/services/version/v1" + grpcerr "github.com/cometbft/cometbft/rpc/grpc/errors" + "github.com/cometbft/cometbft/rpc/grpc/server/services/blockresultservice" "github.com/cometbft/cometbft/rpc/grpc/server/services/blockservice" "github.com/cometbft/cometbft/rpc/grpc/server/services/versionservice" + sm "github.com/cometbft/cometbft/state" + "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" ) @@ -50,10 +49,7 @@ func newServerBuilder(listener net.Listener) *serverBuilder { func Listen(addr string) (net.Listener, error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { - return nil, fmt.Errorf( - "invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", - addr, - ) + return nil, grpcerr.ErrInvalidRemoteAddress{Addr: addr} } return net.Listen(parts[0], parts[1]) } diff --git a/rpc/grpc/server/services/blockresultservice/service.go b/rpc/grpc/server/services/blockresultservice/service.go index ab67b061d71..6479ce97c99 100644 --- a/rpc/grpc/server/services/blockresultservice/service.go +++ b/rpc/grpc/server/services/blockresultservice/service.go @@ -6,11 +6,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + brs "github.com/cometbft/cometbft/api/cometbft/services/block_results/v1" "github.com/cometbft/cometbft/libs/log" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/store" - - brs "github.com/cometbft/cometbft/proto/tendermint/services/block_results/v1" ) type blockResultsService struct { @@ -48,31 +47,7 @@ func (s *blockResultsService) GetBlockResults(_ context.Context, req *brs.GetBlo return &brs.GetBlockResultsResponse{ Height: req.Height, - TxsResults: res.TxResults, - FinalizeBlockEvents: formatProtoToRef(res.Events), - ValidatorUpdates: formatProtoToRef(res.ValidatorUpdates), - AppHash: res.AppHash, - }, nil -} - -// GetLatest BlockResults returns the block results of the last committed height. 
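// Editor's note, a usage sketch with assumed values rather than part of the
// diff: Listen requires a fully formed address, and a missing scheme now
// surfaces as the typed grpcerr.ErrInvalidRemoteAddress instead of an ad-hoc
// fmt.Errorf value, so callers can branch on it:
//
//	ln, err := Listen("tcp://127.0.0.1:26670")
//	var addrErr grpcerr.ErrInvalidRemoteAddress
//	if errors.As(err, &addrErr) {
//		log.Fatalf("invalid gRPC listen address %q", addrErr.Addr)
//	}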
-func (s *blockResultsService) GetLatestBlockResults(_ context.Context, _ *brs.GetLatestBlockResultsRequest) (*brs.GetBlockResultsResponse, error) { - logger := s.logger.With("endpoint", "GetBlockResults") - ss, err := s.stateStore.Load() - if err != nil { - logger.Error("Error loading store", "err", err) - return nil, status.Error(codes.Internal, "Internal server error") - } - - res, err := s.stateStore.LoadFinalizeBlockResponse(ss.LastBlockHeight) - if err != nil { - logger.Error("Error fetching BlockResults", "height", ss.LastBlockHeight, "err", err) - return nil, status.Error(codes.Internal, "Internal server error") - } - - return &brs.GetBlockResultsResponse{ - Height: ss.LastBlockHeight, - TxsResults: res.TxResults, + TxResults: res.TxResults, FinalizeBlockEvents: formatProtoToRef(res.Events), ValidatorUpdates: formatProtoToRef(res.ValidatorUpdates), AppHash: res.AppHash, diff --git a/rpc/grpc/server/services/blockservice/service.go b/rpc/grpc/server/services/blockservice/service.go index cbf66f57658..902054cdfba 100644 --- a/rpc/grpc/server/services/blockservice/service.go +++ b/rpc/grpc/server/services/blockservice/service.go @@ -1,18 +1,19 @@ package blockservice import ( - context "context" + "context" "fmt" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + blocksvc "github.com/cometbft/cometbft/api/cometbft/services/block/v1" + ptypes "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/internal/rpctrace" "github.com/cometbft/cometbft/libs/log" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" - blocksvc "github.com/cometbft/cometbft/proto/tendermint/services/block/v1" - ptypes "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type blockServiceServer struct { @@ -30,7 +31,7 @@ func New(store *store.BlockStore, eventBus *types.EventBus, logger log.Logger) b } } -// GetByHeight implements v1.BlockServiceServer GetByHeight method +// GetByHeight implements v1.BlockServiceServer GetByHeight method. func (s *blockServiceServer) GetByHeight(_ context.Context, req *blocksvc.GetByHeightRequest) (*blocksvc.GetByHeightResponse, error) { logger := s.logger.With("endpoint", "GetByHeight") if err := validateBlockHeight(req.Height, s.store.Base(), s.store.Height()); err != nil { @@ -48,26 +49,6 @@ func (s *blockServiceServer) GetByHeight(_ context.Context, req *blocksvc.GetByH }, nil } -// GetLatest implements v1.BlockServiceServer. -func (s *blockServiceServer) GetLatest(context.Context, *blocksvc.GetLatestRequest) (*blocksvc.GetLatestResponse, error) { - logger := s.logger.With("endpoint", "GetLatest") - - latestHeight := s.store.Height() - if latestHeight < 1 { - return nil, status.Error(codes.NotFound, "No block data yet") - } - - blockID, block, err := s.getBlock(latestHeight, logger) - if err != nil { - return nil, err - } - - return &blocksvc.GetLatestResponse{ - BlockId: blockID, - Block: block, - }, nil -} - func (s *blockServiceServer) getBlock(height int64, logger log.Logger) (*ptypes.BlockID, *ptypes.Block, error) { traceID, err := rpctrace.New() if err != nil { @@ -75,17 +56,16 @@ func (s *blockServiceServer) getBlock(height int64, logger log.Logger) (*ptypes. 
return nil, nil, status.Error(codes.Internal, "Internal server error - see logs for details") } - block := s.store.LoadBlock(height) + block, blockMeta := s.store.LoadBlock(height) if block == nil { - return nil, nil, status.Errorf(codes.NotFound, fmt.Sprintf("Block not found for height %d", height)) + return nil, nil, status.Errorf(codes.NotFound, "Block not found for height %d", height) } bp, err := block.ToProto() if err != nil { logger.Error("Error attempting to convert block to its Protobuf representation", "err", err, "traceID", traceID) - return nil, nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to load block from store (see logs for trace ID: %s)", traceID)) + return nil, nil, status.Errorf(codes.Internal, "Failed to load block from store (see logs for trace ID: %s)", traceID) } - blockMeta := s.store.LoadBlockMeta(height) if blockMeta == nil { logger.Error("Failed to load block meta when block was successfully loaded", "height", height) return nil, nil, status.Error(codes.Internal, "Internal server error - see logs for details") @@ -95,7 +75,7 @@ func (s *blockServiceServer) getBlock(height int64, logger log.Logger) (*ptypes. return &blockIDProto, bp, nil } -// GetLatestHeight implements v1.BlockServiceServer GetLatestHeight method +// GetLatestHeight implements v1.BlockServiceServer GetLatestHeight method. func (s *blockServiceServer) GetLatestHeight(_ *blocksvc.GetLatestHeightRequest, stream blocksvc.BlockService_GetLatestHeightServer) error { logger := s.logger.With("endpoint", "GetLatestHeight") diff --git a/rpc/grpc/server/services/pruningservice/service.go b/rpc/grpc/server/services/pruningservice/service.go index 06daf3d7728..ec4599a79eb 100644 --- a/rpc/grpc/server/services/pruningservice/service.go +++ b/rpc/grpc/server/services/pruningservice/service.go @@ -1,16 +1,15 @@ package pruningservice import ( - context "context" - "fmt" + "context" "math" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + pbsvc "github.com/cometbft/cometbft/api/cometbft/services/pruning/v1" "github.com/cometbft/cometbft/internal/rpctrace" "github.com/cometbft/cometbft/libs/log" - v1 "github.com/cometbft/cometbft/proto/tendermint/services/pruning/v1" sm "github.com/cometbft/cometbft/state" ) @@ -20,18 +19,18 @@ type pruningServiceServer struct { } // New creates a new CometBFT pruning service server. -func New(pruner *sm.Pruner, logger log.Logger) v1.PruningServiceServer { +func New(pruner *sm.Pruner, logger log.Logger) pbsvc.PruningServiceServer { return &pruningServiceServer{ pruner: pruner, logger: logger.With("service", "PruningService"), } } -func (s *pruningServiceServer) SetBlockIndexerRetainHeight(_ context.Context, request *v1.SetBlockIndexerRetainHeightRequest) (*v1.SetBlockIndexerRetainHeightResponse, error) { +func (s *pruningServiceServer) SetBlockIndexerRetainHeight(_ context.Context, request *pbsvc.SetBlockIndexerRetainHeightRequest) (*pbsvc.SetBlockIndexerRetainHeightResponse, error) { height := request.Height // Because we can't agree on a single type to represent tx indexer height. 
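// Editor's note on the fmt.Sprintf removals above and below, not part of the
// diff: status.Errorf already treats its second argument as a format string,
// so pre-rendering with fmt.Sprintf double-formats the message and trips
// govet's non-constant-format-string check. The corrected form passes the
// verb and its arguments straight through:
//
//	return nil, nil, status.Errorf(codes.NotFound, "Block not found for height %d", height)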
if height > uint64(math.MaxInt64) { - return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid height %d", height)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid height %d", height) } logger := s.logger.With("endpoint", "SetBlockIndexerRetainHeight") traceID, err := rpctrace.New() @@ -43,10 +42,10 @@ func (s *pruningServiceServer) SetBlockIndexerRetainHeight(_ context.Context, re logger.Error("Cannot set block indexer retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to set block indexer retain height (see logs for trace ID: %s)", traceID) } - return &v1.SetBlockIndexerRetainHeightResponse{}, nil + return &pbsvc.SetBlockIndexerRetainHeightResponse{}, nil } -func (s *pruningServiceServer) GetBlockIndexerRetainHeight(_ context.Context, _ *v1.GetBlockIndexerRetainHeightRequest) (*v1.GetBlockIndexerRetainHeightResponse, error) { +func (s *pruningServiceServer) GetBlockIndexerRetainHeight(_ context.Context, _ *pbsvc.GetBlockIndexerRetainHeightRequest) (*pbsvc.GetBlockIndexerRetainHeightResponse, error) { logger := s.logger.With("endpoint", "GetBLockIndexerRetainHeight") traceID, err := rpctrace.New() if err != nil { @@ -58,14 +57,14 @@ func (s *pruningServiceServer) GetBlockIndexerRetainHeight(_ context.Context, _ logger.Error("Cannot get block indexer retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to get block indexer retain height (see logs for trace ID: %s)", traceID) } - return &v1.GetBlockIndexerRetainHeightResponse{Height: uint64(height)}, nil + return &pbsvc.GetBlockIndexerRetainHeightResponse{Height: uint64(height)}, nil } -func (s *pruningServiceServer) SetTxIndexerRetainHeight(_ context.Context, request *v1.SetTxIndexerRetainHeightRequest) (*v1.SetTxIndexerRetainHeightResponse, error) { +func (s *pruningServiceServer) SetTxIndexerRetainHeight(_ context.Context, request *pbsvc.SetTxIndexerRetainHeightRequest) (*pbsvc.SetTxIndexerRetainHeightResponse, error) { height := request.Height // Because we can't agree on a single type to represent tx indexer height. 
if height > uint64(math.MaxInt64) { - return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid height %d", height)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid height %d", height) } logger := s.logger.With("endpoint", "SetTxIndexerRetainHeight") traceID, err := rpctrace.New() @@ -77,10 +76,10 @@ func (s *pruningServiceServer) SetTxIndexerRetainHeight(_ context.Context, reque logger.Error("Cannot set tx indexer retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to set tx indexer retain height (see logs for trace ID: %s)", traceID) } - return &v1.SetTxIndexerRetainHeightResponse{}, nil + return &pbsvc.SetTxIndexerRetainHeightResponse{}, nil } -func (s *pruningServiceServer) GetTxIndexerRetainHeight(_ context.Context, _ *v1.GetTxIndexerRetainHeightRequest) (*v1.GetTxIndexerRetainHeightResponse, error) { +func (s *pruningServiceServer) GetTxIndexerRetainHeight(_ context.Context, _ *pbsvc.GetTxIndexerRetainHeightRequest) (*pbsvc.GetTxIndexerRetainHeightResponse, error) { logger := s.logger.With("endpoint", "GetTxIndexerRetainHeight") traceID, err := rpctrace.New() if err != nil { @@ -92,15 +91,15 @@ func (s *pruningServiceServer) GetTxIndexerRetainHeight(_ context.Context, _ *v1 logger.Error("Cannot get tx indexer retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to get tx indexer retain height (see logs for trace ID: %s)", traceID) } - return &v1.GetTxIndexerRetainHeightResponse{Height: uint64(height)}, nil + return &pbsvc.GetTxIndexerRetainHeightResponse{Height: uint64(height)}, nil } // SetBlockRetainHeight implements v1.PruningServiceServer. -func (s *pruningServiceServer) SetBlockRetainHeight(_ context.Context, req *v1.SetBlockRetainHeightRequest) (*v1.SetBlockRetainHeightResponse, error) { +func (s *pruningServiceServer) SetBlockRetainHeight(_ context.Context, req *pbsvc.SetBlockRetainHeightRequest) (*pbsvc.SetBlockRetainHeightResponse, error) { height := req.Height // Because we can't agree on a single type to represent block height. if height > uint64(math.MaxInt64) { - return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid height %d", height)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid height %d", height) } logger := s.logger.With("endpoint", "SetBlockRetainHeight") traceID, err := rpctrace.New() @@ -112,11 +111,11 @@ func (s *pruningServiceServer) SetBlockRetainHeight(_ context.Context, req *v1.S logger.Error("Cannot set block retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to set block retain height (see logs for trace ID: %s)", traceID) } - return &v1.SetBlockRetainHeightResponse{}, nil + return &pbsvc.SetBlockRetainHeightResponse{}, nil } // GetBlockRetainHeight implements v1.PruningServiceServer. 
-func (s *pruningServiceServer) GetBlockRetainHeight(_ context.Context, _ *v1.GetBlockRetainHeightRequest) (*v1.GetBlockRetainHeightResponse, error) { +func (s *pruningServiceServer) GetBlockRetainHeight(_ context.Context, _ *pbsvc.GetBlockRetainHeightRequest) (*pbsvc.GetBlockRetainHeightResponse, error) { logger := s.logger.With("endpoint", "GetBlockRetainHeight") traceID, err := rpctrace.New() if err != nil { @@ -133,18 +132,18 @@ func (s *pruningServiceServer) GetBlockRetainHeight(_ context.Context, _ *v1.Get logger.Error("Cannot get block retain height specified by application", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to get app block retain height (see logs for trace ID: %s)", traceID) } - return &v1.GetBlockRetainHeightResponse{ + return &pbsvc.GetBlockRetainHeightResponse{ PruningServiceRetainHeight: uint64(svcHeight), AppRetainHeight: uint64(appHeight), }, nil } // SetBlockResultsRetainHeight implements v1.PruningServiceServer. -func (s *pruningServiceServer) SetBlockResultsRetainHeight(_ context.Context, req *v1.SetBlockResultsRetainHeightRequest) (*v1.SetBlockResultsRetainHeightResponse, error) { +func (s *pruningServiceServer) SetBlockResultsRetainHeight(_ context.Context, req *pbsvc.SetBlockResultsRetainHeightRequest) (*pbsvc.SetBlockResultsRetainHeightResponse, error) { height := req.Height // Because we can't agree on a single type to represent block height. if height > uint64(math.MaxInt64) { - return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid height %d", height)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid height %d", height) } logger := s.logger.With("endpoint", "SetBlockResultsRetainHeight") traceID, err := rpctrace.New() @@ -156,11 +155,11 @@ func (s *pruningServiceServer) SetBlockResultsRetainHeight(_ context.Context, re logger.Error("Cannot set block results retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to set block results retain height (see logs for trace ID: %s)", traceID) } - return &v1.SetBlockResultsRetainHeightResponse{}, nil + return &pbsvc.SetBlockResultsRetainHeightResponse{}, nil } // GetBlockResultsRetainHeight implements v1.PruningServiceServer. 
-func (s *pruningServiceServer) GetBlockResultsRetainHeight(_ context.Context, _ *v1.GetBlockResultsRetainHeightRequest) (*v1.GetBlockResultsRetainHeightResponse, error) { +func (s *pruningServiceServer) GetBlockResultsRetainHeight(_ context.Context, _ *pbsvc.GetBlockResultsRetainHeightRequest) (*pbsvc.GetBlockResultsRetainHeightResponse, error) { logger := s.logger.With("endpoint", "GetBlockResultsRetainHeight") traceID, err := rpctrace.New() if err != nil { @@ -172,5 +171,5 @@ func (s *pruningServiceServer) GetBlockResultsRetainHeight(_ context.Context, _ logger.Error("Cannot get block results retain height", "err", err, "traceID", traceID) return nil, status.Errorf(codes.Internal, "Failed to get block results retain height (see logs for trace ID: %s)", traceID) } - return &v1.GetBlockResultsRetainHeightResponse{PruningServiceRetainHeight: uint64(height)}, nil + return &pbsvc.GetBlockResultsRetainHeightResponse{PruningServiceRetainHeight: uint64(height)}, nil } diff --git a/rpc/grpc/server/services/versionservice/service.go b/rpc/grpc/server/services/versionservice/service.go index 028d2711fcb..fc2feba7f30 100644 --- a/rpc/grpc/server/services/versionservice/service.go +++ b/rpc/grpc/server/services/versionservice/service.go @@ -1,23 +1,23 @@ package versionservice import ( - context "context" + "context" - v1 "github.com/cometbft/cometbft/proto/tendermint/services/version/v1" + pbsvc "github.com/cometbft/cometbft/api/cometbft/services/version/v1" "github.com/cometbft/cometbft/version" ) type versionServiceServer struct{} // New creates a new CometBFT version service server. -func New() v1.VersionServiceServer { +func New() pbsvc.VersionServiceServer { return &versionServiceServer{} } -// GetVersion implements v1.VersionServiceServer -func (s *versionServiceServer) GetVersion(context.Context, *v1.GetVersionRequest) (*v1.GetVersionResponse, error) { - return &v1.GetVersionResponse{ - Node: version.TMCoreSemVer, +// GetVersion implements v1.VersionServiceServer. 
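// Editor's note summarizing the recurring import rename in these files, not
// part of the diff: the generated protobuf packages moved from
// proto/tendermint/... to api/cometbft/..., so each file changes only the
// import path and its local alias, for example:
//
//	import (
//		pbsvc "github.com/cometbft/cometbft/api/cometbft/services/version/v1"
//	)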
+func (*versionServiceServer) GetVersion(context.Context, *pbsvc.GetVersionRequest) (*pbsvc.GetVersionResponse, error) { + return &pbsvc.GetVersionResponse{ + Node: version.CMTSemVer, Abci: version.ABCIVersion, P2P: version.P2PProtocol, Block: version.BlockProtocol, diff --git a/rpc/jsonrpc/client/args_test.go b/rpc/jsonrpc/client/args_test.go index 2506f307349..eba2b2862b8 100644 --- a/rpc/jsonrpc/client/args_test.go +++ b/rpc/jsonrpc/client/args_test.go @@ -19,7 +19,7 @@ func TestArgToJSON(t *testing.T) { require := require.New(t) cases := []struct { - input interface{} + input any expected string }{ {[]byte("1234"), "0x31323334"}, @@ -28,10 +28,10 @@ func TestArgToJSON(t *testing.T) { } for i, tc := range cases { - args := map[string]interface{}{"data": tc.input} + args := map[string]any{"data": tc.input} err := argsToJSON(args) - require.Nil(err, "%d: %+v", i, err) - require.Equal(1, len(args), "%d", i) + require.NoError(err, "%d: %+v", i, err) + require.Len(args, 1, "%d", i) data, ok := args["data"].(string) require.True(ok, "%d: %#v", i, args["data"]) assert.Equal(tc.expected, data, "%d", i) diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go index 2ae917d97d1..e342750e5fd 100644 --- a/rpc/jsonrpc/client/decode.go +++ b/rpc/jsonrpc/client/decode.go @@ -6,15 +6,14 @@ import ( "fmt" cmtjson "github.com/cometbft/cometbft/libs/json" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) func unmarshalResponseBytes( responseBytes []byte, expectedID types.JSONRPCIntID, - result interface{}, -) (interface{}, error) { - + result any, +) (any, error) { // Read response. If rpc/core/types is imported, the result will unmarshal // into the correct type. response := &types.RPCResponse{} @@ -38,51 +37,76 @@ func unmarshalResponseBytes( return result, nil } +// Separate the unmarshalling actions using different functions to improve readability and maintainability. +func unmarshalIndividualResponse(responseBytes []byte) (types.RPCResponse, error) { + var singleResponse types.RPCResponse + err := json.Unmarshal(responseBytes, &singleResponse) + return singleResponse, err +} + +func unmarshalMultipleResponses(responseBytes []byte) ([]types.RPCResponse, error) { + var responses []types.RPCResponse + err := json.Unmarshal(responseBytes, &responses) + return responses, err +} + func unmarshalResponseBytesArray( responseBytes []byte, expectedIDs []types.JSONRPCIntID, - results []interface{}, -) ([]interface{}, error) { - - var ( - responses []types.RPCResponse - ) - - if err := json.Unmarshal(responseBytes, &responses); err != nil { - return nil, fmt.Errorf("error unmarshalling: %w", err) - } - - // No response error checking here as there may be a mixture of successful - // and unsuccessful responses. + results []any, +) ([]any, error) { + var responses []types.RPCResponse + + // Try to unmarshal as multiple responses + responses, err := unmarshalMultipleResponses(responseBytes) + // if err == nil it could unmarshal in multiple responses + if err == nil { + // No response error checking here as there may be a mixture of successful + // and unsuccessful responses. 
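// Editor's note, sketching the new control flow rather than adding to the
// diff: the payload is parsed as a JSON array of responses first; only if
// that fails is it retried as a single RPCResponse, which a server typically
// sends as one error envelope covering a whole rejected batch:
//
//	responses, errBatch := unmarshalMultipleResponses(data)
//	if errBatch == nil {
//		_ = responses // batch path: validate IDs against expectedIDs, then unmarshal each result
//	} else if single, errSingle := unmarshalIndividualResponse(data); errSingle == nil {
//		_ = single // single envelope: surface its Error or its Result
//	}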
+ + if len(results) != len(responses) { + return nil, fmt.Errorf( + "expected %d result objects into which to inject responses, but got %d", + len(responses), + len(results), + ) + } - if len(results) != len(responses) { - return nil, fmt.Errorf( - "expected %d result objects into which to inject responses, but got %d", - len(responses), - len(results), - ) - } + // Intersect IDs from responses with expectedIDs. + ids := make([]types.JSONRPCIntID, len(responses)) + var ok bool + for i, resp := range responses { + ids[i], ok = resp.ID.(types.JSONRPCIntID) + if !ok { + return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) + } + } + if err := validateResponseIDs(ids, expectedIDs); err != nil { + return nil, fmt.Errorf("wrong IDs: %w", err) + } - // Intersect IDs from responses with expectedIDs. - ids := make([]types.JSONRPCIntID, len(responses)) - var ok bool - for i, resp := range responses { - ids[i], ok = resp.ID.(types.JSONRPCIntID) - if !ok { - return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) + for i := 0; i < len(responses); i++ { + if err := cmtjson.Unmarshal(responses[i].Result, results[i]); err != nil { + return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err) + } } + + return results, nil } - if err := validateResponseIDs(ids, expectedIDs); err != nil { - return nil, fmt.Errorf("wrong IDs: %w", err) + // check if it's a single response that should be an error + singleResponse, err := unmarshalIndividualResponse(responseBytes) + if err != nil { + // Here, an error means that even single response unmarshalling failed, + // so return the error. + return nil, fmt.Errorf("error unmarshalling: %w", err) } - - for i := 0; i < len(responses); i++ { - if err := cmtjson.Unmarshal(responses[i].Result, results[i]); err != nil { - return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err) - } + singleResult := make([]any, 0) + if singleResponse.Error != nil { + singleResult = append(singleResult, singleResponse.Error) + } else { + singleResult = append(singleResult, singleResponse.Result) } - - return results, nil + return singleResult, nil } func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { @@ -92,11 +116,10 @@ func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { } for i, id := range ids { - if m[id] { - delete(m, id) - } else { + if !m[id] { return fmt.Errorf("unsolicited ID #%d: %v", i, id) } + delete(m, id) } return nil @@ -114,7 +137,7 @@ func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) return nil } -func validateResponseID(id interface{}) error { +func validateResponseID(id any) error { if id == nil { return errors.New("no ID") } diff --git a/rpc/jsonrpc/client/encode.go b/rpc/jsonrpc/client/encode.go index 42d997be478..e024abb8901 100644 --- a/rpc/jsonrpc/client/encode.go +++ b/rpc/jsonrpc/client/encode.go @@ -8,7 +8,7 @@ import ( cmtjson "github.com/cometbft/cometbft/libs/json" ) -func argsToURLValues(args map[string]interface{}) (url.Values, error) { +func argsToURLValues(args map[string]any) (url.Values, error) { values := make(url.Values) if len(args) == 0 { return values, nil @@ -26,7 +26,7 @@ func argsToURLValues(args map[string]interface{}) (url.Values, error) { return values, nil } -func argsToJSON(args map[string]interface{}) error { +func argsToJSON(args map[string]any) error { for k, v := range args { rt := reflect.TypeOf(v) isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 diff --git a/rpc/jsonrpc/client/errors.go 
b/rpc/jsonrpc/client/errors.go new file mode 100644 index 00000000000..86e7008acd2 --- /dev/null +++ b/rpc/jsonrpc/client/errors.go @@ -0,0 +1,94 @@ +package client + +import "fmt" + +type ErrInvalidAddress struct { + Addr string + Source error +} + +func (e ErrInvalidAddress) Error() string { + return fmt.Sprintf("invalid address: %s: %v", e.Addr, e.Source) +} + +func (e ErrInvalidAddress) Unwrap() error { + return e.Source +} + +type ErrEncodingParams struct { + Source error +} + +func (e ErrEncodingParams) Error() string { + return fmt.Sprintf("failed to encode params: %v", e.Source) +} + +func (e ErrEncodingParams) Unwrap() error { + return e.Source +} + +type ErrMarshalRequest struct { + Source error +} + +func (e ErrMarshalRequest) Error() string { + return fmt.Sprintf("failed to marshal request: %v", e.Source) +} + +func (e ErrMarshalRequest) Unwrap() error { + return e.Source +} + +type ErrCreateRequest struct { + Source error +} + +func (e ErrCreateRequest) Error() string { + return fmt.Sprintf("failed to create request: %v", e.Source) +} + +func (e ErrCreateRequest) Unwrap() error { + return e.Source +} + +type ErrFailedRequest struct { + Source error +} + +func (e ErrFailedRequest) Error() string { + return fmt.Sprintf("failed request: %v", e.Source) +} + +func (e ErrFailedRequest) Unwrap() error { + return e.Source +} + +type ErrReadResponse struct { + Source error + Description string +} + +func (e ErrReadResponse) Error() string { + if e.Description == "" { + return fmt.Sprintf("failed to read response: %v", e.Source) + } + + return fmt.Sprintf("failed to read response: %s: %v", e.Description, e.Source) +} + +func (e ErrReadResponse) Unwrap() error { + return e.Source +} + +type ErrUnmarshalResponse struct { + Source error + Description string +} + +func (e ErrUnmarshalResponse) Error() string { + if e.Description == "" { + return fmt.Sprintf("failed to unmarshal response: %v", e.Source) + } + + return fmt.Sprintf("failed to unmarshal response: %s: %v", e.Description, e.Source) +} diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index ebe91e8a340..ea1f66f395d 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -9,10 +9,11 @@ import ( "net" "net/http" "net/url" + "regexp" "strings" cmtsync "github.com/cometbft/cometbft/libs/sync" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) const ( @@ -24,16 +25,18 @@ const ( protoUNIX = "unix" ) -//------------------------------------------------------------- +var endsWithPortPattern = regexp.MustCompile(`:[0-9]+$`) -// Parsed URL structure +// ------------------------------------------------------------- + +// Parsed URL structure. type parsedURL struct { url.URL isUnixSocket bool } -// Parse URL and set defaults +// Parse URL and set defaults. func newParsedURL(remoteAddr string) (*parsedURL, error) { u, err := url.Parse(remoteAddr) if err != nil { @@ -57,7 +60,7 @@ func newParsedURL(remoteAddr string) (*parsedURL, error) { return pu, nil } -// Change protocol to HTTP for unknown protocols and TCP protocol - useful for RPC connections +// Change protocol to HTTP for unknown protocols and TCP protocol - useful for RPC connections.
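// Editor's note, an illustration rather than part of the diff: most of the
// new client error types above implement Unwrap, which keeps sentinel checks
// working through the typed layer. A timeout surfaced as ErrFailedRequest,
// for instance, still matches:
//
//	if errors.Is(err, context.DeadlineExceeded) {
//		// retry or back off
//	}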
func (u *parsedURL) SetDefaultSchemeHTTP() { // protocol to use for http operations, to support both http and https switch u.Scheme { @@ -69,13 +72,13 @@ func (u *parsedURL) SetDefaultSchemeHTTP() { } } -// Get full address without the protocol - useful for Dialer connections +// Get full address without the protocol - useful for Dialer connections. func (u parsedURL) GetHostWithPath() string { // Remove protocol, userinfo and # fragment, assume opaque is empty return u.Host + u.EscapedPath() } -// Get a trimmed address - useful for WS connections +// Get a trimmed address - useful for WS connections. func (u parsedURL) GetTrimmedHostWithPath() string { // if it's not an unix socket we return the normal URL if !u.isUnixSocket { @@ -87,35 +90,46 @@ func (u parsedURL) GetTrimmedHostWithPath() string { return strings.ReplaceAll(u.GetHostWithPath(), "/", ".") } -// GetDialAddress returns the endpoint to dial for the parsed URL +// GetDialAddress returns the endpoint to dial for the parsed URL. func (u parsedURL) GetDialAddress() string { - // if it's not a unix socket we return the host, example: localhost:443 + // if it's not a unix socket we return the host with port, example: localhost:443 if !u.isUnixSocket { + hasPort := endsWithPortPattern.MatchString(u.Host) + if !hasPort { + // http and ws default to port 80, https and wss default to port 443 + // https://www.rfc-editor.org/rfc/rfc9110#section-4.2 + // https://www.rfc-editor.org/rfc/rfc6455.html#section-3 + if u.Scheme == protoHTTP || u.Scheme == protoWS { + return u.Host + `:80` + } else if u.Scheme == protoHTTPS || u.Scheme == protoWSS { + return u.Host + `:443` + } + } return u.Host } // otherwise we return the path of the unix socket, ex /tmp/socket return u.GetHostWithPath() } -// Get a trimmed address with protocol - useful as address in RPC connections +// Get a trimmed address with protocol - useful as address in RPC connections. func (u parsedURL) GetTrimmedURL() string { return u.Scheme + "://" + u.GetTrimmedHostWithPath() } -//------------------------------------------------------------- +// ------------------------------------------------------------- // HTTPClient is a common interface for JSON-RPC HTTP clients. type HTTPClient interface { // Call calls the given method with the params and returns a result. - Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) + Call(ctx context.Context, method string, params map[string]any, result any) (any, error) } // Caller implementers can facilitate calling the JSON-RPC endpoint. type Caller interface { - Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) + Call(ctx context.Context, method string, params map[string]any, result any) (any, error) } -//------------------------------------------------------------- +// ------------------------------------------------------------- // Client is a JSON-RPC client, which sends POST HTTP requests to the // remote server. @@ -136,8 +150,10 @@ var _ HTTPClient = (*Client)(nil) // Both Client and RequestBatch can facilitate calls to the JSON // RPC endpoint. 
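// Editor's note on the new GetDialAddress defaulting above, with expected
// values taken from the updated tests later in this diff: a URL without an
// explicit port now gains its scheme's default before dialing, per RFC 9110
// and RFC 6455:
//
//	http://example.com       -> example.com:80
//	https://example.com      -> example.com:443
//	https://example.com:8080 -> example.com:8080 (an explicit port is kept)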
-var _ Caller = (*Client)(nil) -var _ Caller = (*RequestBatch)(nil) +var ( + _ Caller = (*Client)(nil) + _ Caller = (*RequestBatch)(nil) +) var _ fmt.Stringer = (*Client)(nil) @@ -161,7 +177,7 @@ func NewWithHTTPClient(remote string, client *http.Client) (*Client, error) { parsedURL, err := newParsedURL(remote) if err != nil { - return nil, fmt.Errorf("invalid remote %s: %s", remote, err) + return nil, ErrInvalidAddress{Addr: remote, Source: err} } parsedURL.SetDefaultSchemeHTTP() @@ -185,25 +201,25 @@ func NewWithHTTPClient(remote string, client *http.Client) (*Client, error) { func (c *Client) Call( ctx context.Context, method string, - params map[string]interface{}, - result interface{}, -) (interface{}, error) { + params map[string]any, + result any, +) (any, error) { id := c.nextRequestID() request, err := types.MapToRequest(id, method, params) if err != nil { - return nil, fmt.Errorf("failed to encode params: %w", err) + return nil, ErrEncodingParams{Source: err} } requestBytes, err := json.Marshal(request) if err != nil { - return nil, fmt.Errorf("failed to marshal request: %w", err) + return nil, ErrMarshalRequest{Source: err} } requestBuf := bytes.NewBuffer(requestBytes) httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, requestBuf) if err != nil { - return nil, fmt.Errorf("request failed: %w", err) + return nil, ErrCreateRequest{Source: err} } httpRequest.Header.Set("Content-Type", "application/json") @@ -214,18 +230,18 @@ func (c *Client) Call( httpResponse, err := c.client.Do(httpRequest) if err != nil { - return nil, fmt.Errorf("post failed: %w", err) + return nil, ErrFailedRequest{Source: err} } defer httpResponse.Body.Close() responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { - return nil, fmt.Errorf("%s. Failed to read response body: %w", getHTTPRespErrPrefix(httpResponse), err) + return nil, ErrReadResponse{Source: err, Description: getHTTPRespErrPrefix(httpResponse)} } res, err := unmarshalResponseBytes(responseBytes, id, result) if err != nil { - return nil, fmt.Errorf("%s. %w", getHTTPRespErrPrefix(httpResponse), err) + return nil, ErrUnmarshalResponse{Source: err, Description: getHTTPRespErrPrefix(httpResponse)} } return res, nil } @@ -246,9 +262,9 @@ func (c *Client) NewRequestBatch() *RequestBatch { } } -func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]interface{}, error) { +func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]any, error) { reqs := make([]types.RPCRequest, 0, len(requests)) - results := make([]interface{}, 0, len(requests)) + results := make([]any, 0, len(requests)) for _, req := range requests { reqs = append(reqs, req.request) results = append(results, req.result) @@ -300,13 +316,13 @@ func (c *Client) nextRequestID() types.JSONRPCIntID { return types.JSONRPCIntID(id) } -//------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------ // jsonRPCBufferedRequest encapsulates a single buffered request, as well as its // anticipated response structure. type jsonRPCBufferedRequest struct { request types.RPCRequest - result interface{} // The result will be deserialized into this object. + result any // The result will be deserialized into this object. 
} // RequestBatch allows us to buffer multiple request/response structures @@ -348,7 +364,7 @@ func (b *RequestBatch) clear() int { // Send will attempt to send the current batch of enqueued requests, and then // will clear out the requests once done. On success, this returns the // deserialized list of results from each of the enqueued requests. -func (b *RequestBatch) Send(ctx context.Context) ([]interface{}, error) { +func (b *RequestBatch) Send(ctx context.Context) ([]any, error) { b.mtx.Lock() defer func() { b.clear() @@ -362,9 +378,9 @@ func (b *RequestBatch) Send(ctx context.Context) ([]interface{}, error) { func (b *RequestBatch) Call( _ context.Context, method string, - params map[string]interface{}, - result interface{}, -) (interface{}, error) { + params map[string]any, + result any, +) (any, error) { id := b.client.nextRequestID() request, err := types.MapToRequest(id, method, params) if err != nil { @@ -374,9 +390,10 @@ func (b *RequestBatch) Call( return result, nil } -//------------------------------------------------------------- +// ------------------------------------------------------------- -func makeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), error) { +// MakeHTTPDialer creates an HTTP client dialer based on the given URL. +func MakeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), error) { u, err := newParsedURL(remoteAddr) if err != nil { return nil, err @@ -390,7 +407,7 @@ func makeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), protocol = protoTCP } - dialFn := func(proto, addr string) (net.Conn, error) { + dialFn := func(_, _ string) (net.Conn, error) { return net.Dial(protocol, u.GetDialAddress()) } @@ -402,7 +419,7 @@ func makeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), // remoteAddr should be fully featured (eg. with tcp:// or unix://). // An error will be returned in case of invalid remoteAddr. 
func DefaultHTTPClient(remoteAddr string) (*http.Client, error) { - dialFn, err := makeHTTPDialer(remoteAddr) + dialFn, err := MakeHTTPDialer(remoteAddr) if err != nil { return nil, err } @@ -412,6 +429,7 @@ func DefaultHTTPClient(remoteAddr string) (*http.Client, error) { // Set to true to prevent GZIP-bomb DoS attacks DisableCompression: true, Dial: dialFn, + Proxy: http.ProxyFromEnvironment, }, } diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 4932f4096b6..1688dc4e8c9 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -11,7 +11,7 @@ import ( ) func TestHTTPClientMakeHTTPDialer(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte("Hi!\n")) }) ts := httptest.NewServer(handler) @@ -26,8 +26,8 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) { for _, testURL := range []string{ts.URL, tsTLS.URL} { u, err := newParsedURL(testURL) require.NoError(t, err) - dialFn, err := makeHTTPDialer(testURL) - require.Nil(t, err) + dialFn, err := MakeHTTPDialer(testURL) + require.NoError(t, err) addr, err := dialFn(u.Scheme, u.GetHostWithPath()) require.NoError(t, err) @@ -52,26 +52,40 @@ func Test_parsedURL(t *testing.T) { }, "http endpoint": { + url: "http://example.com", + expectedURL: "http://example.com", + expectedHostWithPath: "example.com", + expectedDialAddress: "example.com:80", + }, + + "http endpoint with port": { + url: "http://example.com:8080", + expectedURL: "http://example.com:8080", + expectedHostWithPath: "example.com:8080", + expectedDialAddress: "example.com:8080", + }, + + "https endpoint": { url: "https://example.com", expectedURL: "https://example.com", expectedHostWithPath: "example.com", - expectedDialAddress: "example.com", + expectedDialAddress: "example.com:443", }, - "http endpoint with port": { + "https endpoint with port": { url: "https://example.com:8080", expectedURL: "https://example.com:8080", expectedHostWithPath: "example.com:8080", expectedDialAddress: "example.com:8080", }, - "http path routed endpoint": { + "https path routed endpoint": { url: "https://example.com:8080/rpc", expectedURL: "https://example.com:8080/rpc", expectedHostWithPath: "example.com:8080/rpc", expectedDialAddress: "example.com:8080", }, - "http path routed endpoint with version": { + "https path routed endpoint with version": { url: "https://example.com:8080/rpc/v1", expectedURL: "https://example.com:8080/rpc/v1", expectedHostWithPath: "example.com:8080/rpc/v1", @@ -80,7 +94,6 @@ } for name, tt := range tests { - tt := tt // suppressing linter t.Run(name, func(t *testing.T) { parsed, err := newParsedURL(tt.url) require.NoError(t, err) diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go index b8bce3cfc09..a10c12f94d1 100644 --- a/rpc/jsonrpc/client/http_uri_client.go +++ b/rpc/jsonrpc/client/http_uri_client.go @@ -2,16 +2,15 @@ package client import ( "context" - "fmt" "io" "net/http" "strings" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) const ( - // URIClientRequestID in a request ID used by URIClient + // URIClientRequestID is a request ID used by URIClient.
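// Editor's note, a hypothetical opt-out rather than part of the diff:
// DefaultHTTPClient's transport now sets Proxy: http.ProxyFromEnvironment,
// so HTTP_PROXY, HTTPS_PROXY and NO_PROXY are honored. A caller that must
// bypass the environment can supply its own transport via NewWithHTTPClient
// (re-adding the custom dialer if it still needs one):
//
//	c, err := NewWithHTTPClient(remote, &http.Client{
//		Transport: &http.Transport{Proxy: nil},
//	})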
URIClientRequestID = types.JSONRPCIntID(-1) ) @@ -52,11 +51,11 @@ func NewURI(remote string) (*URIClient, error) { // Call issues a POST form HTTP request. func (c *URIClient) Call(ctx context.Context, method string, - params map[string]interface{}, result interface{}) (interface{}, error) { - + params map[string]any, result any, +) (any, error) { values, err := argsToURLValues(params) if err != nil { - return nil, fmt.Errorf("failed to encode params: %w", err) + return nil, ErrEncodingParams{Source: err} } req, err := http.NewRequestWithContext( @@ -66,19 +65,19 @@ func (c *URIClient) Call(ctx context.Context, method string, strings.NewReader(values.Encode()), ) if err != nil { - return nil, fmt.Errorf("new request: %w", err) + return nil, ErrCreateRequest{Source: err} } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, err := c.client.Do(req) if err != nil { - return nil, fmt.Errorf("post: %w", err) + return nil, ErrFailedRequest{Source: err} } defer resp.Body.Close() responseBytes, err := io.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("read response body: %w", err) + return nil, ErrReadResponse{Source: err} } return unmarshalResponseBytes(responseBytes, URIClientRequestID, result) diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go index 45ce6afd11f..dc063547750 100644 --- a/rpc/jsonrpc/client/integration_test.go +++ b/rpc/jsonrpc/client/integration_test.go @@ -25,13 +25,13 @@ func TestWSClientReconnectWithJitter(t *testing.T) { // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... maxSleepTime := time.Second * time.Duration(((1<") w.Header().Set("Content-Type", "text/html") - w.WriteHeader(200) + w.WriteHeader(http.StatusOK) w.Write(buf.Bytes()) //nolint: errcheck } diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 9bb7948b5dd..51cb33cfd25 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -13,17 +13,17 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/libs/log" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), - "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", Cacheable("height")), + "c": NewRPCFunc(func(_ *types.Context, _ string, _ int) (string, error) { return "foo", nil }, "s,i"), + "block": NewRPCFunc(func(_ *types.Context, _ int) (string, error) { return "block", nil }, "height", Cacheable("height")), } mux := http.NewServeMux() buf := new(bytes.Buffer) - logger := log.NewTMLogger(buf) + logger := log.NewLogger(buf) RegisterRPCFuncs(mux, funcMap, logger) return mux @@ -39,7 +39,7 @@ func TestRPCParams(t *testing.T) { tests := []struct { payload string wantErr string - expectedID interface{} + expectedID any }{ // bad {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, @@ -60,7 +60,7 @@ func TestRPCParams(t *testing.T) { } for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + req, _ := http.NewRequest(http.MethodPost, "http://localhost/", strings.NewReader(tt.payload)) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := 
rec.Result() @@ -74,13 +74,13 @@ func TestRPCParams(t *testing.T) { } recv := new(types.RPCResponse) - assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + require.NoError(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) if tt.wantErr == "" { assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { - assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) + assert.Less(t, recv.Error.Code, 0, "#%d: not expecting a positive JSONRPC code", i) // The wanted error is either in the message or the data assert.Contains(t, recv.Error.Message+recv.Error.Data, tt.wantErr, "#%d: expected substring", i) } @@ -92,7 +92,7 @@ func TestJSONRPCID(t *testing.T) { tests := []struct { payload string wantErr bool - expectedID interface{} + expectedID any }{ // good id {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, types.JSONRPCStringID("0")}, @@ -108,7 +108,7 @@ func TestJSONRPCID(t *testing.T) { } for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + req, _ := http.NewRequest(http.MethodPost, "http://localhost/", strings.NewReader(tt.payload)) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() @@ -123,13 +123,13 @@ func TestJSONRPCID(t *testing.T) { recv := new(types.RPCResponse) err = json.Unmarshal(blob, recv) - assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + require.NoError(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) if !tt.wantErr { assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { - assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) + assert.Less(t, recv.Error.Code, 0, "#%d: not expecting a positive JSONRPC code", i) } } } @@ -137,7 +137,7 @@ func TestJSONRPCID(t *testing.T) { func TestRPCNotification(t *testing.T) { mux := testMux() body := strings.NewReader(`{"jsonrpc": "2.0"}`) - req, _ := http.NewRequest("POST", "http://localhost/", body) + req, _ := http.NewRequest(http.MethodPost, "http://localhost/", body) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() @@ -146,8 +146,8 @@ func TestRPCNotification(t *testing.T) { require.True(t, statusOK(res.StatusCode), "should always return 2XX") blob, err := io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") - require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") + require.NoError(t, err, "reading from the body should not give back an error") + require.Empty(t, blob, "a notification SHOULD NOT be responded to by the server") } func TestRPCNotificationInBatch(t *testing.T) { @@ -174,7 +174,7 @@ func TestRPCNotificationInBatch(t *testing.T) { }, } for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + req, _ := http.NewRequest(http.MethodPost, "http://localhost/", strings.NewReader(tt.payload)) rec := 
httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() @@ -218,7 +218,7 @@ func TestRPCNotificationInBatch(t *testing.T) { func TestUnknownRPCPath(t *testing.T) { mux := testMux() - req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) + req, _ := http.NewRequest(http.MethodGet, "http://localhost/unknownrpcpath", nil) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() @@ -231,48 +231,48 @@ func TestUnknownRPCPath(t *testing.T) { func TestRPCResponseCache(t *testing.T) { mux := testMux() body := strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["1"]}`) - req, _ := http.NewRequest("Get", "http://localhost/", body) + req, _ := http.NewRequest(http.MethodGet, "http://localhost/", body) rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - require.Equal(t, "public, max-age=86400", res.Header.Get("Cache-control")) + require.Equal(t, "public, max-age=86400", res.Header.Get("Cache-Control")) _, err := io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") // send a request with default height. body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`) - req, _ = http.NewRequest("Get", "http://localhost/", body) + req, _ = http.NewRequest(http.MethodGet, "http://localhost/", body) rec = httptest.NewRecorder() mux.ServeHTTP(rec, req) res = rec.Result() // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - require.Equal(t, "", res.Header.Get("Cache-control")) + require.Equal(t, "", res.Header.Get("Cache-Control")) _, err = io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") // send a request with default height, but as empty set of parameters. 
body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": []}`) - req, _ = http.NewRequest("Get", "http://localhost/", body) + req, _ = http.NewRequest(http.MethodGet, "http://localhost/", body) rec = httptest.NewRecorder() mux.ServeHTTP(rec, req) res = rec.Result() // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - require.Equal(t, "", res.Header.Get("Cache-control")) + require.Equal(t, "", res.Header.Get("Cache-Control")) _, err = io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 4393abf29df..be4a917d36c 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -3,20 +3,25 @@ package server import ( "bufio" + "bytes" "encoding/json" "errors" "fmt" + "io" "net" "net/http" "os" "runtime/debug" + "strconv" "strings" "time" "golang.org/x/net/netutil" "github.com/cometbft/cometbft/libs/log" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + grpcerr "github.com/cometbft/cometbft/rpc/grpc/errors" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" + cmttime "github.com/cometbft/cometbft/types/time" ) // Config is a RPC server configuration. @@ -32,16 +37,19 @@ type Config struct { MaxBodyBytes int64 // mirrors http.Server#MaxHeaderBytes MaxHeaderBytes int + // maximum number of requests in a batch request + MaxRequestBatchSize int } // DefaultConfig returns a default configuration. func DefaultConfig() *Config { return &Config{ - MaxOpenConnections: 0, // unlimited - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default + MaxOpenConnections: 0, // unlimited + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxBodyBytes: int64(1000000), // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default + MaxRequestBatchSize: 10, // default to max 10 requests per batch } } @@ -51,20 +59,15 @@ func DefaultConfig() *Config { // // NOTE: This function blocks - you may want to call it in a go-routine. func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { - logger.Info("serve", "msg", log.NewLazySprintf("Starting RPC HTTP server on %s", listener.Addr())) - s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - ReadHeaderTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, - } - err := s.Serve(listener) - logger.Info("RPC HTTP server stopped", "err", err) - return err + return ServeWithShutdown( + listener, + handler, + logger, + config, + ) } -// Serve creates a http.Server and calls ServeTLS with the given listener, +// ServeTLS creates a http.Server and calls ServeTLS with the given listener, // certFile and keyFile. It wraps handler with RecoverAndLogHandler and a // handler, which limits the max body size to config.MaxBodyBytes. 
// @@ -76,18 +79,100 @@ func ServeTLS( logger log.Logger, config *Config, ) error { - logger.Info("serve tls", "msg", log.NewLazySprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", - listener.Addr(), certFile, keyFile)) - s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - ReadHeaderTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, + return ServeTLSWithShutdown( + listener, + handler, + certFile, + keyFile, + logger, + config, + ) +} + +// ServeWithShutdown creates a http.Server and calls Serve with the given +// listener. It wraps handler with RecoverAndLogHandler and a handler, which limits +// the max body size to config.MaxBodyBytes. +// The function optionally takes a list of shutdown tasks to execute on shutdown. +// +// NOTE: This function blocks - you may want to call it in a go-routine. +func ServeWithShutdown( + listener net.Listener, + handler http.Handler, + logger log.Logger, + config *Config, + shutdownTasks ...func() error, +) error { + logMsg := log.NewLazySprintf("Starting RPC HTTP server on %s", listener.Addr()) + logger.Info("serve", "msg", logMsg) + + var ( + rlHandler = RecoverAndLogHandler(defaultHandler{h: handler}, logger) + s = &http.Server{ + Handler: PreChecksHandler(rlHandler, config), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, + } + ) + err := s.Serve(listener) + + logger.Info("RPC HTTP server stopped", "err", err) + + for _, f := range shutdownTasks { + if err := f(); err != nil { + logger.Error("executing RPC HTTP server post-shutdown task", "err", err) + } } + + return err +} + +// ServeTLSWithShutdown creates a http.Server and calls ServeTLS with the given +// listener, certFile and keyFile. It wraps handler with RecoverAndLogHandler and a +// handler, which limits the max body size to config.MaxBodyBytes. +// The function optionally takes a list of shutdown tasks to execute on shutdown. +// +// NOTE: This function blocks - you may want to call it in a go-routine. 
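+// +// A minimal usage sketch (ln, mux, cfg and the cleanup task are illustrative, not part of this API): +// +// go func() { +// _ = ServeTLSWithShutdown(ln, mux, "server.crt", "server.key", logger, cfg, +// func() error { return store.Close() }, +// ) +// }()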
+func ServeTLSWithShutdown( + listener net.Listener, + handler http.Handler, + certFile, keyFile string, + logger log.Logger, + config *Config, + shutdownTasks ...func() error, +) error { + var ( + formatStr = "Starting RPC HTTPS server on %s (cert: %q, key: %q)" + logMsg = log.NewLazySprintf( + formatStr, + listener.Addr(), + certFile, + keyFile, + ) + ) + logger.Info("serve tls", "msg", logMsg) + + var ( + rlHandler = RecoverAndLogHandler(defaultHandler{h: handler}, logger) + s = &http.Server{ + Handler: PreChecksHandler(rlHandler, config), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, + } + ) err := s.ServeTLS(listener, certFile, keyFile) logger.Error("RPC HTTPS server stopped", "err", err) + + for _, f := range shutdownTasks { + if err := f(); err != nil { + logger.Error("executing RPC HTTP server post-shutdown task", "err", err) + } + } + return err } @@ -106,7 +191,7 @@ func WriteRPCResponseHTTPError( jsonBytes, err := json.Marshal(res) if err != nil { - return fmt.Errorf("json marshal: %w", err) + return ErrMarshalResponse{Source: err} } w.Header().Set("Content-Type", "application/json") @@ -121,7 +206,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error } // WriteCacheableRPCResponseHTTP marshals res as JSON (with indent) and writes -// it to w. Adds cache-control to the response header and sets the expiry to +// it to w. Adds Cache-Control to the response header and sets the expiry to // one day. func WriteCacheableRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { return writeRPCResponseHTTP(w, []httpHeader{{"Cache-Control", "public, max-age=86400"}}, res...) @@ -133,7 +218,7 @@ type httpHeader struct { } func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error { - var v interface{} + var v any if len(res) == 1 { v = res[0] } else { @@ -142,18 +227,18 @@ func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...ty jsonBytes, err := json.Marshal(v) if err != nil { - return fmt.Errorf("json marshal: %w", err) + return ErrMarshalResponse{Source: err} } w.Header().Set("Content-Type", "application/json") for _, header := range headers { w.Header().Set(header.name, header.value) } - w.WriteHeader(200) + w.WriteHeader(http.StatusOK) _, err = w.Write(jsonBytes) return err } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // RecoverAndLogHandler wraps an HTTP handler, adding error logging. // If the inner function panics, the outer function recovers, logs, sends an @@ -162,21 +247,21 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Wrap the ResponseWriter to remember the status rww := &responseWriterWrapper{-1, w} - begin := time.Now() + begin := cmttime.Now() - rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) + rww.Header().Set("X-Server-Time", strconv.FormatInt(begin.Unix(), 10)) defer func() { // Handle any panics in the panic handler below. Does not use the logger, since we want // to avoid any further panics. However, we try to return a 500, since it otherwise // defaults to 200 and there is no other way to terminate the connection. 
If that // should panic for whatever reason then the Go HTTP server will handle it and - // terminate the connection - panicing is the de-facto and only way to get the Go HTTP + // terminate the connection - panicking is the de-facto and only way to get the Go HTTP // server to terminate the request and close the connection/stream: // https://github.com/golang/go/issues/17790#issuecomment-258481416 if e := recover(); e != nil { fmt.Fprintf(os.Stderr, "Panic during RPC panic recovery: %v\n%v\n", e, string(debug.Stack())) - w.WriteHeader(500) + w.WriteHeader(http.StatusInternalServerError) } }() @@ -213,7 +298,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler } // Finally, log. - durationMS := time.Since(begin).Nanoseconds() / 1000000 + durationMS := cmttime.Since(begin).Nanoseconds() / 1000000 if rww.Status == -1 { rww.Status = 200 } @@ -230,7 +315,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler }) } -// Remember the status for logging +// Remember the status for logging. type responseWriterWrapper struct { Status int http.ResponseWriter @@ -241,18 +326,16 @@ func (w *responseWriterWrapper) WriteHeader(status int) { w.ResponseWriter.WriteHeader(status) } -// implements http.Hijacker +// implements http.Hijacker. func (w *responseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { return w.ResponseWriter.(http.Hijacker).Hijack() } -type maxBytesHandler struct { +type defaultHandler struct { h http.Handler - n int64 } -func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.Body = http.MaxBytesReader(w, r.Body, h.n) +func (h defaultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.h.ServeHTTP(w, r) } @@ -261,15 +344,12 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func Listen(addr string, maxOpenConnections int) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { - return nil, fmt.Errorf( - "invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", - addr, - ) + return nil, grpcerr.ErrInvalidRemoteAddress{Addr: addr} } proto, addr := parts[0], parts[1] listener, err = net.Listen(proto, addr) if err != nil { - return nil, fmt.Errorf("failed to listen on %v: %v", addr, err) + return nil, ErrListening{Addr: addr, Source: err} } if maxOpenConnections > 0 { listener = netutil.LimitListener(listener, maxOpenConnections) @@ -277,3 +357,50 @@ func Listen(addr string, maxOpenConnections int) (listener net.Listener, err err return listener, nil } + +// Middleware + +// PreChecksHandler is a middleware function that checks the size of batch requests and returns an error +// if it exceeds the maximum configured size. It also checks if the request body is not greater than the +// configured maximum request body bytes limit. 
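+// For example, with the default MaxRequestBatchSize of 10, a JSON array carrying 11 requests is rejected with an RPCInvalidRequestError before it ever reaches the RPC handlers.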
+func PreChecksHandler(next http.Handler, config *Config) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ensure that the current request body bytes is not greater than the configured maximum request body bytes + r.Body = http.MaxBytesReader(w, r.Body, config.MaxBodyBytes) + + // if MaxRequestBatchSize is 0 then don't constrain the number of requests per batch + // It cannot be negative because the config.toml validation requires it to be + // greater than or equal to 0 + if config.MaxRequestBatchSize > 0 { + var requests []types.RPCRequest + var responses []types.RPCResponse + var err error + + data, err := io.ReadAll(r.Body) + if err != nil { + res := types.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err)) + _ = WriteRPCResponseHTTPError(w, http.StatusBadRequest, res) + return + } + + err = json.Unmarshal(data, &requests) + // if there is no error, the body holds a batch of requests; check whether the number of requests + // exceeds the maximum batch size configured + if err == nil { + // if the number of requests in the batch exceeds the configured maximum then return an error + if len(requests) > config.MaxRequestBatchSize { + res := types.RPCInvalidRequestError(nil, fmt.Errorf("batch request exceeds maximum (%d) allowed number of requests", config.MaxRequestBatchSize)) + responses = append(responses, res) + _ = WriteRPCResponseHTTP(w, responses...) + return + } + } + + // ensure the request body can be read again by other handlers + r.Body = io.NopCloser(bytes.NewBuffer(data)) + } + + // next handler + next.ServeHTTP(w, r) + }) +} diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 56f6dba2bfe..2a0a4d35fc9 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/libs/log" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) type sampleResult struct { @@ -30,7 +30,7 @@ func TestMaxOpenConnections(t *testing.T) { // Start the server. var open int32 mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { if n := atomic.AddInt32(&open, 1); n > int32(max) { t.Errorf("%d open connections, want <= %d", n, max) } @@ -76,7 +76,7 @@ func TestServeTLS(t *testing.T) { defer ln.Close() mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "some body") }) @@ -119,7 +119,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-control")) + assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-Control")) assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}}`, string(body)) // multiple arguments @@ -138,6 +138,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { assert.Equal(t, `[{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}},{"jsonrpc":"2.0","id":-1,"result":{"value":"world"}}]`, string(body)) } +// TestWriteRPCResponseHTTPError tests WriteRPCResponseHTTPError.
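+// It verifies the HTTP status code, the Content-Type header and the JSON-RPC error body.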
func TestWriteRPCResponseHTTPError(t *testing.T) { w := httptest.NewRecorder() err := WriteRPCResponseHTTPError( @@ -145,10 +146,14 @@ func TestWriteRPCResponseHTTPError(t *testing.T) { http.StatusInternalServerError, types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo"))) require.NoError(t, err) + resp := w.Result() body, err := io.ReadAll(resp.Body) - _ = resp.Body.Close() require.NoError(t, err) + + err = resp.Body.Close() + require.NoError(t, err) + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"error":{"code":-32603,"message":"Internal error","data":"foo"}}`, string(body)) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 6381d91d70f..cadbf0d142a 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -10,21 +10,21 @@ import ( cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) // HTTP + URI handler var reInt = regexp.MustCompile(`^-?[0-9]+$`) -// convert from a function name to the http handler +// convert from a function name to the http handler. func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { // Always return -1 as there's no ID here. dummyID := types.JSONRPCIntID(-1) // URIClientRequestID // Exception for websocket endpoints if rpcFunc.ws { - return func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { res := types.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, http.StatusNotFound, res); wErr != nil { logger.Error("failed to write response", "err", wErr) @@ -34,7 +34,13 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit // All other endpoints return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) + logger.Debug("HTTP HANDLER", "req", map[string]any{ + "method": r.Method, + "url": r.URL.String(), + "header": r.Header, + "ip": r.RemoteAddr, + "postForm": r.PostForm, + }) ctx := &types.Context{HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} @@ -53,8 +59,12 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit returns := rpcFunc.f.Call(args) - logger.Debug("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) + logArgs := make([]any, 0, len(fnArgs)) + for _, arg := range fnArgs { + logArgs = append(logArgs, arg.Interface()) + } result, err := unreflectResult(returns) + logger.Debug("HTTPRestRPC", "method", r.URL.Path, "args", logArgs, "result", result, "error", err) if err != nil { if err := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.RPCInternalError(dummyID, err)); err != nil { @@ -77,7 +87,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit } } -// Covert an http query to a list of properly typed values. +// Convert an http query to a list of properly typed values. // To be properly decoded the arg must be a concrete type from CometBFT (if its an interface). 
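+// For example, a query such as ?height=5&name="bob" is decoded into the int and string arguments of the registered RPC function.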
func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { // skip types.Context @@ -128,24 +138,19 @@ func jsonStringToArg(rt reflect.Type, arg string) (reflect.Value, error) { func nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { if rt.Kind() == reflect.Ptr { rv1, ok, err := nonJSONStringToArg(rt.Elem(), arg) - switch { - case err != nil: - return reflect.Value{}, false, err - case ok: - rv := reflect.New(rt.Elem()) - rv.Elem().Set(rv1) - return rv, true, nil - default: - return reflect.Value{}, false, nil - } - } else { - return _nonJSONStringToArg(rt, arg) + if err != nil || !ok { + return reflect.Value{}, ok, err + } + rv := reflect.New(rt.Elem()) + rv.Elem().Set(rv1) + return rv, true, nil } + return _nonJSONStringToArg(rt, arg) } // NOTE: rt.Kind() isn't a pointer. func _nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { - isIntString := reInt.Match([]byte(arg)) + isIntString := reInt.MatchString(arg) isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index c29224ea2e4..de25be18679 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -8,18 +8,19 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/libs/bytes" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { input := []byte(`{"value":"1234","height":22}`) // naive is float,string - var p1 map[string]interface{} + var p1 map[string]any err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here h, ok := p1["height"].(float64) if assert.True(t, ok, "%#v", p1["height"]) { assert.EqualValues(t, 22, h) @@ -32,12 +33,12 @@ func TestParseJSONMap(t *testing.T) { // preloading map with values doesn't help tmp := 0 - p2 := map[string]interface{}{ + p2 := map[string]any{ "value": &bytes.HexBytes{}, "height": &tmp, } err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here h, ok := p2["height"].(float64) if assert.True(t, ok, "%#v", p2["height"]) { assert.EqualValues(t, 22, h) @@ -52,14 +53,14 @@ func TestParseJSONMap(t *testing.T) { // struct has unknown types, but hard-coded keys tmp = 0 p3 := struct { - Value interface{} `json:"value"` - Height interface{} `json:"height"` + Value any `json:"value"` + Height any `json:"height"` }{ Height: &tmp, Value: &bytes.HexBytes{}, } err = json.Unmarshal(input, &p3) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here h, ok := p3.Height.(*int) if assert.True(t, ok, "%#v", p3.Height) { assert.Equal(t, 22, *h) @@ -76,7 +77,7 @@ func TestParseJSONMap(t *testing.T) { Height int `json:"height"` }{} err = json.Unmarshal(input, &p4) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.EqualValues(t, 22, p4.Height) assert.EqualValues(t, []byte{0x12, 0x34}, p4.Value) } @@ -85,16 +86,16 @@ func TestParseJSONMap(t *testing.T) { // dynamic keys on map, and we can deserialize to the desired 
types var p5 map[string]*json.RawMessage err = json.Unmarshal(input, &p5) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here var h int err = json.Unmarshal(*p5["height"], &h) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, 22, h) } var v bytes.HexBytes err = json.Unmarshal(*p5["value"], &v) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, bytes.HexBytes{0x12, 0x34}, v) } } @@ -104,9 +105,9 @@ func TestParseJSONArray(t *testing.T) { input := []byte(`["1234",22]`) // naive is float,string - var p1 []interface{} + var p1 []any err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here v, ok := p1[0].(string) if assert.True(t, ok, "%#v", p1[0]) { assert.EqualValues(t, "1234", v) @@ -119,9 +120,9 @@ func TestParseJSONArray(t *testing.T) { // preloading map with values helps here (unlike map - p2 above) tmp := 0 - p2 := []interface{}{&bytes.HexBytes{}, &tmp} + p2 := []any{&bytes.HexBytes{}, &tmp} err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { + if assert.NoError(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here v, ok := p2[0].(*bytes.HexBytes) if assert.True(t, ok, "%#v", p2[0]) { assert.EqualValues(t, []byte{0x12, 0x34}, *v) @@ -134,7 +135,7 @@ } func TestParseJSONRPC(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(_ *types.Context, _ int, _ string) {} call := NewRPCFunc(demo, "height,name") cases := []struct { @@ -158,20 +159,19 @@ data := []byte(tc.raw) vals, err := jsonParamsToArgs(call, data) if tc.fail { - assert.NotNil(t, err, i) + require.Error(t, err) } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 2, len(vals), i) { + require.NoError(t, err, "%s: %+v", i, err) + if assert.Len(t, vals, 2, i) { assert.Equal(t, tc.height, vals[0].Int(), i) assert.Equal(t, tc.name, vals[1].String(), i) } } - } } func TestParseURI(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(_ *types.Context, _ int, _ string) {} call := NewRPCFunc(demo, "height,name") cases := []struct { @@ -187,7 +187,7 @@ // can parse numbers quoted, too {[]string{`"7"`, `"flew"`}, 7, "flew", false}, {[]string{`"-10"`, `"bob"`}, -10, "bob", false}, - // cant parse strings uquoted + // can't parse unquoted strings {[]string{`"-10"`, `bob`}, -10, "bob", true}, } for idx, tc := range cases { @@ -196,18 +196,17 @@ url := fmt.Sprintf( "test.com/method?height=%v&name=%v", tc.raw[0], tc.raw[1]) - req, err := http.NewRequest("GET", url, nil) - assert.NoError(t, err) + req, err := http.NewRequest(http.MethodGet, url, nil) + require.NoError(t, err) vals, err := httpParamsToArgs(call, req) if tc.fail { - assert.NotNil(t, err, i) + require.Error(t, err, i) } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 2, len(vals), i) { + require.NoError(t, err, "%s: %+v", i, err) + if assert.Len(t, vals, 2, i) { assert.Equal(t, tc.height, vals[0].Int(), i) assert.Equal(t, tc.name, vals[1].String(), i) } } - } } diff --git a/rpc/jsonrpc/server/rpc_func.go
b/rpc/jsonrpc/server/rpc_func.go index cd7e5273e14..678a95e3b16 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -12,7 +12,7 @@ import ( // RegisterRPCFuncs adds a route for each function in the funcMap, as well as // general jsonrpc and websocket handlers for all functions. "result" is the // interface on which the result objects are registered, and is popualted with -// every RPCResponse +// every RPCResponse. func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) { // HTTP endpoints for funcName, rpcFunc := range funcMap { @@ -37,7 +37,7 @@ type Option func(*RPCFunc) func Cacheable(noCacheDefArgs ...string) Option { return func(r *RPCFunc) { r.cacheable = true - r.noCacheDefArgs = make(map[string]interface{}) + r.noCacheDefArgs = make(map[string]any) for _, arg := range noCacheDefArgs { r.noCacheDefArgs[arg] = nil } @@ -51,25 +51,25 @@ func Ws() Option { } } -// RPCFunc contains the introspected type information for a function +// RPCFunc contains the introspected type information for a function. type RPCFunc struct { - f reflect.Value // underlying rpc function - args []reflect.Type // type of each function arg - returns []reflect.Type // type of each return arg - argNames []string // name of each argument - cacheable bool // enable cache control - ws bool // enable websocket communication - noCacheDefArgs map[string]interface{} // a lookup table of args that, if not supplied or are set to default values, cause us to not cache + f reflect.Value // underlying rpc function + args []reflect.Type // type of each function arg + returns []reflect.Type // type of each return arg + argNames []string // name of each argument + cacheable bool // enable cache control + ws bool // enable websocket communication + noCacheDefArgs map[string]any // a lookup table of args that, if not supplied or are set to default values, cause us to not cache } // NewRPCFunc wraps a function for introspection. -// f is the function, args are comma separated argument names -func NewRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { +// f is the function, args are comma separated argument names. +func NewRPCFunc(f any, args string, options ...Option) *RPCFunc { return newRPCFunc(f, args, options...) } // NewWSRPCFunc wraps a function for introspection and use in the websockets. -func NewWSRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { +func NewWSRPCFunc(f any, args string, options ...Option) *RPCFunc { options = append(options, Ws()) return newRPCFunc(f, args, options...) } @@ -98,7 +98,7 @@ func (f *RPCFunc) cacheableWithArgs(args []reflect.Value) bool { return true } -func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { +func newRPCFunc(f any, args string, options ...Option) *RPCFunc { var argNames []string if args != "" { argNames = strings.Split(args, ",") @@ -118,8 +118,8 @@ func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { return r } -// return a function's argument types -func funcArgTypes(f interface{}) []reflect.Type { +// return a function's argument types. +func funcArgTypes(f any) []reflect.Type { t := reflect.TypeOf(f) n := t.NumIn() typez := make([]reflect.Type, n) @@ -129,8 +129,8 @@ func funcArgTypes(f interface{}) []reflect.Type { return typez } -// return a function's return types -func funcReturnTypes(f interface{}) []reflect.Type { +// return a function's return types. 
+func funcReturnTypes(f any) []reflect.Type { t := reflect.TypeOf(f) n := t.NumOut() typez := make([]reflect.Type, n) @@ -140,10 +140,10 @@ func funcReturnTypes(f interface{}) []reflect.Type { return typez } -//------------------------------------------------------------- +// ------------------------------------------------------------- -// NOTE: assume returns is result struct and error. If error is not nil, return it -func unreflectResult(returns []reflect.Value) (interface{}, error) { +// NOTE: assume returns is result struct and error. If error is not nil, return it. +func unreflectResult(returns []reflect.Value) (any, error) { errV := returns[1] if errV.Interface() != nil { return nil, fmt.Errorf("%v", errV.Interface()) diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index 8e07d68dd7a..13f28d3f605 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -3,7 +3,6 @@ package server import ( "context" "encoding/json" - "errors" "fmt" "net/http" "reflect" @@ -14,7 +13,7 @@ import ( "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/service" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) // WebSocket handler @@ -28,7 +27,7 @@ const ( // WebsocketManager provides a WS handler for incoming connections and passes a // map of functions along with any additional params to new connections. -// NOTE: The websocket path is defined externally, e.g. in node/node.go +// NOTE: The websocket path is defined externally, e.g. in node/node.go. type WebsocketManager struct { websocket.Upgrader @@ -46,7 +45,7 @@ func NewWebsocketManager( return &WebsocketManager{ funcMap: funcMap, Upgrader: websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { + CheckOrigin: func(_ *http.Request) bool { // TODO ??? // // The default behavior would be relevant to browser-based clients, @@ -243,7 +242,7 @@ func (wsc *wsConnection) OnStop() { } // GetRemoteAddr returns the remote address of the underlying connection. -// It implements WSRPCConnection +// It implements WSRPCConnection. func (wsc *wsConnection) GetRemoteAddr() string { return wsc.remoteAddr } @@ -254,7 +253,7 @@ func (wsc *wsConnection) GetRemoteAddr() string { func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCResponse) error { select { case <-wsc.Quit(): - return errors.New("connection was stopped") + return ErrConnectionStopped case <-ctx.Done(): return ctx.Err() case wsc.writeChan <- resp: @@ -264,7 +263,7 @@ func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCRes // TryWriteRPCResponse attempts to push a response to the writeChan, but does // not block. -// It implements WSRPCConnection. It is Goroutine-safe +// It implements WSRPCConnection. It is Goroutine-safe. func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { select { case <-wsc.Quit(): @@ -286,7 +285,7 @@ func (wsc *wsConnection) Context() context.Context { return wsc.ctx } -// Read from the socket and subscribe to or unsubscribe from events +// Read from the socket and subscribe to or unsubscribe from events. 
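+// Each incoming JSON-RPC message is dispatched to the matching RPCFunc, and the result is pushed onto the write channel for writeRoutine to send.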
func (wsc *wsConnection) readRoutine() { // readRoutine will block until response is written or WS connection is closed writeCtx := context.Background() @@ -305,7 +304,7 @@ func (wsc *wsConnection) readRoutine() { } }() - wsc.baseConn.SetPongHandler(func(m string) error { + wsc.baseConn.SetPongHandler(func(_ string) error { return wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)) }) @@ -381,7 +380,7 @@ func (wsc *wsConnection) readRoutine() { returns := rpcFunc.f.Call(args) // TODO: Need to encode args/returns to string if we want to log them - wsc.Logger.Info("WSJSONRPC", "method", request.Method) + wsc.Logger.Debug("WSJSONRPC", "method", request.Method) result, err := unreflectResult(returns) if err != nil { @@ -398,7 +397,7 @@ func (wsc *wsConnection) readRoutine() { } } -// receives on a write channel and writes out on the socket +// receives on a write channel and writes out on the socket. func (wsc *wsConnection) writeRoutine() { pingTicker := time.NewTicker(wsc.pingPeriod) defer pingTicker.Stop() diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go index b403151950c..5f4ccb5cef9 100644 --- a/rpc/jsonrpc/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/libs/log" - types "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) func TestWebsocketManagerHandler(t *testing.T) { @@ -30,7 +30,7 @@ func TestWebsocketManagerHandler(t *testing.T) { req, err := types.MapToRequest( types.JSONRPCStringID("TestWebsocketManager"), "c", - map[string]interface{}{"s": "a", "i": 10}, + map[string]any{"s": "a", "i": 10}, ) require.NoError(t, err) err = c.WriteJSON(req) @@ -46,7 +46,7 @@ func TestWebsocketManagerHandler(t *testing.T) { func newWSServer() *httptest.Server { funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewWSRPCFunc(func(_ *types.Context, _ string, _ int) (string, error) { return "foo", nil }, "s,i"), } wm := NewWebsocketManager(funcMap) wm.SetLogger(log.TestingLogger()) diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index 74ed3495185..f033bb2fb4c 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -5,8 +5,8 @@ import ( "net/http" "os" + cmtos "github.com/cometbft/cometbft/internal/os" "github.com/cometbft/cometbft/libs/log" - cmtos "github.com/cometbft/cometbft/libs/os" rpcserver "github.com/cometbft/cometbft/rpc/jsonrpc/server" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" ) @@ -26,7 +26,7 @@ type Result struct { func main() { var ( mux = http.NewServeMux() - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewLogger(os.Stdout) ) // Stop upon receiving SIGTERM or CTRL-C. diff --git a/rpc/jsonrpc/types/types.go b/rpc/jsonrpc/types/types.go index 08ee2f2bbd5..1cf1cfee429 100644 --- a/rpc/jsonrpc/types/types.go +++ b/rpc/jsonrpc/types/types.go @@ -17,19 +17,19 @@ type jsonrpcid interface { isJSONRPCID() } -// JSONRPCStringID a wrapper for JSON-RPC string IDs +// JSONRPCStringID a wrapper for JSON-RPC string IDs. type JSONRPCStringID string func (JSONRPCStringID) isJSONRPCID() {} func (id JSONRPCStringID) String() string { return string(id) } -// JSONRPCIntID a wrapper for JSON-RPC integer IDs +// JSONRPCIntID a wrapper for JSON-RPC integer IDs. 
type JSONRPCIntID int func (JSONRPCIntID) isJSONRPCID() {} func (id JSONRPCIntID) String() string { return fmt.Sprintf("%d", id) } -func idFromInterface(idInterface interface{}) (jsonrpcid, error) { +func idFromInterface(idInterface any) (jsonrpcid, error) { switch id := idInterface.(type) { case string: return JSONRPCStringID(id), nil @@ -45,23 +45,23 @@ func idFromInterface(idInterface interface{}) (jsonrpcid, error) { } } -//---------------------------------------- +// ---------------------------------------- // REQUEST type RPCRequest struct { JSONRPC string `json:"jsonrpc"` ID jsonrpcid `json:"id,omitempty"` Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} + Params json.RawMessage `json:"params"` // must be map[string]any or []any } -// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int +// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int. func (req *RPCRequest) UnmarshalJSON(data []byte) error { unsafeReq := struct { JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` + ID any `json:"id,omitempty"` Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} + Params json.RawMessage `json:"params"` // must be map[string]any or []any }{} err := json.Unmarshal(data, &unsafeReq) @@ -98,8 +98,8 @@ func (req RPCRequest) String() string { return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID, req.Method, req.Params) } -func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { - var paramsMap = make(map[string]json.RawMessage, len(params)) +func MapToRequest(id jsonrpcid, method string, params map[string]any) (RPCRequest, error) { + paramsMap := make(map[string]json.RawMessage, len(params)) for name, value := range params { valueJSON, err := cmtjson.Marshal(value) if err != nil { @@ -116,8 +116,8 @@ func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (R return NewRPCRequest(id, method, payload), nil } -func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { - var paramsMap = make([]json.RawMessage, len(params)) +func ArrayToRequest(id jsonrpcid, method string, params []any) (RPCRequest, error) { + paramsMap := make([]json.RawMessage, len(params)) for i, value := range params { valueJSON, err := cmtjson.Marshal(value) if err != nil { @@ -134,7 +134,7 @@ func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCReque return NewRPCRequest(id, method, payload), nil } -//---------------------------------------- +// ---------------------------------------- // RESPONSE type RPCError struct { @@ -158,11 +158,11 @@ type RPCResponse struct { Error *RPCError `json:"error,omitempty"` } -// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int +// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int. 
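+// Both string IDs (e.g. "abc") and integer IDs (e.g. 1) are accepted; other ID types are rejected.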
func (resp *RPCResponse) UnmarshalJSON(data []byte) error { unsafeResp := &struct { JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` + ID any `json:"id,omitempty"` Result json.RawMessage `json:"result,omitempty"` Error *RPCError `json:"error,omitempty"` }{} @@ -184,7 +184,7 @@ func (resp *RPCResponse) UnmarshalJSON(data []byte) error { return nil } -func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { +func NewRPCSuccessResponse(id jsonrpcid, res any) RPCResponse { var rawMsg json.RawMessage if res != nil { @@ -246,16 +246,16 @@ func RPCServerError(id jsonrpcid, err error) RPCResponse { return NewRPCErrorResponse(id, -32000, "Server error", err.Error()) } -//---------------------------------------- +// ---------------------------------------- // WSRPCConnection represents a websocket connection. type WSRPCConnection interface { // GetRemoteAddr returns a remote address of the connection. GetRemoteAddr() string // WriteRPCResponse writes the response onto connection (BLOCKING). - WriteRPCResponse(context.Context, RPCResponse) error + WriteRPCResponse(ctx context.Context, res RPCResponse) error // TryWriteRPCResponse tries to write the response onto connection (NON-BLOCKING). - TryWriteRPCResponse(RPCResponse) bool + TryWriteRPCResponse(res RPCResponse) bool // Context returns the connection's context. Context() context.Context } @@ -312,12 +312,12 @@ func (ctx *Context) Context() context.Context { return context.Background() } -//---------------------------------------- +// ---------------------------------------- // SOCKETS // Determine if its a unix or tcp socket. // If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port -// TODO: deprecate +// TODO: deprecate. func SocketType(listenAddr string) string { socketType := "unix" if len(strings.Split(listenAddr, ":")) >= 2 { diff --git a/rpc/jsonrpc/types/types_test.go b/rpc/jsonrpc/types/types_test.go index 8434f208b8a..8f39d139ea0 100644 --- a/rpc/jsonrpc/types/types_test.go +++ b/rpc/jsonrpc/types/types_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type SampleResult struct { @@ -34,17 +35,20 @@ func TestResponses(t *testing.T) { for _, tt := range responseTests { jsonid := tt.id a := NewRPCSuccessResponse(jsonid, &SampleResult{"hello"}) - b, _ := json.Marshal(a) + b, err := json.Marshal(a) + require.NoError(t, err) s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected) assert.Equal(s, string(b)) d := RPCParseError(errors.New("hello world")) - e, _ := json.Marshal(d) + e, err := json.Marshal(d) + require.NoError(t, err) f := `{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error. 
Invalid JSON","data":"hello world"}}` assert.Equal(f, string(e)) g := RPCMethodNotFoundError(jsonid) - h, _ := json.Marshal(g) + h, err := json.Marshal(g) + require.NoError(t, err) i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, tt.expected) assert.Equal(string(h), i) } @@ -58,26 +62,43 @@ func TestUnmarshallResponses(t *testing.T) { []byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)), response, ) - assert.Nil(err) + require.NoError(t, err) a := NewRPCSuccessResponse(tt.id, &SampleResult{"hello"}) assert.Equal(*response, a) } response := &RPCResponse{} err := json.Unmarshal([]byte(`{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}`), response) - assert.NotNil(err) + require.Error(t, err) } func TestRPCError(t *testing.T) { - assert.Equal(t, "RPC error 12 - Badness: One worse than a code 11", - fmt.Sprintf("%v", &RPCError{ - Code: 12, - Message: "Badness", - Data: "One worse than a code 11", - })) + testCases := []struct { + name string + err *RPCError + expected string + }{ + { + name: "With data", + err: &RPCError{ + Code: 12, + Message: "Badness", + Data: "One worse than a code 11", + }, + expected: "RPC error 12 - Badness: One worse than a code 11", + }, + { + name: "Without data", + err: &RPCError{ + Code: 12, + Message: "Badness", + }, + expected: "RPC error 12 - Badness", + }, + } - assert.Equal(t, "RPC error 12 - Badness", - fmt.Sprintf("%v", &RPCError{ - Code: 12, - Message: "Badness", - })) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, tc.err.Error()) + }) + } } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 3086b8ffa83..e19ada92bac 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -54,6 +54,17 @@ info: However, the support for unsuffixed methods will be dropped in future releases and all users are encouraged to migrate as soon as possible. + ## Encoding + + All data is encoded in amino-compatible JSON. This mostly differs from + `encoding/json` in encoding of integers (64-bit integers are encoded as + strings, not numbers), and handling of interfaces (wrapped in an interface + object with type/value keys). + + Please refer to [this + documentation](https://pkg.go.dev/github.com/cometbft/cometbft/libs/json) + for additional details. + ## URI/HTTP A REST like interface. @@ -474,7 +485,7 @@ paths: `Cache-Control` header will be set with the default maximum age. responses: "200": - description: Header informations. + description: Header information. content: application/json: schema: @@ -506,7 +517,7 @@ paths: maximum age. responses: "200": - description: Header informations. + description: Header information. content: application/json: schema: @@ -538,7 +549,7 @@ paths: `Cache-Control` header will be set with the default maximum age. responses: "200": - description: Block informations. + description: Block information. content: application/json: schema: @@ -570,7 +581,7 @@ paths: maximum age. responses: "200": - description: Block informations. + description: Block information. content: application/json: schema: @@ -620,7 +631,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. + description: height to return. If no height is provided, it will fetch commit information regarding the latest block. 
schema: type: integer default: 0 @@ -812,7 +823,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. + description: height to return. If no height is provided, it will fetch commit information regarding the latest block. schema: type: integer default: 0 @@ -837,6 +848,35 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /v1/unconfirmed_tx: + get: + summary: Get an unconfirmed transaction by hash + operationId: unconfirmed_tx + parameters: + - in: query + name: hash + description: hash of transaction to retrieve + required: true + schema: + type: string + example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + tags: + - Info + description: | + Get an unconfirmed transaction by hash + responses: + "200": + description: Unconfirmed transaction + content: + application/json: + schema: + $ref: "#/components/schemas/UnconfirmedTransactionResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" /v1/unconfirmed_txs: get: summary: Get the list of unconfirmed transactions @@ -966,7 +1006,7 @@ paths: required: true schema: type: string - example: '"block.height > 1000 AND valset.changed > 0"' + example: '"block.height > 1000"' - in: query name: page description: "Page number (1-based)" @@ -1539,7 +1579,7 @@ components: block: $ref: "#/components/schemas/Block" BlockResponse: - description: Blockc info + description: Block info allOf: - $ref: "#/components/schemas/JSONRPC" - type: object @@ -2237,6 +2277,29 @@ components: # - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" type: object + UnconfirmedTransactionResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: string + example: "2.0" + id: + type: integer + example: 0 + result: + type: object + required: + - "tx" + properties: + tx: + type: string + nullable: true + example: "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" + UnconfirmedTransactionsResponse: type: object required: @@ -2446,8 +2509,10 @@ components: response: required: - "data" - - "app_version" - "version" + - "app_version" + - "last_block_height" + - "last_block_app_hash" properties: data: type: string @@ -2456,8 +2521,14 @@ components: type: string example: "0.16.1" app_version: + type: string + example: "1" + last_block_height: type: string example: "1314126" + last_block_app_hash: + type: string + example: 
"C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" type: object type: object @@ -2734,9 +2805,9 @@ components: example: 2 type: object - ###### Reuseable types ###### + ###### Reusable types ###### - # Validator type with proposer prioirty + # Validator type with proposer priority ValidatorPriority: type: object properties: diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 5dfbaa4ff35..957176ac54e 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -9,13 +9,12 @@ import ( "time" abci "github.com/cometbft/cometbft/abci/types" + cfg "github.com/cometbft/cometbft/config" + cmtnet "github.com/cometbft/cometbft/internal/net" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" - - cfg "github.com/cometbft/cometbft/config" - cmtnet "github.com/cometbft/cometbft/libs/net" nm "github.com/cometbft/cometbft/node" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/privval" "github.com/cometbft/cometbft/proxy" ctypes "github.com/cometbft/cometbft/rpc/core/types" @@ -25,8 +24,9 @@ import ( // Options helps with specifying some parameters for our RPC testing for greater // control. type Options struct { - suppressStdout bool - recreateConfig bool + suppressStdout bool + recreateConfig bool + maxReqBatchSize int } var ( @@ -45,7 +45,7 @@ func waitForRPC() { } result := new(ctypes.ResultStatus) for { - _, err := client.Call(context.Background(), "status", map[string]interface{}{}, result) + _, err := client.Call(context.Background(), "status", map[string]any{}, result) if err == nil { return } @@ -55,7 +55,7 @@ func waitForRPC() { } } -// f**ing long, but unique for each test +// f**ing long, but unique for each test. func makePathname() string { // get path p, err := os.Getwd() @@ -97,7 +97,7 @@ func createConfig() *cfg.Config { return c } -// GetConfig returns a config for the test cases as a singleton +// GetConfig returns a config for the test cases as a singleton. func GetConfig(forceCreate ...bool) *cfg.Config { if globalConfig == nil || (len(forceCreate) > 0 && forceCreate[0]) { globalConfig = createConfig() @@ -105,7 +105,7 @@ func GetConfig(forceCreate ...bool) *cfg.Config { return globalConfig } -// StartCometBFT starts a test CometBFT server in a go routine and returns when it is initialized +// StartCometBFT starts a test CometBFT server in a go routine and returns when it is initialized. func StartCometBFT(app abci.Application, opts ...func(*Options)) *nm.Node { nodeOpts := defaultOptions for _, opt := range opts { @@ -137,7 +137,7 @@ func StopCometBFT(node *nm.Node) { os.RemoveAll(node.Config().RootDir) } -// NewCometBFT creates a new CometBFT server and sleeps forever +// NewCometBFT creates a new CometBFT server and sleeps forever. 
func NewCometBFT(app abci.Application, opts *Options) *nm.Node { // Create & start node config := GetConfig(opts.recreateConfig) @@ -145,14 +145,20 @@ var logger log.Logger if opts.suppressStdout { logger = log.NewNopLogger() } else { - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewLogger(os.Stdout) logger = log.NewFilter(logger, log.AllowError()) } + if opts.maxReqBatchSize > 0 { + config.RPC.MaxRequestBatchSize = opts.maxReqBatchSize + } pvKeyFile := config.PrivValidatorKeyFile() pvKeyStateFile := config.PrivValidatorStateFile() - pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) + pv, err := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile, nil) + if err != nil { + panic(err) + } papp := proxy.NewLocalClientCreator(app) - nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + nodeKey, err := nodekey.LoadOrGen(config.NodeKeyFile()) if err != nil { panic(err) } @@ -178,3 +184,8 @@ func SuppressStdout(o *Options) { func RecreateConfig(o *Options) { o.recreateConfig = true } + +// MaxReqBatchSize is an option to limit the maximum number of requests per batch. +func MaxReqBatchSize(o *Options) { + o.maxReqBatchSize = 2 +} diff --git a/scripts/README.md b/scripts/README.md deleted file mode 100644 index 18624117816..00000000000 --- a/scripts/README.md +++ /dev/null @@ -1 +0,0 @@ -* http://redsymbol.net/articles/unofficial-bash-strict-mode/ diff --git a/scripts/authors.sh b/scripts/authors.sh deleted file mode 100755 index 7aafb0127e6..00000000000 --- a/scripts/authors.sh +++ /dev/null @@ -1,16 +0,0 @@ -#! /bin/bash - -# Usage: -# `./authors.sh` -# Print a list of all authors who have committed to develop since master. -# -# `./authors.sh ` -# Lookup the email address on Github and print the associated username - -author=$1 - -if [[ "$author" == "" ]]; then - git log master..develop | grep Author | sort | uniq -else - curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login -fi diff --git a/scripts/dist.sh b/scripts/dist.sh index c615dc6dbf6..c0d2ccadf84 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -6,7 +6,7 @@ set -e # Get the version from the environment, or try to figure it out. if [ -z $VERSION ]; then - VERSION=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < version/version.go) + VERSION=$(awk -F\" '/CMTSemVer =/ { print $2; exit }' < version/version.go) fi if [ -z "$VERSION" ]; then echo "Please specify a version."
@@ -41,7 +41,7 @@ for arch in "${arch_list[@]}"; do for os in "${os_list[@]}"; do if [[ "$XC_EXCLUDE" != *" $os/$arch "* ]]; then echo "--> $os/$arch" - GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.TMCoreSemVer=${VERSION}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/cometbft" ./cmd/cometbft + GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.CMTSemVer=${VERSION}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/cometbft" ./cmd/cometbft fi done done diff --git a/scripts/get_nodejs.sh b/scripts/get_nodejs.sh deleted file mode 100755 index 59469cc5016..00000000000 --- a/scripts/get_nodejs.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -VERSION=v12.9.0 -NODE_FULL=node-${VERSION}-linux-x64 - -mkdir -p ~/.local/bin -mkdir -p ~/.local/node -wget http://nodejs.org/dist/${VERSION}/${NODE_FULL}.tar.gz -O ~/.local/node/${NODE_FULL}.tar.gz -tar -xzf ~/.local/node/${NODE_FULL}.tar.gz -C ~/.local/node/ -ln -s ~/.local/node/${NODE_FULL}/bin/node ~/.local/bin/node -ln -s ~/.local/node/${NODE_FULL}/bin/npm ~/.local/bin/npm -export PATH=~/.local/bin:$PATH -npm i -g dredd -ln -s ~/.local/node/${NODE_FULL}/bin/dredd ~/.local/bin/dredd diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index 48470858708..2d2912c3793 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -9,12 +9,13 @@ package main import ( "bufio" + "errors" "fmt" "io" "os" "strings" - cs "github.com/cometbft/cometbft/consensus" + cs "github.com/cometbft/cometbft/internal/consensus" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/types" ) @@ -31,7 +32,7 @@ func main() { } defer f.Close() - walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0666) + walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { panic(fmt.Errorf("failed to open WAL file: %v", err)) } @@ -45,7 +46,7 @@ func main() { for { msgJSON, _, err := br.ReadLine() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } else if err != nil { panic(fmt.Errorf("failed to read file: %v", err)) diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py deleted file mode 100644 index 00a8165553a..00000000000 --- a/scripts/linkify_changelog.py +++ /dev/null @@ -1,13 +0,0 @@ -import fileinput -import re - -# This script goes through the provided file, and replaces any " \#", -# with the valid mark down formatted link to it. e.g. -# " [\#number](https://github.com/cometbft/cometbft/issues/) -# Note that if the number is for a PR, github will auto-redirect you when you click the link. -# It is safe to run the script multiple times in succession. 
-# -# Example usage $ python3 linkify_changelog.py ../CHANGELOG_PENDING.md -for line in fileinput.input(inplace=1): - line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/cometbft/cometbft/issues/\1)", line.rstrip()) - print(line) diff --git a/scripts/metricsgen/metricsdiff/metricsdiff.go b/scripts/metricsgen/metricsdiff/metricsdiff.go index 5ed72ff97cc..2070b175cba 100644 --- a/scripts/metricsgen/metricsdiff/metricsdiff.go +++ b/scripts/metricsgen/metricsdiff/metricsdiff.go @@ -64,16 +64,16 @@ func main() { if err != nil { log.Fatalf("Open: %v", err) } - defer fa.Close() fb, err := os.Open(flag.Arg(1)) if err != nil { log.Fatalf("Open: %v", err) } - defer fb.Close() md, err := DiffFromReaders(fa, fb) if err != nil { log.Fatalf("Generating diff: %v", err) } + fa.Close() + fb.Close() fmt.Print(md) } @@ -126,7 +126,7 @@ func toList(l map[string]*dto.MetricFamily) metricsList { for name, family := range l { r[idx] = parsedMetric{ name: name, - labels: labelsToStringList(family.Metric[0].Label), + labels: labelsToStringList(family.GetMetric()[0].GetLabel()), } idx++ } diff --git a/scripts/metricsgen/metricsdiff/metricsdiff_test.go b/scripts/metricsgen/metricsdiff/metricsdiff_test.go index 122eaf67393..c72d2d4c501 100644 --- a/scripts/metricsgen/metricsdiff/metricsdiff_test.go +++ b/scripts/metricsgen/metricsdiff/metricsdiff_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - metricsdiff "github.com/cometbft/cometbft/scripts/metricsgen/metricsdiff" + metricsdiff "github.com/cometbft/cometbft/scripts/metricsgen/metricsdiff" //nolint:revive // this only works with metricsdiff in front ) func TestDiff(t *testing.T) { diff --git a/scripts/metricsgen/metricsgen.go b/scripts/metricsgen/metricsgen.go index eb1163ca78c..b0936130eda 100644 --- a/scripts/metricsgen/metricsgen.go +++ b/scripts/metricsgen/metricsgen.go @@ -38,7 +38,7 @@ Options: } } -const metricsPackageName = "github.com/go-kit/kit/metrics" +const metricsPackageName = "github.com/cometbft/cometbft/libs/metrics" const ( metricNameTag = "metrics_name" @@ -63,8 +63,8 @@ var tmpl = template.Must(template.New("tmpl").Parse(`// Code generated by metric package {{ .Package }} import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -163,15 +163,15 @@ func ParseMetricsDir(dir string, structName string) (TemplateData, error) { return TemplateData{}, fmt.Errorf("multiple packages found in %s", dir) } if len(d) == 0 { - return TemplateData{}, fmt.Errorf("no go pacakges found in %s", dir) + return TemplateData{}, fmt.Errorf("no go packages found in %s", dir) } // Grab the package name. var pkgName string - var pkg *ast.Package + var pkg *ast.Package //nolint:staticcheck // TODO(thane): Figure out a more readable way of implementing this. 
- //nolint:revive - for pkgName, pkg = range d { + + for pkgName, pkg = range d { //nolint:revive } td := TemplateData{ Package: pkgName, @@ -273,7 +273,7 @@ func extractHelpMessage(cg *ast.CommentGroup) string { } var help []string //nolint: prealloc for _, c := range cg.List { - mt := strings.TrimPrefix(c.Text, "//metrics:") + mt := strings.TrimPrefix(c.Text, "// metrics:") if mt != c.Text { return strings.TrimSpace(mt) } @@ -283,7 +283,7 @@ func extractHelpMessage(cg *ast.CommentGroup) string { } func isMetric(e ast.Expr, mPkgName string) bool { - return strings.Contains(types.ExprString(e), fmt.Sprintf("%s.", mPkgName)) + return strings.Contains(types.ExprString(e), mPkgName+".") } func extractLabels(bl *ast.BasicLit) string { diff --git a/scripts/metricsgen/metricsgen_test.go b/scripts/metricsgen/metricsgen_test.go index 8a797dca4e8..b6578d0ce01 100644 --- a/scripts/metricsgen/metricsgen_test.go +++ b/scripts/metricsgen/metricsgen_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" - metricsgen "github.com/cometbft/cometbft/scripts/metricsgen" + metricsgen "github.com/cometbft/cometbft/scripts/metricsgen" //nolint:revive // this only works with metricsgen in front ) const testDataDir = "./testdata" @@ -37,6 +37,7 @@ func TestSimpleTemplate(t *testing.T) { } } +// TestFromData tests that the metricsgen tool can parse a directory of metrics and generate a file. func TestFromData(t *testing.T) { infos, err := os.ReadDir(testDataDir) if err != nil { @@ -180,6 +181,49 @@ func TestParseMetricsStruct(t *testing.T) { }, }, }, + { + name: "parse description from comments", + metricsStruct: `type Metrics struct { + // myCounter is a counter. + // It does count. + myCounter metrics.Counter + nonMetric string + }`, + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + Description: "myCounter is a counter. It does count.", + TypeName: "Counter", + FieldName: "myCounter", + MetricName: "my_counter", + }, + }, + }, + }, + { + name: "parse short description from comments", + metricsStruct: `type Metrics struct { + // myCounter is a counter. + // + // myCounter needs a super long description, + // we don't want it on the description. + // metrics:It does count. 
+ myCounter metrics.Counter + nonMetric string + }`, + expected: metricsgen.TemplateData{ + Package: pkgName, + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + Description: "It does count.", + TypeName: "Counter", + FieldName: "myCounter", + MetricName: "my_counter", + }, + }, + }, + }, } for _, testCase := range metricsTests { t.Run(testCase.name, func(t *testing.T) { @@ -195,7 +239,7 @@ func TestParseMetricsStruct(t *testing.T) { pkgLine := fmt.Sprintf("package %s\n", pkgName) importClause := ` import( - "github.com/go-kit/kit/metrics" + "github.com/cometbft/cometbft/libs/metrics" ) ` @@ -222,7 +266,7 @@ func TestParseAliasedMetric(t *testing.T) { package mypkg import( - mymetrics "github.com/go-kit/kit/metrics" + mymetrics "github.com/cometbft/cometbft/libs/metrics" ) type Metrics struct { m mymetrics.Gauge @@ -244,16 +288,15 @@ func TestParseAliasedMetric(t *testing.T) { td, err := metricsgen.ParseMetricsDir(dir, "Metrics") require.NoError(t, err) - expected := - metricsgen.TemplateData{ - Package: "mypkg", - ParsedMetrics: []metricsgen.ParsedMetricField{ - { - TypeName: "Gauge", - FieldName: "m", - MetricName: "m", - }, + expected := metricsgen.TemplateData{ + Package: "mypkg", + ParsedMetrics: []metricsgen.ParsedMetricField{ + { + TypeName: "Gauge", + FieldName: "m", + MetricName: "m", }, - } + }, + } require.Equal(t, expected, td) } diff --git a/scripts/metricsgen/testdata/basic/metrics.gen.go b/scripts/metricsgen/testdata/basic/metrics.gen.go index d541cb2dbbf..81809baa817 100644 --- a/scripts/metricsgen/testdata/basic/metrics.gen.go +++ b/scripts/metricsgen/testdata/basic/metrics.gen.go @@ -3,8 +3,8 @@ package basic import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) diff --git a/scripts/metricsgen/testdata/basic/metrics.go b/scripts/metricsgen/testdata/basic/metrics.go index 1a361f90f66..dfcb6063307 100644 --- a/scripts/metricsgen/testdata/basic/metrics.go +++ b/scripts/metricsgen/testdata/basic/metrics.go @@ -1,6 +1,6 @@ package basic -import "github.com/go-kit/kit/metrics" +import "github.com/cometbft/cometbft/libs/metrics" //go:generate go run ../../../../scripts/metricsgen -struct=Metrics diff --git a/scripts/metricsgen/testdata/commented/metrics.gen.go b/scripts/metricsgen/testdata/commented/metrics.gen.go index c1346da3849..e5d17356c84 100644 --- a/scripts/metricsgen/testdata/commented/metrics.gen.go +++ b/scripts/metricsgen/testdata/commented/metrics.gen.go @@ -3,8 +3,8 @@ package commented import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) diff --git a/scripts/metricsgen/testdata/commented/metrics.go b/scripts/metricsgen/testdata/commented/metrics.go index 174f1e2333b..c5c9358af61 100644 --- a/scripts/metricsgen/testdata/commented/metrics.go +++ b/scripts/metricsgen/testdata/commented/metrics.go @@ -1,6 +1,6 @@ package commented -import "github.com/go-kit/kit/metrics" +import "github.com/cometbft/cometbft/libs/metrics" //go:generate go run ../../../../scripts/metricsgen -struct=Metrics diff --git a/scripts/metricsgen/testdata/tags/metrics.gen.go 
b/scripts/metricsgen/testdata/tags/metrics.gen.go index 43779c7a164..422dc82a442 100644 --- a/scripts/metricsgen/testdata/tags/metrics.gen.go +++ b/scripts/metricsgen/testdata/tags/metrics.gen.go @@ -3,8 +3,8 @@ package tags import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh index 2509e0cdbeb..16e205311f9 100755 --- a/scripts/mockery_generate.sh +++ b/scripts/mockery_generate.sh @@ -2,5 +2,5 @@ # # Invoke Mockery v2 to update generated mocks for the given type. -go run github.com/vektra/mockery/v2 --disable-version-string --case underscore --name "$*" +go run github.com/vektra/mockery/v2@latest --disable-version-string --case underscore --name "$*" diff --git a/scripts/proto-gen.sh b/scripts/proto-gen.sh deleted file mode 100755 index 981cec74027..00000000000 --- a/scripts/proto-gen.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh -# -# Update the generated code for protocol buffers in the CometBFT repository. -# This must be run from inside a CometBFT working directory. -# -set -euo pipefail - -# Work from the root of the repository. -cd "$(git rev-parse --show-toplevel)" - -# Run inside Docker to install the correct versions of the required tools -# without polluting the local system. -docker run --rm -i -v "$PWD":/w --workdir=/w golang:1.20-alpine sh <<"EOF" -apk add git make - -go install github.com/bufbuild/buf/cmd/buf -go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest -make proto-gen -EOF diff --git a/scripts/qa/reporting/README.md b/scripts/qa/reporting/README.md index d8598e22145..3d63bfb9f1a 100644 --- a/scripts/qa/reporting/README.md +++ b/scripts/qa/reporting/README.md @@ -1,6 +1,6 @@ # Reporting Scripts -This directory contains some utility scripts used in the reporting/QA. +This directory contains some utility scripts used for generating reports of QA processes. * [`latency_throughput.py`](./latency_throughput.py) is a Python script that uses [matplotlib] to plot a graph of transaction latency vs throughput rate based on @@ -17,8 +17,8 @@ This directory contains some utility scripts used in the reporting/QA. ## Setup -Execute the following within this directory (the same directory as the -`latency_throughput.py` file). +Before running the Python scripts, execute the following within this directory (the same directory +as the `latency_throughput.py` file). ```bash # Create a virtual environment into which to install your dependencies @@ -32,66 +32,48 @@ pip install -r requirements.txt ``` ## Latency vs Throughput Plotting -To show the instructions and parameter options, execute +To show the instructions and parameter options, execute ```bash ./latency_throughput.py --help ``` +Be sure that the virtual environment is enabled before running the script. -Example: - +For example, the following command will generate a PNG file called `cmt_v1.png` in the current +directory based on the `raw.csv` file generated by the reporting tool. The `-t` flag overrides the +default title at the top of the plot. ```bash -# Do the following while ensuring that the virtual environment is activated (see -# the Setup steps). -# -# This will generate a plot in a PNG file called 'tm034.png' in the current -# directory based on the reporting tool CSV output in the "raw.csv" file. 
The -# '-t' flag overrides the default title at the top of the plot. - -./latency_throughput.py \ - -t 'CometBFT v0.34.x Latency vs Throughput' \ - ./tm034.png \ - /path/to/csv/files/raw.csv +./latency_throughput.py -t 'CometBFT v1.x Latency vs Throughput' ./cmt_v1.png /path/to/results/raw.csv ``` ## Latency vs Throughput Plotting (version 2) -Example: +The `latency_plotter.py` script generates a series of plots in the `imgs` folder. +Plots include combined experiment plots and experiments as subplots. +- `all_experiments`: plots of all experiments as individual subplots. +- `all_configs`: plots of all experiments, grouped by configuration (r,c). +- `cXrY.png`: Independent plot of experiments of configuration (c=X,r=Y) as different curves. +- `cXrY_merged.png`: Independent plot of experiments of configuration (c=X,r=Y) combined as a single curve. +- `e_ID.png`: Independent plot of just the experiment with id=ID as a single curve. + +Example: ```bash -# Do the following while ensuring that the virtual environment is activated (see -# the Setup steps). -# -# This will generate a series of plots in the `imgs` folder. -# Plots include combined experiment plots and experiments as subplots. -# - all_experiments - plots of all experiments as individual subplots. -# - all_configs - plots of all experiments, grouped by configuration (r,c). -# cXrY.png - Independent plot of experiments of configuration (c=X,r=Y) as different curves. -# cXrY_merged.png - Independent plot of experiments of configuration (c=X,r=Y) combined as single curve. -# e_ID.png - independent plot with just experiment with id ID as a single curve. - -mkdir -p imgs -python3 latency_plotter.py /path/to/csv/files/raw.csv +./latency_plotter.py v1.0.0-alpha.2 /path/to/results/raw.csv ``` +Be sure that the virtual environment is enabled before running the script. ## Prometheus metrics -1. Ensure that Prometheus is running locally and listening on port 9090. -2. Tweak the script to your needs - 1. Adjust the time window - 2. Select the right fork - 3. Select the right test case - 4. Tweak/add/remove metrics -3. Run the script as follows - ```bash - # Do the following while ensuring that the virtual environment is activated (see - # the Setup steps). - # - # This will generate a series of plots in the folder `imgs` of the current folder. - - mkdir imgs - python3 prometheus_plotter.py - ``` -4. Plots are saved in the `imgs` folder. +The `prometheus_plotter.py` script generates a series of plots in the `imgs` folder of the current directory. + +Before running the script, check that a Prometheus server is running and listening on `localhost:9090`. This is the default URL, which is hardcoded in the script. 
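As a quick way to verify that precondition (a sketch, not part of the change above; it assumes a stock Prometheus server exposing its standard `/-/healthy` endpoint), you can probe the default URL with the same `requests` library the script already imports:

```python
import requests

# Probe the default Prometheus URL hardcoded in prometheus_plotter.py.
# A healthy server answers HTTP 200 on its standard /-/healthy endpoint.
resp = requests.get('http://localhost:9090/-/healthy', timeout=5)
print(resp.status_code, resp.text.strip())
```

If this does not print a 200 status, start Prometheus (or adjust `PROMETHEUS_URL` in the script) before generating plots.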
+ +Run the script from the virtual environment as follows: ```bash +./prometheus_plotter.py release_name start_time window_size test_case ``` + +For details on the parameters and an example invocation, run `python3 prometheus_plotter.py` without arguments. [matplotlib]: https://matplotlib.org/ [pandas]: https://pandas.pydata.org diff --git a/scripts/qa/reporting/latency_plotter.py b/scripts/qa/reporting/latency_plotter.py old mode 100644 new mode 100755 index 3b42eedff89..c754354e38c --- a/scripts/qa/reporting/latency_plotter.py +++ b/scripts/qa/reporting/latency_plotter.py @@ -1,150 +1,221 @@ +#!/usr/bin/env python3 + import sys import os -from datetime import datetime import pytz +from datetime import datetime import matplotlib as mpl import matplotlib.pyplot as plt - import numpy as np import pandas as pd -release = 'v0.38.0-alpha2' +IMAGES_DIR = 'imgs' +#fig_title = 'Vote Extensions Testnet' +#fig_title = 'Rotating Nodes Test' +fig_title = 'Experiment title goes here' + +def usage(): + print(f"Usage: {sys.argv[0]} release_name raw_csv_path") + exit(1) + #FIXME: figure out in which timezone prometheus was running to adjust to UTC. -tz = pytz.timezone('America/Sao_Paulo') - -if len(sys.argv) != 2: - print('Pls provide the raw.csv file') - exit() -else: - csvpath = sys.argv[1] - if not os.path.exists(csvpath): - print('Pls provide a valid the raw.csv file') - exit() - - print(csvpath) - -path = os.path.join('imgs') - -#Load the CSV -csv = pd.read_csv(csvpath) - -#Transform ns to s in the latency/duration -csv['duration_ns'] = csv['duration_ns'].apply(lambda x: x/10**9) -csv['block_time'] = csv['block_time'].apply(lambda x: x/10**9) - -#Group by experiment -groups = csv.groupby(['experiment_id']) - -#number of rows and columns in the graph -ncols = 2 if groups.ngroups > 1 else 1 -nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 -fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=False) -fig.tight_layout(pad=5.0) - - -#Plot experiments as subplots -for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): - group = groups.get_group(key) - ax.set_ylabel('latency (s)') - ax.set_xlabel('experiment time (s)') - ax.set_title(key) - ax.grid(True) - - #Group by connection number and transaction rate - paramGroups = group.groupby(['connections','rate']) - for (subKey) in paramGroups.groups.keys(): - subGroup = paramGroups.get_group(subKey) - startTime = subGroup.block_time.min() - endTime = subGroup.block_time.max() - localStartTime = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) - localEndTime = tz.localize(datetime.fromtimestamp(endTime)).astimezone(pytz.utc) - subGroup.block_time.apply(lambda x: x - startTime ) - mean = subGroup.duration_ns.mean() - print('exp', key ,'start', localEndTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'end', localStartTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'duration', endTime - startTime, "mean", mean) - - (con,rate) = subKey - label = 'c='+str(con) + ' r='+ str(rate) - ax.axhline(y = mean, color = 'r', linestyle = '-', label="mean") - ax.scatter(subGroup.block_time, subGroup.duration_ns, label=label) - ax.legend() - - #Save individual axes - extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - fig.savefig(os.path.join(path,'e_'+key + '.png'), bbox_inches=extent.expanded(1.2, 1.3)) - -fig.suptitle('Vote Extensions Testnet - ' + release) - -# Save the figure with subplots -fig.savefig(os.path.join(path,'all_experiments.png')) - - - -#Group by configuration -groups = csv.groupby(['connections','rate']) - 
-#number of rows and columns in the graph -ncols = 2 if groups.ngroups > 1 else 1 -nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 -fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=True) -fig.tight_layout(pad=5.0) - -#Plot configurations as subplots -for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): - group = groups.get_group(key) - ax.set_ylabel('latency (s)') - ax.set_xlabel('experiment time (s)') - ax.grid(True) - (con,rate) = key - label = 'c='+str(con) + ' r='+ str(rate) - ax.set_title(label) - - #Group by experiment - paramGroups = group.groupby(['experiment_id']) - for (subKey) in paramGroups.groups.keys(): - subGroup = paramGroups.get_group(subKey) - startTime = subGroup.block_time.min() - subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) - ax.scatter(subGroupMod, subGroup.duration_ns, label=label) - #ax.legend() +tz = pytz.timezone('UTC') + + +def plot_all_experiments(release, csv): + # Group by experiment + groups = csv.groupby(['experiment_id']) + + # number of rows and columns in the graph + ncols = 2 if groups.ngroups > 1 else 1 + nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 + fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=False) + fig.tight_layout(pad=5.0) + + # Plot experiments as subplots + for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): + group = groups.get_group(key) + ax.set_ylabel('latency (s)') + ax.set_xlabel('experiment time (s)') + ax.set_title(key) + ax.grid(True) + + # Group by connection number and transaction rate + paramGroups = group.groupby(['connections','rate']) + for (subKey) in paramGroups.groups.keys(): + subGroup = paramGroups.get_group(subKey) + startTime = subGroup.block_time.min() + endTime = subGroup.block_time.max() + subGroup.block_time = subGroup.block_time.apply(lambda x: x - startTime ) + mean = subGroup.duration_ns.mean() + localStartTime = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) + localEndTime = tz.localize(datetime.fromtimestamp(endTime)).astimezone(pytz.utc) + print('experiment', key ,'start', localStartTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'end', localEndTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'duration', endTime - startTime, "mean", mean) + (con,rate) = subKey + label = 'c='+str(con) + ' r='+ str(rate) + ax.axhline(y = mean, color = 'r', linestyle = '-', label="mean") + ax.scatter(subGroup.block_time, subGroup.duration_ns, label=label) + ax.legend() + + # Save individual axes + extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) + img_path = os.path.join(IMAGES_DIR, f'e_{key}.png') + fig.savefig(img_path, bbox_inches=extent.expanded(1.4, 1.5)) + + fig.suptitle(fig_title + ' - ' + release) + + # Save the figure with subplots + fig.savefig(os.path.join(IMAGES_DIR, 'all_experiments.png')) + +def plot_all_experiments_lane(release, csv): + # Group by experiment + groups = csv.groupby(['experiment_id']) + + # number of rows and columns in the graph + ncols = 2 if groups.ngroups > 1 else 1 + nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 + fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=False) + fig.tight_layout(pad=5.0) + # Plot experiments as subplots + for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): + group = groups.get_group(key) + ax.set_ylabel('latency (s)') + 
ax.set_xlabel('experiment timestamp (s)') + ax.set_title(key) + ax.grid(True) + + + # Group by connection number and transaction rate and lane + paramGroups = group.groupby(['connections','rate', 'lane']) + + for (subKey) in paramGroups.groups.keys(): + subGroup = paramGroups.get_group(subKey) + startTime = subGroup.block_time.min() + endTime = subGroup.block_time.max() + subGroup.block_time = subGroup.block_time.apply(lambda x: x - startTime ) + mean = subGroup.duration_ns.mean() + localStartTime = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) + localEndTime = tz.localize(datetime.fromtimestamp(endTime)).astimezone(pytz.utc) + print('experiment', key ,'start', localStartTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'end', localEndTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'duration', endTime - startTime, "mean", mean) + + (con,rate,lane) = subKey + label = 'c='+str(con) + ' r='+ str(rate) +' l='+ str(lane) + ax.axhline(y = mean, color='r', linestyle = '-', label="mean_l"+str(lane)) + ax.scatter(subGroup.block_time, subGroup.duration_ns, label=label) + ax.legend() + + # Save individual axes + extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) + img_path = os.path.join(IMAGES_DIR, f'e_{key}_lane.png') + fig.savefig(img_path, bbox_inches=extent.expanded(1.4, 1.5)) + + fig.suptitle(fig_title + ' - ' + release) + + # Save the figure with subplots + fig.savefig(os.path.join(IMAGES_DIR, 'all_experiments_lane.png')) + + + +def plot_all_configs(release, csv): + # Group by configuration + groups = csv.groupby(['connections','rate', 'lane']) + # number of rows and columns in the graph + ncols = 2 if groups.ngroups > 1 else 1 + nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 + fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=True) + fig.tight_layout(pad=5.0) + + # Plot configurations as subplots + for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): + group = groups.get_group(key) + ax.set_ylabel('latency (s)') + ax.set_xlabel('experiment time (s)') + ax.grid(True) + (con,rate,lane) = key + label = 'c='+str(con) + ' r='+ str(rate)+ ' l='+ str(lane) + ax.set_title(label) - #Save individual axes - extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - fig.savefig(os.path.join(path,'c'+str(con) + 'r'+ str(rate) + '.png'), bbox_inches=extent.expanded(1.2, 1.3)) + + # Group by experiment + paramGroups = group.groupby(['experiment_id']) + for (subKey) in paramGroups.groups.keys(): + subGroup = paramGroups.get_group((subKey)) + startTime = subGroup.block_time.min() + subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) + ax.scatter(subGroupMod, subGroup.duration_ns, label=label) + #ax.legend() + -fig.suptitle('Vote Extensions Testnet - ' + release) + #Save individual axes + extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) + img_path = os.path.join(IMAGES_DIR, f'c{con}r{rate}l{lane}.png') + fig.savefig(img_path, bbox_inches=extent.expanded(1.4, 1.5)) + + fig.suptitle(fig_title + ' - ' + release) + + # Save the figure with subplots + fig.savefig(os.path.join(IMAGES_DIR, 'all_configs.png')) + + +def plot_merged(release, csv): + # Group by configuration + groups = csv.groupby(['connections','rate','lane']) + + # number of rows and columns in the graph + ncols = 2 if groups.ngroups > 1 else 1 + nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 + fig, axes = plt.subplots(nrows=nrows, ncols=ncols, 
figsize=(6*ncols, 4*nrows), sharey=True) + fig.tight_layout(pad=5.0) + + # Plot configurations as subplots + for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): + group = groups.get_group(key) + ax.set_ylabel('latency (s)') + ax.set_xlabel('experiment time (s)') + ax.grid(True) + (con,rate,lane) = key + label = 'c='+str(con) + ' r='+ str(rate) + ' l='+ str(lane) + ax.set_title(label) + + # Group by experiment, but merge them as a single experiment + paramGroups = group.groupby(['experiment_id']) + for (subKey) in paramGroups.groups.keys(): + subGroup = paramGroups.get_group((subKey)) + startTime = subGroup.block_time.min() + subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) + ax.scatter(subGroupMod, subGroup.duration_ns, marker='o',c='#1f77b4') + + # Save individual axes + extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) + (con, rate, lane) = key + img_path = os.path.join(IMAGES_DIR, f'c{con}r{rate}l{lane}_merged.png') + fig.savefig(img_path, bbox_inches=extent) + plt.show() -# Save the figure with subplots -fig.savefig(os.path.join(path,'all_configs.png')) +if __name__ == "__main__": + if len(sys.argv) < 3 or not (sys.argv[1] and sys.argv[2]): + usage() + release = sys.argv[1] + csv_path = sys.argv[2] -fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=True) -fig.tight_layout(pad=5.0) + if not os.path.exists(csv_path): + print('Please provide a valid raw.csv file') + exit() + csv = pd.read_csv(csv_path) -#Plot configurations as subplots -for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): - group = groups.get_group(key) - ax.set_ylabel('latency (s)') - ax.set_xlabel('experiment time (s)') - ax.grid(True) - (con,rate) = key - label = 'c='+str(con) + ' r='+ str(rate) - ax.set_title(label) + # Transform ns to s in the latency/duration + csv['duration_ns'] = csv['duration_ns'].apply(lambda x: x/10**9) + csv['block_time'] = csv['block_time'].apply(lambda x: x/10**9) - #Group by experiment, but merge them as a single experiment - paramGroups = group.groupby(['experiment_id']) - for (subKey) in paramGroups.groups.keys(): - subGroup = paramGroups.get_group(subKey) - startTime = subGroup.block_time.min() - subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) - ax.scatter(subGroupMod, subGroup.duration_ns, marker='o',c='#1f77b4') - - #Save individual axes - extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - (con,rate) = key - fig.savefig(os.path.join(path,'c'+str(con) + 'r'+ str(rate) + '_merged.png'), bbox_inches=extent) + if not os.path.exists(IMAGES_DIR): + os.makedirs(IMAGES_DIR) -plt.show() + plot_all_experiments(release, csv) + plot_all_experiments_lane(release, csv) + plot_all_configs(release, csv) + plot_merged(release, csv) diff --git a/scripts/qa/reporting/latency_throughput.py b/scripts/qa/reporting/latency_throughput.py index adaa4b76ca2..5d07e5bb1ad 100755 --- a/scripts/qa/reporting/latency_throughput.py +++ b/scripts/qa/reporting/latency_throughput.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 + """ A simple script to parse the CSV output from the loadtime reporting tool (see https://github.com/cometbft/cometbft/tree/main/test/loadtime/cmd/report). 
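For reference, all of the plotters in this directory consume the `raw.csv` produced by the loadtime reporting tool. As a rough sketch of that input (the column names are taken from the group-by and unit-conversion code above; the values here are invented), a dry-run file could be produced like this:

```python
import pandas as pd

# Hypothetical two-row sample with the columns the plotting scripts read:
# experiment_id, connections, rate, and lane drive the group-bys, while
# block_time and duration_ns are nanosecond values divided by 10**9.
sample = pd.DataFrame({
    'experiment_id': ['exp-1', 'exp-1'],
    'block_time':    [1.7e18, 1.7e18 + 2e9],  # ns since the epoch
    'duration_ns':   [2.5e8, 3.1e8],          # per-tx latency in ns
    'connections':   [1, 1],
    'rate':          [200, 200],
    'lane':          [0, 1],
})
sample.to_csv('raw.csv', index=False)
```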
@@ -15,37 +16,12 @@ import matplotlib.pyplot as plt import numpy as np -DEFAULT_TITLE = "CometBFT latency vs throughput" - - -def main(): - parser = argparse.ArgumentParser( - description="Renders a latency vs throughput diagram " - "for a set of transactions provided by the loadtime reporting tool", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-t', - '--title', - default=DEFAULT_TITLE, - help='Plot title') - parser.add_argument('output_image', - help='Output image file (in PNG format)') - parser.add_argument( - 'input_csv_file', - nargs='+', - help="CSV input file from which to read transaction data " - "- must have been generated by the loadtime reporting tool") - args = parser.parse_args() - logging.basicConfig(format='%(levelname)s\t%(message)s', - stream=sys.stdout, - level=logging.INFO) - plot_latency_vs_throughput(args.input_csv_file, - args.output_image, - title=args.title) +DEFAULT_TITLE = "CometBFT latency vs throughput" -def plot_latency_vs_throughput(input_files, output_image, title=DEFAULT_TITLE): - avg_latencies, throughput_rates = process_input_files(input_files, ) +def plot_latency_vs_throughput(input_files, output_image, output_image_lane, title=DEFAULT_TITLE): + avg_latencies, throughput_rates, avg_latencies_lane, throughput_rates_lane = process_input_files(input_files, ) fig, ax = plt.subplots() @@ -63,11 +39,30 @@ def plot_latency_vs_throughput(input_files, output_image, title=DEFAULT_TITLE): plt.legend(loc='upper left') plt.savefig(output_image) + fig, ax = plt.subplots() + + lanes = sorted(avg_latencies_lane.keys()) + for l in lanes: + tr = np.array(throughput_rates_lane[l]) + al = np.array(avg_latencies_lane[l]) + label = 'lane %d' % (l) + ax.plot(tr, al, 'o-', label=label) + + ax.set_title(title) + ax.set_xlabel('Throughput rate (tx/s)') + ax.set_ylabel('Average transaction latency (s)') + + plt.legend(loc='upper left') + plt.savefig(output_image_lane) + + + def process_input_files(input_files): # Experimental data from which we will derive the latency vs throughput # statistics experiments = {} + experiments_lane = {} for input_file in input_files: logging.info('Reading %s...' 
% input_file) @@ -76,8 +71,60 @@ reader = csv.DictReader(inf) for tx in reader: experiments = process_tx(experiments, tx) + experiments_lane = process_tx_lane(experiments_lane, tx) + - return compute_experiments_stats(experiments) + avg_latencies, throughput_rates = compute_experiments_stats(experiments) + avg_latencies_lane, throughput_rates_lane = compute_lane_stats(experiments_lane) + + return avg_latencies, throughput_rates, avg_latencies_lane, throughput_rates_lane + + +def process_tx_lane(experiments_lane, tx): + exp_id = tx['experiment_id'] + # Block time is nanoseconds from the epoch - convert to seconds + block_time = float(tx['block_time']) / (10**9) + # Duration is also in nanoseconds - convert to seconds + duration = float(tx['duration_ns']) / (10**9) + connections = int(tx['connections']) + rate = int(tx['rate']) + lane = int(tx['lane']) + + if (exp_id,lane) not in experiments_lane: + experiments_lane[(exp_id,lane)] = { + 'connections': connections, + 'rate': rate, + 'block_time_min': block_time, + # We keep track of the latency associated with the minimum block + # time to estimate the start time of the experiment + 'block_time_min_duration': duration, + 'block_time_max': block_time, + 'total_latencies': duration, + 'tx_count': 1, + } + logging.info('Found experiment %s, lane=%d with rate=%d, connections=%d' % + (exp_id, lane, rate, connections)) + else: + # Validation + for field in ['connections', 'rate']: + val = int(tx[field]) + if val != experiments_lane[(exp_id,lane)][field]: + raise Exception( + 'Found multiple distinct values for field ' + '"%s" for the same experiment (%s): %d and %d' % + (field, exp_id, val, experiments_lane[(exp_id,lane)][field])) + + if block_time < experiments_lane[(exp_id,lane)]['block_time_min']: + experiments_lane[(exp_id,lane)]['block_time_min'] = block_time + experiments_lane[(exp_id,lane)]['block_time_min_duration'] = duration + if block_time > experiments_lane[(exp_id,lane)]['block_time_max']: + experiments_lane[(exp_id,lane)]['block_time_max'] = block_time + + experiments_lane[(exp_id,lane)]['total_latencies'] += duration + experiments_lane[(exp_id,lane)]['tx_count'] += 1 + + return experiments_lane def process_tx(experiments, tx): @@ -88,6 +135,7 @@ duration = float(tx['duration_ns']) / (10**9) connections = int(tx['connections']) rate = int(tx['rate']) + lane = int(tx['lane']) if exp_id not in experiments: experiments[exp_id] = { @@ -125,11 +173,53 @@ return experiments +def compute_lane_stats(experiments): + """Compute average latency vs throughput rate statistics from the given + experiments""" + statsLane = {} + # Compute average latency and throughput rate for each experiment + for (exp_id,lane), exp in experiments.items(): + conns = exp['connections'] + + avg_latency = exp['total_latencies'] / exp['tx_count'] + exp_start_time = exp['block_time_min'] - exp['block_time_min_duration'] + exp_duration = exp['block_time_max'] - exp_start_time + throughput_rate = exp['tx_count'] / exp_duration + if lane not in statsLane: + statsLane[lane] = [] + statsLane[lane].append({ + 'avg_latency': avg_latency, + 'throughput_rate': throughput_rate, + }) + + # Sort stats for each lane in an experiment in order of increasing + # throughput rate, and then extract average latencies and throughput rates + # as separate data series. 
+ + lanesSorted = sorted(statsLane.keys()) + + avg_latencies_lane = {} + throughput_rates_lane = {} + + for l in lanesSorted: + statsLane[l] = sorted(statsLane[l], key=lambda s: s['throughput_rate']) + avg_latencies_lane[l] = [] + throughput_rates_lane[l] = [] + for s in statsLane[l]: + avg_latencies_lane[l].append(s['avg_latency']) + throughput_rates_lane[l].append(s['throughput_rate']) + logging.info('For lane %d: ' + 'throughput rate = %.6f tx/s\t' + 'average latency = %.6fs' % + (l, s['throughput_rate'], s['avg_latency'])) + + return (avg_latencies_lane, throughput_rates_lane) + + def compute_experiments_stats(experiments): """Compute average latency vs throughput rate statistics from the given experiments""" stats = {} - # Compute average latency and throughput rate for each experiment for exp_id, exp in experiments.items(): conns = exp['connections'] @@ -167,4 +257,27 @@ def compute_experiments_stats(experiments): if __name__ == "__main__": - main() + parser = argparse.ArgumentParser( + description="Renders a latency vs throughput diagram " + "for a set of transactions provided by the loadtime reporting tool", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-t', + '--title', + default=DEFAULT_TITLE, + help='Plot title') + parser.add_argument('output_image', + help='Output image file (in PNG format)') + parser.add_argument('output_image_lane', + help='Output image file lane (in PNG format)') + parser.add_argument( + 'input_csv_file', + nargs='+', + help="CSV input file from which to read transaction data " + "- must have been generated by the loadtime reporting tool") + args = parser.parse_args() + + logging.basicConfig(format='%(levelname)s\t%(message)s', + stream=sys.stdout, + level=logging.INFO) + + plot_latency_vs_throughput(args.input_csv_file, args.output_image, args.output_image_lane, title=args.title) diff --git a/scripts/qa/reporting/prometheus_plotter.py b/scripts/qa/reporting/prometheus_plotter.py old mode 100644 new mode 100755 index fbc62050f89..a83fd9211de --- a/scripts/qa/reporting/prometheus_plotter.py +++ b/scripts/qa/reporting/prometheus_plotter.py @@ -1,151 +1,153 @@ -# pip install numpy pandas matplotlib requests +#!/usr/bin/env python3 -import sys +# Requirements: +# pip install requests matplotlib pandas prometheus-pandas import os +import requests +import sys -import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.dates as md - import numpy as np import pandas as pd -import requests from urllib.parse import urljoin - -from prometheus_pandas import query - -#release = 'v0.37.0-alpha.2' -release = 'v0.38.0-alpha.2' -path = os.path.join('imgs') -prometheus = query.Prometheus('http://localhost:9090') - -# Time window -#window_size = dict(seconds=150) #CMT 0.37.x-alpha3 -#window_size = dict(seconds=126) #TM v0.37 (200 nodes) baseline -#window_size = dict(hours=1, minutes=28, seconds=25) #TM v0.37.0-alpha.2 (rotating) -#window_size = dict(seconds=130) #homogeneous -#window_size = dict(seconds=127) #baseline -#window_size = dict(seconds=115) #CMT v0.38.0-alpha.2 (200 nodes) -#window_size = dict(hours=1, minutes=46) #CMT v0.38.0-alpha.2 (rotating) -window_size = dict(seconds=150) #CMT v0.38.0-alpha.2 (ve baseline) - -ext_window_size = dict(seconds=200) - -# Use the time provided by latency_plotter for the selected experiment. 
-#left_end = '2023-02-08T13:12:20Z' #cmt2 tm1 -#left_end = '2023-02-08T10:31:50Z' #cmt1 tm2 -#left_end = '2023-02-14T15:18:00Z' #cmt1 tm1 -#left_end = '2023-02-07T18:07:00Z' #homogeneous -#left_end = '2022-10-13T19:41:23Z' #baseline -#left_end = '2023-02-22T18:56:29Z' #CMT v0.37.x-alpha3 -#left_end = '2022-10-13T15:57:50Z' #TM v0.37 (200 nodes) baseline -#left_end = '2023-03-20T19:45:35Z' #feature/abci++vef merged with main (7d8c9d426) -#left_end = '2023-05-22T09:39:20Z' #CMT v0.38.0-alpha.2 - 200 nodes -#left_end = '2022-10-10T15:47:15Z' #TM v0.37.0-alpha.2 - rotating -#left_end = '2023-05-23T08:09:50Z' #CMT v0.38.0-alpha.2 - rotating - -#left_end = '2023-05-25T18:18:04Z' #CMT v0.38.0-alpha.2 - ve baseline -#left_end = '2023-05-30T19:05:32Z' #CMT v0.38.0-alpha.2 - ve 2k -left_end = '2023-05-30T20:44:46Z' #CMT v0.38.0-alpha.2 - ve 4k -#left_end = '2023-05-25T19:42:08Z' #CMT v0.38.0-alpha.2 - ve 8k -#left_end = '2023-05-26T00:28:12Z' #CMT v0.38.0-alpha.2 - ve 16k -#left_end = '2023-05-26T02:12:27Z' #CMT v0.38.0-alpha.2 - ve 32k - -useManualrightEnd = False -if useManualrightEnd: - #right_end = '2023-05-25T18:54:04Z' #CMT v0.38.0-alpha.2 - ve baseline - #right_end = '2023-05-30T19:40:41Z' #CMT v0.38.0-alpha.2 - ve 2k - right_end = '2023-05-30T21:15:37Z' #CMT v0.38.0-alpha.2 - ve 4k - #right_end = '2023-05-25T20:16:00Z' #CMT v0.38.0-alpha.2 - ve 8k - #right_end = '2023-05-26T01:01:57Z' #CMT v0.38.0-alpha.2 - ve 16k - #right_end = '2023-05-26T02:46:19Z' #CMT v0.38.0-alpha.2 - ve 32k - time_window = (left_end, right_end) -else: - right_end = pd.to_datetime(left_end) + pd.Timedelta(**window_size) - time_window = (left_end, right_end.strftime('%Y-%m-%dT%H:%M:%SZ')) - -ext_right_end = pd.to_datetime(left_end) + pd.Timedelta(**ext_window_size) -ext_time_window = (left_end, ext_right_end.strftime('%Y-%m-%dT%H:%M:%SZ')) - - -fork='cometbft' -#fork='tendermint' - -# Do prometheus queries, depending on the test case -queries200Nodes = [ - (( fork + '_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, kind='area',stacked=True), False), - (( fork + '_p2p_peers', time_window[0], time_window[1], '1s'), 'peers', dict(ylabel='# Peers', xlabel='time (s)', title='Peers', legend=False, figsize=(10,6), grid=True), True), - (( 'avg(' + fork + '_mempool_size)', time_window[0], time_window[1], '1s'), 'avg_mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Average Mempool Size', legend=False, figsize=(10,6), grid=True), False), - #(( 'cometbft_consensus_height', time_window[0], time_window[1], '1s'), 'blocks_regular', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), - (( fork + '_consensus_rounds', time_window[0], time_window[1], '1s'), 'rounds', dict(ylabel='# Rounds', xlabel='time (s)', title='Rounds per block', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'block_rate_regular', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), True), - #(( 'avg(rate(cometbft_consensus_height[20s])*60)', time_window[0], time_window[1], '1s'), 'block_rate_avg_reg', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), False), - #(( 'cometbft_consensus_total_txs', time_window[0], time_window[1], '1s'), 
'total_txs_regular', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'total_txs_rate_regular', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), True), - #(( 'avg(rate(cometbft_consensus_total_txs[20s])*60)', time_window[0], time_window[1], '1s'), 'total_txs_rate_avg_reg', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), False), - (( 'process_resident_memory_bytes', time_window[0], time_window[1], '1s'), 'memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Memory usage', legend=False, figsize=(10,6), grid=True), False), - (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'avg_memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Average Memory usage', legend=False, figsize=(10,6), grid=True), False), - (( 'node_load1', time_window[0], time_window[1], '1s'), 'cpu', dict(ylabel='Load', xlabel='time (s)', title='Node load', legend=False, figsize=(10,6), grid=True), False), - (( 'avg(node_load1)', time_window[0], time_window[1], '1s'), 'avg_cpu', dict(ylabel='Load', xlabel='time (s)', title='Average Node load', legend=False, figsize=(10,6), grid=True), False), - #extended window metrics - (( fork + '_consensus_height', ext_time_window[0], ext_time_window[1], '1s'), 'blocks', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_height[20s])*60', ext_time_window[0], ext_time_window[1], '1s'), 'block_rate', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), True), - (( fork + '_consensus_total_txs', ext_time_window[0], ext_time_window[1], '1s'), 'total_txs', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_total_txs[20s])*60', ext_time_window[0], ext_time_window[1], '1s'), 'total_txs_rate', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), True), -] - -queriesRotating = [ - (( 'rate(' + fork + '_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'rotating_block_rate', dict(ylabel='blocks/min', xlabel='time', title='Rate of Block Creation', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'rotating_txs_rate', dict(ylabel='TXs/min', xlabel='time', title='Rate of Transaction processing', legend=False, figsize=(10,6), grid=True), False), - (( fork + '_consensus_height{job=~"ephemeral.*"} or ' + fork + '_blocksync_latest_block_height{job=~"ephemeral.*"}', - time_window[0], time_window[1], '1s'), 'rotating_eph_heights', dict(ylabel='height', xlabel='time', title='Heights of Ephemeral Nodes', legend=False, figsize=(10,6), grid=True), False), - (( fork + '_p2p_peers', time_window[0], time_window[1], '1s'), 'rotating_peers', dict(ylabel='# peers', xlabel='time', title='Peers', legend=False, figsize=(10,6), grid=True), False), - (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'rotating_avg_memory', dict(ylabel='memory (bytes)', xlabel='time', title='Average Memory 
Usage', legend=False, figsize=(10,6), grid=True), False), - (( 'node_load1', time_window[0], time_window[1], '1s'), 'rotating_cpu', dict(ylabel='load', xlabel='time', title='Node Load', legend=False, figsize=(10,6), grid=True), False), -] - -queriesVExtension= [ - (( fork + '_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, kind='area',stacked=True), False), - (( fork + '_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size_not_stacked', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, stacked=False), False), - (( fork + '_p2p_peers', time_window[0], time_window[1], '1s'), 'peers', dict(ylabel='# Peers', xlabel='time (s)', title='Peers', legend=False, figsize=(10,6), grid=True), True), - (( 'avg(' + fork + '_mempool_size)', time_window[0], time_window[1], '1s'), 'avg_mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Average Mempool Size', legend=False, figsize=(10,6), grid=True), False), - (( fork + '_consensus_rounds', time_window[0], time_window[1], '1s'), 'rounds', dict(ylabel='# Rounds', xlabel='time (s)', title='Rounds per block', legend=False, figsize=(10,6), grid=True), False), - (( 'process_resident_memory_bytes', time_window[0], time_window[1], '1s'), 'memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Memory usage', legend=False, figsize=(10,6), grid=True), False), - (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'avg_memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Average Memory usage', legend=False, figsize=(10,6), grid=True), False), - (( 'node_load1', time_window[0], time_window[1], '1s'), 'cpu', dict(ylabel='Load', xlabel='time (s)', title='Node load', legend=False, figsize=(10,6), grid=True), False), - (( 'avg(node_load1)', time_window[0], time_window[1], '1s'), 'avg_cpu', dict(ylabel='Load', xlabel='time (s)', title='Average Node load', legend=False, figsize=(10,6), grid=True), False), - (( fork + '_consensus_height', time_window[0], time_window[1], '1s'), 'blocks', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'block_rate', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), True), - (( fork + '_consensus_total_txs', time_window[0], time_window[1], '1s'), 'total_txs', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), - (( 'rate(' + fork + '_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'total_txs_rate', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), True), -] - -#queries = queries200Nodes -#queries = queriesRotating -queries = queriesVExtension - - -for (query, file_name, pandas_params, plot_average) in queries: - print(query) - - data_frame = prometheus.query_range(*query) - #Tweak the x ticks - data_frame = data_frame.set_index(md.date2num(data_frame.index)) - - - pandas_params["title"] += " - " + release - ax = data_frame.plot(**pandas_params) - if plot_average: - average = data_frame.mean(axis=1) - data_frame['__average__'] = average - pandas_params['lw'] = 8 - pandas_params['style'] = ['--'] - pandas_params['color'] 
= ['red'] - ax = data_frame['__average__'].plot(**pandas_params) - - ax.xaxis.set_major_formatter(md.DateFormatter('%H:%M:%S')) - plt.savefig(os.path.join(path, file_name + '.png')) - plt.plot() - -plt.show() +from prometheus_pandas import query as prometheus_query + + +PROMETHEUS_URL = 'http://localhost:9090' +IMAGES_DIR = 'imgs' +TEST_CASES = ['200_nodes', 'rotating', 'vote_extensions'] + + +def usage(): + print("Usage:") + print(f"\t{sys.argv[0]} release_name start_time window_size test_case") + print("where:") + print(f"- start_time is a UTC time in '%Y-%m-%dT%H:%M:%SZ' format") + print(f"- window_size is in seconds") + print(f"- test_case is one of {TEST_CASES}") + print(f"Example: \t{sys.argv[0]} v1.0.0-alpha.2 2024-03-21T08:45:23Z 180 200_nodes") + exit(1) + + +def queries_200_nodes(time_window, ext_time_window): + return [ + (( 'cometbft_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, ylim=(0, 5100), kind='area',stacked=True), False), + (( 'avg(cometbft_mempool_size)', time_window[0], time_window[1], '1s'), 'avg_mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Average Mempool Size', legend=False, figsize=(10,6), grid=True, ylim=(0, 5100)), False), + (( 'max(cometbft_mempool_size)', time_window[0], time_window[1], '1s'), 'mempool_size_max', dict(ylabel='TXs', xlabel='time (s)', title='Maximum Mempool Size', legend=False, figsize=(10,6), grid=True, ylim=(0, 5100)), False), + (( 'cometbft_p2p_peers', time_window[0], time_window[1], '1s'), 'peers', dict(ylabel='# Peers', xlabel='time (s)', title='Peers', legend=False, figsize=(10,6), grid=True, ylim=(0, 150)), True), + #(( 'cometbft_consensus_height', time_window[0], time_window[1], '1s'), 'blocks_regular', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), + (( 'cometbft_consensus_rounds', time_window[0], time_window[1], '1s'), 'rounds', dict(ylabel='# Rounds', xlabel='time (s)', title='Rounds per block', legend=False, figsize=(10,6), grid=True, ylim=(0, 4)), False), + (( 'rate(cometbft_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'block_rate_regular', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True, ylim=(0, 120)), True), + #(( 'avg(rate(cometbft_consensus_height[20s])*60)', time_window[0], time_window[1], '1s'), 'block_rate_avg_reg', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), False), + #(( 'cometbft_consensus_total_txs', time_window[0], time_window[1], '1s'), 'total_txs_regular', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(cometbft_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'total_txs_rate_regular', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True, ylim=(0, 50000)), True), + #(( 'avg(rate(cometbft_consensus_total_txs[20s])*60)', time_window[0], time_window[1], '1s'), 'total_txs_rate_avg_reg', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), False), + (( 'process_resident_memory_bytes', time_window[0], time_window[1], '1s'), 'memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Memory usage', 
legend=False, figsize=(10,6), grid=True, ylim=(0, 2e9)), False), + (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'avg_memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Average Memory usage', legend=False, figsize=(10,6), grid=True, ylim=(0, 2e9)), False), + (( 'node_load1', time_window[0], time_window[1], '1s'), 'cpu', dict(ylabel='Load', xlabel='time (s)', title='Node load', legend=False, figsize=(10,6), grid=True, ylim=(0, 6)), False), + (( 'avg(node_load1)', time_window[0], time_window[1], '1s'), 'avg_cpu', dict(ylabel='Load', xlabel='time (s)', title='Average Node load', legend=False, figsize=(10,6), grid=True, ylim=(0, 6)), False), + (( 'cometbft_consensus_block_size_bytes/1024/1024', time_window[0], time_window[1], '1s'), 'block_size_bytes', dict(ylabel='MiB', xlabel='time (s)', title='Block size (MiB)', legend=False, figsize=(10,6), grid=True, ylim=(0, 4.1)), False), + + # Extended window metrics + (( 'cometbft_consensus_height', ext_time_window[0], ext_time_window[1], '1s'), 'blocks', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(cometbft_consensus_height[20s])*60', ext_time_window[0], ext_time_window[1], '1s'), 'block_rate', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), True), + (( 'cometbft_consensus_total_txs', ext_time_window[0], ext_time_window[1], '1s'), 'total_txs', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(cometbft_consensus_total_txs[20s])*60', ext_time_window[0], ext_time_window[1], '1s'), 'total_txs_rate', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True, ylim=(0, 50000)), True), + ] + + +def queries_rotating(time_window): + return [ + (( 'rate(cometbft_consensus_height[20s])*60<1000>0', time_window[0], time_window[1], '1s'), 'rotating_block_rate', dict(ylabel='blocks/min', xlabel='time', title='Rate of Block Creation', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(cometbft_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'rotating_txs_rate', dict(ylabel='TXs/min', xlabel='time', title='Rate of Transaction processing', legend=False, figsize=(10,6), grid=True), False), + (( 'cometbft_consensus_height{job=~"ephemeral.*"}>cometbft_blocksync_latest_block_height{job=~"ephemeral.*"} or cometbft_blocksync_latest_block_height{job=~"ephemeral.*"}', + time_window[0], time_window[1], '1s'), 'rotating_eph_heights', dict(ylabel='height', xlabel='time', title='Heights of Ephemeral Nodes', legend=False, figsize=(10,6), grid=True), False), + (( 'cometbft_p2p_peers', time_window[0], time_window[1], '1s'), 'rotating_peers', dict(ylabel='# peers', xlabel='time', title='Peers', legend=False, figsize=(10,6), grid=True), False), + (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'rotating_avg_memory', dict(ylabel='memory (bytes)', xlabel='time', title='Average Memory Usage', legend=False, figsize=(10,6), grid=True), False), + (( 'node_load1', time_window[0], time_window[1], '1s'), 'rotating_cpu', dict(ylabel='load', xlabel='time', title='Node Load', legend=False, figsize=(10,6), grid=True), False), + ] + + +def queries_vote_extensions(time_window): + return [ + (( 'cometbft_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size', dict(ylabel='TXs', 
xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, kind='area',stacked=True), False), + (( 'cometbft_mempool_size', time_window[0], time_window[1], '1s'), 'mempool_size_not_stacked', dict(ylabel='TXs', xlabel='time (s)', title='Mempool Size', legend=False, figsize=(10,6), grid=True, stacked=False), False), + (( 'cometbft_p2p_peers', time_window[0], time_window[1], '1s'), 'peers', dict(ylabel='# Peers', xlabel='time (s)', title='Peers', legend=False, figsize=(10,6), grid=True), True), + (( 'avg(cometbft_mempool_size)', time_window[0], time_window[1], '1s'), 'avg_mempool_size', dict(ylabel='TXs', xlabel='time (s)', title='Average Mempool Size', legend=False, figsize=(10,6), grid=True), False), + (( 'cometbft_consensus_rounds', time_window[0], time_window[1], '1s'), 'rounds', dict(ylabel='# Rounds', xlabel='time (s)', title='Rounds per block', legend=False, figsize=(10,6), grid=True), False), + (( 'process_resident_memory_bytes', time_window[0], time_window[1], '1s'), 'memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Memory usage', legend=False, figsize=(10,6), grid=True), False), + (( 'avg(process_resident_memory_bytes)', time_window[0], time_window[1], '1s'), 'avg_memory', dict(ylabel='Memory (bytes)', xlabel='time (s)', title='Average Memory usage', legend=False, figsize=(10,6), grid=True), False), + (( 'node_load1', time_window[0], time_window[1], '1s'), 'cpu', dict(ylabel='Load', xlabel='time (s)', title='Node load', legend=False, figsize=(10,6), grid=True), False), + (( 'avg(node_load1)', time_window[0], time_window[1], '1s'), 'avg_cpu', dict(ylabel='Load', xlabel='time (s)', title='Average Node load', legend=False, figsize=(10,6), grid=True), False), + (( 'cometbft_consensus_height', time_window[0], time_window[1], '1s'), 'blocks', dict(ylabel='# Blocks', xlabel='time (s)', title='Blocks in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(cometbft_consensus_height[20s])*60', time_window[0], time_window[1], '1s'), 'block_rate', dict(ylabel='Blocks/min', xlabel='time (s)', title='Rate of block creation', legend=False, figsize=(10,6), grid=True), True), + (( 'cometbft_consensus_total_txs', time_window[0], time_window[1], '1s'), 'total_txs', dict(ylabel='# TXs', xlabel='time (s)', title='Transactions in time', legend=False, figsize=(10,6), grid=True), False), + (( 'rate(cometbft_consensus_total_txs[20s])*60', time_window[0], time_window[1], '1s'), 'total_txs_rate', dict(ylabel='TXs/min', xlabel='time (s)', title='Rate of transaction processing', legend=False, figsize=(10,6), grid=True), True), + ] + + +def main(release, start_time, window_size, test_case): + prometheus = prometheus_query.Prometheus(PROMETHEUS_URL) + + end_time = pd.to_datetime(start_time) + pd.Timedelta(**dict(seconds=window_size)) + time_window = (start_time, end_time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + ext_end_time = pd.to_datetime(start_time) + pd.Timedelta(**dict(seconds=window_size+50)) + ext_time_window = (start_time, ext_end_time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + # Select queries depending on the test case. 
+ match test_case: + case "200_nodes": + queries = queries_200_nodes(time_window, ext_time_window) + case "rotating": + queries = queries_rotating(time_window) + case "vote_extensions": + queries = queries_vote_extensions(time_window) + case _: + print(f"Error: Unknown test case {test_case}") + return + + imgs_dir = os.path.join(IMAGES_DIR, test_case) + if not os.path.exists(imgs_dir): + os.makedirs(imgs_dir) + + # Query Prometheus and plot images. + for (query, file_name, pandas_params, plot_average) in queries: + print(f"query: {query}") + + df = prometheus.query_range(*query) + #Tweak the x ticks + df = df.set_index(md.date2num(df.index)) + + if df.empty: + print('No data found! Check the timestamps or the query.') + continue + + pandas_params["title"] += " - " + release + ax = df.plot(**pandas_params) + if plot_average: + average = df.mean(axis=1) + df['__average__'] = average + pandas_params['lw'] = 8 + pandas_params['style'] = ['--'] + pandas_params['color'] = ['red'] + ax = df['__average__'].plot(**pandas_params) + + ax.xaxis.set_major_formatter(md.DateFormatter('%H:%M:%S')) + plt.savefig(os.path.join(imgs_dir, file_name + '.png')) + plt.plot() + + plt.show() + + +if __name__ == "__main__": + if len(sys.argv) < 5 or not (sys.argv[1] and sys.argv[2] and sys.argv[3] and sys.argv[4]): + usage() + + release = sys.argv[1] + start_time = sys.argv[2] + window_size = sys.argv[3] + test_case = sys.argv[4] + main(release, start_time, int(window_size), test_case) diff --git a/scripts/qa/reporting/requirements.txt b/scripts/qa/reporting/requirements.txt index d7205cb5be1..ebd949edd31 100644 --- a/scripts/qa/reporting/requirements.txt +++ b/scripts/qa/reporting/requirements.txt @@ -1,11 +1,11 @@ contourpy==1.0.5 cycler==0.11.0 -fonttools==4.37.4 +fonttools==4.43.0 kiwisolver==1.4.4 matplotlib==3.6.3 -numpy==1.24.2 +numpy==1.26.4 packaging==21.3 -Pillow==10.0.1 +Pillow==10.2.0 pyparsing==3.0.9 python-dateutil==2.8.2 six==1.16.0 diff --git a/scripts/txs/random.sh b/scripts/txs/random.sh deleted file mode 100644 index 231fabcfeab..00000000000 --- a/scripts/txs/random.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -u - -function toHex() { - echo -n $1 | hexdump -ve '1/1 "%.2X"' -} - -N=$1 -PORT=$2 - -for i in `seq 1 $N`; do - # store key value pair - KEY=$(head -c 10 /dev/urandom) - VALUE="$i" - echo $(toHex $KEY=$VALUE) - curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=0x$(toHex $KEY=$VALUE) -done - - diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 7c2c3c87c7e..97edd7c28bc 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -8,11 +8,12 @@ package main import ( + "errors" "fmt" "io" "os" - cs "github.com/cometbft/cometbft/consensus" + cs "github.com/cometbft/cometbft/internal/consensus" cmtjson "github.com/cometbft/cometbft/libs/json" ) @@ -31,7 +32,7 @@ func main() { dec := cs.NewWALDecoder(f) for { msg, err := dec.Decode() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } else if err != nil { panic(fmt.Errorf("failed to decode msg: %v", err)) @@ -57,6 +58,5 @@ func main() { fmt.Println("Failed to write message", err) os.Exit(1) //nolint:gocritic } - } } diff --git a/spec/README.md b/spec/README.md index 61c4d3fc927..3963b70388c 100644 --- a/spec/README.md +++ b/spec/README.md @@ -12,9 +12,9 @@ This is a markdown specification of CometBFT. It defines the base data structures, how they are validated, and how they are communicated over the network. 
-If you find discrepancies between the spec and the code that -do not have an associated issue or pull request on github, -please submit them to our [bug bounty](https://cometbft.com/security)! +If you find discrepancies between the spec and the code that do not have an +associated issue or pull request on github, please submit them to our [bug +bounty](https://github.com/cometbft/cometbft#security)! ## Contents @@ -30,7 +30,7 @@ please submit them to our [bug bounty](https://cometbft.com/security)! - [Consensus Algorithm](./consensus/consensus.md) - [Creating a proposal](./consensus/creating-proposal.md) -- [Time](./consensus/bft-time.md) +- [Time](./consensus/time.md) - [Light-Client](./consensus/light-client/README.md) ### P2P and Network Protocols diff --git a/spec/abci/README.md b/spec/abci/README.md index 4c29cc547f8..5842047a971 100644 --- a/spec/abci/README.md +++ b/spec/abci/README.md @@ -1,39 +1,41 @@ --- order: 1 parent: - title: ABCI++ + title: ABCI 2.0 order: 3 --- -# ABCI++ +# ABCI 2.0 ## Introduction -ABCI++ is a major evolution of ABCI (**A**pplication **B**lock**c**hain **I**nterface). -Like its predecessor, ABCI++ is the interface between CometBFT (a state-machine +ABCI 2.0 is a major evolution of ABCI (**A**pplication **B**lock**c**hain **I**nterface). +ABCI is the interface between CometBFT (a state-machine replication engine) and the actual state machine being replicated (i.e., the Application). The API consists of a set of _methods_, each with a corresponding `Request` and `Response` message type. +> Note: ABCI 2.0 is colloquially called ABCI++. To be precise in these documents, we will refer to the exact version of ABCI under discussion, currently 2.0. + The methods are always initiated by CometBFT. The Application implements its logic -for handling all ABCI++ methods. -Thus, CometBFT always sends the `Request*` messages and receives the `Response*` messages +for handling all ABCI methods. +Thus, CometBFT always sends the `*Request` messages and receives the `*Response` messages in return. -All ABCI++ messages and methods are defined in [protocol buffers](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto). +All ABCI messages and methods are defined in [protocol buffers](https://github.com/cometbft/cometbft/blob/main/proto/cometbft/abci/v1/types.proto). This allows CometBFT to run with applications written in many programming languages. This specification is split as follows: - [Overview and basic concepts](./abci++_basic_concepts.md) - interface's overview and concepts needed to understand other parts of this specification. -- [Methods](./abci++_methods.md) - complete details on all ABCI++ methods +- [Methods](./abci++_methods.md) - complete details on all ABCI methods and message types. - [Requirements for the Application](./abci++_app_requirements.md) - formal requirements on the Application's logic to ensure CometBFT properties such as liveness. These requirements define what CometBFT expects from the Application; second part on managing ABCI application state and related topics. - [CometBFT's expected behavior](./abci++_comet_expected_behavior.md) - specification of - how the different ABCI++ methods may be called by CometBFT. This explains what the Application + how the different ABCI methods may be called by CometBFT. This explains what the Application is to expect from CometBFT. 
- [Example scenarios](./abci++_example_scenarios.md) - specific scenarios showing why the Application needs to account for any CometBFT's behaviour prescribed by the specification. diff --git a/spec/abci/abci++_app_requirements.md b/spec/abci/abci++_app_requirements.md index 4055fcf7b9d..37fdc2d9640 100644 --- a/spec/abci/abci++_app_requirements.md +++ b/spec/abci/abci++_app_requirements.md @@ -5,33 +5,56 @@ title: Requirements for the Application # Requirements for the Application -- [Formal Requirements](#formal-requirements) - - [Consensus Connection Requirements](#consensus-connection-requirements) - - [Mempool Connection Requirements](#mempool-connection-requirements) -- [Managing the Application state and related topics](#managing-the-application-state-and-related-topics) - - [Connection State](#connection-state) - - [Concurrency](#concurrency) - - [Finalize Block](#finalizeblock) - - [Commit](#commit) - - [Candidate States](#candidate-states) - - [States and ABCI++ Connections](#states-and-abci%2B%2B-connections) - - [Consensus Connection](#consensus-connection) - - [Mempool Connection](#mempool-connection) - - [Info/Query Connection](#infoquery-connection) - - [Snapshot Connection](#snapshot-connection) - - [Transaction Results](#transaction-results) - - [Updating the Validator Set](#updating-the-validator-set) - - [Consensus Parameters](#consensus-parameters) - - [List of Parameters](#list-of-parameters) - - [Updating Consensus Parameters](#updating-consensus-parameters) - - [Query](#query) - - [Query Proofs](#query-proofs) - - [Peer Filtering](#peer-filtering) - - [Paths](#paths) - - [Crash Recovery](#crash-recovery) - - [State Sync](#state-sync) -- [Application configuration required to switch to ABCI2.0](#application-configuration-required-to-switch-to-abci-20) - +- [Requirements for the Application](#requirements-for-the-application) + - [Formal Requirements](#formal-requirements) + - [Consensus Connection Requirements](#consensus-connection-requirements) + - [Mempool Connection Requirements](#mempool-connection-requirements) + - [Managing the Application state and related topics](#managing-the-application-state-and-related-topics) + - [Connection State](#connection-state) + - [Concurrency](#concurrency) + - [FinalizeBlock](#finalizeblock) + - [Commit](#commit) + - [Candidate States](#candidate-states) + - [States and ABCI Connections](#states-and-abci-connections) + - [Consensus Connection](#consensus-connection) + - [Mempool Connection](#mempool-connection) + - [Replay Protection](#replay-protection) + - [Info/Query Connection](#infoquery-connection) + - [Snapshot Connection](#snapshot-connection) + - [Transaction Results](#transaction-results) + - [Gas](#gas) + - [Specifics of `CheckTxResponse`](#specifics-of-checktxresponse) + - [Specifics of `ExecTxResult`](#specifics-of-exectxresult) + - [Updating the Validator Set](#updating-the-validator-set) + - [Consensus Parameters](#consensus-parameters) + - [List of Parameters](#list-of-parameters) + - [BlockParams.MaxBytes](#blockparamsmaxbytes) + - [BlockParams.MaxGas](#blockparamsmaxgas) + - [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) + - [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) + - [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) + - [FeatureParams.PbtsEnableHeight](#featureparamspbtsenableheight) + - [FeatureParams.VoteExtensionsEnableHeight](#featureparamsvoteextensionsenableheight) + - [ValidatorParams.PubKeyTypes](#validatorparamspubkeytypes) + - 
[VersionParams.App](#versionparamsapp) + - [SynchronyParams.Precision](#synchronyparamsprecision) + - [SynchronyParams.MessageDelay](#synchronyparamsmessagedelay) + - [Updating Consensus Parameters](#updating-consensus-parameters) + - [`InitChain`](#initchain) + - [`FinalizeBlock`, `PrepareProposal`/`ProcessProposal`](#finalizeblock-prepareproposalprocessproposal) + - [`Query`](#query) + - [Query Proofs](#query-proofs) + - [Peer Filtering](#peer-filtering) + - [Paths](#paths) + - [Crash Recovery](#crash-recovery) + - [State Sync](#state-sync) + - [Taking Snapshots](#taking-snapshots) + - [Bootstrapping a Node](#bootstrapping-a-node) + - [Snapshot Discovery](#snapshot-discovery) + - [Snapshot Restoration](#snapshot-restoration) + - [Snapshot Verification](#snapshot-verification) + - [Transition to Consensus](#transition-to-consensus) + - [Application configuration required to switch to ABCI 2.0](#application-configuration-required-to-switch-to-abci-20) ## Formal Requirements @@ -46,14 +69,14 @@ proposer. Let *sp,h-1* be *p*'s Application's state committed for height *h-1*. Let *vp* (resp. *vq*) be the block that *p*'s (resp. *q*'s) CometBFT passes on to the Application -via `RequestPrepareProposal` as proposer of round *rp* (resp *rq*), height *h*, +via `PrepareProposalRequest` as proposer of round *rp* (resp *rq*), height *h*, also known as the raw proposal. Let *up* (resp. *uq*) the possibly modified block *p*'s (resp. *q*'s) Application -returns via `ResponsePrepareProposal` to CometBFT, also known as the prepared proposal. +returns via `PrepareProposalResponse` to CometBFT, also known as the prepared proposal. Process *p*'s prepared proposal can differ in two different rounds where *p* is the proposer. -* Requirement 1 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in +- Requirement 1 [`PrepareProposal`, timeliness]: If *p*'s Application fully executes prepared blocks in `PrepareProposal` and the network is in a synchronous period while processes *p* and *q* are in *rp*, then the value of *TimeoutPropose* at *q* must be such that *q*'s propose timer does not time out (which would result in *q* prevoting `nil` in *rp*). @@ -66,21 +89,21 @@ compromise liveness because even though `TimeoutPropose` is used as the initial value for proposal timeouts, CometBFT will be dynamically adjust these timeouts such that they will eventually be enough for completing `PrepareProposal`. -* Requirement 2 [`PrepareProposal`, tx-size]: When *p*'s Application calls `ResponsePrepareProposal`, the - total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`. +- Requirement 2 [`PrepareProposal`, tx-size]: When *p*'s Application calls `PrepareProposal`, the + total size in bytes of the transactions returned does not exceed `PrepareProposalRequest.max_tx_bytes`. Busy blockchains might seek to gain full visibility into transactions in CometBFT's mempool, rather than having visibility only on *a* subset of those transactions that fit in a block. The application can do so by setting `ConsensusParams.Block.MaxBytes` to -1. This instructs CometBFT (a) to enforce the maximum possible value for `MaxBytes` (100 MB) at CometBFT level, -and (b) to provide *all* transactions in the mempool when calling `RequestPrepareProposal`. -Under these settings, the aggregated size of all transactions may exceed `RequestPrepareProposal.max_tx_bytes`. +and (b) to provide *all* transactions in the mempool when calling `PrepareProposal`. 
+Under these settings, the aggregated size of all transactions may exceed `PrepareProposalRequest.max_tx_bytes`. Hence, Requirement 2 ensures that the size in bytes of the transaction list returned by the application will never cause the resulting block to go beyond its byte size limit. -* Requirement 3 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, - if *q*'s CometBFT calls `RequestProcessProposal` on *up*, - *q*'s Application returns Accept in `ResponseProcessProposal`. +- Requirement 3 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes *p* and *q*, + if *q*'s CometBFT calls `ProcessProposal` on *up*, + *q*'s Application returns Accept in `ProcessProposalResponse`. Requirement 3 makes sure that blocks proposed by correct processes *always* pass the correct receiving process's `ProcessProposal` check. @@ -91,14 +114,14 @@ likely hit the bug at the same time. This would result in most (or all) processe serious consequences on CometBFT's liveness that this entails. Due to its criticality, Requirement 3 is a target for extensive testing and automated verification. -* Requirement 4 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current +- Requirement 4 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current state and the block that is about to be applied. In other words, for any correct process *p*, and any arbitrary block *u*, - if *p*'s CometBFT calls `RequestProcessProposal` on *u* at height *h*, + if *p*'s CometBFT calls `ProcessProposal` on *u* at height *h*, then *p*'s Application's acceptance or rejection **exclusively** depends on *u* and *sp,h-1*. -* Requirement 5 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary +- Requirement 5 [`ProcessProposal`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary block *u*, - if *p*'s (resp. *q*'s) CometBFT calls `RequestProcessProposal` on *u* at height *h*, + if *p*'s (resp. *q*'s) CometBFT calls `ProcessProposal` on *u* at height *h*, then *p*'s Application accepts *u* if and only if *q*'s Application accepts *u*. Note that this requirement follows from Requirement 4 and the Agreement property of consensus. @@ -109,24 +132,24 @@ the bug from fulfilling Requirements 4 or 5 (effectively making those processes In such a scenario, CometBFT's liveness cannot be guaranteed. Again, this is a problem in practice if most validators are running the same software, as they are likely to hit the bug at the same point. There is currently no clear solution to help with this situation, so -the Application designers/implementors must proceed very carefully with the logic/implementation +the Application designers/implementers must proceed very carefully with the logic/implementation of `ProcessProposal`. As a general rule `ProcessProposal` SHOULD always accept the block. According to the Tendermint consensus algorithm, currently adopted in CometBFT, a correct process can broadcast at most one precommit message in round *r*, height *h*. -Since, as stated in the [Methods](./abci++_methods.md#extendvote) section, `ResponseExtendVote` +Since, as stated in the [Methods](./abci++_methods.md#extendvote) section, `ExtendVote` is only called when the consensus algorithm is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension in round *r*, height *h*. 
Let *erp* be the vote extension that the Application of a correct process *p* returns via -`ResponseExtendVote` in round *r*, height *h*. -Let *wrp* be the proposed block that *p*'s CometBFT passes to the Application via `RequestExtendVote` +`ExtendVoteResponse` in round *r*, height *h*. +Let *wrp* be the proposed block that *p*'s CometBFT passes to the Application via `ExtendVoteRequest` in round *r*, height *h*. -* Requirement 6 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two different correct +- Requirement 6 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two different correct processes *p* and *q*, if *q* receives *erp* from *p* in height *h*, *q*'s - Application returns Accept in `ResponseVerifyVoteExtension`. + Application returns Accept in `VerifyVoteExtensionResponse`. Requirement 6 constrains the creation and handling of vote extensions in a similar way as Requirement 3 constrains the creation and handling of proposed blocks. @@ -136,15 +159,15 @@ However, if there is a (deterministic) bug in `ExtendVote` or `VerifyVoteExtensi we will face the same liveness issues as described for Requirement 5, as Precommit messages with invalid vote extensions will be discarded. -* Requirement 7 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of +- Requirement 7 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of the current state, the vote extension received, and the prepared proposal that the extension refers to. In other words, for any correct process *p*, and any arbitrary vote extension *e*, and any arbitrary - block *w*, if *p*'s (resp. *q*'s) CometBFT calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, + block *w*, if *p*'s (resp. *q*'s) CometBFT calls `VerifyVoteExtension` on *e* and *w* at height *h*, then *p*'s Application's acceptance or rejection **exclusively** depends on *e*, *w* and *sp,h-1*. -* Requirement 8 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*, +- Requirement 8 [`VerifyVoteExtension`, determinism-2]: For any two correct processes *p* and *q*, and any arbitrary vote extension *e*, and any arbitrary block *w*, - if *p*'s (resp. *q*'s) CometBFT calls `RequestVerifyVoteExtension` on *e* and *w* at height *h*, + if *p*'s (resp. *q*'s) CometBFT calls `VerifyVoteExtension` on *e* and *w* at height *h*, then *p*'s Application accepts *e* if and only if *q*'s Application accepts *e*. Note that this requirement follows from Requirement 7 and the Agreement property of consensus. @@ -157,23 +180,22 @@ Requirements 7 and 8 can be violated by a bug inducing non-determinism in Extra care should be put in the implementation of `ExtendVote` and `VerifyVoteExtension`. As a general rule, `VerifyVoteExtension` SHOULD always accept the vote extension. -* Requirement 9 [*all*, no-side-effects]: *p*'s calls to `RequestPrepareProposal`, - `RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height *h* do +- Requirement 9 [*all*, no-side-effects]: *p*'s calls to `PrepareProposal`, + `ProcessProposal`, `ExtendVote`, and `VerifyVoteExtension` at height *h* do not modify *sp,h-1*. - -* Requirement 10 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*, +- Requirement 10 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process *p*, and any vote extension *e* that *p* received at height *h*, the computation of *sp,h* does not depend on *e*. 
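> To make the "always accept" guidance above concrete, here is a hypothetical sketch of a lenient handler. It follows the `*Request`/`*Response` naming used in this spec, but the exact generated Go identifiers (including the status constant) and the `validExtension` helper are assumptions, not part of the specification:

```go
package app

import (
	"context"
	"log/slog"

	abcitypes "github.com/cometbft/cometbft/abci/types"
)

// App is a stand-in application; validExtension is a hypothetical check.
type App struct{ logger *slog.Logger }

func (app *App) validExtension(ext []byte) bool { return len(ext) > 0 }

func (app *App) VerifyVoteExtension(
	_ context.Context,
	req *abcitypes.VerifyVoteExtensionRequest,
) (*abcitypes.VerifyVoteExtensionResponse, error) {
	if !app.validExtension(req.VoteExtension) {
		// Accept anyway and ignore the payload in the application's own
		// logic: rejecting would discard an otherwise valid precommit,
		// with the liveness implications described above.
		app.logger.Info("ignoring malformed vote extension", "height", req.Height)
	}
	return &abcitypes.VerifyVoteExtensionResponse{
		// Exact constant name is an assumption based on the v1 protobuf enum.
		Status: abcitypes.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT,
	}, nil
}
```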
-The call to correct process *p*'s `RequestFinalizeBlock` at height *h*, with block *vp,h* +The call to correct process *p*'s `FinalizeBlock` at height *h*, with block *vp,h* passed as parameter, creates state *sp,h*. Additionally, *p*'s `FinalizeBlock` creates a set of transaction results *Tp,h*. -* Requirement 11 [`FinalizeBlock`, determinism-1]: For any correct process *p*, +- Requirement 11 [`FinalizeBlock`, determinism-1]: For any correct process *p*, *sp,h* exclusively depends on *sp,h-1* and *vp,h*. -* Requirement 12 [`FinalizeBlock`, determinism-2]: For any correct process *p*, +- Requirement 12 [`FinalizeBlock`, determinism-2]: For any correct process *p*, the contents of *Tp,h* exclusively depend on *sp,h-1* and *vp,h*. Note that Requirements 11 and 12, combined with the Agreement property of consensus ensure @@ -183,31 +205,31 @@ Also, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-re requirements associated. Indeed, `PrepareProposal` is not required to be deterministic: -* *up* may depend on *vp* and *sp,h-1*, but may also depend on other values or operations. -* *vp = vq ⇏ up = uq*. +- *up* may depend on *vp* and *sp,h-1*, but may also depend on other values or operations. +- *vp = vq ⇏ up = uq*. Likewise, `ExtendVote` can also be non-deterministic: -* *erp* may depend on *wrp* and *sp,h-1*, +- *erp* may depend on *wrp* and *sp,h-1*, but may also depend on other values or operations. -* *wrp = wrq ⇏ +- *wrp = wrq ⇏ erp = erq* ### Mempool Connection Requirements Let *CheckTxCodestx,p,h* denote the set of result codes returned by *p*'s Application, -via `ResponseCheckTx`, -to successive calls to `RequestCheckTx` occurring while the Application is at height *h* +via `CheckTxResponse`, +to successive calls to `CheckTx` occurring while the Application is at height *h* and having transaction *tx* as parameter. *CheckTxCodestx,p,h* is a set since *p*'s Application may return different result codes during height *h*. If *CheckTxCodestx,p,h* is a singleton set, i.e. the Application always returned -the same result code in `ResponseCheckTx` while at height *h*, +the same result code in `CheckTxResponse` while at height *h*, we define *CheckTxCodetx,p,h* as the singleton value of *CheckTxCodestx,p,h*. If *CheckTxCodestx,p,h* is not a singleton set, *CheckTxCodetx,p,h* is undefined. Let predicate *OK(CheckTxCodetx,p,h)* denote whether *CheckTxCodetx,p,h* is `SUCCESS`. -* Requirement 13 [`CheckTx`, eventual non-oscillation]: For any transaction *tx*, +- Requirement 13 [`CheckTx`, eventual non-oscillation]: For any transaction *tx*, there exists a boolean value *b*, and a height *hstable* such that, for any correct process *p*, @@ -231,7 +253,7 @@ In contrast, the value of *b* MUST be the same across all processes. ### Connection State -CometBFT maintains four concurrent ABCI++ connections, namely +CometBFT maintains four concurrent ABCI connections, namely [Consensus Connection](#consensus-connection), [Mempool Connection](#mempool-connection), [Info/Query Connection](#infoquery-connection), and @@ -241,7 +263,7 @@ the state for each connection, which are synchronized upon `Commit` calls. #### Concurrency -In principle, each of the four ABCI++ connections operates concurrently with one +In principle, each of the four ABCI connections operates concurrently with one another. This means applications need to ensure access to state is thread safe. 
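> Since the four connections run concurrently, an application that also reads its state outside the ABCI path (for example through its own RPC endpoint) needs its own synchronization. A minimal sketch in Go, with purely illustrative names:

```go
package app

import "sync"

// State guards application state with a mutex so that concurrent ABCI
// connections (consensus, mempool, info/query, snapshot) and any
// application-side readers can access it safely.
type State struct {
	mtx  sync.RWMutex
	data map[string]string
}

func (s *State) Get(key string) (string, bool) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	v, ok := s.data[key]
	return v, ok
}

func (s *State) Set(key, value string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.data == nil {
		s.data = make(map[string]string)
	}
	s.data[key] = value
}
```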
Both the [default in-process ABCI client](https://github.com/cometbft/cometbft/blob/main/abci/client/local_client.go#L13) @@ -255,20 +277,6 @@ time. The existence of this global mutex means Go application developers can get thread safety for application state by routing all reads and writes through the ABCI system. Thus it may be unsafe to expose application state directly to an RPC interface, and unless explicit measures are taken, all queries should be routed through the ABCI Query method. - - - - - #### FinalizeBlock When the consensus algorithm decides on a block, CometBFT uses `FinalizeBlock` to send the @@ -290,7 +298,9 @@ will be received on the mempool connection during this processing step, providin update all four connection states to the latest committed state at the same time. -When `Commit` returns, CometBFT unlocks the mempool. +CometBFT unlocks the mempool after it has finished updating for the new block, +which occurs asynchronously from `Commit`. +See [Mempool Update](../mempool/mempool.md) for more information on what the `update` task does. WARNING: if the ABCI app logic processing the `Commit` message sends a `/broadcast_tx_sync` or `/broadcast_tx` and waits for the response @@ -306,22 +316,22 @@ Likewise, CometBFT calls `ProcessProposal` upon reception of a proposed block fr network. The proposed block's data that is disclosed to the Application by these two methods is the following: -* the transaction list -* the `LastCommit` referring to the previous block -* the block header's hash (except in `PrepareProposal`, where it is not known yet) -* list of validators that misbehaved -* the block's timestamp -* `NextValidatorsHash` -* Proposer address +- the transaction list +- the `LastCommit` referring to the previous block +- the block header's hash (except in `PrepareProposal`, where it is not known yet) +- list of validators that misbehaved +- the block's timestamp +- `NextValidatorsHash` +- Proposer address The Application may decide to *immediately* execute the given block (i.e., upon `PrepareProposal` or `ProcessProposal`). There are two main reasons why the Application may want to do this: -* *Avoiding invalid transactions in blocks*. +- *Avoiding invalid transactions in blocks*. In order to be sure that the block does not contain *any* invalid transaction, there may be no way other than fully executing the transactions in the block as though it was the *decided* block. -* *Quick `FinalizeBlock` execution*. +- *Quick `FinalizeBlock` execution*. Upon reception of the decided block via `FinalizeBlock`, if that same block was executed upon `PrepareProposal` or `ProcessProposal` and the resulting state was kept in memory, the Application can simply apply that state (faster) to the main state, rather than reexecuting @@ -344,7 +354,7 @@ to bound memory usage. As a general rule, the Application should be ready to dis before `FinalizeBlock`, even if one of them might end up corresponding to the decided block and thus have to be reexecuted upon `FinalizeBlock`. -### States and ABCI++ Connections +### States and ABCI Connections #### Consensus Connection @@ -372,9 +382,9 @@ responded to and no new ones can begin. After the `Commit` call returns, while still holding the mempool lock, `CheckTx` is run again on all transactions that remain in the node's local mempool after filtering those included in the block. -Parameter `Type` in `RequestCheckTx` -indicates whether an incoming transaction is new (`CheckTxType_New`), or a -recheck (`CheckTxType_Recheck`). 
+Parameter `Type` in `CheckTxRequest` +indicates whether an incoming transaction is new (`CHECK_TX_TYPE_NEW`), or a +recheck (`CHECK_TX_TYPE_RECHECK`). Finally, after re-checking transactions in the mempool, CometBFT will unlock the mempool connection. New transactions are once again able to be processed through `CheckTx`. @@ -385,7 +395,7 @@ Since the transaction cannot be guaranteed to be checked against the exact same will be executed as part of a (potential) decided block, `CheckTx` shouldn't check *everything* that affects the transaction's validity, in particular those checks whose validity may depend on transaction ordering. `CheckTx` is weak because a Byzantine node need not care about `CheckTx`; -it can propose a block full of invalid transactions if it wants. The mechanism ABCI++ has +it can propose a block full of invalid transactions if it wants. The mechanism ABCI, from version 1.0, has in place for dealing with such behavior is `ProcessProposal`. ##### Replay Protection @@ -424,11 +434,11 @@ For more information, see Section [State Sync](#state-sync). The Application is expected to return a list of [`ExecTxResult`](./abci%2B%2B_methods.md#exectxresult) in -[`ResponseFinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock). The list of transaction +[`FinalizeBlockResponse`](./abci%2B%2B_methods.md#finalizeblock). The list of transaction results MUST respect the same order as the list of transactions delivered via -[`RequestFinalizeBlock`](./abci%2B%2B_methods.md#finalizeblock). +[`FinalizeBlockRequest`](./abci%2B%2B_methods.md#finalizeblock). This section discusses the fields inside this structure, along with the fields in -[`ResponseCheckTx`](./abci%2B%2B_methods.md#checktx), +[`CheckTxResponse`](./abci%2B%2B_methods.md#checktx), whose semantics are similar. The `Info` and `Log` fields are @@ -460,8 +470,8 @@ or validation should fail before it can use more resources than it requested. When `MaxGas > -1`, CometBFT enforces the following rules: -* `GasWanted <= MaxGas` for every transaction in the mempool -* `(sum of GasWanted in a block) <= MaxGas` when proposing a block +- `GasWanted <= MaxGas` for every transaction in the mempool +- `(sum of GasWanted in a block) <= MaxGas` when proposing a block If `MaxGas == -1`, no rules about gas are enforced. @@ -479,11 +489,11 @@ it can use `PrepareProposal` and `ProcessProposal` to enforce that `(sum of GasW in all proposed or prevoted blocks, we have: -* `(sum of GasUsed in a block) <= MaxGas` for every block +- `(sum of GasUsed in a block) <= MaxGas` for every block The `GasUsed` field is ignored by CometBFT. -#### Specifics of `ResponseCheckTx` +#### Specifics of `CheckTxResponse` If `Code != 0`, it will be rejected from the mempool and hence not broadcasted to other peers and not included in a proposal block. @@ -492,9 +502,9 @@ not broadcasted to other peers and not included in a proposal block. deterministic since, given a transaction, nodes' Applications might have a different *CheckTxState* values when they receive it and check their validity via `CheckTx`. -CometBFT ignores this value in `ResponseCheckTx`. +CometBFT ignores this value in `CheckTxResponse`. -From v0.34.x on, there is a `Priority` field in `ResponseCheckTx` that can be +From v0.34.x on, there is a `Priority` field in `CheckTxResponse` that can be used to explicitly prioritize transactions in the mempool for inclusion in a block proposal. @@ -540,21 +550,21 @@ duplicates, the block execution will fail irrecoverably. 
Structure `ValidatorUpdate` contains a public key, which is used to identify the validator: The public key currently supports three types: -* `ed25519` -* `secp256k1` -* `sr25519` +- `ed25519` +- `secp256k1` +- `bls12381` Structure `ValidatorUpdate` also contains an `ìnt64` field denoting the validator's new power. Applications must ensure that `ValidatorUpdate` structures abide by the following rules: -* power must be non-negative -* if power is set to 0, the validator must be in the validator set; it will be removed from the set -* if power is greater than 0: - * if the validator is not in the validator set, it will be added to the +- power must be non-negative +- if power is set to 0, the validator must be in the validator set; it will be removed from the set +- if power is greater than 0: + - if the validator is not in the validator set, it will be added to the set with the given power - * if the validator is in the validator set, its power will be adjusted to the given power -* the total power of the new validator set must not exceed `MaxTotalVotingPower`, where + - if the validator is in the validator set, its power will be adjusted to the given power +- the total power of the new validator set must not exceed `MaxTotalVotingPower`, where `MaxTotalVotingPower = MaxInt64 / 8` Note the updates returned after processing the block at height `H` will only take effect @@ -573,25 +583,19 @@ all full nodes have the same value at a given height. #### List of Parameters -These are the current consensus parameters (as of v0.37.x): - -1. [BlockParams.MaxBytes](#blockparamsmaxbytes) -2. [BlockParams.MaxGas](#blockparamsmaxgas) -3. [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) -4. [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) -5. [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) -6. [ValidatorParams.PubKeyTypes](#validatorparamspubkeytypes) -7. [VersionParams.App](#versionparamsapp) - +These are the current consensus parameters (as of v1.0.x): + +1. [BlockParams.MaxBytes](#blockparamsmaxbytes) +2. [BlockParams.MaxGas](#blockparamsmaxgas) +3. [EvidenceParams.MaxAgeDuration](#evidenceparamsmaxageduration) +4. [EvidenceParams.MaxAgeNumBlocks](#evidenceparamsmaxagenumblocks) +5. [EvidenceParams.MaxBytes](#evidenceparamsmaxbytes) +6. [FeatureParams.PbtsEnableHeight](#featureparamspbtsenableheight) +7. [FeatureParams.VoteExtensionsEnableHeight](#featureparamsvoteextensionsenableheight) +8. [ValidatorParams.PubKeyTypes](#validatorparamspubkeytypes) +9. [VersionParams.App](#versionparamsapp) +10. [SynchronyParams.Precision](#synchronyparamsprecision) +11. [SynchronyParams.MessageDelay](#synchronyparamsmessagedelay) ##### BlockParams.MaxBytes @@ -601,10 +605,10 @@ This is enforced by the consensus algorithm. This implies a maximum transaction size that is this `MaxBytes`, less the expected size of the header, the validator set, and any included evidence in the block. -The Application should be aware that honest validators _may_ produce and +The Application should be aware that honest validators *may* produce and broadcast blocks with up to the configured `MaxBytes` size. As a result, the consensus -[timeout parameters](../../docs/core/configuration.md#consensus-timeouts-explained) +[timeout parameters](../../docs/explanation/core/configuration.md#consensus-timeouts-explained) adopted by nodes should be configured so as to account for the worst-case latency for the delivery of a full block with `MaxBytes` size to all validators. 
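> As a hedged illustration of the validator-update rules listed above, the sketch below checks a batch of updates before the application returns them. The types are stand-ins for the generated ABCI structures, not the real ones:

```go
package app

import (
	"errors"
	"math"
)

// MaxTotalVotingPower mirrors the spec's bound of MaxInt64 / 8.
const MaxTotalVotingPower = math.MaxInt64 / 8

// ValidatorUpdate is an illustrative stand-in for the ABCI structure:
// a public key identifying the validator and its new voting power.
type ValidatorUpdate struct {
	PubKeyBytes []byte
	Power       int64
}

// validateUpdates checks the rules above against the current validator
// powers, keyed by public key. It assumes updates contain no duplicate
// keys, which the spec forbids anyway.
func validateUpdates(updates []ValidatorUpdate, current map[string]int64) error {
	total := int64(0)
	for _, p := range current {
		total += p
	}
	for _, u := range updates {
		if u.Power < 0 {
			return errors.New("validator power must be non-negative")
		}
		prev, inSet := current[string(u.PubKeyBytes)]
		if u.Power == 0 && !inSet {
			return errors.New("cannot remove a validator that is not in the set")
		}
		total += u.Power - prev
	}
	if total > MaxTotalVotingPower {
		return errors.New("total voting power exceeds MaxTotalVotingPower")
	}
	return nil
}
```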
@@ -672,110 +676,70 @@ a block minus its overhead ( ~ `BlockParams.MaxBytes`). Must have `MaxBytes > 0`. -##### ValidatorParams.PubKeyTypes +##### FeatureParams.PbtsEnableHeight -The parameter restricts the type of keys validators can use. The parameter uses ABCI pubkey naming, not Amino names. +Height at which Proposer-Based Timestamps (PBTS) will be enabled. -##### VersionParams.App +A value of 0 means that PBTS is disabled. A value > 0 denotes the +height at which PBTS will be (or has been) enabled. -This is the version of the ABCI application. - - -##### ABCIParams.VoteExtensionsEnableHeight - -This parameter is either 0 or a positive height at which vote extensions -become mandatory. If the value is zero (which is the default), vote -extensions are not required. Otherwise, at all heights greater than the -configured height `H` vote extensions must be present (even if empty). -When the configured height `H` is reached, `PrepareProposal` will not -include vote extensions yet, but `ExtendVote` and `VerifyVoteExtension` will -be called. Then, when reaching height `H+1`, `PrepareProposal` will -include the vote extensions from height `H`. For all heights after `H` +##### SynchronyParams.MessageDelay -* vote extensions cannot be disabled, -* they are mandatory: all precommit messages sent MUST have an extension - attached. Nevertheless, the application MAY provide 0-length - extensions. +This sets a bound on how long a proposal message may take to reach all +validators on a network and still be considered valid. -Must always be set to a future height. Once set to a value different from -0, its value must not be changed. +This parameter is used by the +[Proposer-Based Timestamps (PBTS)](../consensus/proposer-based-timestamp/README.md) +algorithm. #### Updating Consensus Parameters @@ -791,7 +755,7 @@ value to be updated to the default. ##### `InitChain` -`ResponseInitChain` includes a `ConsensusParams` parameter. +`InitChainResponse` includes a `ConsensusParams` parameter. If `ConsensusParams` is `nil`, CometBFT will use the params loaded in the genesis file. If `ConsensusParams` is not `nil`, CometBFT will use it. This way the application can determine the initial consensus parameters for the @@ -799,7 +763,7 @@ blockchain. ##### `FinalizeBlock`, `PrepareProposal`/`ProcessProposal` -`ResponseFinalizeBlock` accepts a `ConsensusParams` parameter. +`FinalizeBlockResponse` accepts a `ConsensusParams` parameter. If `ConsensusParams` is `nil`, CometBFT will do nothing. If `ConsensusParams` is not `nil`, CometBFT will use it. This way the application can update the consensus parameters over time. @@ -842,9 +806,9 @@ For such applications, the `AppHash` provides a much more efficient way to verif ABCI applications can take advantage of more efficient light-client proofs for their state as follows: -* return the Merkle root of the deterministic application state in - `ResponseFinalizeBlock.Data`. This Merkle root will be included as the `AppHash` in the next block. -* return efficient Merkle proofs about that application state in `ResponseQuery.Proof` +- return the Merkle root of the deterministic application state in + `FinalizeBlockResponse.Data`. This Merkle root will be included as the `AppHash` in the next block. +- return efficient Merkle proofs about that application state in `QueryResponse.Proof` that can be verified using the `AppHash` of the corresponding block. 
For instance, this allows an application's light-client to verify proofs of @@ -852,7 +816,7 @@ absence in the application state, something which is much less efficient to do u Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees, where the leaves of one tree are the root hashes of others. To support this, and -the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure: +the general variability in Merkle proofs, the `QueryResponse.Proof` has some minimal structure: ```protobuf message ProofOps { @@ -879,9 +843,9 @@ the list should match the `AppHash` being verified against. When CometBFT connects to a peer, it sends two queries to the ABCI application using the following paths, with no additional data: -* `/p2p/filter/addr/`, where `` denote the IP address and +- `/p2p/filter/addr/`, where `` denote the IP address and the port of the connection -* `p2p/filter/id/`, where `` is the peer node ID (ie. the +- `p2p/filter/id/`, where `` is the peer node ID (ie. the pubkey.Address() for the peer's PubKey) If either of these queries return a non-zero ABCI code, CometBFT will refuse @@ -899,33 +863,34 @@ implementation of ### Crash Recovery -CometBFT and the application are expected to crash together and there should not +CometBFT and the application are expected to crash together and there should not exist a scenario where the application has persisted state of a height greater than the latest height persisted by CometBFT. -In practice, persisting the state of a height consists of three steps, the last of which +In practice, persisting the state of a height consists of three steps, the last of which is the call to the application's `Commit` method, the only place where the application is expected to persist/commit its state. On startup (upon recovery), CometBFT calls the `Info` method on the Info Connection to get the latest committed state of the app. The app MUST return information consistent with the -last block for which it successfully completed `Commit`. +last block for which it successfully completed `Commit`. + +The three steps performed before the state of a height is considered persisted are: -The three steps performed before the state of a height is considered persisted are: - The block is stored by CometBFT in the blockstore - CometBFT has stored the state returned by the application through `FinalizeBlockResponse` -- The application has committed its state within `Commit`. - +- The application has committed its state within `Commit`. + The following diagram depicts the order in which these events happen, and the corresponding ABCI functions that are called and executed by CometBFT and the application: -``` +``` APP: Execute block Persist application state / return ResultFinalizeBlock / - / / + / / Event: ------------- block_stored ------------ / ------------ state_stored --------------- / ----- app_persisted_state | / | / | -CometBFT: Decide --- Persist block -- Call FinalizeBlock - Persist results ---------- Call Commit -- +CometBFT: Decide --- Persist block -- Call FinalizeBlock - Persist results ---------- Call Commit -- on in the (txResults, validator Block block store updates...) 
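> A minimal sketch of the recovery handshake described above: on restart, `Info` must report only state that survived a completed `Commit`, so CometBFT can replay whatever the application is missing. Type and field names here assume the v1.x generated ABCI types; the `App` fields are illustrative:

```go
package app

import (
	"context"

	abcitypes "github.com/cometbft/cometbft/abci/types"
)

// App tracks what the last completed Commit persisted; the field names
// are assumptions for illustration.
type App struct {
	committedHeight  int64
	committedAppHash []byte
}

// Info reports the last height whose Commit fully completed, so that on
// recovery CometBFT replays FinalizeBlock/Commit for any later blocks.
func (app *App) Info(
	_ context.Context,
	_ *abcitypes.InfoRequest,
) (*abcitypes.InfoResponse, error) {
	return &abcitypes.InfoResponse{
		LastBlockHeight:  app.committedHeight,
		LastBlockAppHash: app.committedAppHash,
	}, nil
}
```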
@@ -933,26 +898,27 @@ CometBFT: Decide --- Persist block -- Call FinalizeBlock - Persist results ----- As these three steps are not atomic, we observe different cases based on which steps have been executed before the crash occurred -(we assume that at least `block_stored` has been executed, otherwise, there is no state persisted, +(we assume that at least `block_stored` has been executed, otherwise, there is no state persisted, and the operations for this height are repeated entirely): - `block_stored`: we replay `FinalizeBlock` and the steps afterwards. - `block_stored` and `state_stored`: As the app did not persist its state within `Commit`, we need to re-execute - `FinalizeBlock` to retrieve the results and compare them to the state stored by CometBFT within `state_stored`. + `FinalizeBlock` to retrieve the results and compare them to the state stored by CometBFT within `state_stored`. The expected case is that the states will match, otherwise CometBFT panics. -- `block_stored`, `state_stored`, `app_persisted_state`: we move on to the next height. +- `block_stored`, `state_stored`, `app_persisted_state`: we move on to the next height. Based on the sequence of these events, CometBFT will panic if any of the steps in the sequence happen out of order, -that is if: +that is if: + - The application has persisted a block at a height higher than the blocked saved during `state_stored`. - The `block_stored` step persisted a block at a height smaller than the `state_stored` -- And the difference between the heights of the blocks persisted by `state_stored` and `block_stored` is more +- And the difference between the heights of the blocks persisted by `state_stored` and `block_stored` is more than 1 (this corresponds to a scenario where we stored two blocks in the block store but never persisted the state of the first block, which should never happen). -A special case is when a crash happens before the first block is committed - that is, after calling +A special case is when a crash happens before the first block is committed - that is, after calling `InitChain`. In that case, the application's state should still be at height 0 and thus `InitChain` -will be called again. +will be called again. ### State Sync @@ -978,20 +944,20 @@ Applications that want to support state syncing must take state snapshots at reg this is accomplished is entirely up to the application. A snapshot consists of some metadata and a set of binary chunks in an arbitrary format: -* `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given +- `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given height has been committed, and must not contain data from any later heights. -* `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot +- `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot formats, e.g. to switch from Protobuf to MessagePack for serialization. The application can use this when restoring to choose whether to accept or reject a snapshot. -* `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary +- `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary data, and should be less than 16 MB; 10 MB is a good starting point. -* `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is +- `Hash ([]byte)`: An arbitrary hash of the snapshot. 
This is used to check whether a snapshot is the same across nodes when downloading chunks. -* `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other +- `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other necessary info. For a snapshot to be considered the same across nodes, all of these fields must be identical. When @@ -1002,14 +968,14 @@ application via the ABCI `ListSnapshots` method to discover available snapshots, snapshot chunks via `LoadSnapshotChunk`. The application is free to choose how to implement this and which formats to use, but must provide the following guarantees: -* **Consistent:** A snapshot must be taken at a single isolated height, unaffected by +- **Consistent:** A snapshot must be taken at a single isolated height, unaffected by concurrent writes. This can be accomplished by using a data store that supports ACID transactions with snapshot isolation. -* **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress, +- **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress, for example by running in a separate thread. -* **Deterministic:** A snapshot taken at the same height in the same format must be identical +- **Deterministic:** A snapshot taken at the same height in the same format must be identical (at the byte level) across nodes, including all metadata. This ensures good availability of chunks, and that they fit together across nodes. @@ -1086,7 +1052,7 @@ can be spoofed by adversaries. Apps may also want to consider state sync denial-of-service vectors, where adversaries provide invalid or harmful snapshots to prevent nodes from joining the network. The application can counteract this by asking CometBFT to ban peers. As a last resort, node operators can use -P2P configuration options to whitelist a set of trusted peers that can provide valid snapshots. +P2P configuration options to list an exclusive set of trusted peers that can provide valid snapshots. ##### Transition to Consensus @@ -1094,17 +1060,17 @@ Once the snapshots have all been restored, CometBFT gathers additional informati bootstrapping the node (e.g. chain ID, consensus parameters, validator sets, and block headers) from the genesis file and light client RPC servers. It also calls `Info` to verify the following: -* that the app hash from the snapshot it has delivered to the Application matches the apphash +- that the app hash from the snapshot it has delivered to the Application matches the apphash stored in the next height's block -* that the version that the Application returns in `ResponseInfo` matches the version in the +- that the version that the Application returns in `InfoResponse` matches the version in the current height's block header Once the state machine has been restored and CometBFT has gathered this additional information, it transitions to consensus. As of ABCI 2.0, CometBFT ensures the necessary conditions -to switch are met [RFC-100](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history). -From the application's point of view, these operations are transparent, unless the application has just upgraded to ABCI 2.0. +to switch are met [RFC-100](../../docs/references/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history). 
+From the application's point of view, these operations are transparent, unless the application has just upgraded to ABCI 2.0. In that case, the application needs to be properly configured and aware of certain constraints in terms of when -to provide vote extensions. More details can be found in the section below. +to provide vote extensions. More details can be found in the section below. Once a node switches to consensus, it operates like any other node, apart from having a truncated block history at the height of the restored snapshot. @@ -1112,21 +1078,21 @@ Once a node switches to consensus, it operates like any other node, apart from h Introducing vote extensions requires changes to the configuration of the application. -First of all, switching to a version of CometBFT with vote extensions, requires a coordinated upgrade. -For a detailed description on the upgrade path, please refer to the corresponding -[section](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#upgrade-path) in RFC-100. +First of all, switching to a version of CometBFT with vote extensions, requires a coordinated upgrade. +For a detailed description on the upgrade path, please refer to the corresponding +[section](../../docs/references/rfc/rfc-100-abci-vote-extension-propag.md#upgrade-path) in RFC-100. -There is a newly introduced [**consensus parameter**](./abci%2B%2B_app_requirements.md#abciparamsvoteextensionsenableheight): `VoteExtensionsEnableHeight`. -This parameter represents the height at which vote extensions are -required for consensus to proceed, with 0 being the default value (no vote extensions). +There is a newly introduced [**consensus parameter**](./abci%2B%2B_app_requirements.md#abciparamsvoteextensionsenableheight): `VoteExtensionsEnableHeight`. +This parameter represents the height at which vote extensions are +required for consensus to proceed, with 0 being the default value (vote extensions disabled). A chain can enable vote extensions either: -* at genesis by setting `VoteExtensionsEnableHeight` to be equal, e.g., to the `InitialHeight` -* or via the application logic by changing the `ConsensusParam` to configure the +- at genesis by setting `VoteExtensionsEnableHeight` to be equal, e.g., to the `InitialHeight` +- or via the application logic by changing the `ConsensusParam` to configure the `VoteExtensionsEnableHeight`. Once the (coordinated) upgrade to ABCI 2.0 has taken place, at height *hu*, the value of `VoteExtensionsEnableHeight` MAY be set to some height, *he*, -which MUST be higher than the current height of the chain. Thus the earliest value for +which MUST be higher than the current height of the chain. Thus the earliest value for *he* is *hu* + 1. Once a node reaches the configured height, @@ -1138,7 +1104,7 @@ Likewise, for all heights *h < he*, any precommit messages that *do* will also be rejected as malformed. Height *he* is somewhat special, as calls to `PrepareProposal` MUST NOT have vote extension data, but all precommit votes in that height MUST carry a vote extension, -even if the extension is `nil`. +even if the extension is `nil`. Height *he + 1* is the first height for which `PrepareProposal` MUST have vote extension data and all precommit votes in that height MUST have a vote extension. 
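> Once the configured height *h<sub>e</sub>* is reached, CometBFT starts calling `ExtendVote` before each non-`nil` precommit. A hypothetical sketch of a handler, assuming the v1.x generated ABCI types; the payload (the big-endian height) is purely illustrative:

```go
package app

import (
	"context"
	"encoding/binary"

	abcitypes "github.com/cometbft/cometbft/abci/types"
)

type App struct{}

// ExtendVote attaches application data to the precommit for req.Height.
// Extensions may be empty, but from height he on they must be present.
func (app *App) ExtendVote(
	_ context.Context,
	req *abcitypes.ExtendVoteRequest,
) (*abcitypes.ExtendVoteResponse, error) {
	ext := make([]byte, 8)
	binary.BigEndian.PutUint64(ext, uint64(req.Height))
	return &abcitypes.ExtendVoteResponse{VoteExtension: ext}, nil
}
```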
diff --git a/spec/abci/abci++_basic_concepts.md b/spec/abci/abci++_basic_concepts.md index 4185585c659..094af79faf0 100644 --- a/spec/abci/abci++_basic_concepts.md +++ b/spec/abci/abci++_basic_concepts.md @@ -6,7 +6,7 @@ title: Overview and basic concepts ## Outline - [Overview and basic concepts](#overview-and-basic-concepts) - - [ABCI++ vs. ABCI](#abci-vs-abci) + - [ABCI 2.0 vs. legacy ABCI](#abci-20-vs-legacy-abci) - [Method overview](#method-overview) - [Consensus/block execution methods](#consensusblock-execution-methods) - [Mempool methods](#mempool-methods) @@ -16,15 +16,14 @@ title: Overview and basic concepts - [Proposal timeout](#proposal-timeout) - [Deterministic State-Machine Replication](#deterministic-state-machine-replication) - [Events](#events) - - [Evidence](#evidence) + - [Evidence of Misbehavior](#evidence-of-misbehavior) - [Errors](#errors) - [`CheckTx`](#checktx) - [`ExecTxResult` (as part of `FinalizeBlock`)](#exectxresult-as-part-of-finalizeblock) - [`Query`](#query) - # Overview and basic concepts -## ABCI 2.0 vs. ABCI +## ABCI 2.0 vs. legacy ABCI [↑ Back to Outline](#outline) @@ -46,12 +45,12 @@ proposal is to be validated, and (c) at the moment a (precommit) vote is sent/re The new interface allows block proposers to perform application-dependent work in a block through the `PrepareProposal` method (a); and validators to perform application-dependent work and checks in a proposed block through the `ProcessProposal` method (b); and applications to require their validators -to do more than just validate blocks through the `ExtendVote` and `VerifyVoteExtensions` methods (c). +to do more than just validate blocks through the `ExtendVote` and `VerifyVoteExtension` methods (c). Furthermore, ABCI 2.0 coalesces {`BeginBlock`, [`DeliverTx`], `EndBlock`} into `FinalizeBlock`, as a simplified, efficient way to deliver a decided block to the Application. -## Methods overview +## Method overview [↑ Back to Outline](#outline) @@ -75,21 +74,19 @@ call sequences of these methods. proposer to perform application-dependent work in a block before proposing it. This enables, for instance, batch optimizations to a block, which has been empirically demonstrated to be a key component for improved performance. Method `PrepareProposal` is called - every time CometBFT is about to broadcast a Proposal message and _validValue_ is `nil`. + every time CometBFT is about to broadcast a Proposal message and *validValue* is `nil`. CometBFT gathers outstanding transactions from the mempool, generates a block header, and uses them to create a block to propose. Then, it calls - `RequestPrepareProposal` with the newly created proposal, called *raw proposal*. The Application - can make changes to the raw proposal, such as modifying the set of transactions or the order - in which they appear, and returns the - (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal` - call. + `PrepareProposal` with the newly created proposal, called *raw proposal*. The Application + can make changes to the raw proposal, such as reordering, adding and removing transactions, before returning the + (potentially) modified proposal, called *prepared proposal* in the `PrepareProposalResponse`. The logic modifying the raw proposal MAY be non-deterministic. - [**ProcessProposal:**](./abci++_methods.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. 
This enables features such as immediate block execution, and allows the Application to reject invalid blocks. - CometBFT calls it when it receives a proposal and _validValue_ is `nil`. + CometBFT calls it when it receives a proposal and *validValue* is `nil`. The Application cannot modify the proposal at this point but can reject it if invalid. If that is the case, the consensus algorithm will prevote `nil` on the proposal, which has strong liveness implications for CometBFT. As a general rule, the Application @@ -115,22 +112,24 @@ call sequences of these methods. This has a negative impact on liveness, i.e., if vote extensions repeatedly cannot be verified by correct validators, the consensus algorithm may not be able to finalize a block even if sufficiently many (+2/3) validators send precommit votes for that block. Thus, `VerifyVoteExtension` - should be used with special care. + should be implemented with special care. As a general rule, an Application that detects an invalid vote extension SHOULD - accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. CometBFT calls it when - a process receives a precommit message with a (possibly empty) vote extension. + accept it in `VerifyVoteExtensionResponse` and ignore it in its own logic. CometBFT calls it when + a process receives a precommit message with a (possibly empty) vote extension, for the current height. It is not called for precommit votes received after the height is concluded but while waiting to accumulate more precommit votes. The logic in `VerifyVoteExtension` MUST be deterministic. - [**FinalizeBlock:**](./abci++_methods.md#finalizeblock) It delivers a decided block to the Application. The Application must execute the transactions in the block deterministically and update its state accordingly. Cryptographic commitments to the block and transaction results, - returned via the corresponding parameters in `ResponseFinalizeBlock`, are included in the header + returned via the corresponding parameters in `FinalizeBlockResponse`, are included in the header of the next block. CometBFT calls it when a new block is decided. + When calling `FinalizeBlock` with a block, the consensus algorithm run by CometBFT guarantees + that at least one non-byzantine validator has run `ProcessProposal` on that block. - [**Commit:**](./abci++_methods.md#commit) Instructs the Application to persist its state. It is a fundamental part of CometBFT's crash-recovery mechanism that ensures the synchronization between CometBFT and the Application upon recovery. CometBFT calls it just after - having persisted the data returned by calls to `ResponseFinalizeBlock`. The Application can now discard + having persisted the data returned by calls to `FinalizeBlockResponse`. The Application can now discard any state or data except the one resulting from executing the transactions in the decided block. ### Mempool methods @@ -246,11 +245,14 @@ The state changes caused by processing those proposed blocks must never replace the previous state until `FinalizeBlock` confirms that the proposed block was decided and `Commit` is invoked for it. -The same is true to Applications that quickly accept blocks and execute the blocks optimistically in parallel with the remaining consensus steps to save time during `FinalizeBlock`; they must only apply state changes in `Commit`. 
+The same is true for Applications that quickly accept blocks and execute the
+blocks optimistically in parallel with the remaining consensus steps to save
+time during `FinalizeBlock`; they must only apply state changes in `Commit`.

Additionally, vote extensions or the validation thereof (via `ExtendVote`
or `VerifyVoteExtension`) must *never* have side effects on the current state.
-They can only be used when their data is provided in a `RequestPrepareProposal` call.
+Their data can only be used when provided in a `PrepareProposal` call but, again,
+without side effects on the app state.

If there is some non-determinism in the state machine, consensus will
eventually fail as nodes disagree over the correct values for the block header. The
@@ -275,18 +277,19 @@ Sources of non-determinism in applications may include:

See [#56](https://github.com/tendermint/abci/issues/56) for the original discussion.

-Note that some methods (`Query`, `FinalizeBlock`) return non-deterministic data in the form
-of `Info` and `Log` fields. The `Log` is intended for the literal output from the Application's
-logger, while the `Info` is any additional info that should be returned. These are the only fields
-that are not included in block header computations, so we don't need agreement
-on them. All other fields in the `Response*` must be strictly deterministic.
+Note that some methods (e.g., `Query` and `FinalizeBlock`) may return
+non-deterministic data in the form of `Info`, `Log` and/or `Events` fields. The
+`Log` is intended for the literal output from the Application's logger, while
+the `Info` is any additional info that should be returned. These fields are not
+included in block header computations, so we don't need agreement on them. See
+each field's description on whether it must be deterministic or not.

## Events

[↑ Back to Outline](#outline)

Method `FinalizeBlock` includes an `events` field at the top level in its
-`Response*`, and one `events` field per transaction included in the block.
+`FinalizeBlockResponse`, and one `events` field per transaction included in the block.
Applications may respond to this ABCI 2.0 method with an event list for each executed
transaction, and a general event list for the block itself.
Events allow applications to associate metadata with transactions and blocks.
@@ -299,7 +302,8 @@ execution.
`Event` values can be used to index transactions and blocks according to what
happened during their execution.

Each event has a `type` which is meant to categorize the event for a particular
-`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate
+`FinalizeBlockResponse` or `Tx`. A `FinalizeBlockResponse` or `Tx` may contain
+multiple events with duplicate
`type` values, where each distinct entry is meant to categorize attributes for a
particular event. Every key and value in an event's attributes must be UTF-8
encoded strings along with the event type itself.

```protobuf
message Event {
  string                  type       = 1;
  repeated EventAttribute attributes = 2;
}
```

-The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The
-index flag notifies the CometBFT indexer to index the attribute. The value of
-the `index` flag is non-deterministic and may vary across different nodes in the network.
+The attributes of an `Event` consist of a `key`, a `value`, and an `index`
+flag. The index flag notifies the CometBFT indexer to index the attribute.
+
+The `type` and `attributes` fields are non-deterministic and may vary across
+different nodes in the network.
```protobuf message EventAttribute { @@ -326,7 +332,7 @@ message EventAttribute { } ``` Example: ```go - abci.ResponseFinalizeBlock{ + abci.FinalizeBlockResponse{ // ... Events: []abci.Event{ { @@ -358,51 +364,57 @@ Example: } ``` -## Evidence +## Evidence of Misbehavior [↑ Back to Outline](#outline) -CometBFT's security model relies on the use of evidence of misbehavior. Evidence is an irrefutable proof of malicious behavior by a network participant. It is the responsibility of CometBFT to detect such malicious behavior. When malicious behavior is detected, CometBFT -will gossip evidences of misbehavior to other nodes and commit the evidences to -the chain once they are verified by a subset of validators. These evidences will then be -passed on to the Application through ABCI++. It is the responsibility of the -Application to handle evidence of misbehavior and exercise punishment. +will gossip evidence of misbehavior to other nodes and commit the evidence to +the chain once it is verified by a subset of validators. This evidence of misbehavior will then be +passed on to the Application through ABCI. It is the responsibility of the +Application to handle the evidence of misbehavior and exercise punishment. -There are two forms of evidence: Duplicate Vote and Light Client Attack. More -information can be found in either [data structures](../core/data_structures.md) -or [accountability](../light-client/accountability/). +There are two forms of misbehavior: `Duplicate Vote` and `Light Client Attack`. More +information can be found in the consensus [evidence](../consensus/evidence.md) document. -EvidenceType has the following protobuf format: +`MisbehaviorType` has the following protobuf format: ```protobuf -enum EvidenceType { - UNKNOWN = 0; - DUPLICATE_VOTE = 1; - LIGHT_CLIENT_ATTACK = 2; +// The type of misbehavior committed by a validator. +enum MisbehaviorType { + // Unknown + MISBEHAVIOR_TYPE_UNKNOWN = 0; + // Duplicate vote + MISBEHAVIOR_TYPE_DUPLICATE_VOTE = 1; + // Light client attack + MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK = 2; } ``` -## Errors +## Returning Errors [↑ Back to Outline](#outline) -The `Query` and `CheckTx` methods include a `Code` field in their `Response*`. +Please note that the method signature for the ABCI methods includes a response and an error return, such as +`(*abcitypes.[Method_Name]Response, error)`. + +### ABCI response error codes (e.g. `Code` and `Codespace`) + +Some of the ABCI methods' responses feature a field (e.g., the `Code` field) that can be used to return an error +in the `[Method_Name]Response`. These fields play a significant role, as they indicate +to CometBFT that a problem has occurred during data processing, such as transaction validation or a query. + +The `Query` and `CheckTx` methods include a `Code` field in their `*Response`. Field `Code` is meant to contain an application-specific response code. -A response code of `0` indicates no error. Any other response code -indicates to CometBFT that an error occurred. +A response code of `0` indicates no error. Any other value of the `Code` field +indicates to CometBFT that an error occurred. These methods also return a `Codespace` string to CometBFT. This field is used to disambiguate `Code` values returned by different domains of the Application. The `Codespace` is a namespace for the `Code`.
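As an illustration of the `Code`/`Codespace` convention, below is a minimal Go sketch of a `CheckTx` handler that rejects a transaction with an application-specific code. It assumes the v1-style Go API of `github.com/cometbft/cometbft/abci/types` (methods returning `(*[Method_Name]Response, error)`); the `MyApp` type and its minimum-size rule are hypothetical.

```go
package main

import (
	"context"

	abcitypes "github.com/cometbft/cometbft/abci/types"
)

// Application-specific response codes. `0` must mean "no error"; any other
// value tells CometBFT that this transaction failed validation.
const (
	codeOK         uint32 = 0
	codeTxTooSmall uint32 = 1
	codespace             = "myapp" // namespace disambiguating our codes
)

// MyApp is a hypothetical application; BaseApplication provides no-op
// defaults for the remaining ABCI methods.
type MyApp struct {
	abcitypes.BaseApplication
}

// CheckTx rejects transactions shorter than 8 bytes with a non-zero Code,
// keeping them out of the mempool. The error return stays nil: a non-zero
// Code is an application-level rejection, not an unrecoverable failure.
func (app *MyApp) CheckTx(_ context.Context, req *abcitypes.CheckTxRequest) (*abcitypes.CheckTxResponse, error) {
	if len(req.Tx) < 8 {
		return &abcitypes.CheckTxResponse{
			Code:      codeTxTooSmall,
			Codespace: codespace,
			Log:       "tx must be at least 8 bytes",
		}, nil
	}
	return &abcitypes.CheckTxResponse{Code: codeOK}, nil
}
```

Note that returning a non-zero `Code` is distinct from returning a non-nil `error`, which is reserved for unrecoverable conditions as discussed below.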
-Methods `Echo`, `Info`, `Commit` and `InitChain` do not return errors. -An error in any of these methods represents a critical issue that CometBFT -has no reasonable way to handle. If there is an error in one -of these methods, the Application must crash to ensure that the error is safely -handled by an operator. - Method `FinalizeBlock` is a special case. It contains a number of `Code` and `Codespace` fields as part of type `ExecTxResult`. Each of these codes reports errors related to the transaction it is attached to. @@ -414,18 +426,36 @@ The handling of non-zero response codes by CometBFT is described below. ### `CheckTx` -When CometBFT receives a `ResponseCheckTx` with a non-zero `Code`, the associated -transaction will not be added to CometBFT's mempool or it will be removed if +When CometBFT receives a `CheckTxResponse` with a non-zero `Code`, the associated +transaction will not be added to CometBFT's mempool, or it will be removed if it is already included. ### `ExecTxResult` (as part of `FinalizeBlock`) The `ExecTxResult` type delivers transaction results from the Application to CometBFT. When -CometBFT receives a `ResponseFinalizeBlock` containing an `ExecTxResult` with a non-zero `Code`, +CometBFT receives a `FinalizeBlockResponse` containing an `ExecTxResult` with a non-zero `Code`, the response code is logged. Past `Code` values can be queried by clients. As the transaction was part of a decided block, the `Code` does not influence consensus. ### `Query` -When CometBFT receives a `ResponseQuery` with a non-zero `Code`, this code is +When CometBFT receives a `QueryResponse` with a non-zero `Code`, this code is returned directly to the client that initiated the query. + +### ABCI methods' `error` return + +The `error` return, the second value returned by an ABCI method, e.g., `(*abcitypes.[Method_Name]Response, error)`, is used in situations +involving unrecoverable errors. + +All ABCI methods include an `error` return value. An error returned in any of these methods represents a critical issue that CometBFT +has no reasonable way to handle. Therefore, if there is an error in one of these methods, CometBFT will crash +to ensure that an operator safely handles the error: the application must be terminated to avoid any further unintended consequences. + +As a result, upon detecting a non-recoverable error condition, the application has the choice of either +(a) crashing itself (e.g., a `panic` in the code detecting the unrecoverable condition), or +(b) returning an error as the second return value in the ABCI method that detects the error condition, +knowing that CometBFT will panic upon receiving the error. +The choice between (a) and (b) is up to the application -- both are equivalent -- and depends on +whether an application (e.g. running in a different process than CometBFT) +prefers CometBFT to crash first. + diff --git a/spec/abci/abci++_client_server.md b/spec/abci/abci++_client_server.md index b6b11a18bb9..e14f5301a01 100644 --- a/spec/abci/abci++_client_server.md +++ b/spec/abci/abci++_client_server.md @@ -8,7 +8,7 @@ title: Client and Server This section is for those looking to implement their own ABCI Server, perhaps in a new programming language.
-You are expected to have read all previous sections of ABCI++ specification, namely +You are expected to have read all previous sections of the ABCI specification, namely [Basic Concepts](./abci%2B%2B_basic_concepts.md), [Methods](./abci%2B%2B_methods.md), [Application Requirements](./abci%2B%2B_app_requirements.md), and @@ -17,39 +17,26 @@ You are expected to have read all previous sections of ABCI++ specification, nam ## Message Protocol and Synchrony The message protocol consists of pairs of requests and responses defined in the -[protobuf file](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto). +[protobuf file](https://github.com/cometbft/cometbft/blob/main/proto/cometbft/abci/v1/types.proto). Some messages have no fields, while others may include byte-arrays, strings, integers, or custom protobuf types. For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview). - ## Server Implementations To use ABCI in your programming language of choice, there must be an ABCI -server in that language. CometBFT supports four implementations of the ABCI server: +server in that language. There are a few implementations of the ABCI server: -- in CometBFT's repository: +- In the CometBFT repository: - In-process - - ABCI-socket - - GRPC + - [ABCI-socket server](../../abci/server/socket_server.go) + - [GRPC server](../../abci/server/grpc_server.go) - [tendermint-rs](https://github.com/informalsystems/tendermint-rs) - [tower-abci](https://github.com/penumbra-zone/tower-abci) -The implementations in CometBFT's repository can be tested using `abci-cli` by setting -the `--abci` flag appropriately. - -See examples, in various stages of maintenance, in -[Go](https://github.com/cometbft/cometbft/tree/master/abci/server), -[JavaScript](https://github.com/tendermint/js-abci), -[C++](https://github.com/mdyring/cpp-tmsp), and -[Java](https://github.com/jTendermint/jabci). +The implementations in the CometBFT repository can be tested using the [ABCI-CLI](../../docs/guides/app-dev/abci-cli.md) tool.
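To make the server wiring concrete, here is a hedged sketch of serving an application over the ABCI socket server linked above. It assumes the `NewSocketServer` constructor in `github.com/cometbft/cometbft/abci/server` and reuses the hypothetical `MyApp` from the earlier `CheckTx` sketch.

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	abciserver "github.com/cometbft/cometbft/abci/server"
)

func main() {
	app := &MyApp{} // the hypothetical application from the CheckTx sketch

	// Serve ABCI over a Unix socket; a TCP address such as
	// "tcp://127.0.0.1:26658" works as well.
	srv := abciserver.NewSocketServer("unix://example.sock", app)
	if err := srv.Start(); err != nil {
		log.Fatalf("starting ABCI server: %v", err)
	}
	defer srv.Stop() // best-effort shutdown

	// Block until interrupted so CometBFT, running as a separate process,
	// can connect and drive the request/response message protocol.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	<-sigCh
}
```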
### In Process diff --git a/spec/abci/abci++_comet_expected_behavior.md b/spec/abci/abci++_comet_expected_behavior.md index d7f15e1cb65..ff6310d69c9 100644 --- a/spec/abci/abci++_comet_expected_behavior.md +++ b/spec/abci/abci++_comet_expected_behavior.md @@ -17,10 +17,10 @@ what will happen during a block height _h_ in these frequent, benign conditions: * Consensus will decide in round 0, for height _h_; * `PrepareProposal` will be called exactly once at the proposer process of round 0, height _h_; * `ProcessProposal` will be called exactly once at all processes, and - will return _accept_ in its `Response*`; + will return _accept_ in its `ProcessProposalResponse`; * `ExtendVote` will be called exactly once at all processes; * `VerifyVoteExtension` will be called exactly _n-1_ times at each validator process, where _n_ is - the number of validators, and will always return _accept_ in its `Response*`; + the number of validators, and will always return _accept_ in its `VerifyVoteExtensionResponse`; * `FinalizeBlock` will be called exactly once at all processes, conveying the same prepared block that all calls to `PrepareProposal` and `ProcessProposal` had previously reported for height _h_; and @@ -28,29 +28,30 @@ what will happen during a block height _h_ in these frequent, benign conditions: However, the Application logic must be ready to cope with any possible run of the consensus algorithm for a given height, including bad periods (byzantine proposers, network being asynchronous). -In these cases, the sequence of calls to ABCI++ methods may not be so straightforward, but +In these cases, the sequence of calls to ABCI methods may not be so straightforward, but the Application should still be able to handle them, e.g., without crashing. The purpose of this section is to define what these sequences look like in a precise way. As mentioned in the [Basic Concepts](./abci%2B%2B_basic_concepts.md) section, CometBFT -acts as a client of ABCI++ and the Application acts as a server. Thus, it is up to CometBFT to -determine when and in which order the different ABCI++ methods will be called. A well-written +acts as a client of ABCI and the Application acts as a server. Thus, it is up to CometBFT to +determine when and in which order the different ABCI methods will be called. A well-written Application design should consider _any_ of these possible sequences. The following grammar, written in case-sensitive Augmented Backus–Naur form (ABNF, specified in [IETF rfc7405](https://datatracker.ietf.org/doc/html/rfc7405)), specifies all possible -sequences of calls to ABCI++, taken by a **correct process**, across all heights from the genesis block, +sequences of calls to ABCI, taken by a **correct process**, across all heights from the genesis block, including recovery runs, from the point of view of the Application. ```abnf start = clean-start / recovery -clean-start = init-chain [state-sync] consensus-exec +clean-start = ( app-handshake / state-sync ) consensus-exec +app-handshake = info init-chain state-sync = *state-sync-attempt success-sync info state-sync-attempt = offer-snapshot *apply-chunk success-sync = offer-snapshot 1*apply-chunk -recovery = info consensus-exec +recovery = info [init-chain] consensus-exec consensus-exec = (inf)consensus-height consensus-height = *consensus-round finalize-block commit @@ -88,7 +89,7 @@ by the grammar above. Other reasons depend on the method in question: Finally, method `Info` is a special case. The method's purpose is three-fold, it can be used 1. 
as part of handling an RPC call from an external client, -2. as a handshake between CometBFT and the Application upon recovery to check whether any blocks need +2. as a handshake between CometBFT and the Application to check whether any blocks need to be replayed, and 3. at the end of _state-sync_ to verify that the correct state has been reached. @@ -104,12 +105,19 @@ Let us now examine the grammar line by line, providing further details. >start = clean-start / recovery >``` -* If the process is starting from scratch, CometBFT first calls `InitChain`, then it may optionally - start a _state-sync_ mechanism to catch up with other processes. Finally, it enters normal - consensus execution. +* If the process is starting from scratch, then, depending on whether _state-sync_ is enabled, CometBFT either engages in a handshake +with the Application or starts the _state-sync_ mechanism to catch up with other processes. Finally, it enters +normal consensus execution. >```abnf ->clean-start = init-chain [state-sync] consensus-exec +>clean-start = ( app-handshake / state-sync ) consensus-exec +>``` + +* If _state-sync_ is disabled, CometBFT calls the `Info` method and then, +since the process is starting from scratch and the Application has no state, CometBFT calls `InitChain`. + +>```abnf +>app-handshake = info init-chain >``` * In _state-sync_ mode, CometBFT makes one or more attempts at synchronizing the Application's state. @@ -118,10 +126,10 @@ Let us now examine the grammar line by line, providing further details. to provide the Application with all the snapshots needed, in order to reconstruct the state locally. A successful attempt must provide at least one chunk via `ApplySnapshotChunk`. At the end of a successful attempt, CometBFT calls `Info` to make sure the reconstructed state's - _AppHash_ matches the one in the block header at the corresponding height. Note that the state - of the application does not contain vote extensions itself. The application can rely on - [CometBFT to ensure](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history) - the node has all the relevant data to proceed with the execution beyond this point. + _AppHash_ matches the one in the block header at the corresponding height. Note that the state + of the application does not contain vote extensions itself. The application can rely on + [CometBFT to ensure](../../docs/references/rfc/rfc-100-abci-vote-extension-propag.md#base-implementation-persist-and-propagate-extended-commit-history) + the node has all the relevant data to proceed with the execution beyond this point. >```abnf >state-sync = *state-sync-attempt success-sync info @@ -129,12 +137,11 @@ Let us now examine the grammar line by line, providing further details. >success-sync = offer-snapshot 1*apply-chunk >``` -* In recovery mode, CometBFT first calls `Info` to know from which height it needs to replay decisions - to the Application. After this, CometBFT enters consensus execution, first in replay mode and then - in normal mode. +* In recovery mode, CometBFT first calls `Info` to know from which height it needs to replay decisions to the Application. If the Application +did not store any state, CometBFT calls `InitChain`. After this, CometBFT enters consensus execution, first in replay mode, if there are blocks to replay, and then in normal mode. >```abnf ->recovery = info consensus-exec +>recovery = info [init-chain] consensus-exec >``` * The non-terminal `consensus-exec` is a key point in this grammar.
It is an infinite sequence of @@ -150,6 +157,9 @@ Let us now examine the grammar line by line, providing further details. `FinalizeBlock`, followed by a call to `Commit`. In each round, the sequence of method calls depends on whether the local process is the proposer or not. Note that, if a height contains zero rounds, this means the process is replaying an already decided value (catch-up mode). + When calling `FinalizeBlock` with a block, the consensus algorithm run by CometBFT guarantees + that at least one non-byzantine validator has run `ProcessProposal` on that block. + >```abnf >consensus-height = *consensus-round finalize-block commit @@ -157,7 +167,7 @@ Let us now examine the grammar line by line, providing further details. >``` * For every round, if the local process is the proposer of the current round, CometBFT calls `PrepareProposal`. - A successful execution of `PrepareProposal` implies in a proposal block being (i)signed and (ii)stored + A successful execution of `PrepareProposal` results in a proposal block being (i) signed and (ii) stored (e.g., in stable storage). A crash during this step will direct how the node proceeds the next time it is executed, for the same round, after restarted. @@ -165,7 +175,7 @@ Let us now examine the grammar line by line, providing further details. Following a crash between (i) and (ii) and in (the likely) case `PrepareProposal` produces a different block, the signing of this block will fail, which means that the new block will not be stored or broadcast. If the crash happened after (ii), then signing fails but nothing happens to the stored block. - + If a block was stored, it is sent to all validators, including the proposer. Receiving a proposal block triggers `ProcessProposal` with such a block. @@ -180,7 +190,10 @@ Let us now examine the grammar line by line, providing further details. >``` * Also for every round, if the local process is _not_ the proposer of the current round, CometBFT - will call `ProcessProposal` at most once. At most one call to `ExtendVote` may occur only after + will call `ProcessProposal` at most once. + Under certain conditions, CometBFT may not call `ProcessProposal` in a round; + see [this section](./abci++_example_scenarios.md#scenario-3) for an example. + At most one call to `ExtendVote` may occur only after `ProcessProposal` is called. A number of calls to `VerifyVoteExtension` can occur in any order with respect to `ProcessProposal` and `ExtendVote` throughout the round. The reasons are the same as above, namely, the process running slightly late in the current round, or votes from future @@ -190,7 +203,7 @@ Let us now examine the grammar line by line, providing further details. >non-proposer = *got-vote [process-proposal] [extend] >``` -* Finally, the grammar describes all its terminal symbols, which denote the different ABCI++ method calls that +* Finally, the grammar describes all its terminal symbols, which denote the different ABCI method calls that may appear in a sequence. >```abnf @@ -206,12 +219,12 @@ Let us now examine the grammar line by line, providing further details. >commit = %s"" >``` -## Adapting existing Applications that use ABCI +## Adapting existing Applications that use legacy ABCI -In some cases, an existing Application using the legacy ABCI may need to be adapted to work with ABCI++ -with as minimal changes as possible. In this case, of course, ABCI++ will not provide any advantage with respect -to the existing implementation, but will keep the same guarantees already provided by ABCI. 
-Here is how ABCI++ methods should be implemented. +In some cases, an existing Application using the legacy ABCI may need to be adapted to work with a newer version of ABCI +with as few changes as possible. In this case, of course, new ABCI versions will not provide any advantage with respect +to the legacy ABCI implementation, but will keep the same guarantees. +Here is how ABCI methods should be implemented. First of all, all the methods that did not change from ABCI 0.17.0 to ABCI 2.0, namely `Echo`, `Flush`, `Info`, `InitChain`, `Query`, `CheckTx`, `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk`, do not need @@ -219,56 +232,62 @@ to undergo any changes in their implementation. As for the new methods: +Introduced in ABCI 1.0: + * `PrepareProposal` must create a list of [transactions](./abci++_methods.md#prepareproposal) - by copying over the transaction list passed in `RequestPrepareProposal.txs`, in the same order. - + by copying over the transaction list passed in `PrepareProposalRequest.txs`, in the same order. The Application must check whether the size of all transactions exceeds the byte limit - (`RequestPrepareProposal.max_tx_bytes`). If so, the Application must remove transactions at the + (`PrepareProposalRequest.max_tx_bytes`). If so, the Application must remove transactions at the end of the list until the total byte size is at or below the limit. -* `ProcessProposal` must set `ResponseProcessProposal.status` to _accept_ and return. -* `ExtendVote` is to set `ResponseExtendVote.extension` to an empty byte array and return. -* `VerifyVoteExtension` must set `ResponseVerifyVoteExtension.accept` to _true_ if the extension is +* `ProcessProposal` must set `ProcessProposalResponse.status` to _accept_ and return. + +Introduced in ABCI 2.0: + +* `ExtendVote` is to set `ExtendVoteResponse.extension` to an empty byte array and return. +* `VerifyVoteExtension` must set `VerifyVoteExtensionResponse.accept` to _true_ if the extension is an empty byte array and _false_ otherwise, then return. * `FinalizeBlock` is to coalesce the implementation of methods `BeginBlock`, `DeliverTx`, and `EndBlock`. Legacy applications looking to reuse old code that implemented `DeliverTx` should wrap the legacy `DeliverTx` logic in a loop that executes one transaction iteration per - transaction in `RequestFinalizeBlock.tx`. + transaction in `FinalizeBlockRequest.txs`. -Finally, `Commit`, which is kept in ABCI++, no longer returns the `AppHash`. It is now up to +Finally, `Commit`, which is kept in ABCI 2.0, no longer returns the `AppHash`. It is now up to `FinalizeBlock` to do so. Thus, a slight refactoring of the old `Commit` implementation will be needed to move the return of `AppHash` to `FinalizeBlock`. ## Accommodating for vote extensions In a manner transparent to the application, CometBFT ensures the node is provided with all -the data it needs to participate in consensus. +the data it needs to participate in consensus. In the case of recovering from a crash, or joining the network via state sync, CometBFT will make -sure the node acquires the necessary vote extensions before switching to consensus. +sure the node acquires the necessary vote extensions before switching to consensus.
-If a node is already in consensus but falls behind, during catch-up, CometBFT will provide the node with +If a node is already in consensus but falls behind, during catch-up, CometBFT will provide the node with vote extensions from past heights by retrieving the extensions within `ExtendedCommit` for old heights that it had previously stored. -We realize this is sub-optimal due to the increase in storage needed to store the extensions, we are +We realize this is sub-optimal due to the increase in storage needed to store the extensions; we are working on an optimization of this implementation which should alleviate this concern. However, the application can use the existing `retain_height` parameter to decide how much history it wants to keep, just as is done with the block history. The network-wide implications of the usage of `retain_height` stay the same. -The decision to store -historical commits and potential optimizations, are discussed in detail in [RFC-100](./../../docs/rfc/rfc-100-abci-vote-extension-propag.md#current-limitations-and-possible-implementations) +The decision to store +historical commits, as well as potential optimizations, is discussed in detail in [RFC-100](../../docs/references/rfc/rfc-100-abci-vote-extension-propag.md#current-limitations-and-possible-implementations). ## Handling upgrades to ABCI 2.0 If applications upgrade to ABCI 2.0, CometBFT internally ensures that the [application setup](./abci%2B%2B_app_requirements.md#application-configuration-required-to-switch-to-abci-20) is reflected in its operation. -CometBFT retrieves from the application configuration the value of `VoteExtensionsEnableHeight`( *he*,), +CometBFT retrieves from the application configuration the value of `VoteExtensionsEnableHeight` (_he_), the height at which vote extensions are required for consensus to proceed, and uses it to determine the data it stores and data it sends to a peer that is catching up. -Namely, upon saving the block for a given height *h* in the block store at decision time -* if *h ≥ he*, the corresponding extended commit that was used to decide locally is saved as well -* if *h < he*, there are no changes to the data saved +Namely, upon saving the block for a given height _h_ in the block store at decision time: + +* if _h ≥ he_, the corresponding extended commit that was used to decide locally is saved as well +* if _h < he_, there are no changes to the data saved + +In the catch-up mechanism, when a node _f_ realizes that another peer is at height _hp_, which is more than 2 heights behind height _hf_, -In the catch-up mechanism, when a node *f* realizes that another peer is at height *hp*, which is more than 2 heights behind, -* if *hp ≥ he*, *f* uses the extended commit to +* if _hp ≥ he_, _f_ uses the extended commit to reconstruct the precommit votes with their corresponding extensions -* if *hp < he*, *f* uses the canonical commit to reconstruct the precommit votes, +* if _hp < he_, _f_ uses the canonical commit to reconstruct the precommit votes, as done for ABCI 1.0 and earlier. diff --git a/spec/abci/abci++_example_scenarios.md b/spec/abci/abci++_example_scenarios.md index c903be21bba..c92431cc20a 100644 --- a/spec/abci/abci++_example_scenarios.md +++ b/spec/abci/abci++_example_scenarios.md @@ -1,6 +1,6 @@ --- order: 6 -title: ABCI++ extra +title: ABCI extra --- # Introduction @@ -10,8 +10,8 @@ However, the grammar specified in the same section is more general and covers mo that an Application designer needs to account for.
In this section, we give more information about these possible scenarios. We focus on methods -introduced by ABCI++: `PrepareProposal` and `ProcessProposal`. Specifically, we concentrate -on the part of the grammar presented below. +introduced by ABCI 1.0: `PrepareProposal` and `ProcessProposal`. Specifically, we concentrate +on the part of the grammar presented below. ```abnf consensus-height = *consensus-round finalize-block commit @@ -44,11 +44,12 @@ application needs to account for any number of rounds, where each round can exhi behaviours. Recall that the application is unaware of the internals of consensus and thus of the rounds. # Possible scenarios -The unknown number of rounds we can have when following the consensus algorithm yields a vast number of -scenarios we can expect. Listing them all is unfeasible. However, here we give several of them and draw the + +The unknown number of rounds we can have when following the consensus algorithm yields a vast number of +scenarios we can expect. Listing them all is infeasible. However, here we give several of them and draw the main conclusions. Specifically, we will show that before block $X$ is decided: - -1. On a correct node, `PrepareProposal` may be called multiple times and for different blocks ([**Scenario 1**](#scenario-1)). + +1. On a correct node, `PrepareProposal` may be called multiple times and for different blocks ([**Scenario 1**](#scenario-1)). 1. On a correct node, `ProcessProposal` may be called multiple times and for different blocks ([**Scenario 2**](#scenario-2)). 1. On a correct node, `PrepareProposal` and `ProcessProposal` for block $X$ may not be called ([**Scenario 3**](#scenario-3)). 1. On a correct node, `PrepareProposal` and `ProcessProposal` may not be called at all ([**Scenario 4**](#scenario-4)). @@ -56,12 +57,12 @@ main conclusions. Specifically, we will show that before block $X$ is decided: ## Basic information -Each scenario is presented from the perspective of a process $p$. More precisely, we show what happens in -each round's $step$ of the [Tendermint consensus algorithm](https://arxiv.org/pdf/1807.04938.pdf). While in -practice the consensus algorithm works with respect to voting power of the validators, in this document -we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The legend is below: +Each scenario is presented from the perspective of a process $p$. More precisely, we show what happens in +each round's $step$ of the [Tendermint consensus algorithm](https://arxiv.org/pdf/1807.04938.pdf). While in +practice the consensus algorithm works with respect to voting power of the validators, in this document +we refer to the number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The legend is below: -### Round X: +### Round X 1. **Propose:** Describes what happens while $step_p = propose$. 1. **Prevote:** Describes what happens while $step_p = prevote$. @@ -71,60 +72,60 @@ we refer to number of processes (e.g., $n$, $f+1$, $2f+1$) for simplicity. The l $p$ calls `ProcessProposal` many times with different values. -### Round 0: - -1. **Propose:** The proposer of this round is a Byzantine process, and it chooses not to send the proposal -message. Therefore, $p$'s $timeoutPropose$ expires, it sends $Prevote$ for $nil$, and it does not call -`ProcessProposal`. All correct processes do the same. -1. **Prevote:** $p$ eventually receives $2f+1$ $Prevote$ messages for $nil$ and starts $timeoutPrevote$. -When $timeoutPrevote$ expires it sends $Precommit$ for $nil$. -1.
**Precommit:** $p$ eventually receives $2f+1$ $Precommit$ messages for $nil$ and starts $timeoutPrecommit$. -When it expires, it moves to the next round. - -### Round 1: - -1. **Propose:** A correct process is the proposer in this round. Its $validValue$ is $nil$, and it is free -to generate and propose a new block $Y$. Process $p$ receives this proposal in time, calls `ProcessProposal` -for block $Y$, and broadcasts a $Prevote$ message for it. -1. **Prevote:** Due to network asynchrony less than $2f+1$ processes send $Prevote$ for this block. -Therefore, $p$ does not update $validValue$ in this round. -1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this -block and send $Precommit$ message. As a consequence, $p$ does not decide on $Y$. - -### Round 2: - -1. **Propose:** Same as in [**Round 1**](#round-1), just another correct process is the proposer, and it -proposes another value $Z$. Process $p$ receives the proposal on time, calls `ProcessProposal` for new block -$Z$, and broadcasts a $Prevote$ message for it. +### Round 0 + +1. **Propose:** The proposer of this round is a Byzantine process, and it chooses not to send the proposal +message. Therefore, $p$'s $timeoutPropose$ expires, it sends $Prevote$ for $nil$, and it does not call +`ProcessProposal`. All correct processes do the same. +1. **Prevote:** $p$ eventually receives $2f+1$ $Prevote$ messages for $nil$ and starts $timeoutPrevote$. +When $timeoutPrevote$ expires, it sends $Precommit$ for $nil$. +1. **Precommit:** $p$ eventually receives $2f+1$ $Precommit$ messages for $nil$ and starts $timeoutPrecommit$. +When it expires, it moves to the next round. + +### Round 1 + +1. **Propose:** A correct process is the proposer in this round. Its $validValue$ is $nil$, and it is free +to generate and propose a new block $Y$. Process $p$ receives this proposal in time, calls `ProcessProposal` +for block $Y$, and broadcasts a $Prevote$ message for it. +1. **Prevote:** Due to network asynchrony, less than $2f+1$ processes send $Prevote$ for this block. +Therefore, $p$ does not update $validValue$ in this round. +1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this +block and send a $Precommit$ message. As a consequence, $p$ does not decide on $Y$. + +### Round 2 + +1. **Propose:** Same as in [**Round 1**](#round-1), just another correct process is the proposer, and it +proposes another value $Z$. Process $p$ receives the proposal on time, calls `ProcessProposal` for new block +$Z$, and broadcasts a $Prevote$ message for it. 1. **Prevote:** Same as in [**Round 1**](#round-1). 1. **Precommit:** Same as in [**Round 1**](#round-1). -Rounds like these can continue until we have a round in which process $p$ updates its $validValue$ or until -we reach round $r$ where process $p$ decides on a block. After that, it will not call `ProcessProposal` -anymore for this height. +Rounds like these can continue until we have a round in which process $p$ updates its $validValue$ or until +we reach round $r$ where process $p$ decides on a block. After that, it will not call `ProcessProposal` +anymore for this height. -## Scenario 2 +## Scenario 2 $p$ calls `PrepareProposal` many times with different values. -### Round 0: +### Round 0 -1. **Propose:** Process $p$ is the proposer in this round. Its $validValue$ is $nil$, and it is free to -generate and propose new block $Y$. Before proposing, it calls `PrepareProposal` for $Y$.
After that, it -broadcasts the proposal, delivers it to itself, calls `ProcessProposal` and broadcasts $Prevote$ for it. -1. **Prevote:** Due to network asynchrony less than $2f+1$ processes receive the proposal on time and send -$Prevote$ for it. Therefore, $p$ does not update $validValue$ in this round. -1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this -block and send non-$nil$ $Precommit$ message. As a consequence, $p$ does not decide on $Y$. +1. **Propose:** Process $p$ is the proposer in this round. Its $validValue$ is $nil$, and it is free to +generate and propose new block $Y$. Before proposing, it calls `PrepareProposal` for $Y$. After that, it +broadcasts the proposal, delivers it to itself, calls `ProcessProposal` and broadcasts $Prevote$ for it. +1. **Prevote:** Due to network asynchrony, less than $2f+1$ processes receive the proposal on time and send +$Prevote$ for it. Therefore, $p$ does not update $validValue$ in this round. +1. **Precommit:** Since less than $2f+1$ processes send $Prevote$, no correct process will lock on this +block and send a non-$nil$ $Precommit$ message. As a consequence, $p$ does not decide on $Y$. -After this round, we can have multiple rounds like those in [Scenario 1](#scenario-1). The important thing -is that process $p$ should not update its $validValue$. Consequently, when process $p$ reaches the round -when it is again the proposer, it will ask the mempool for the new block again, and the mempool may return a -different block $Z$, and we can have the same round as [Round 0](#round-0-1) just for a different block. As -a result, process $p$ calls `PrepareProposal` again but for a different value. When it reaches round $r$ -some process will propose block $X$ and if $p$ receives $2f+1$ $Precommit$ messages, it will decide on this -value. +After this round, we can have multiple rounds like those in [Scenario 1](#scenario-1). The important thing +is that process $p$ should not update its $validValue$. Consequently, when process $p$ reaches the round +when it is again the proposer, it will ask the mempool for a new block again, and the mempool may return a +different block $Z$, and we can have the same round as [Round 0](#round-0-1), just for a different block. As +a result, process $p$ calls `PrepareProposal` again but for a different value. When it reaches round $r$, +some process will propose block $X$ and, if $p$ receives $2f+1$ $Precommit$ messages, it will decide on this +value. ## Scenario 3 @@ -140,24 +141,24 @@ so it did not call `ProcessProposal`, and * if $p$ was the proposer it proposed some other value $\neq X$. -### Round $r$: +### Round $r$ -1. **Propose:** A correct process is the proposer in this round, and it proposes block $X$. +1. **Propose:** A correct process is the proposer in this round, and it proposes block $X$. Due to asynchrony, the proposal message arrives to process $p$ after its $timeoutPropose$ expires and it sends $Prevote$ for $nil$. Consequently, process $p$ does not call `ProcessProposal` for block $X$. However, the same proposal arrives at other processes before their $timeoutPropose$ expires, and they send $Prevote$ for this proposal. -1.
**Prevote:** Process $p$ receives $2f+1$ $Prevote$ messages for proposal $X$, updates correspondingly its -$validValue$ and $lockedValue$ and sends $Precommit$ message. All correct processes do the same. -1. **Precommit:** Finally, process $p$ receives $2f+1$ $Precommit$ messages, and decides on block $X$. +1. **Prevote:** Process $p$ receives $2f+1$ $Prevote$ messages for proposal $X$, updates its +$validValue$ and $lockedValue$ accordingly, and sends a $Precommit$ message. All correct processes do the same. +1. **Precommit:** Finally, process $p$ receives $2f+1$ $Precommit$ messages, and decides on block $X$. ## Scenario 4 -[Scenario 3](#scenario-3) can be translated into a scenario where $p$ does not call `PrepareProposal` and -`ProcessProposal` at all. For this, it is necessary that process $p$ is not the proposer in any of the -rounds $0 <= r' <= r$ and that due to network asynchrony or Byzantine proposer, it does not receive the -proposal before $timeoutPropose$ expires. As a result, it will enter round $r$ without calling -`PrepareProposal` and `ProcessProposal` before it, and as shown in Round $r$ of [Scenario 3](#scenario-3) it -will decide in this round. Again without calling any of these two calls. +[Scenario 3](#scenario-3) can be translated into a scenario where $p$ does not call `PrepareProposal` and +`ProcessProposal` at all. For this, it is necessary that process $p$ is not the proposer in any of the +rounds $0 <= r' <= r$ and that, due to network asynchrony or a Byzantine proposer, it does not receive the +proposal before $timeoutPropose$ expires. As a result, it will enter round $r$ without calling +`PrepareProposal` and `ProcessProposal` before it, and as shown in Round $r$ of [Scenario 3](#scenario-3) it +will decide in this round, again without making either of these two calls. diff --git a/spec/abci/abci++_methods.md b/spec/abci/abci++_methods.md index f1ed284a92d..ff1d34a5c85 100644 --- a/spec/abci/abci++_methods.md +++ b/spec/abci/abci++_methods.md @@ -14,7 +14,7 @@ title: Methods * **Response**: * `Message (string)`: The input string * **Usage**: - * Echo a string to test an abci client/server implementation + * Echo a string to test an ABCI client/server implementation ### Flush @@ -29,22 +29,24 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |---------------|--------|------------------------------------------|--------------| + | Name | Type | Description | Field Number | + |---------------|--------|----------------------------------------|--------------| | version | string | The CometBFT software semantic version | 1 | - | block_version | uint64 | The CometBFT Block Protocol version | 2 | - | p2p_version | uint64 | The CometBFT P2P Protocol version | 3 | + | block_version | uint64 | The CometBFT Block version | 2 | + | p2p_version | uint64 | The CometBFT P2P version | 3 | | abci_version | string | The CometBFT ABCI semantic version | 4 | * **Response**: - | Name | Type | Description | Field Number | - |---------------------|--------|-----------------------------------------------------|--------------| - | data | string | Some arbitrary information | 1 | - | version | string | The application software semantic version | 2 | - | app_version | uint64 | The application protocol version | 3 | - | last_block_height | int64 | Latest height for which the app persisted its state | 4 | - | last_block_app_hash | bytes | Latest AppHash returned by `FinalizeBlock` | 5 | + | Name | Type | Description | Field Number | Deterministic | + |---------------------|--------|---------------------------------------------------------------------------|--------------|---------------| + | data | string | Some arbitrary information | 1 | N/A | + | version | string | The application software semantic version | 2 | N/A | + | app_version | uint64 | The application
version | 3 | N/A | + | last_block_height | int64 | Latest height for which the app persisted its state | 4 | N/A | + | last_block_app_hash | bytes | Latest AppHash returned by `FinalizeBlock` | 5 | N/A | + | lane_priorities | map | Map of lane identifiers and their corresponding priorities | 6 | N/A | + | default_lane | uint32 | The identifier of the default lane | 7 | N/A | * **Usage**: * Return information about the application state. @@ -53,6 +55,11 @@ title: Methods * The returned `app_version` will be included in the Header of every block. * CometBFT expects `last_block_app_hash` and `last_block_height` to be updated and persisted during `Commit`. + * The application does not have to define `lane_priorities`. In that case, CometBFT will assign all transactions to one lane. + * `lane_priorities` is empty if and only if `default_lane` is empty. + * `default_lane` has to be one of the identifiers defined in `lane_priorities`. + * The lowest priority a lane can have is `1`. The value `0` is reserved for when applications do not assign lanes (empty `lane_id` in `CheckTxResponse`). + > Note: Semantic version is a reference to [semantic versioning](https://semver.org/). Semantic versions in info will be displayed as X.X.x. @@ -71,48 +78,48 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |------------------|----------------------------------------------|--------------------------------------------------|--------------| - | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | - | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | - | app_hash | bytes | Initial application hash. | 3 | + | Name | Type | Description | Field Number | Deterministic | + |------------------|----------------------------------------------|--------------------------------------------------|--------------|---------------| + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | Yes | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | Yes | + | app_hash | bytes | Initial application hash. | 3 | Yes | * **Usage**: * Called once upon genesis. - * If `ResponseInitChain.Validators` is empty, the initial validator set will be the `RequestInitChain.Validators` - * If `ResponseInitChain.Validators` is not empty, it will be the initial - validator set (regardless of what is in `RequestInitChain.Validators`). + * If `InitChainResponse.Validators` is empty, the initial validator set will be the `InitChainRequest.Validators` + * If `InitChainResponse.Validators` is not empty, it will be the initial + validator set (regardless of what is in `InitChainRequest.Validators`). * This allows the app to decide if it wants to accept the initial validator set proposed by CometBFT (ie. in the genesis file), or if it wants to use a different one (perhaps computed based on some application specific information in the genesis file). - * Both `RequestInitChain.Validators` and `ResponseInitChain.Validators` are [ValidatorUpdate](#validatorupdate) structs. + * Both `InitChainRequest.Validators` and `InitChainResponse.Validators` are [ValidatorUpdate](#validatorupdate) structs. So, technically, they both are _updating_ the set of validators from the empty set.
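As a sketch of the `InitChain` rules above, the handler below accepts the genesis validator set by returning an empty `Validators` slice. It extends the hypothetical `MyApp` from the earlier sketches and assumes the same v1-style Go types; `initialStateHash` is an assumed helper, not part of the ABCI API.

```go
// InitChain accepts the validator set proposed in the genesis file by
// returning an empty Validators slice; a non-empty slice here would
// replace InitChainRequest.Validators entirely.
func (app *MyApp) InitChain(_ context.Context, req *abcitypes.InitChainRequest) (*abcitypes.InitChainResponse, error) {
	// initialStateHash is a hypothetical helper deriving the initial
	// application hash from the app_state section of the genesis file.
	appHash := app.initialStateHash(req.AppStateBytes)

	return &abcitypes.InitChainResponse{
		Validators: nil, // empty: keep the genesis validator set as-is
		AppHash:    appHash,
	}, nil
}
```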
### Query * **Request**: - | Name | Type | Description | Field Number | - |--------|--------|-------------|--------------| - | data | bytes | Request parameters for the application to interpret analogously to a [URI query component](https://www.rfc-editor.org/rfc/rfc3986#section-3.4). Can be used with or in lieu of `path`. | 1 | - | path | string | A request path for the application to interpret analogously to a [URI path component](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) in e.g. routing. Can be used with or in lieu of `data`. Applications MUST interpret "/store" or any path starting with "/store/" as a query by key on the underlying store, in which case a key SHOULD be specified in `data`. Applications SHOULD allow queries over specific types like `/accounts/...` or `/votes/...`. | 2 | - | height | int64 | The block height against which to query (default=0 returns data for the latest committed block). Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1. | 3 | - | prove | bool | Return Merkle proof with response if possible. | 4 | + | Name | Type | Description | Field Number | + |--------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | data | bytes | Request parameters for the application to interpret analogously to a [URI query component](https://www.rfc-editor.org/rfc/rfc3986#section-3.4). Can be used with or in lieu of `path`. | 1 | + | path | string | A request path for the application to interpret analogously to a [URI path component](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) in e.g. routing. Can be used with or in lieu of `data`. Applications MUST interpret "/store" or any path starting with "/store/" as a query by key on the underlying store, in which case a key SHOULD be specified in `data`. Applications SHOULD allow queries over specific types like `/accounts/...` or `/votes/...`. | 2 | + | height | int64 | The block height against which to query (default=0 returns data for the latest committed block). Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1. | 3 | + | prove | bool | Return Merkle proof with response if possible. | 4 | * **Response**: - | Name | Type | Description | Field Number | - |-----------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | code | uint32 | Response code. | 1 | - | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | - | info | string | Additional information. **May be non-deterministic.** | 4 | - | index | int64 | The index of the key in the tree. | 5 | - | key | bytes | The key of the matching data. | 6 | - | value | bytes | The value of the matching data. 
| 7 | - | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height. | 8 | - | height | int64 | The block height from which data was derived. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 9 | - | codespace | string | Namespace for the `code`. | 10 | + | Name | Type | Description | Field Number | Deterministic | + |-----------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | code | uint32 | Response code. | 1 | N/A | + | log | string | The output of the application's logger. | 3 | N/A | + | info | string | Additional information. | 4 | N/A | + | index | int64 | The index of the key in the tree. | 5 | N/A | + | key | bytes | The key of the matching data. | 6 | N/A | + | value | bytes | The value of the matching data. | 7 | N/A | + | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height. | 8 | N/A | + | height | int64 | The block height from which data was derived. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 9 | N/A | + | codespace | string | Namespace for the `code`. | 10 | N/A | * **Usage**: * Query for data from the application at current or past height. @@ -124,21 +131,25 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | tx | bytes | The request transaction bytes | 1 | - | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the tranasaction is required. `CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction. | 2 | + | Name | Type | Description | Field Number | + |------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | tx | bytes | The request transaction bytes | 1 | + | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the transaction is required. `CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction. | 2 | * **Response**: - | Name | Type | Description | Field Number | - |------------|-------------------------------------------------------------|-----------------------------------------------------------------------|--------------| - | code | uint32 | Response code. | 1 | - | data | bytes | Result bytes, if any. | 2 | - | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | - | codespace | string | Namespace for the `code`.
| 8 | - | sender | string | The transaction's sender (e.g. the signer) | 9 | - | priority | int64 | The transaction's priority (for mempool ordering) | 10 | + | Name | Type | Description | Field Number | Deterministic | + |------------|---------------------------------------------------|----------------------------------------------------------------------|--------------|---------------| + | code | uint32 | Response code. | 1 | N/A | + | data | bytes | Result bytes, if any. | 2 | N/A | + | log | string | The output of the application's logger. | 3 | N/A | + | info | string | Additional information. | 4 | N/A | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | N/A | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | N/A | + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | N/A | + | codespace | string | Namespace for the `code`. | 8 | N/A | + | lane_id | string | The id of the lane to which the transaction is assigned. | 12 | N/A | + * **Usage**: @@ -149,9 +160,12 @@ title: Methods * `CheckTx` validates the transaction against the current state of the application, for example, checking signatures and account balances, but does not apply any of the state changes described in the transaction. - * Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast + * Transactions where `CheckTxResponse.Code != 0` will be rejected - they will not be broadcast to other nodes or included in a proposal block. CometBFT attributes no other value to the response code. + * If `lane_id` is an empty string, it means that the application did not set any lane in the + response message, so the transaction will be assigned to the default lane. + * The value of `lane_id` has to be in the range of lanes defined by the application in `InfoResponse`. ### Commit @@ -159,22 +173,19 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |--------|-------|------------------------------------|--------------| - Commit signals the application to persist application state. It takes no parameters. * **Response**: - | Name | Type | Description | Field Number | - |---------------|-------|------------------------------------------------------------------------|--------------| - | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 3 | + | Name | Type | Description | Field Number | Deterministic | + |---------------|-------|------------------------------------------------------------------------|--------------|---------------| + | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 3 | No | * **Usage**: * Signal the Application to persist the application state. - Application is expected to persist its state at the end of this call, before calling `ResponseCommit`. - * Use `ResponseCommit.retain_height` with caution! If all nodes in the network remove historical + Application is expected to persist its state at the end of this call, before returning `CommitResponse`. + * Use `CommitResponse.retain_height` with caution! If all nodes in the network remove historical blocks then this data is permanently lost, and no new nodes will be able to join the network and bootstrap, unless state sync is enabled on the chain. Historical blocks may also be required for other purposes, e.g. auditing, replay of non-persisted heights, light client verification, and so on.
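The following hedged sketch shows a `Commit` handler that persists state and sets `retain_height` conservatively, continuing the hypothetical `MyApp` from the earlier sketches; `persistState` and `lastCommittedHeight` are assumed helpers, and the 100,000-block retention window is an arbitrary example policy.

```go
// Commit persists the application state before returning. RetainHeight
// invites CometBFT to prune blocks below that height; 0 retains everything.
func (app *MyApp) Commit(_ context.Context, _ *abcitypes.CommitRequest) (*abcitypes.CommitResponse, error) {
	// persistState is a hypothetical helper performing a durable write.
	if err := app.persistState(); err != nil {
		// Unrecoverable: returning the error makes CometBFT crash so that
		// an operator can intervene (see "Returning Errors").
		return nil, err
	}

	// Keep roughly the last 100,000 blocks (an arbitrary example policy).
	var retain int64
	if h := app.lastCommittedHeight(); h > 100_000 {
		retain = h - 100_000
	}
	return &abcitypes.CommitResponse{RetainHeight: retain}, nil
}
```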
@@ -183,16 +194,13 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |--------|-------|------------------------------------|--------------| - Empty request asking the application for a list of snapshots. * **Response**: - | Name | Type | Description | Field Number | - |-----------|--------------------------------|--------------------------------|--------------| - | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |-----------|--------------------------------|--------------------------------|--------------|---------------| + | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. | 1 | N/A | * **Usage**: * Used during state sync to discover available snapshots on peers. @@ -210,9 +218,9 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |-------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |-------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | N/A | * **Usage**: * Used during state sync to retrieve snapshot chunks from peers. @@ -228,9 +236,9 @@ title: Methods * **Response**: - | Name | Type | Description | Field Number | - |--------|-------------------|-----------------------------------|--------------| - | result | [Result](#result) | The result of the snapshot offer. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |--------|-------------------|-----------------------------------|--------------|---------------| + | result | [Result](#result) | The result of the snapshot offer. | 1 | N/A | #### Result @@ -261,19 +269,19 @@ title: Methods * **Request**: - | Name | Type | Description | Field Number | - |--------|--------|-----------------------------------------------------------------------------|--------------| + | Name | Type | Description | Field Number | + |--------|--------|---------------------------------------------------------------------------|--------------| | index | uint32 | The chunk index, starting from `0`. CometBFT applies chunks sequentially. | 1 | - | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. | 2 | - | sender | string | The P2P ID of the node who sent this chunk. | 3 | + | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. | 2 | + | sender | string | The P2P ID of the node who sent this chunk. | 3 | * **Response**: - | Name | Type | Description | Field Number | - |----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | result | Result (see below) | The result of applying this chunk. 
| 1 | - | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | - | reject_senders | repeated string | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | + | Name | Type | Description | Field Number | Deterministic | + |----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | result | Result (see below) | The result of applying this chunk. | 1 | N/A | + | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | N/A | + | reject_senders | repeated string | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | N/A | ```proto enum Result { @@ -311,37 +319,38 @@ title: Methods |----------------------|-------------------------------------------------|-----------------------------------------------------------------------------------------------|--------------| | max_tx_bytes | int64 | Currently configured maximum size in bytes taken by the modified transactions. | 1 | | txs | repeated bytes | Preliminary list of transactions that have been picked as part of the block to propose. | 2 | - | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from CometBFT's data structures. | 3 | + | local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from CometBFT's data structures. | 3 | | misbehavior | repeated [Misbehavior](#misbehavior) | List of information about validators that misbehaved. | 4 | | height | int64 | The height of the block that will be proposed. | 5 | - | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that that will be proposed. | 6 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that will be proposed. | 6 | | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that is creating the proposal. | 8 | * **Response**: - | Name | Type | Description | Field Number | - |-------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------|--------------| - | txs | repeated bytes | Possibly modified list of transactions that have been picked as part of the proposed block. | 2 | + | Name | Type | Description | Field Number | Deterministic | + |------|----------------|---------------------------------------------------------------------------------------------|--------------|---------------| + | txs | repeated bytes | Possibly modified list of transactions that have been picked as part of the proposed block. 
| 2 | No | * **Usage**: - * `RequestPrepareProposal`'s parameters `txs`, `misbehavior`, `height`, `time`, - `next_validators_hash`, and `proposer_address` are the same as in `RequestProcessProposal` - and `RequestFinalizeBlock`. - * `RequestPrepareProposal.local_last_commit` is a set of the precommit votes that allowed the - decision of the previous block, together with their corresponding vote extensions. + * `PrepareProposalRequest`'s fields `txs`, `misbehavior`, `height`, `time`, + `next_validators_hash`, and `proposer_address` are the same as in `ProcessProposalRequest` + and `FinalizeBlockRequest`. + * `PrepareProposalRequest.local_last_commit` is a set of the precommit votes for the previous + height, including the ones that led to the decision of the previous block, + together with their corresponding vote extensions. * The `height`, `time`, and `proposer_address` values match the values from the header of the proposed block. - * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that CometBFT + * `PrepareProposalRequest` contains a preliminary set of transactions `txs` that CometBFT retrieved from the mempool, called _raw proposal_. The Application can modify this - set and return a modified set of transactions via `ResponsePrepareProposal.txs` . + set and return a modified set of transactions via `PrepareProposalResponse.txs`. * The Application _can_ modify the raw proposal: it can reorder, remove or add transactions. - Let `tx` be a transaction in `txs` (set of transactions within `RequestPrepareProposal`): + Let `tx` be a transaction in `txs` (set of transactions within `PrepareProposalRequest`): * If the Application considers that `tx` should not be proposed in this block, e.g., there are other transactions with higher priority, then it should not include it in - `ResponsePrepareProposal.txs`. However, this will not remove `tx` from the mempool. + `PrepareProposalResponse.txs`. However, this will not remove `tx` from the mempool. * If the Application wants to add a new transaction to the proposed block, then the - Application includes it in `ResponsePrepareProposal.txs`. CometBFT will not add + Application includes it in `PrepareProposalResponse.txs`. CometBFT will not add the transaction to the mempool. * The Application should be aware that removing and adding transactions may compromise _traceability_. @@ -355,40 +364,35 @@ title: Methods traceability, it is its responsibility to support it. For instance, the Application could attach to a transformed transaction a list with the hashes of the transactions it derives from. - * The Application MAY configure CometBFT to include a list of transactions in `RequestPrepareProposal.txs` - whose total size in bytes exceeds `RequestPrepareProposal.max_tx_bytes`. + * The Application MAY configure CometBFT to include a list of transactions in `PrepareProposalRequest.txs` + whose total size in bytes exceeds `PrepareProposalRequest.max_tx_bytes`. If the Application sets `ConsensusParams.Block.MaxBytes` to -1, CometBFT - will include _all_ transactions currently in the mempool in `RequestPrepareProposal.txs`, - which may not fit in `RequestPrepareProposal.max_tx_bytes`. - Therefore, if the size of `RequestPrepareProposal.txs` is greater than - `RequestPrepareProposal.max_tx_bytes`, the Application MUST remove transactions to ensure - that the `RequestPrepareProposal.max_tx_bytes` limit is respected by those transactions - returned in `ResponsePrepareProposal.txs`.
+ will include _all_ transactions currently in the mempool in `PrepareProposalRequest.txs`, + which may not fit in `PrepareProposalRequest.max_tx_bytes`. + Therefore, if the size of `PrepareProposalRequest.txs` is greater than + `PrepareProposalRequest.max_tx_bytes`, the Application MUST remove transactions to ensure + that the `PrepareProposalRequest.max_tx_bytes` limit is respected by those transactions + returned in `PrepareProposalResponse.txs`. This is specified in [Requirement 2](./abci%2B%2B_app_requirements.md) (see the sketch at the end of this section). * As a result of executing the prepared proposal, the Application may produce block events or transaction events. The Application must keep those events until a block is decided and then pass them on to CometBFT via - `ResponseFinalizeBlock`. + `FinalizeBlockResponse`. * CometBFT does NOT provide any additional validity checks (such as checking for duplicate transactions). - - * If CometBFT fails to validate the `ResponsePrepareProposal`, CometBFT will assume the + * If CometBFT fails to validate the `PrepareProposalResponse`, CometBFT will assume the Application is faulty and crash. - * The implementation of `PrepareProposal` can be non-deterministic. + * The implementation of `PrepareProposal` MAY be non-deterministic. #### When does CometBFT call "PrepareProposal" ? - When a validator _p_ enters consensus round _r_, height _h_, in which _p_ is the proposer, and _p_'s _validValue_ is `nil`: 1. CometBFT collects outstanding transactions from _p_'s mempool * the transactions will be collected in order of priority * _p_'s CometBFT creates a block header. -2. _p_'s CometBFT calls `RequestPrepareProposal` with the newly generated block, the local +2. _p_'s CometBFT calls `PrepareProposal` with the newly generated block, the local commit of the previous height (with vote extensions), and any outstanding evidence of misbehavior. The call is synchronous: CometBFT's execution will block until the Application returns from the call. @@ -399,16 +403,19 @@ and _p_'s _validValue_ is `nil`: * leave transactions untouched * add new transactions (not present initially) to the proposal * remove transactions from the proposal (but not from the mempool thus effectively _delaying_ them) - the - Application does not include the transaction in `ResponsePrepareProposal.txs`. + Application does not include the transaction in `PrepareProposalResponse.txs`. * modify transactions (e.g. aggregate them). As explained above, this compromises client traceability, unless it is implemented at the Application level. * reorder transactions - the Application reorders transactions in the list + * the Application MAY use the vote extensions in the commit info to modify the proposal, in which case it is suggested + that extensions be validated in the same manner as done in `VerifyVoteExtension`, since extensions of votes included + in the commit info after the minimum of +2/3 has been reached are not verified. 4. The Application includes the transaction list (whether modified or not) in the return parameters (see the rules in section _Usage_), and returns from the call. 5. _p_ uses the (possibly) modified block as _p_'s proposal in round _r_, height _h_. Note that, if _p_ has a non-`nil` _validValue_ in round _r_, height _h_, -the consensus algorithm will use it as proposal and will not call `RequestPrepareProposal`. +the consensus algorithm will use it as the proposal and will not call `PrepareProposal`.
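+To illustrate Requirement 2 above, the following hedged Go sketch of a `PrepareProposal` handler trims the raw proposal so that the returned transactions respect `max_tx_bytes`. It assumes the renamed Go ABCI types used in this document; the `App` type is hypothetical, and a real handler would typically also reorder, add, or drop transactions according to application logic.
+
+```go
+// Sketch: keep transactions, in order, until the byte budget is exhausted.
+func (app *App) PrepareProposal(ctx context.Context, req *abci.PrepareProposalRequest) (*abci.PrepareProposalResponse, error) {
+	var (
+		txs   [][]byte
+		total int64
+	)
+	for _, tx := range req.Txs {
+		if total+int64(len(tx)) > req.MaxTxBytes {
+			break // excess transactions stay in the mempool, effectively delayed
+		}
+		total += int64(len(tx))
+		txs = append(txs, tx)
+	}
+	return &abci.PrepareProposalResponse{Txs: txs}, nil
+}
+```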
### ProcessProposal @@ -429,31 +436,31 @@ the consensus algorithm will use it as proposal and will not call `RequestPrepar * **Response**: - | Name | Type | Description | Field Number | - |-------------------------|--------------------------------------------------|-----------------------------------------------------------------------------------|--------------| - | status | [ProposalStatus](#proposalstatus) | `enum` that signals if the application finds the proposal valid. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |--------|-----------------------------------|------------------------------------------------------------------|--------------|---------------| + | status | [ProposalStatus](#proposalstatus) | `enum` that signals if the application finds the proposal valid. | 1 | Yes | * **Usage**: * Contains all information on the proposed block needed to fully execute it. * The Application may fully execute the block as though it was handling - `RequestFinalizeBlock`. + `FinalizeBlock`. * However, any resulting state changes must be kept as _candidate state_, and the Application should be ready to discard it in case another block is decided. - * `RequestProcessProposal` is also called at the proposer of a round. - Normally the call to `RequestProcessProposal` occurs right after the call to `RequestPrepareProposal` and - `RequestProcessProposal` matches the block produced based on `ResponsePrepareProposal` (i.e., - `RequestPrepareProposal.txs` equals `RequestProcessProposal.txs`). - However, no such guarantee is made since, in the presence of failures, `RequestProcessProposal` may match - `ResponsePrepareProposal` from an earlier invocation or `ProcessProposal` may not be invoked at all. + * `ProcessProposal` is also called at the proposer of a round. + Normally the call to `ProcessProposal` occurs right after the call to `PrepareProposal` and + `ProcessProposalRequest` matches the block produced based on `PrepareProposalResponse` (i.e., + `ProcessProposalRequest.txs` equals `PrepareProposalResponse.txs`). + However, no such guarantee is made since, in the presence of failures, `ProcessProposalRequest` may match + `PrepareProposalResponse` from an earlier invocation or `ProcessProposal` may not be invoked at all. * The height and time values match the values from the header of the proposed block. - * If `ResponseProcessProposal.status` is `REJECT`, consensus assumes the proposal received + * If `ProcessProposalResponse.status` is `REJECT`, consensus assumes the proposal received is not valid. - * The Application MAY fully execute the block — immediate execution + * The Application MAY fully execute the block (immediate execution) * The implementation of `ProcessProposal` MUST be deterministic. Moreover, the value of - `ResponseProcessProposal.status` MUST **exclusively** depend on the parameters passed in - the call to `RequestProcessProposal`, and the last committed Application state + `ProcessProposalResponse.status` MUST **exclusively** depend on the parameters passed in + the `ProcessProposalRequest`, and the last committed Application state (see [Requirements](./abci++_app_requirements.md) section). - * Moreover, application implementors SHOULD always set `ResponseProcessProposal.status` to `ACCEPT`, + * Moreover, application implementers SHOULD always set `ProcessProposalResponse.status` to `ACCEPT`, unless they _really_ know what the potential liveness implications of returning `REJECT` are. #### When does CometBFT call "ProcessProposal" ? 
@@ -468,10 +475,10 @@ When a node _p_ enters consensus round _r_, height _h_, in which _q_ is the prop from _q_, _p_ follows the validators' algorithm to check whether it should prevote for the proposed block, or `nil`. 5. If the validators' consensus algorithm indicates _p_ should prevote non-nil: - 1. CometBFT calls `RequestProcessProposal` with the block. The call is synchronous. + 1. CometBFT calls `ProcessProposal` with the block. The call is synchronous. 2. The Application checks/processes the proposed block, which is read-only, and returns - `ACCEPT` or `REJECT` in the `ResponseProcessProposal.status` field. - * The Application, depending on its needs, may call `ResponseProcessProposal` + `ACCEPT` or `REJECT` in the `ProcessProposalResponse.status` field. + * The Application, depending on its needs, may return from `ProcessProposal` * either after it has completely processed the block (immediate execution), * or after doing some basic checks, and process the block asynchronously. In this case the Application will not be able to reject the block, or force prevote/precommit `nil` @@ -501,18 +508,18 @@ When a node _p_ enters consensus round _r_, height _h_, in which _q_ is the prop * **Response**: - | Name | Type | Description | Field Number | - |-------------------|-------|---------------------------------------------------------|--------------| - | vote_extension | bytes | Information signed by by CometBFT. Can have 0 length. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |----------------|-------|-------------------------------------------------------|--------------|---------------| + | vote_extension | bytes | Information signed by CometBFT. Can have 0 length. | 1 | No | * **Usage**: - * `ResponseExtendVote.vote_extension` is application-generated information that will be signed + * `ExtendVoteResponse.vote_extension` is application-generated information that will be signed by CometBFT and attached to the Precommit message. * The Application may choose to use an empty vote extension (0 length). - * The contents of `RequestExtendVote` correspond to the proposed block on which the consensus algorithm + * The contents of `ExtendVoteRequest` correspond to the proposed block on which the consensus algorithm will send the Precommit message. - * `ResponseExtendVote.vote_extension` will only be attached to a non-`nil` Precommit message. If the consensus algorithm is to - precommit `nil`, it will not call `RequestExtendVote`. + * `ExtendVoteResponse.vote_extension` will only be attached to a non-`nil` Precommit message. If the consensus algorithm is to + precommit `nil`, it will not call `ExtendVote`. * The Application logic that creates the extension can be non-deterministic. #### When does CometBFT call `ExtendVote`? @@ -525,9 +532,9 @@ When a validator _p_ is in consensus state _prevote_ of round _r_, height _h_, i then _p_ locks _v_ and sends a Precommit message in the following way 1. _p_ sets _lockedValue_ and _validValue_ to _v_, and sets _lockedRound_ and _validRound_ to _r_ -2. _p_'s CometBFT calls `RequestExtendVote` with _v_ (`RequestExtendVote`). The call is synchronous. -3. The Application returns an array of bytes, `ResponseExtendVote.extension`, which is not interpreted by the consensus algorithm. -4. _p_ sets `ResponseExtendVote.extension` as the value of the `extension` field of type +2. _p_'s CometBFT calls `ExtendVote` with _v_ (in `ExtendVoteRequest`). The call is synchronous. +3. 
The Application returns an array of bytes, `ExtendVoteResponse.vote_extension`, which is not interpreted by the consensus algorithm. +4. _p_ sets `ExtendVoteResponse.vote_extension` as the value of the `extension` field of type [CanonicalVoteExtension](../core/data_structures.md#canonicalvoteextension), populates the other fields in [CanonicalVoteExtension](../core/data_structures.md#canonicalvoteextension), and signs the populated data structure. @@ -538,7 +545,7 @@ then _p_ locks _v_ and sends a Precommit message in the following way 7. _p_ broadcasts the Precommit message. In the cases when _p_ is to broadcast `precommit nil` messages (either _2f+1_ `prevote nil` messages received, -or _timeoutPrevote_ triggered), _p_'s CometBFT does **not** call `RequestExtendVote` and will not include +or _timeoutPrevote_ triggered), _p_'s CometBFT does **not** call `ExtendVote` and will not include a [CanonicalVoteExtension](../core/data_structures.md#canonicalvoteextension) field in the `precommit nil` message. ### VerifyVoteExtension @@ -552,30 +559,30 @@ a [CanonicalVoteExtension](../core/data_structures.md#canonicalvoteextension) fi | hash | bytes | The hash of the proposed block that the vote extension refers to. | 1 | | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension. | 2 | | height | int64 | Height of the block (for sanity check). | 3 | - | vote_extension | bytes | Application-specific information signed by CometBFT. Can have 0 length. | 4 | + | vote_extension | bytes | Application-specific information signed by CometBFT. Can have 0 length. | 4 | * **Response**: - | Name | Type | Description | Field Number | - |--------|-------------------------------|----------------------------------------------------------------|--------------| - | status | [VerifyStatus](#verifystatus) | `enum` signaling if the application accepts the vote extension | 1 | + | Name | Type | Description | Field Number | Deterministic | + |--------|-------------------------------|----------------------------------------------------------------|--------------|---------------| + | status | [VerifyStatus](#verifystatus) | `enum` signaling if the application accepts the vote extension | 1 | Yes | * **Usage**: - * `RequestVerifyVoteExtension.vote_extension` can be an empty byte array. The Application's + * `VerifyVoteExtensionRequest.vote_extension` can be an empty byte array. The Application's interpretation of it should be that the Application running at the process that sent the vote chose not to extend it. - CometBFT will always call `RequestVerifyVoteExtension`, even for 0 length vote extensions. - * `RequestVerifyVoteExtension` is not called for precommit votes sent by the local process. - * `RequestVerifyVoteExtension.hash` refers to a proposed block. There is not guarantee that + CometBFT will always call `VerifyVoteExtension`, even for 0 length vote extensions. + * `VerifyVoteExtension` is not called for precommit votes sent by the local process. + * `VerifyVoteExtensionRequest.hash` refers to a proposed block. There is no guarantee that this proposed block has previously been exposed to the Application via `ProcessProposal`. - * If `ResponseVerifyVoteExtension.status` is `REJECT`, the consensus algorithm will reject the whole received vote. + * If `VerifyVoteExtensionResponse.status` is `REJECT`, the consensus algorithm will reject the whole received vote.
See the [Requirements](./abci++_app_requirements.md) section to understand the potential liveness implications of this. * The implementation of `VerifyVoteExtension` MUST be deterministic. Moreover, the value of - `ResponseVerifyVoteExtension.status` MUST **exclusively** depend on the parameters passed in - the call to `RequestVerifyVoteExtension`, and the last committed Application state + `VerifyVoteExtensionResponse.status` MUST **exclusively** depend on the parameters passed in + the `VerifyVoteExtensionRequest`, and the last committed Application state (see [Requirements](./abci++_app_requirements.md) section). - * Moreover, application implementers SHOULD always set `ResponseVerifyVoteExtension.status` to `ACCEPT`, + * Moreover, application implementers SHOULD always set `VerifyVoteExtensionResponse.status` to `ACCEPT`, unless they _really_ know what the potential liveness implications of returning `REJECT` are. #### When does CometBFT call `VerifyVoteExtension`? @@ -586,14 +593,20 @@ message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): 1. If the Precommit message does not contain a vote extension with a valid signature, _p_ discards the Precommit message as invalid. * a 0-length vote extension is valid as long as its accompanying signature is also valid. -2. Else, _p_'s CometBFT calls `RequestVerifyVoteExtension`. -3. The Application returns `ACCEPT` or `REJECT` via `ResponseVerifyVoteExtension.status`. +2. Else, _p_'s CometBFT calls `VerifyVoteExtension`. +3. The Application returns `ACCEPT` or `REJECT` via `VerifyVoteExtensionResponse.status`. 4. If the Application returns * `ACCEPT`, _p_ will keep the received vote, together with its corresponding vote extension in its internal data structures. It will be used to populate the [ExtendedCommitInfo](#extendedcommitinfo) - structure in calls to `RequestPrepareProposal`, in rounds of height _h + 1_ where _p_ is the proposer. + structure in calls to `PrepareProposal`, in rounds of height _h + 1_ where _p_ is the proposer. * `REJECT`, _p_ will deem the Precommit message invalid and discard it. +When a node _p_ is in consensus round _0_, height _h_, and _p_ receives a Precommit +message for CommitRound _r_, height _h-1_ from validator _q_ (_q_ ≠ _p_), _p_ +MAY add the Precommit message and associated extension to [ExtendedCommitInfo](#extendedcommitinfo) +without calling `VerifyVoteExtension` to verify it. + + ### FinalizeBlock #### Parameters and Types @@ -610,56 +623,75 @@ message for round _r_, height _h_ from validator _q_ (_q_ ≠ _p_): | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the finalized block. | 6 | | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that created the proposal. | 8 | + | syncing_to_height | int64 | If the node is syncing/replaying blocks then syncing_to_height == target height. If not, syncing_to_height == height. 
| 9 | * **Response**: - | Name | Type | Description | Field Number | - |-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------| - | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing | 1 | - | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 | - | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | - | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 4 | - | app_hash | bytes | The Merkle root hash of the application state. | 5 | + | Name | Type | Description | Field Number | Deterministic | + |-------------------------|---------------------------------------------------|-------------------------------------------------------------------------------------|--------------|---------------| + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing | 1 | No | + | tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 | Yes | + | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | Yes | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to gas, size, and other consensus-related parameters. | 4 | Yes | + | app_hash | bytes | The Merkle root hash of the application state. | 5 | Yes | + | next_block_delay | [google.protobuf.Duration][protobuf-duration] | Delay between the time when this block is committed and the next height is started. | 6 | No | * **Usage**: * Contains the fields of the newly decided block. * This method is equivalent to the call sequence `BeginBlock`, [`DeliverTx`], - and `EndBlock` in the previous version of ABCI. + and `EndBlock` in ABCI 1.0. * The height and time values match the values from the header of the proposed block. - * The Application can use `RequestFinalizeBlock.decided_last_commit` and `RequestFinalizeBlock.misbehavior` + * The Application can use `FinalizeBlockRequest.decided_last_commit` and `FinalizeBlockRequest.misbehavior` to determine rewards and punishments for the validators. - * The Application executes the transactions in `RequestFinalizeBlock.txs` deterministically, + * The Application executes the transactions in `FinalizeBlockRequest.txs` deterministically, according to the rules set up by the Application, before returning control to CometBFT. Alternatively, it can apply the candidate state corresponding to the same block previously executed via `PrepareProposal` or `ProcessProposal`. - * `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. - * The Application must provide values for `ResponseFinalizeBlock.app_hash`, - `ResponseFinalizeBlock.tx_results`, `ResponseFinalizeBlock.validator_updates`, and - `ResponseFinalizeBlock.consensus_param_updates` as a result of executing the block. - * The values for `ResponseFinalizeBlock.validator_updates`, or - `ResponseFinalizeBlock.consensus_param_updates` may be empty. In this case, CometBFT will keep + * `FinalizeBlockResponse.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. 
+ * The Application must provide values for `FinalizeBlockResponse.app_hash`, + `FinalizeBlockResponse.tx_results`, `FinalizeBlockResponse.validator_updates`, and + `FinalizeBlockResponse.consensus_param_updates` as a result of executing the block. + * The values for `FinalizeBlockResponse.validator_updates`, or + `FinalizeBlockResponse.consensus_param_updates` may be empty. In this case, CometBFT will keep the current values. - * `ResponseFinalizeBlock.validator_updates`, triggered by block `H`, affect validation + * `FinalizeBlockResponse.validator_updates`, triggered by block `H`, affect validation for blocks `H+1`, `H+2`, and `H+3`. Heights following a validator update are affected in the following way: * Height `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. * Height `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. * Height `H+3`: `*_last_commit` fields in `PrepareProposal`, `ProcessProposal`, and `FinalizeBlock` now include the altered validator set. - * `ResponseFinalizeBlock.consensus_param_updates` returned for block `H` apply to the consensus + * `FinalizeBlockResponse.consensus_param_updates` returned for block `H` apply to the consensus params for block `H+1`. For more information on the consensus parameters, see the [consensus parameters](./abci%2B%2B_app_requirements.md#consensus-parameters) section. - * `ResponseFinalizeBlock.app_hash` contains an (optional) Merkle root hash of the application state. - * `ResponseFinalizeBlock.app_hash` is included as the `Header.AppHash` in the next block. - * `ResponseFinalizeBlock.app_hash` may also be empty or hard-coded, but MUST be + * `FinalizeBlockResponse.app_hash` contains an (optional) Merkle root hash of the application state. + * `FinalizeBlockResponse.app_hash` is included as the `Header.AppHash` in the next block. + * `FinalizeBlockResponse.app_hash` may also be empty or hard-coded, but MUST be **deterministic** - it must not be a function of anything that did not come from the parameters - of `RequestFinalizeBlock` and the previous committed state. + of `FinalizeBlockRequest` and the previous committed state. * Later calls to `Query` can return proofs about the application state anchored in this Merkle root hash. * The implementation of `FinalizeBlock` MUST be deterministic, since it is making the Application's state evolve in the context of state machine replication. - * Currently, CometBFT will fill up all fields in `RequestFinalizeBlock`, even if they were - already passed on to the Application via `RequestPrepareProposal` or `RequestProcessProposal`. + * Currently, CometBFT will fill up all fields in `FinalizeBlockRequest`, even if they were + already passed on to the Application via `PrepareProposalRequest` or `ProcessProposalRequest`. + * When calling `FinalizeBlock` with a block, the consensus algorithm run by CometBFT guarantees + that at least one non-byzantine validator has run `ProcessProposal` on that block. + * `FinalizeBlockResponse.next_block_delay` - how long CometBFT waits after + committing a block, before starting the next height. This includes the + time the application and CometBFT take for processing the committed block. + In CometBFT terms, this interval gives the proposer a chance to receive + some more precommits, even though it already has the required 2/3+. + - Set to 0 if you want a proposer to make progress as soon as it has all + the precommits and the block is processed by the application. + - Previously `timeout_commit` in CometBFT config. 
+ **Set to constant 1s to preserve the old (v0.34 - v1.0) behavior**. + * `FinalizeBlockResponse.next_block_delay` is a non-deterministic field. + This means that each node MAY provide a different value, which is + supposed to depend on how long processing is taking at the local node. It's + reasonable to use real (wall clock) time and to require nodes to have + synchronized clocks (NTP, or other; PBTS also requires this) for the + variable delay to work properly. #### When does CometBFT call `FinalizeBlock`? @@ -673,20 +705,20 @@ When a node _p_ is in consensus height _h_, and _p_ receives then _p_ decides block _v_ and finalizes consensus for height _h_ in the following way 1. _p_ persists _v_ as the decision for height _h_. -2. _p_'s CometBFT calls `RequestFinalizeBlock` with _v_'s data. The call is synchronous. +2. _p_'s CometBFT calls `FinalizeBlock` with _v_'s data. The call is synchronous. 3. _p_'s Application executes block _v_. 4. _p_'s Application calculates and returns the _AppHash_, along with a list containing the outputs of each of the transactions executed. 5. _p_'s CometBFT hashes all the transaction outputs and stores the result in _ResultsHash_. 6. _p_'s CometBFT persists the transaction outputs, _AppHash_, and _ResultsHash_. 7. _p_'s CometBFT locks the mempool — no calls to `CheckTx` on new transactions. -8. _p_'s CometBFT calls `RequestCommit` to instruct the Application to persist its state. +8. _p_'s CometBFT calls `Commit` to instruct the Application to persist its state. 9. _p_'s CometBFT, optionally, re-checks all outstanding transactions in the mempool against the newly persisted Application state. 10. _p_'s CometBFT unlocks the mempool — newly received transactions can now be checked. 11. _p_ starts consensus for height _h+1_, round 0 -## Data Types existing in ABCI +## Data Types (existing before ABCI 2.0) Most of the data structures used in ABCI are shared [common data structures](../core/data_structures.md). In certain cases, ABCI uses different data structures which are documented here: @@ -694,15 +726,15 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |---------|-------|---------------------------------------------------------------------|--------------| - | address | bytes | [Address](../core/data_structures.md#address) of validator | 1 | - | power | int64 | Voting power of the validator | 3 | + | Name | Type | Description | Field Number | + |---------|-------|------------------------------------------------------------|--------------| + | address | bytes | [Address](../core/data_structures.md#address) of validator | 1 | + | power | int64 | Voting power of the validator | 3 | * **Usage**: * Validator identified by address - * Used as part of VoteInfo within `CommitInfo` (used in `ProcessProposal` and `FinalizeBlock`), - and `ExtendedCommitInfo` (used in `PrepareProposal`). + * Used as part of `VoteInfo` within `CommitInfo` (used in `ProcessProposal` + and `FinalizeBlock`), and `ExtendedCommitInfo` (used in `PrepareProposal`).
* Does not include PubKey to avoid sending potentially large quantum pubkeys over the ABCI @@ -710,26 +742,27 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |---------|--------------------------------------------------|-------------------------------|--------------| - | pub_key | [Public Key](../core/data_structures.md#pub_key) | Public key of the validator | 1 | - | power | int64 | Voting power of the validator | 2 | + | Name | Type | Description | Field Number | Deterministic | + |---------------|--------------------------------------------------|-----------------------------------------------------|--------------|---------------| + | power | int64 | Voting power | 2 | Yes | + | pub_key_type | string | Public key's type (e.g. "tendermint/PubKeyEd25519") | 3 | Yes | + | pub_key_bytes | bytes | Public key's bytes | 4 | Yes | * **Usage**: - * Validator identified by PubKey + * Validator identified by PubKeyType and PubKeyBytes * Used to tell CometBFT to update the validator set ### Misbehavior * **Fields**: - | Name | Type | Description | Field Number | - |--------------------|-------------------------------------------------|------------------------------------------------------------------------------|--------------| - | type | [MisbehaviorType](#misbehaviortype) | Type of the misbehavior. An enum of possible misbehaviors. | 1 | - | validator | [Validator](#validator) | The offending validator | 2 | - | height | int64 | Height when the offense occurred | 3 | - | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that was committed at height `height` | 4 | - | total_voting_power | int64 | Total voting power of the validator set at height `height` | 5 | + | Name | Type | Description | Field Number | + |--------------------|-------------------------------------------------|--------------------------------------------------------------|--------------| + | type | [MisbehaviorType](#misbehaviortype) | Type of the misbehavior. An enum of possible misbehaviors. | 1 | + | validator | [Validator](#validator) | The offending validator | 2 | + | height | int64 | Height when the offense occurred | 3 | + | time | [google.protobuf.Timestamp][protobuf-timestamp] | Timestamp of the block that was committed at height `height` | 4 | + | total_voting_power | int64 | Total voting power of the validator set at height `height` | 5 | #### MisbehaviorType @@ -747,42 +780,44 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |-----------|---------------------------------------------------------------|------------------------------------------------------------------------------|--------------| - | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | - | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | - | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | - | version | [VersionsParams](../core/data_structures.md#versionparams) | The ABCI application version. 
| 4 | + | Name | Type | Description | Field Number | Deterministic | + | --------- | ------------------------------------------------------------- | ---------------------------------------------------------------------------- | ------------ | ------------- | + | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | Yes | + | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | Yes | + | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | Yes | + | version | [VersionsParams](../core/data_structures.md#versionparams) | The ABCI application version. | 4 | Yes | + | abci | [ABCIParams](../core/data_structures.md#abciparams) | ABCI-related parameters. | 5 | Yes | + | synchrony | [SynchronyParams](../core/data_structures.md#synchronyparams) | Parameters determining the validity bounds of a proposal timestamp. | 6 | Yes | ### ProofOps * **Fields**: - | Name | Type | Description | Field Number | - |------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | ops | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against.. | 1 | + | Name | Type | Description | Field Number | Deterministic | + |------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | ops | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against. | 1 | N/A | ### ProofOp * **Fields**: - | Name | Type | Description | Field Number | - |------|--------|------------------------------------------------|--------------| - | type | string | Type of Merkle proof and how it's encoded. | 1 | - | key | bytes | Key in the Merkle tree that this proof is for. | 2 | - | data | bytes | Encoded Merkle proof for the key. | 3 | + | Name | Type | Description | Field Number | Deterministic | + |------|--------|------------------------------------------------|--------------|---------------| + | type | string | Type of Merkle proof and how it's encoded. | 1 | N/A | + | key | bytes | Key in the Merkle tree that this proof is for. | 2 | N/A | + | data | bytes | Encoded Merkle proof for the key. | 3 | N/A | ### Snapshot * **Fields**: - | Name | Type | Description | Field Number | - |----------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | height | uint64 | The height at which the snapshot was taken (after commit). 
| 1 | - | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. CometBFT does not interpret this. | 2 | - | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 | - | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. CometBFT does not interpret the hash, it only compares them. | 4 | - | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | + | Name | Type | Description | Field Number | Deterministic | + |----------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|---------------| + | height | uint64 | The height at which the snapshot was taken (after commit). | 1 | N/A | + | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. CometBFT does not interpret this. | 2 | N/A | + | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 | N/A | + | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. CometBFT does not interpret the hash, it only compares them. | 4 | N/A | + | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | N/A | * **Usage**: * Used for state sync snapshots, see the [state sync section](../p2p/legacy-docs/messages/state-sync.md) for details. @@ -790,16 +825,16 @@ Most of the data structures used in ABCI are shared [common data structures](../ `Metadata`). Chunks may be retrieved from all nodes that have the same snapshot. * When sent across the network, a snapshot message can be at most 4 MB. -## Data types introduced or modified in ABCI++ +## Data types introduced or modified in ABCI 2.0 ### VoteInfo * **Fields**: - | Name | Type | Description | Field Number | - |-----------------------------|-------------------------|----------------------------------------------------------------|--------------| - | validator | [Validator](#validator) | The validator that sent the vote. | 1 | - | signed_last_block | bool | Indicates whether or not the validator signed the last block. | 2 | + | Name | Type | Description | Field Number | + |---------------|-------------------------------------------------------|------------------------------------------------------------------------------------------|--------------| + | validator | [Validator](#validator) | The validator that sent the vote. | 1 | + | block_id_flag | [BlockIDFlag](../core/data_structures.md#blockidflag) | Indicates whether the validator voted for the last block, voted `nil`, or its vote was not received. | 3 | * **Usage**: * Indicates whether a validator signed the last block, allowing for rewards based on validator availability. @@ -809,16 +844,18 @@ Most of the data structures used in ABCI are shared [common data structures](../ * **Fields**: - | Name | Type | Description | Field Number | - |-------------------|-------------------------|------------------------------------------------------------------------------|--------------| - | validator | [Validator](#validator) | The validator that sent the vote. 
| 1 | - | signed_last_block | bool | Indicates whether or not the validator signed the last block. | 2 | - | vote_extension | bytes | Non-deterministic extension provided by the sending validator's Application. | 3 | + | Name | Type | Description | Field Number | + |---------------------|-------------------------------------------------------|---------------------------------------------------------------------------------------------|--------------| + | validator | [Validator](#validator) | The validator that sent the vote. | 1 | + | vote_extension | bytes | Non-deterministic extension provided by the sending validator's Application. | 3 | + | extension_signature | bytes | Signature of the vote extension produced by the sending validator and verified by CometBFT. | 4 | + | block_id_flag | [BlockIDFlag](../core/data_structures.md#blockidflag) | Indicates whether the validator voted for the last block, voted `nil`, or its vote was not received. | 5 | * **Usage**: * Indicates whether a validator signed the last block, allowing for rewards based on validator availability. * This information is extracted from CometBFT's data structures in the local process. - * `vote_extension` contains the sending validator's vote extension, which is signed by CometBFT. It can be empty + * `vote_extension` contains the sending validator's vote extension, whose signature was verified by CometBFT. It can be empty. + * `extension_signature` is the signature of the vote extension, which was verified by CometBFT. This exposes the signature to the application for further processing or verification. ### CommitInfo @@ -829,6 +866,12 @@ Most of the data structures used in ABCI are shared [common data structures](../ | round | int32 | Commit round. Reflects the round at which the block proposer decided in the previous height. | 1 | | votes | repeated [VoteInfo](#voteinfo) | List of validators' addresses in the last validator set with their voting information. | 2 | +* **Notes** + * The `VoteInfo` entries in `votes` are ordered by the voting power of the validators (descending order, highest to lowest voting power). + * CometBFT guarantees this ordering through its validator set update logic, which ultimately sorts the validators (descending) by their voting power. + * The ordering is also persisted when a validator set is saved in the store. + * The validator set is loaded from the store when building the `CommitInfo`, ensuring order is maintained from the persisted validator set. + ### ExtendedCommitInfo * **Fields**: @@ -838,20 +881,26 @@ Most of the data structures used in ABCI are shared [common data structures](../ | round | int32 | Commit round. Reflects the round at which the block proposer decided in the previous height. | 1 | | votes | repeated [ExtendedVoteInfo](#extendedvoteinfo) | List of validators' addresses in the last validator set with their voting information, including vote extensions. | 2 | +* **Notes** + * The `ExtendedVoteInfo` entries in `votes` are ordered by the voting power of the validators (descending order, highest to lowest voting power). + * CometBFT guarantees this ordering through its validator set update logic, which ultimately sorts the validators (descending) by their voting power. + * The ordering is also persisted when a validator set is saved in the store. + * The validator set is loaded from the store when building the `ExtendedCommitInfo`, ensuring order is maintained from the persisted validator set. 
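+As an illustration of how an application might consume `ExtendedCommitInfo` inside `PrepareProposal`, the hedged Go sketch below walks the votes (already ordered by descending voting power) and uses only extensions from validators that committed the block. `verifyExtension` is a hypothetical application-side check mirroring the logic of `VerifyVoteExtension`, as suggested in the `PrepareProposal` usage notes; field names follow the Go ABCI types assumed in this document.
+
+```go
+// Sketch (inside a PrepareProposal handler): aggregate only verified
+// extensions from validators that actually committed the block.
+for _, vote := range req.LocalLastCommit.Votes {
+	if vote.BlockIdFlag != types.BlockIDFlagCommit {
+		continue // nil votes and absent validators carry no usable extension
+	}
+	if err := verifyExtension(vote.Validator.Address, vote.VoteExtension, vote.ExtensionSignature); err != nil {
+		continue // drop extensions that fail the application-side check
+	}
+	// ... use vote.VoteExtension when building the proposed block ...
+}
+```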
+ ### ExecTxResult * **Fields**: - | Name | Type | Description | Field Number | - |------------|-------------------------------------------------------------|-----------------------------------------------------------------------|--------------| - | code | uint32 | Response code. | 1 | - | data | bytes | Result bytes, if any. | 2 | - | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | - | info | string | Additional information. **May be non-deterministic.** | 4 | - | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | - | gas_used | int64 | Amount of gas consumed by transaction. | 6 | - | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | - | codespace | string | Namespace for the `code`. | 8 | + | Name | Type | Description | Field Number | Deterministic | + |------------|---------------------------------------------------|----------------------------------------------------------------------|--------------|---------------| + | code | uint32 | Response code. | 1 | Yes | + | data | bytes | Result bytes, if any. | 2 | Yes | + | log | string | The output of the application's logger. | 3 | No | + | info | string | Additional information. | 4 | No | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | Yes | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | Yes | + | events | repeated [Event](abci++_basic_concepts.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | No | + | codespace | string | Namespace for the `code`. | 8 | Yes | ### ProposalStatus @@ -886,4 +935,5 @@ enum VerifyStatus { * If `Status` is `ACCEPT`, the consensus algorithm will accept the vote as valid. * If `Status` is `REJECT`, the consensus algorithm will reject the vote as invalid. -[protobuf-timestamp]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp +[protobuf-timestamp]: https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp +[protobuf-duration]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration diff --git a/spec/consensus/bft-time.md b/spec/consensus/bft-time.md index 9312d9b390f..8b6bf1ef792 100644 --- a/spec/consensus/bft-time.md +++ b/spec/consensus/bft-time.md @@ -3,54 +3,124 @@ order: 2 --- # BFT Time -CometBFT provides a deterministic, Byzantine fault-tolerant, source of time. -Time in CometBFT is defined with the Time field of the block header. - -It satisfies the following properties: - -- Time Monotonicity: Time is monotonically increasing, i.e., given -a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`, `H1.Time < H2.Time`. -- Time Validity: Given a set of Commit votes that forms the `block.LastCommit` field, a range of -valid values for the Time field of the block header is defined only by -Precommit messages (from the LastCommit field) sent by correct processes, i.e., -a faulty process cannot arbitrarily increase the Time value. - -In the context of CometBFT, time is of type int64 and denotes UNIX time in milliseconds, i.e., -corresponds to the number of milliseconds since January 1, 1970. 
-Before defining rules that need to be enforced by Tendermint, the consensus algorithm adopted in CometBFT, -so the properties above holds, we introduce the following definition: - -- median of a Commit is equal to the median of `Vote.Time` fields of the `Vote` messages, -where the value of `Vote.Time` is counted number of times proportional to the process voting power. As -the voting power is not uniform (one process one vote), a vote message is actually an aggregator of the same votes whose -number is equal to the voting power of the process that has casted the corresponding votes message. - -Let's consider the following example: - -- we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10) -and (p4, 10). The total voting power is 70 (`N = 3f+1`, where `N` is the total voting power, and `f` is the maximum voting -power of the faulty processes), so we assume that the faulty processes have at most 23 of voting power. -Furthermore, we have the following vote messages in some LastCommit field (we ignore all fields except Time field): - - (p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4 are faulty processes. Let's assume that the - `block.LastCommit` message contains votes of processes p2, p3 and p4. Median is then chosen the following way: - the value 98 is counted 27 times, the value 1000 is counted 10 times and the value 500 is counted also 10 times. - So the median value will be the value 98. No matter what set of messages with at least `2f+1` voting power we - choose, the median value will always be between the values sent by correct processes. - -We ensure Time Monotonicity and Time Validity properties by the following rules: - -- let rs denotes `RoundState` (consensus internal state) of some process. Then -`rs.ProposalBlock.Header.Time == median(rs.LastCommit) && -rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`. - -- Furthermore, when creating the `vote` message, the following rules for determining `vote.Time` field should hold: - - - if `rs.LockedBlock` is defined then - `vote.Time = max(rs.LockedBlock.Timestamp + time.Millisecond, time.Now())`, where `time.Now()` - denotes local Unix time in milliseconds - - - else if `rs.Proposal` is defined then - `vote.Time = max(rs.Proposal.Timestamp + time.Millisecond,, time.Now())`, - - - otherwise, `vote.Time = time.Now())`. In this case vote is for `nil` so it is not taken into account for - the timestamp of the next block. +BFT Time is a Byzantine fault-tolerant algorithm for computing [block times](./time.md). + +> :warning: +> CometBFT `v1.x` introduced [Proposer-Based Timestamps (PBTS)][pbts-spec], +> intended to be a replacement for BFT Time. +> Users are strongly encouraged to adopt the PBTS algorithm in new chains, or +> when upgrading existing chains, as the BFT Time algorithm MAY be deprecated +> in a future version of CometBFT. + +## Overview + +BFT Time computes the `Time` of a block proposed in height `H` of consensus +from the `Timestamp` field of the `Precommit` messages broadcast by +validators in the commit `Round` of the previous height `H-1`. + +In order to commit a block, a node needs to receive `Precommit` messages for +the corresponding `BlockID` from validators whose cumulative voting power is +more than `2/3` of the total voting power. +The received `Precommit` messages should refer to the same round, the commit `Round`. +A set of `Precommit` messages with the properties mentioned above is a `Commit`. 
+A `Commit` set of height `H` is included in blocks proposed in height `H+1`. + +BFT Time computes the `Time` field of a block proposed in height `H` deterministically +from the `LastCommit` field of the block, which is a `Commit` set from +height `H-1`, using the `MedianTime` method defined as follows: + +- `MedianTime`: the weighted median of `Timestamp` fields of `Precommit` + messages forming a `Commit` set, with weights defined by the validators' voting powers. + The weighted median is produced by counting each `Timestamp` + value a number of times proportional to the voting power of the corresponding validator. + +The median of a set of values is one of the values of the set, so the +`Time` of a proposed block is one of the `Timestamp` fields of the `Precommit` +messages included in the `LastCommit` set of that block. + +### Example + +Consider the following example: + +- We have four validators p1, p2, p3 and p4, with voting power + distribution: (p1, 23), (p2, 27), (p3, 10), (p4, 10). + The total voting power is 70, so we assume that the faulty validators have at + most 23 voting power (since `N > 3F`, where `N` is the total voting + power and `F` is the maximum voting power of faulty validators). +- We have the following `Precommit` messages in some `Commit` field (notice that we + ignore all fields except the validator name and the `Timestamp` field): (p1, 100), (p2, 98), (p3, 1000), (p4, 500). + We assume that p3 and p4 are faulty validators, as they propose block times + much higher (far in the future) than p1 and p2. +- Let's assume that the `block.LastCommit` field contains `Precommit`s of + validators p2, p3 and p4. +- The `MedianTime` is then computed the following way: the value 98 (p2) is + counted 27 times, the value 1000 (p3) is counted 10 times and the value 500 +(p4) is also counted 10 times. The median value will be `98`. + +Notice that, no matter what set of `Precommit` messages with more than `2/3` of +the total voting power we choose, the `MedianTime` value will always be a +value among the `Timestamp` values produced by correct validators. +By correct here we mean non-malicious validators whose clocks are reasonably +accurate. + +## Operation + +In order to implement BFT Time, validators need to set the `Timestamp` field of +`Precommit` messages they sign and broadcast, and block proposers need to +compute the block `Time` from the `LastCommit` set. + +### Vote Time + +When producing a `Precommit` message, a validator should set the `Timestamp` field as follows: + +1. Let `now` be the clock time of the validator. +2. If `LockedBlock` is defined, set `Timestamp = max(now, LockedBlock.Time + 1ms)`. +3. Else if `ProposalBlock` is defined, set `Timestamp = max(now, ProposalBlock.Time + 1ms)`. +4. Otherwise, set `Timestamp = now`. + +The `LockedBlock`, if set, is the block for which the validator is issuing a `Precommit`. +The `ProposalBlock` is the block proposed in that round; in favorable runs, it +matches the `LockedBlock`. + + +The validator in practice _proposes_ the `Time` for the next block when setting +the `Timestamp` of its `Precommit`. +The proposed `Time` is, by default, the validator's current clock time. +To ensure [Time Monotonicity](./time.md#properties), the `Time` of the next block should be +higher than the `Time` of the block to be committed in the current height. +So if `now` is smaller than `Time`, the validator proposes the `Time` of the block to be committed +plus a small delta, set to `1ms`. 
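+A minimal Go sketch of the `Timestamp` rule above, for illustration only: `blockTime` stands for the `Time` of the `LockedBlock` or, failing that, of the `ProposalBlock`, and is `nil` when the vote is for `nil`.
+
+```go
+// Sketch: compute the Timestamp field of a Precommit message.
+func voteTime(now time.Time, blockTime *time.Time) time.Time {
+	if blockTime == nil {
+		return now // vote for nil: not used for the next block's Time
+	}
+	if lower := blockTime.Add(time.Millisecond); now.Before(lower) {
+		return lower // preserve Time Monotonicity: block Time + 1ms
+	}
+	return now
+}
+```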
+
+### Proposed Time
+
+The proposer of a round of consensus produces a block to be proposed.
+The proposed block must include a `Commit` set from the commit round of the
+previous height, as the block's `LastCommit` field.
+
+The `Time` for the proposed block is then set as `Block.Time = MedianTime(block.LastCommit)`.
+
+Since the block `Time` is produced in a deterministic way, every node that
+receives the proposed block can validate `Block.Time` using the same
+procedure. Blocks with incorrectly computed block times are rejected.
+
+## Properties
+
+BFT Time guarantees the two main [properties](./time.md#properties) for block times:
+
+- **Time Monotonicity**: the [production](#vote-time) of `Timestamp` fields for
+  `Precommit` messages at correct validators ensures that the `Time` proposed
+  for the next block is higher than the `Time` of the current block.
+  Since the `Time` of a block is retrieved from a `Precommit`
+  produced by a correct validator, monotonicity is guaranteed.
+
+- **Byzantine Fault Tolerance**: given a `Commit` set that forms the
+  `LastCommit` field, a range of [valid values](#proposed-time) for the `Time` field of the
+  block is defined only by `Precommit` messages produced by correct validators,
+  i.e., faulty validators cannot arbitrarily influence (increase or decrease) the
+  `Time` value.
+
+Notice that the guarantees rely on the fact that the voting power owned by
+Byzantine validators is limited; more specifically, it is less than 1/3 of the
+total voting power, which is also a requirement for the consensus algorithm.
+
+[pbts-spec]: ./proposer-based-timestamp/README.md
diff --git a/spec/consensus/consensus-paper/IEEEtran.bst b/spec/consensus/consensus-paper/IEEEtran.bst
index 53fbc030aae..f4bd88dead1 100644
--- a/spec/consensus/consensus-paper/IEEEtran.bst
+++ b/spec/consensus/consensus-paper/IEEEtran.bst
@@ -635,7 +635,7 @@ numnames
% item.
%
% "output" does check for the presence of a previous empty item and will
-% remove an empty item rather than outputing it.
+% remove an empty item rather than outputting it.
%
% "output.warn" is like "output", but will issue a warning if it detects
% an empty item.
diff --git a/spec/consensus/consensus-paper/IEEEtran.cls b/spec/consensus/consensus-paper/IEEEtran.cls
index 0dc961648d9..56cb47f1a4e 100644
--- a/spec/consensus/consensus-paper/IEEEtran.cls
+++ b/spec/consensus/consensus-paper/IEEEtran.cls
@@ -125,7 +125,7 @@
% margins will be the same and the text will be horizontally centered.
% For final submission to IEEE, authors should use US letter (8.5 X 11in)
% paper. Note that authors should ensure that all post-processing
-% (ps, pdf, etc.) uses the same paper specificiation as the .tex document.
+% (ps, pdf, etc.) uses the same paper specification as the .tex document.
% Problems here are by far the number one reason for incorrect margins.
% IEEEtran will automatically set the default paper size under pdflatex
% (without requiring a change to pdftex.cfg), so this issue is more
@@ -342,7 +342,7 @@
% default to US letter paper, 10pt, twocolumn, one sided, final, journal
\ExecuteOptions{letterpaper,10pt,twocolumn,oneside,final,journal}
-% overrride these defaults per user requests
+% override these defaults per user requests
\ProcessOptions
@@ -2016,7 +2016,7 @@ Using a default centering column instead}%
-% creates a vertical rule that extends from the bottom to the top a a cell
+% creates a vertical rule that extends from the bottom to the top a cell
% Provided in case other packages redefine \vline some other way.
% usage: \IEEEeqnarrayvrule[rule thickness] % If no argument is provided, \arrayrulewidth will be used for the rule thickness. @@ -2359,7 +2359,7 @@ Using a default centering column instead}% \vrule width0pt height\dimen0 depth\dimen2\relax\fi} -% creates an invisible strut, useable even outside \IEEEeqnarray +% creates an invisible strut, usable even outside \IEEEeqnarray % if \IEEEvisiblestrutstrue, the strut will be visible and 0.2pt wide. % usage: \IEEEstrut[height][depth][font size commands] % default is \IEEEstrut[0.7\normalbaselineskip][0.3\normalbaselineskip][\relax] @@ -2469,7 +2469,7 @@ $$\@ignoretrue} \newif\if@IEEElastlinewassubequation% \@IEEElastlinewassubequationfalse -% IEEEeqnarray uses a modifed \\ instead of the plain \cr to +% IEEEeqnarray uses a modified \\ instead of the plain \cr to % end rows. This allows for things like \\*[vskip amount] % This "cr" macros are modified versions those for LaTeX2e's eqnarray % the {\ifnum0=`} braces must be kept away from the last column to avoid @@ -2504,7 +2504,7 @@ $$\@ignoretrue} \@IEEEappendtoksA{&}% \advance\@IEEEeqncolcnt by 1\relax% update the col count \repeat - % this number of &'s will take us the the isolation column + % this number of &'s will take us the isolation column \fi % execute the &'s \the\@IEEEtrantmptoksA% @@ -2639,7 +2639,7 @@ $$\@ignoretrue} -% IEEEeqnarraybox uses a modifed \\ instead of the plain \cr to +% IEEEeqnarraybox uses a modified \\ instead of the plain \cr to % end rows. This allows for things like \\[vskip amount] % This "cr" macros are modified versions those for LaTeX2e's eqnarray % For IEEEeqnarraybox, \\* is the same as \\ @@ -3305,7 +3305,7 @@ between column types.}% \rule[-0.3\@IEEEtrantmpdimenA]{0pt}{\@IEEEtrantmpdimenA}} -% blocks to hold the authors' names and affilations. +% blocks to hold the authors' names and affiliations. % Makes formatting easy for conferences % % use real definitions in conference mode @@ -4290,7 +4290,7 @@ between column types.}% \@IEEEtrantmpdimenB=0.005\@IEEEtrantmpdimenA% \multiply\@IEEEtrantmpdimenB by \@IEEEtrantmpcountA% % \@IEEEPARstartfont is globally set to the calculated font of the big letter -% We need to carry this out of the local calculation area to to create the +% We need to carry this out of the local calculation area to create the % big letter. \global\font\@IEEEPARstartfont\@IEEEPARstartFONTNAME\space at \@IEEEtrantmpdimenB% % Now set \@IEEEtrantmpdimenA to the width of the big letter @@ -4381,7 +4381,7 @@ between column types.}% % if this page does not have enough space, break it and lets start % with a new one \@IEEEtranneedspace{\@IEEEtrantmpdimenA}{\relax}% -% nominal spacer can strech, not shrink use 1fil so user can out stretch with \vfill +% nominal spacer can stretch, not shrink use 1fil so user can out stretch with \vfill \vskip \@IEEEBIOskipN plus 1fil minus 0\baselineskip% % the default box for where the photo goes \def\@IEEEtempbiographybox{{\setlength{\fboxsep}{0pt}\framebox{% diff --git a/spec/consensus/creating-proposal.md b/spec/consensus/creating-proposal.md index feeb8e59666..e524a8c624f 100644 --- a/spec/consensus/creating-proposal.md +++ b/spec/consensus/creating-proposal.md @@ -4,7 +4,7 @@ order: 2 # Creating a proposal A block consists of a header, transactions, votes (the commit), -and a list of evidence of malfeasance (eg. signing conflicting votes). +and a list of evidence of misbehavior (e.g. duplicate vote). Outstanding evidence items get priority over outstanding transactions in the mempool. 
All in all, the block MUST NOT exceed `ConsensusParams.Block.MaxBytes`,
@@ -36,7 +36,7 @@ Once the transactions have been reaped from the mempool according to the rules d
CometBFT calls `PrepareProposal` to the application with the transaction list that has just been reaped.
As part of this call the application can remove, add, or reorder transactions in the transaction list.
-The `RequestPrepareProposal` contains two important fields:
+The `PrepareProposalRequest` contains two important fields:
* `MaxTxBytes`, which contains the value returned by `MaxDataBytes` described above.
The application MUST NOT return a list of transactions whose size exceeds this number.
diff --git a/spec/consensus/evidence.md b/spec/consensus/evidence.md
index 222d676cf38..aebfd56e9fa 100644
--- a/spec/consensus/evidence.md
+++ b/spec/consensus/evidence.md
@@ -38,8 +38,8 @@ evidence and begin gossiping this evidence to other nodes. [Verification](#dupli
```go
type DuplicateVoteEvidence struct {
-	VoteA Vote
-	VoteB Vote
+	VoteA *Vote
+	VoteB *Vote

// and abci specific fields
}
@@ -62,7 +62,7 @@ then the light client sends the "forged" light block to the node.
```go
type LightClientAttackEvidence struct {
-	ConflictingBlock LightBlock
+	ConflictingBlock *LightBlock
	CommonHeight int64

// and abci specific fields
@@ -77,7 +77,7 @@ should be committed within a certain period from the point that it occurred
(timely). Timelines is defined by the `EvidenceParams`: `MaxAgeNumBlocks` and
`MaxAgeDuration`. In Proof of Stake chains where validators are bonded, evidence
age should be less than the unbonding period so validators still can be
-punished. Given these two propoerties the following initial checks are made.
+punished. Given these two properties the following initial checks are made.

1. Has the evidence expired? This is done by taking the height of the `Vote`
within `DuplicateVoteEvidence` or `CommonHeight` within
@@ -143,26 +143,33 @@ will usually cache verifications so that this process is much quicker.

## Sending Evidence to the Application

After evidence is committed, the block is then processed by the block executor
-which delivers the evidence to the application via `EndBlock`. Evidence is
-stripped of the actual proof, split up per faulty validator and only the
-validator, height, time and evidence type is sent.
+which delivers the list of misbehavior (`[]abci.Misbehavior`) to the application via `FinalizeBlock`.

```proto
-enum EvidenceType {
-  UNKNOWN = 0;
-  DUPLICATE_VOTE = 1;
-  LIGHT_CLIENT_ATTACK = 2;
+// The type of misbehavior committed by a validator.
+enum MisbehaviorType {
+  option (gogoproto.goproto_enum_prefix) = false;
+
+  // Unknown
+  MISBEHAVIOR_TYPE_UNKNOWN = 0;
+  // Duplicate vote
+  MISBEHAVIOR_TYPE_DUPLICATE_VOTE = 1;
+  // Light client attack
+  MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK = 2;
}

-message Evidence {
-  EvidenceType type = 1;
+// Misbehavior is a type of misbehavior committed by a validator.
+message Misbehavior {
+  MisbehaviorType type = 1;
  // The offending validator
  Validator validator = 2 [(gogoproto.nullable) = false];
  // The height when the offense occurred
  int64 height = 3;
  // The corresponding time where the offense occurred
  google.protobuf.Timestamp time = 4 [
-    (gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+    (gogoproto.nullable) = false,
+    (gogoproto.stdtime) = true
+  ];
  // Total voting power of the validator set in case the ABCI application does
  // not store historical validators.
// https://github.com/tendermint/tendermint/issues/4581
@@ -170,9 +177,10 @@ message Evidence {
}
```

-`DuplicateVoteEvidence` and `LightClientAttackEvidence` are self-contained in
-the sense that the evidence can be used to derive the `abci.Evidence` that is
-sent to the application. Because of this, extra fields are necessary:
+`DuplicateVoteEvidence` and `LightClientAttackEvidence` can be used to derive the list of `abci.Misbehavior`, with one entry
+per Byzantine validator, that is sent to the application in the `FinalizeBlockRequest`.
+
+Because of this, extra fields are necessary:

```go
type DuplicateVoteEvidence struct {
diff --git a/spec/consensus/proposer-based-timestamp/README.md b/spec/consensus/proposer-based-timestamp/README.md
index 2972d8765b9..2af05c43aae 100644
--- a/spec/consensus/proposer-based-timestamp/README.md
+++ b/spec/consensus/proposer-based-timestamp/README.md
@@ -1,20 +1,132 @@
-# Proposer-Based Timestamps
+# Proposer-Based Timestamps (PBTS)

-This section describes a version of the Tendermint consensus algorithm, adopted in CometBFT,
-which uses proposer-based timestamps.
+This document describes a version of the Tendermint consensus algorithm
+that uses proposer-based timestamps.

-## Contents
+PBTS is a Byzantine fault-tolerant algorithm used by CometBFT for computing [block times](../time.md).

-- [Proposer-Based Time][main] (entry point)
-- [Part I - System Model and Properties][sysmodel]
-- [Part II - Protocol Specification][algorithm]
+## Overview
+
+With PBTS, the timestamp of a block is assigned by its
+proposer, according to its local clock.
+In other words, the proposer of a block also *proposes* a timestamp for the block.
+Validators can accept or reject a proposed block.
+A block is only accepted if its timestamp is acceptable.
+A proposed timestamp is acceptable if it is *received* within a certain time window,
+determined by synchronous parameters.
+
+The motivation for introducing this new method for assigning timestamps is
+summarized in the [first draft proposal][main_v1].
+
+### Synchronous Parameters
+
+For validating timestamps, PBTS augments the system model considered by the
+consensus algorithm with *synchronous assumptions*:
+
+- **Synchronized clocks**: simultaneous clock reads at any two correct validators
+differ by at most `PRECISION`;
+
+- **Bounded message delays**: the end-to-end delay for delivering a `Proposal`
+  message, broadcast by a correct proposer, to all correct validators is
+  bounded by `MSGDELAY`.
+
+`PRECISION` and `MSGDELAY` are consensus parameters, shared by all validators,
+that define whether the timestamp of a block is acceptable,
+according to the introduced `timely` predicate.
+
+#### Note on Liveness
+
+Setting values that are too small for the synchronous parameters can compromise,
+possibly in an irreversible way, the liveness of consensus.
+This is particularly relevant for the `MSGDELAY` parameter.
+When the `Proposal` end-to-end delay is underestimated or unrealistic, proposed block
+times can be rejected by all correct nodes.
+
+In order to prevent networks with bad parameters from not making progress (that is,
+remaining at the consensus instance for the same height forever), the `MSGDELAY`
+parameter has become adaptive in the implementation.
+This means that the `MSGDELAY` parameter should be interpreted in the form `MSGDELAY(r)`, where `r` is the
+consensus round, with `MSGDELAY(r+1) > MSGDELAY(r)`.
+The original `MSGDELAY` is therefore in practice `MSGDELAY(0)`.
+
+More details and discussion can be found in [issue 2184][issue2184].
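+
+The Go sketch below illustrates one possible adaptive schedule. The
+10%-per-round growth policy and the `msgDelay` helper are assumptions made
+for this illustration; they are not necessarily the policy or API adopted by
+the implementation.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// msgDelay sketches an adaptive MSGDELAY(r): the configured base value,
+// MSGDELAY(0), grows with the round number, so that an underestimated
+// base is eventually exceeded and progress can be restored.
+func msgDelay(base time.Duration, round int) time.Duration {
+	d := base
+	for i := 0; i < round; i++ {
+		d += d / 10 // illustrative growth: MSGDELAY(r+1) > MSGDELAY(r)
+	}
+	return d
+}
+
+func main() {
+	fmt.Println(msgDelay(500*time.Millisecond, 0)) // 500ms
+	fmt.Println(msgDelay(500*time.Millisecond, 3)) // 665.5ms
+}
+```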
+
+### Timestamp Validation
+
+The `timely` predicate is defined as follows.
+Let `proposalReceiveTime` be the time, read from its local clock, at
+which a validator receives a `Proposal` message for a `block` with timestamp `ts = block.time`.
+The proposed timestamp `ts` can be accepted if both of the following hold:
+
+ - `ts <= proposalReceiveTime + PRECISION`
+ - `ts >= proposalReceiveTime - MSGDELAY - PRECISION`
+
+The following diagram graphically represents the conditions for accepting a proposed timestamp:
+
+![diagram](./diagram.png)
+
+A more detailed and formalized description of the `timely` predicate is available in the
+[System Model and Properties][sysmodel] document.
+
+## Implementation
+
+The implementation of PBTS requires some changes to the Tendermint consensus algorithm,
+summarized below:
+
+- A proposer timestamps a block with the current time, read from its local clock.
+The block's timestamp represents the time at which it was assembled
+(after the `getValue()` call in line 18 of the [arXiv][arXiv] algorithm):
+
+  - Block timestamps are definitive, meaning that the original timestamp
+    is retained when a block is re-proposed (line 16);
+
+  - To preserve monotonicity, a proposer might need to wait until its clock
+    reads a time greater than the timestamp of the previous block;
+
+- A validator only prevotes for a block if its timestamp is considered `timely`
+(compared to the original algorithm, a check is added to line 23).
+Otherwise, the validator prevotes `nil` (line 26):
+
+  - Validators register the time at which they received `Proposal` messages,
+    in order to evaluate the `timely` predicate;
+
+  - Blocks that are re-proposed because they received `2f+1 Prevotes`
+    in a previous round (line 28) are not subject to the `timely` predicate,
+    as their timestamps have already been evaluated in a previous round.
+
+The full solution is detailed and formalized in the [Algorithm Specification][algorithm] document.
+
+## Further details
+
+- [System Model and Properties][sysmodel]
+- [Algorithm Specification][algorithm]
- [TLA+ Specification][proposertla]
+
+### Issues
+
+- [cometbft#2184: PBTS: should synchrony parameters be adaptive?][issue2184]
+- [tendermint/spec#355: PBTS: evidence][issue355]: can we punish Byzantine proposers?
+- [tendermint/spec#377: PBTS: margins for proposal times assigned by Byzantine proposers][issue377]
+
+
+[main_v1]: ./v1/pbts_001_draft.md
+
+[algorithm]: ./pbts-algorithm.md
+[algorithm_v1]: ./v1/pbts-algorithm_001_draft.md
-[algorithm]: ./pbts-algorithm_001_draft.md
+[sysmodel]: ./pbts-sysmodel.md
+[sysmodel_v1]: ./v1/pbts-sysmodel_001_draft.md
+[timely-predicate]: ./pbts-sysmodel.md#timely-predicate
-[sysmodel]: ./pbts-sysmodel_001_draft.md
+[proposertla]: ./tla/README.md
-[main]: ./pbts_001_draft.md
+[bfttime]: ../bft-time.md
+[arXiv]: https://arxiv.org/pdf/1807.04938.pdf
-[proposertla]: ./tla/TendermintPBT_001_draft.tla
+[issue353]: https://github.com/tendermint/spec/issues/353
+[issue355]: https://github.com/tendermint/spec/issues/355
+[issue370]: https://github.com/tendermint/spec/issues/370
+[issue371]: https://github.com/tendermint/spec/issues/371
+[issue372]: https://github.com/tendermint/spec/issues/372
+[issue377]: https://github.com/tendermint/spec/issues/377
+[issue2184]: https://github.com/cometbft/cometbft/issues/2184
diff --git a/spec/consensus/proposer-based-timestamp/diagram.png b/spec/consensus/proposer-based-timestamp/diagram.png
new file mode 100644
index 00000000000..bf8df8cb770
Binary files /dev/null and b/spec/consensus/proposer-based-timestamp/diagram.png differ
diff --git a/spec/consensus/proposer-based-timestamp/pbts-algorithm.md b/spec/consensus/proposer-based-timestamp/pbts-algorithm.md
new file mode 100644
index 00000000000..9197ee646f3
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/pbts-algorithm.md
@@ -0,0 +1,170 @@
+# PBTS: Algorithm Specification
+
+## Proposal Time
+
+PBTS computes, for a proposed value `v`, the proposal time `v.time`, with a bounded difference to the actual real time at which the proposed value was generated.
+The proposal time is read from the clock of the process that proposes a value for the first time, which is its original proposer.
+
+With PBTS, therefore, we assume that processes have access to **synchronized clocks**.
+The proper definition of what this means can be found in the [system model][sysmodel],
+but essentially we assume that two correct processes do not simultaneously read from their clocks
+time values that differ by more than `PRECISION`, which is a system parameter.
+
+### Proposal times are definitive
+
+When a value `v` is produced by a process, it also assigns the associated proposal time `v.time`.
+If the same value `v` is then re-proposed in a subsequent round of consensus,
+it retains its original time, assigned by its original proposer.
+
+A value `v` should be re-proposed when it becomes locked by the network, i.e., when it receives `2f + 1 PREVOTES` in a round `r` of consensus.
+This means that processes whose cumulative voting power is at least `2f + 1` accepted both `v` and its associated time `v.time` in round `r`.
+Since the originally proposed value and its associated time were considered valid, there is no reason for reassigning `v.time`.
+
+> In the [first version][algorithm_v1] of this specification, proposals were defined as pairs `(v, time)`.
+> In addition, the same value `v` could be proposed in different rounds, but would be associated with distinct times each time it was re-proposed.
+> Since this possibility does not exist in this second specification, the proposal time became part of the proposed value.
+> With this simplification, several small changes to the [arXiv][arXiv] algorithm are no longer required.
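+
+The rule can be sketched in Go as follows. The `Proposal` type, `getValue`,
+and the overall shape are hypothetical, introduced only for this
+illustration; they are not the algorithm's or CometBFT's actual types.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// Proposal pairs a value with its definitive proposal time.
+type Proposal struct {
+	Value string
+	Time  time.Time
+}
+
+// getValue stands in for the application providing the next value.
+func getValue() string { return "v" }
+
+// propose sketches the rule: a value produced for the first time is
+// timestamped with the proposer's clock time; a re-proposed (valid) value
+// keeps the time assigned by its original proposer.
+func propose(validValue *Proposal, now time.Time) Proposal {
+	if validValue != nil {
+		return *validValue // v.time unchanged on re-proposal
+	}
+	return Proposal{Value: getValue(), Time: now}
+}
+
+func main() {
+	first := propose(nil, time.Now())
+	reproposed := propose(&first, time.Now().Add(time.Second))
+	fmt.Println(first.Time.Equal(reproposed.Time)) // true
+}
+```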
+
+## Time Monotonicity
+
+Values decided in successive heights of consensus must have increasing times, so:
+
+- Monotonicity: for any process `p` and any two decided heights `h` and `h'`, if `h > h'` then `decision_p[h].time > decision_p[h'].time`.
+
+To ensure time monotonicity, it is enough to ensure that a value `v` proposed by process `p` at height `h_p` has `v.time > decision_p[h_p-1].time`.
+So, if process `p` is the proposer of a round of height `h_p` and reads from its clock a time `now_p <= decision_p[h_p-1].time`,
+it should postpone the generation of its proposal until `now_p > decision_p[h_p-1].time`.
+
+> Although this scenario should be considered, it is unlikely during regular operation,
+as between `decision_p[h_p-1].time` and the start of height `h_p`, a complete consensus instance needs to be concluded.
+
+Notice that monotonicity is not introduced by this proposal, as it is already ensured by [`BFT Time`][bfttime].
+In `BFT Time`, the `Timestamp` field of every `Precommit` message of height `h_p` sent by a correct process is required to be larger than `decision_p[h_p-1].time`.
+As one of such `Timestamp` fields becomes the time assigned to a value proposed at height `h_p`, time monotonicity is observed.
+
+The time monotonicity of values proposed in heights of consensus is verified by the `valid()` predicate, to which every proposed value is submitted.
+A value rejected by `valid()` is not accepted by any correct process.
+
+## Timely Predicate
+
+PBTS introduces a new requirement for a process to accept a proposal: the proposal time must be `timely`.
+It is a temporal requirement, associated with the following
+[synchrony assumptions][sysmodel] regarding the behavior of processes and the network:
+
+- Synchronized clocks: the values simultaneously read from clocks of any two correct processes differ by at most `PRECISION`;
+- Bounded transmission delays: the real time interval between the sending of a proposal at a correct process and the reception of the proposal at any correct process is upper bounded by `MSGDELAY`.
+  - With the introduction of [adaptive message delays](./pbts-sysmodel.md#pbts-msg-delay-adaptive0),
+    the `MSGDELAY` parameter should be interpreted as `MSGDELAY(r)`, where `r` is the current round,
+    and it is expected that `MSGDELAY(r+1) > MSGDELAY(r)`.
+
+#### **[PBTS-RECEPTION-STEP.1]**
+
+Let `now_p` be the time, read from the clock of process `p`, at which `p` receives the proposed value `v` of round `r`.
+The proposal time is considered `timely` by `p` when:
+
+1. `now_p >= v.time - PRECISION`
+1. `now_p <= v.time + MSGDELAY + PRECISION`
+
+The first condition derives from the fact that the generation and sending of `v` precedes its reception.
+The minimum receiving time `now_p` for `v.time` to be considered `timely` by `p` is derived from the extreme scenario in which
+the clock of `p` is `PRECISION` *behind* the clock of the proposer of `v`, and the proposal's transmission delay is `0` (minimum).
+
+The second condition derives from the assumption of an upper bound for the transmission delay of a proposal.
+The maximum receiving time `now_p` for `v.time` to be considered `timely` by `p` is derived from the extreme scenario in which
+the clock of `p` is `PRECISION` *ahead* of the clock of the proposer of `v`, and the proposal's transmission delay is `MSGDELAY` (maximum).
+
+## Updated Consensus Algorithm
+
+The following changes are proposed for the algorithm in the [arXiv paper][arXiv].
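+
+As a reference for the rules below, the `timely` check of
+[PBTS-RECEPTION-STEP.1] can be sketched in Go as follows. This is an
+illustrative sketch, not the actual implementation; the function and
+parameter names are assumptions made for this example.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// timely reports whether a proposal time vTime is timely for a process
+// that received the proposal at local time nowP, per [PBTS-RECEPTION-STEP.1].
+func timely(nowP, vTime time.Time, precision, msgDelay time.Duration) bool {
+	return !nowP.Before(vTime.Add(-precision)) && // now_p >= v.time - PRECISION
+		!nowP.After(vTime.Add(msgDelay+precision)) // now_p <= v.time + MSGDELAY + PRECISION
+}
+
+func main() {
+	vTime := time.Now()
+	// Received 300ms after the proposal time: timely for
+	// PRECISION = 500ms and MSGDELAY = 2s.
+	fmt.Println(timely(vTime.Add(300*time.Millisecond), vTime,
+		500*time.Millisecond, 2*time.Second)) // true
+}
+```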
+
+### Updated `StartRound` function
+
+There are two additions to the operation of the **proposer** of a round:
+
+1. To ensure time monotonicity, the proposer does not propose a value until its
+current local time, represented by `now_p` in the algorithm,
+becomes greater than the previously decided value's time;
+2. When the proposer produces a new proposal, it sets the proposal's time to its current local time.
+   - No changes are made to the logic when a proposer has a non-nil `validValue_p`, which retains its original proposal time.
+
+#### **[PBTS-ALG-STARTROUND.1]**
+
+```go
+function StartRound(round) {
+  round_p ← round
+  step_p ← propose
+  if proposer(h_p, round_p) = p {
+    if validValue_p != nil {
+      proposal ← validValue_p // proposal.time unchanged
+    } else {
+      wait until now_p > decision_p[h_p-1].time // time monotonicity
+      proposal ← getValue()
+      proposal.time ← now_p // proposal time set to current local time
+    }
+    broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩
+  } else {
+    schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p)
+  }
+}
+```
+
+### Updated upon clause of line 22
+
+The rule on line 22 applies to values `v` proposed for the first time, i.e.,
+for proposals not backed by `2f + 1 PREVOTE`s for `v` in a previous round.
+The `PROPOSAL` message, in this case, has a valid round `vr = -1`.
+
+The new rule for issuing a `PREVOTE` for a proposed value `v`
+requires the proposal time to be `timely`:
+
+#### **[PBTS-ALG-UPON-PROP.1]**
+
+```go
+upon ⟨PROPOSAL, h_p, round_p, v, −1⟩ from proposer(h_p, round_p) while step_p = propose do {
+  if timely(v.time) ∧ valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) {
+    broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩
+  }
+  else {
+    broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+  }
+  step_p ← prevote
+}
+```
+
+The `timely` predicate considers the time at which the `PROPOSAL` message is received.
+Although not represented above, for the sake of simplicity, it is assumed that the
+`PROPOSAL` reception time is registered and provided to the `timely` predicate.
+
+### Unchanged upon clause of line 28
+
+The rule on line 28 applies to a value `v` re-proposed in the current round
+because its proposer received `2f + 1 PREVOTE`s for `v` in a previous round
+`vr`, therefore updating its `validValue_p` to `v` and `validRound_p` to `vr`.
+
+This means that there was a round `r <= vr` in which `2f + 1` processes
+accepted a `PROPOSAL` for `v`, proposed at round `r` for the first time, with
+`vr = -1`.
+These processes executed line 22 of the algorithm and broadcast a
+`PREVOTE` for `v`, indicating, in particular, that they judged `v.time`
+as `timely` in round `r`.
+
+Since `v.time` was considered `timely` by `2f + 1` processes in a previous
+round, and provided that `v.time` cannot be updated when `v` is re-proposed,
+the evaluation of the `timely` predicate is not necessary in this case.
+
+For a more formal explanation and a proof of this assertion, refer to the
+[properties][sysmodel-pol] section.
+
+**All other rules remain unchanged.**
+
+Back to [main document][main].
+
+[main]: ./README.md
+
+[algorithm_v1]: ./v1/pbts-algorithm_001_draft.md
+
+[sysmodel]: ./pbts-sysmodel.md
+[sysmodel-pol]: ./pbts-sysmodel.md#derived-proof-of-locks
+
+[bfttime]: ../bft-time.md
+[arXiv]: https://arxiv.org/pdf/1807.04938.pdf
diff --git a/spec/consensus/proposer-based-timestamp/pbts-sysmodel.md b/spec/consensus/proposer-based-timestamp/pbts-sysmodel.md
new file mode 100644
index 00000000000..52bcbc1ce99
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/pbts-sysmodel.md
@@ -0,0 +1,357 @@
+# PBTS: System Model and Properties
+
+## Outline
+
+ - [System model](#system-model)
+   - [Synchronized clocks](#synchronized-clocks)
+   - [Message delays](#message-delays)
+ - [Problem Statement](#problem-statement)
+ - [Timely Predicate](#timely-predicate)
+   - [Timely Proof-of-Locks](#timely-proof-of-locks)
+   - [Derived Proof-of-Locks](#derived-proof-of-locks)
+ - [Temporal Analysis](#temporal-analysis)
+   - [Safety](#safety)
+   - [Liveness](#liveness)
+
+## System Model
+
+#### **[PBTS-CLOCK-NEWTON.0]**
+
+There is a reference Newtonian real-time `t`.
+
+No process has direct access to this reference time, used only for specification purposes.
+The reference real-time is assumed to be aligned with the Coordinated Universal Time (UTC).
+
+### Synchronized clocks
+
+Processes are assumed to be equipped with synchronized clocks,
+aligned with the Coordinated Universal Time (UTC).
+
+This requires processes to periodically synchronize their local clocks with an
+external and trusted source of time (e.g. NTP servers).
+Each synchronization cycle aligns the process local clock with the external
+source of time, making it a *fairly accurate* source of real time.
+The periodic (re)synchronization aims to correct the *drift* of local clocks,
+which tend to pace slightly faster or slower than the real time.
+
+To avoid an excessive level of detail in the parameters and guarantees of
+synchronized clocks, we adopt a single system parameter `PRECISION` to
+encapsulate the potential inaccuracy of the synchronization mechanisms,
+and drifts of local clocks from real time.
+
+#### **[PBTS-CLOCK-PRECISION.0]**
+
+There exists a system parameter `PRECISION`, such that
+for any two processes `p` and `q`, with local clocks `C_p` and `C_q`:
+
+- If `p` and `q` are equipped with synchronized clocks,
+  then for any real-time `t` we have `|C_p(t) - C_q(t)| <= PRECISION`.
+
+`PRECISION` thus bounds the difference on the times simultaneously read by processes
+from their local clocks, so that their clocks can be considered synchronized.
+
+### Message Delays
+
+To properly evaluate whether the time assigned to a proposal is consistent with the real time,
+we need some information regarding the time it takes for a message carrying a proposal
+to reach all its (correct) destinations.
+More precisely, the *maximum delay* for delivering a proposal to its destinations allows
+defining a lower bound, a *minimum time* that a correct process assigns to a proposal.
+
+#### **[PBTS-MSG-DELAY.0]**
+
+There exists a system parameter `MSGDELAY` for end-to-end delays of proposal messages,
+such that for any two correct processes `p` and `q`:
+
+- If `p` sends a proposal message `m` at real time `t` and `q` receives `m` at
+  real time `t'`, then `t <= t' <= t + MSGDELAY`.
+
+Notice that, as a system parameter, `MSGDELAY` should be observed for any
+proposal message broadcast by correct processes: it is a *worst-case* parameter.
+As message delays depend on the message size, the above requirement implicitly
+indicates that the size of proposal messages is either fixed or upper bounded.
+
+#### **[PBTS-MSG-DELAY-ADAPTIVE.0]**
+
+This specification is written assuming that there exists an end-to-end maximum
+delay `maxMsgDelay` observed in the network, possibly unknown, and
+that the chosen value for `MSGDELAY` is such that `MSGDELAY >= maxMsgDelay`.
+Under this assumption, all properties described in this specification are satisfied.
+
+However, it is possible that in some networks the `MSGDELAY` parameter
+selected by operators is too small, i.e., `MSGDELAY < maxMsgDelay`.
+In order to tolerate this possibility, we propose the adoption of adaptive
+end-to-end delays, namely a relaxation of [PBTS-MSG-DELAY.0] where the
+`MSGDELAY` value increases each time consensus requires a new round.
+In this way, after a number of rounds, the adopted `MSGDELAY` should match the
+actual, but possibly unknown, end-to-end `maxMsgDelay`.
+This is a typical approach in partially synchronous models.
+
+The adaptive system parameter `MSGDELAY(r)` is defined as follows.
+Let `p` and `q` be any two correct processes:
+
+- If `p` sends a proposal message `m` from round `r` at real time `t` and `q` receives `m` at
+  real time `t'`, then `t < t' <= t + MSGDELAY(r)`.
+
+The adaptiveness is represented by the assumption that the value of the
+parameter increases over rounds, i.e., `MSGDELAY(r+1) > MSGDELAY(r)`.
+The initial value `MSGDELAY(0)` is equal to `MSGDELAY` as in [PBTS-MSG-DELAY.0].
+
+For the sake of correctness and formal verification, if `MSGDELAY` is
+chosen sufficiently large, then the fact that it increases in later rounds
+(i) will never be experienced in practice,
+and (ii) has no theoretical implications.
+The adaptation (increment) of `MSGDELAY` is only introduced here to handle
+potential misconfiguration.
+
+## Problem Statement
+
+This section defines the properties of the Tendermint consensus algorithm
+(cf. the [arXiv paper][arXiv]) in this system model.
+
+#### **[PBTS-PROPOSE.0]**
+
+A proposer proposes a consensus value `v` that includes a proposal time
+`v.time`.
+
+#### **[PBTS-INV-AGREEMENT.0]**
+
+- [Agreement] No two correct processes decide different values.
+
+This implies that no two correct processes decide, in particular, different
+proposal times.
+
+#### **[PBTS-INV-VALID.0]**
+
+- [Validity] If a correct process decides on value `v`, then `v` satisfies a
+  predefined `valid` predicate.
+
+With respect to PBTS, the `valid` predicate requires proposal times to be
+[monotonic][time-monotonicity] over heights of
+consensus.
+
+#### **[PBTS-INV-MONOTONICITY.0]**
+
+- If a correct process decides on value `v` at height `h` of consensus,
+  thus setting `decision[h] = v`, then `v.time > decision[h'].time` for all
+  previous heights `h' < h`.
+
+The monotonicity of proposal times
+implicitly assumes that heights of consensus are executed in order.
+
+#### **[PBTS-INV-TIMELY.0]**
+
+- [Time-Validity] If a correct process decides on value `v`, then the proposal
+  time `v.time` was considered `timely` by at least one correct process.
+
+The following section defines the `timely` predicate
+that restricts the allowed decisions based
+on the proposal time `v.time` associated with a proposed value `v`.
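+
+Before turning to the `timely` predicate, here is a minimal Go sketch of the
+monotonicity requirement, as it could appear inside a `valid` check. The
+names are hypothetical, chosen for this example only.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// monotonic sketches the time-validity part of the valid() predicate:
+// the proposed time must exceed the time of the previously decided value,
+// i.e., v.time > decision[h-1].time.
+func monotonic(proposedTime, lastDecidedTime time.Time) bool {
+	return proposedTime.After(lastDecidedTime)
+}
+
+func main() {
+	last := time.Now()
+	fmt.Println(monotonic(last.Add(time.Millisecond), last)) // true
+	fmt.Println(monotonic(last, last))                       // false
+}
+```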
+
+## Timely Predicate
+
+For PBTS, a `proposal` is a tuple `(v, v.time, v.round)`, where:
+
+- `v` is the proposed value;
+- `v.time` is the associated proposal time;
+- `v.round` is the round at which `v` was first proposed.
+
+We include the proposal round `v.round` in the proposal definition because a
+value `v` can be proposed in multiple rounds of consensus,
+but the evaluation of the `timely` predicate is only relevant at round `v.round`.
+
+> Considering the algorithm in the [arXiv paper][arXiv], a new proposal is
+> produced by the `getValue()` method (line 18), invoked by the proposer `p` of round
+> `round_p` when starting the round with `validValue_p = nil`.
+> In this case, the proposed value is broadcast in a `PROPOSAL` message with
+> `vr = validRound_p = -1`.
+
+#### **[PBTS-PROPOSAL-RECEPTION.0]**
+
+The `timely` predicate is evaluated when a process receives a proposal.
+More precisely, let `p` be a correct process:
+
+- `proposalReceptionTime(p,r)` is the time `p` reads from its local clock when
+  it receives the proposal of round `r`.
+
+#### **[PBTS-TIMELY.0]**
+
+Let `(v, v.time, v.round)` be a proposal; then `v.time` is considered `timely` by a correct process
+`p` if:
+
+1. `proposalReceptionTime(p,v.round)` is set, and
+1. `proposalReceptionTime(p,v.round) >= v.time - PRECISION`, and
+1. `proposalReceptionTime(p,v.round) <= v.time + MSGDELAY(v.round) + PRECISION`.
+
+A correct process only sends a `PREVOTE` for `v` at round `v.round` if the
+associated proposal time `v.time` is considered `timely`.
+
+> Considering the algorithm in the [arXiv paper][arXiv], the `timely` predicate
+> is evaluated by a process `p` when it receives a valid `PROPOSAL` message
+> from the proposer of the current round `round_p` with `vr = -1` (line 22).
+
+### Timely Proof-of-Locks
+
+A *Proof-of-Lock* is a set of `PREVOTE` messages of a round of consensus for the
+same value from processes whose cumulative voting power is at least `2f + 1`.
+We denote as `POL(v,r)` a proof-of-lock of value `v` at round `r`.
+
+For PBTS, we are particularly interested in the `POL(v,v.round)` produced in
+the round `v.round` at which a value `v` was first proposed.
+We call it a *timely* proof-of-lock for `v` because it can only be observed
+if at least one correct process considered the proposal time `v.time` `timely`:
+
+#### **[PBTS-TIMELY-POL.0]**
+
+If
+
+- there is a valid `POL(v,r)` with `r = v.round`, and
+- `POL(v,v.round)` contains a `PREVOTE` message from at least one correct process,
+
+Then, let `p` be one such correct process:
+
+- `p` received a `PROPOSAL` message of round `v.round`, and
+- the `PROPOSAL` message contained a proposal `(v, v.time, v.round)`, and
+- `p` was in round `v.round` and evaluated the proposal time `v.time` as `timely`.
+
+The existence of such a correct process `p` is guaranteed provided that the
+voting power of Byzantine processes is bounded by `2f`.
+
+### Derived Proof-of-Locks
+
+The existence of `POL(v,r)` is a requirement for the decision of `v` at round
+`r` of consensus.
+
+At the same time, the Time-Validity property establishes that if `v` is decided
+then a timely proof-of-lock `POL(v,v.round)` must have been produced.
+
+So, we need to demonstrate here that any valid `POL(v,r)` is either a timely
+proof-of-lock or it is derived from a timely proof-of-lock:
+
+#### **[PBTS-DERIVED-POL.0]**
+
+If
+
+- there is a valid `POL(v,r)`, and
+- `POL(v,r)` contains a `PREVOTE` message from at least one correct process,
+
+Then
+
+- there is a valid `POL(v,v.round)` with `v.round <= r` which is a timely proof-of-lock.
+
+The above relation is trivially observed when `r = v.round`, as `POL(v,r)` must
+be a timely proof-of-lock.
+Notice that we cannot have `r < v.round`, as `v.round` is defined as the first
+round at which `v` was proposed.
+
+For `r > v.round` we need to demonstrate that if there is a valid `POL(v,r)`,
+then a timely `POL(v,v.round)` was previously obtained.
+We observe that a condition for observing a `POL(v,r)` is that the proposer of
+round `r` has broadcast a `PROPOSAL` message for `v`.
+As `r > v.round`, we can affirm that `v` was not produced in round `r`.
+Instead, by the protocol operation, `v` was a *valid value* for the proposer of
+round `r`, which means that the proposer has observed a `POL(v,vr)` with `vr
+< r`.
+The above operation considers a *correct* proposer, but since a `POL(v,r)` was
+produced (by hypothesis) we can affirm that at least one correct process (also)
+observed a `POL(v,vr)`.
+
+> Considering the algorithm in the [arXiv paper][arXiv], `v` was proposed by
+> the proposer `p` of round `round_p` because its `validValue_p` variable was
+> set to `v`.
+> The `PROPOSAL` message broadcast by the proposer, in this case, had `vr = validRound_p > -1`,
+> and it could only be accepted by processes that also observed a `POL(v,vr)`.
+
+Thus, if there is a `POL(v,r)` with `r > v.round`, then there is a valid
+`POL(v,vr)` with `v.round <= vr < r`.
+If `vr = v.round` then `POL(v,vr)` is a timely proof-of-lock and we are done.
+Otherwise, there is another valid `POL(v,vr')` with `v.round <= vr' < vr`,
+and the above reasoning can be recursively applied until we get `vr' = v.round`
+and observe a timely proof-of-lock.
+
+## Temporal Analysis
+
+In this section we present invariants that need to be observed to ensure that
+PBTS is both safe and live.
+
+In addition to the variables and system parameters already defined, we use
+`beginRound(p,r)` as the value of process `p`'s local clock
+when it starts round `r` of consensus.
+
+### Safety
+
+The safety of PBTS requires that if a value `v` is decided, then at least one
+correct process `p` considered the associated proposal time `v.time` timely.
+Following the definition of [timely proposal times](#pbts-timely0) and
+proof-of-locks, we require this condition to be asserted at a specific round of
+consensus, defined as `v.round`:
+
+#### **[PBTS-SAFETY.0]**
+
+If
+
+- there is a valid commit `C` for a value `v`
+- `C` contains a `PRECOMMIT` message from at least one correct process
+
+then there is a correct process `p` (not necessarily the same process considered above) such that:
+
+- `beginRound(p,v.round) <= proposalReceptionTime(p,v.round) <= beginRound(p,v.round+1)` and
+- `v.time <= proposalReceptionTime(p,v.round) + PRECISION` and
+- `v.time >= proposalReceptionTime(p,v.round) - MSGDELAY(v.round) - PRECISION`
+
+That is, a correct process `p` started round `v.round` and, while still at
+round `v.round`, received a `PROPOSAL` message from round `v.round` proposing
+`v`.
+Moreover, the reception time of the original proposal for `v`, according to
+`p`'s local clock, enabled `p` to consider the proposal time `v.time` as
+`timely`.
+This is the requirement established by PBTS for issuing a `PREVOTE` for the
+proposal `(v, v.time, v.round)`, and thus for the eventual decision of `v`.
+
+### Liveness
+
+The liveness of PBTS relies on correct processes accepting proposal times
+assigned by correct proposers.
+We thus present a set of conditions for assigning a proposal time `v.time` so
+that every correct process should be able to issue a `PREVOTE` for `v`.
+
+#### **[PBTS-LIVENESS.0]**
+
+If
+
+- the proposer of a round `r` of consensus is correct
+- and it proposes a value `v` for the first time, with associated proposal time `v.time`
+
+then the proposal `(v, v.time, r)` is accepted by every correct process provided that:
+
+- `min{p is correct : beginRound(p,r)} <= v.time <= max{p is correct : beginRound(p,r)}` and
+- `max{p is correct : beginRound(p,r)} <= v.time + MSGDELAY(r) + PRECISION <= min{p is correct : beginRound(p,r+1)}`
+
+The first condition establishes a range of safe proposal times `v.time` for round `r`.
+This condition is trivially observed if a correct proposer `p` sets `v.time` to the time it
+reads from its clock when starting round `r` and proposing `v`.
+A `PROPOSAL` message sent by `p` at local time `v.time` should not be received
+by any correct process before its local clock reads `v.time - PRECISION`, so
+that condition 2 of [PBTS-TIMELY.0] is observed.
+
+The second condition establishes that every correct process should start round
+`v.round` at a local time that allows `v.time` to still be considered timely,
+according to condition 3 of [PBTS-TIMELY.0].
+In addition, it requires correct processes to stay long enough in round
+`v.round` so that they can receive the `PROPOSAL` message of round `v.round`.
+It is assumed here that the proposer of `v` broadcasts a `PROPOSAL` message at
+time `v.time`, according to its local clock, so that every correct process
+should receive this message by time `v.time + MSGDELAY(v.round) + PRECISION`, according
+to their local clocks.
+
+Back to [main document][main].
+
+[main]: ./README.md
+
+[algorithm]: ./pbts-algorithm.md
+[time-monotonicity]: ./pbts-algorithm.md#time-monotonicity
+
+[sysmodel]: ./pbts-sysmodel.md
+[sysmodel_v1]: ./v1/pbts-sysmodel_001_draft.md
+
+[arXiv]: https://arxiv.org/pdf/1807.04938.pdf
diff --git a/spec/consensus/proposer-based-timestamp/tla/.gitignore b/spec/consensus/proposer-based-timestamp/tla/.gitignore
new file mode 100644
index 00000000000..e25bf38d4b6
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/.gitignore
@@ -0,0 +1 @@
+intermediate/
diff --git a/spec/consensus/proposer-based-timestamp/tla/Apalache.tla b/spec/consensus/proposer-based-timestamp/tla/Apalache.tla
new file mode 100644
index 00000000000..044bff666f7
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/Apalache.tla
@@ -0,0 +1,109 @@
+--------------------------- MODULE Apalache -----------------------------------
+(*
+ * This is a standard module for use with the Apalache model checker.
+ * The meaning of the operators is explained in the comments.
+ * Many of the operators serve as additional annotations of their arguments.
+ * As we like to preserve compatibility with TLC and TLAPS, we define the
+ * operator bodies by erasure. The actual interpretation of the operators is
+ * encoded inside Apalache. For the moment, these operators are mirrored in
+ * the class at.forsyte.apalache.tla.lir.oper.ApalacheOper.
+ *
+ * Igor Konnov, Jure Kukovec, Informal Systems 2020-2021
+ *)
+
+(**
+ * An assignment of an expression e to a state variable x. Typically, one
+ * uses the non-primed version of x in the initializing predicate Init and
+ * the primed version of x (that is, x') in the transition predicate Next.
+ * Although TLA+ does not have a concept of a variable assignment, we find
+ * this concept extremely useful for symbolic model checking. In pure TLA+,
+ * one would simply write x = e, or x \in {e}.
+ *
+ * Apalache automatically converts some expressions of the form
+ * x = e or x \in {e} into assignments. However, if you like to annotate
+ * assignments by hand, you can use this operator.
+ *
+ * For a further discussion on that matter, see:
+ * https://github.com/informalsystems/apalache/blob/ik/idiomatic-tla/docs/idiomatic/assignments.md
+ *)
+x := e == x = e
+
+(**
+ * A generator of a data structure. Given a positive integer `bound`, and
+ * assuming that the type of the operator application is known, we
+ * recursively generate a TLA+ data structure as a tree, whose width is
+ * bound by the number `bound`.
+ *
+ * The body of this operator is redefined by Apalache.
+ *)
+Gen(size) == {}
+
+(**
+ * Convert a set of pairs S to a function F. Note that if S contains at least
+ * two pairs <<x, y>> and <<u, v>> such that x = u and y /= v,
+ * then F is not uniquely defined. We use CHOOSE to resolve this ambiguity.
+ * Apalache implements a more efficient encoding of this operator
+ * than the default one.
+ *
+ * @type: Set(<<a, b>>) => (a -> b);
+ *)
+SetAsFun(S) ==
+    LET Dom == { x: <<x, y>> \in S }
+        Rng == { y: <<x, y>> \in S }
+    IN
+    [ x \in Dom |-> CHOOSE y \in Rng: <<x, y>> \in S ]
+
+(**
+ * As TLA+ is untyped, one can use function- and sequence-specific operators
+ * interchangeably. However, to maintain correctness w.r.t. our type-system,
+ * an explicit cast is needed when using functions as sequences.
+ *)
+LOCAL INSTANCE Sequences
+FunAsSeq(fn, maxSeqLen) == SubSeq(fn, 1, maxSeqLen)
+
+(**
+ * Annotating an expression \E x \in S: P as Skolemizable. That is, it can
+ * be replaced with an expression c \in S /\ P(c) for a fresh constant c.
+ * Not every existential can be replaced with a constant, this should be done
+ * with care. Apalache detects Skolemizable expressions by static analysis.
+ *)
+Skolem(e) == e
+
+(**
+ * A hint to the model checker to expand a set S, instead of dealing
+ * with it symbolically. Apalache finds out which sets have to be expanded
+ * by static analysis.
+ *)
+Expand(S) == S
+
+(**
+ * A hint to the model checker to replace its argument Cardinality(S) >= k
+ * with a series of existential quantifiers for a constant k.
+ * Similar to Skolem, this has to be done carefully. Apalache automatically
+ * places this hint by static analysis.
+ *)
+ConstCardinality(cardExpr) == cardExpr
+
+(**
+ * The folding operator, used to implement computation over a set.
+ * Apalache implements a more efficient encoding than the one below.
+ * (from the community modules).
+ *)
+RECURSIVE FoldSet(_,_,_)
+FoldSet( Op(_,_), v, S ) == IF S = {}
+                            THEN v
+                            ELSE LET w == CHOOSE x \in S: TRUE
+                                 IN LET T == S \ {w}
+                                 IN FoldSet( Op, Op(v,w), T )
+
+(**
+ * The folding operator, used to implement computation over a sequence.
+ * Apalache implements a more efficient encoding than the one below.
+ * (from the community modules).
+ *)
+RECURSIVE FoldSeq(_,_,_)
+FoldSeq( Op(_,_), v, seq ) == IF seq = <<>>
+                              THEN v
+                              ELSE FoldSeq( Op, Op(v,Head(seq)), Tail(seq) )
+
+===============================================================================
diff --git a/spec/consensus/proposer-based-timestamp/tla/MC_PBT.tla b/spec/consensus/proposer-based-timestamp/tla/MC_PBT.tla
new file mode 100644
index 00000000000..53f7336fbf0
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/MC_PBT.tla
@@ -0,0 +1,77 @@
+----------------------------- MODULE MC_PBT -------------------------------
+CONSTANT
+  \* @type: ROUND -> PROCESS;
+  Proposer
+
+VARIABLES
+  \* @type: PROCESS -> ROUND;
+  round, \* a process round number
+  \* @type: PROCESS -> STEP;
+  step, \* a process step
+  \* @type: PROCESS -> DECISION;
+  decision, \* process decision
+  \* @type: PROCESS -> VALUE;
+  lockedValue, \* a locked value
+  \* @type: PROCESS -> ROUND;
+  lockedRound, \* a locked round
+  \* @type: PROCESS -> PROPOSAL;
+  validValue, \* a valid value
+  \* @type: PROCESS -> ROUND;
+  validRound \* a valid round
+
+\* time-related variables
+VARIABLES
+  \* @type: PROCESS -> TIME;
+  localClock, \* a process local clock: Corr -> Ticks
+  \* @type: TIME;
+  realTime \* a reference Newtonian real time
+
+\* book-keeping variables
+VARIABLES
+  \* @type: ROUND -> Set(PROPMESSAGE);
+  msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages
+  \* @type: ROUND -> Set(PREMESSAGE);
+  msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages
+  \* @type: ROUND -> Set(PREMESSAGE);
+  msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages
+  \* @type: Set(MESSAGE);
+  evidence, \* the messages that were used by the correct processes to make transitions
+  \* @type: ACTION;
+  action, \* we use this variable to see which action was taken
+  \* @type: PROCESS -> Set(PROPMESSAGE);
+  receivedTimelyProposal, \* used to keep track when a process receives a timely VALUE message
+  \* @type: <<ROUND, PROCESS>> -> TIME;
+  inspectedProposal \* used to keep track when a process tries to receive a message
+
+\* Invariant support
+VARIABLES
+  \* @type: ROUND -> TIME;
+  beginRound, \* the minimum of the local clocks at the time any process entered a new round
+  \* @type: PROCESS -> TIME;
+  endConsensus, \* the local time when a decision is made
+  \* @type: ROUND -> TIME;
+  lastBeginRound, \* the maximum of the local clocks in each round
+  \* @type: ROUND -> TIME;
+  proposalTime, \* the real time when a proposer proposes in a round
+  \* @type: ROUND -> TIME;
+  proposalReceivedTime \* the real time when a correct process first receives a proposal message in a round
+
+
+INSTANCE TendermintPBT_002_draft WITH
+  Corr <- {"c1", "c2"},
+  Faulty <- {"f3", "f4"},
+  N <- 4,
+  T <- 1,
+  ValidValues <- { "v0", "v1" },
+  InvalidValues <- {"v2"},
+  MaxRound <- 5,
+  MaxTimestamp <- 10,
+  MinTimestamp <- 2,
+  Delay <- 2,
+  Precision <- 2
+
+\* run Apalache with --cinit=CInit
+CInit == \* the proposer is arbitrary -- works for safety
+  Proposer \in [Rounds -> AllProcs]
+
+=============================================================================
diff --git a/spec/consensus/proposer-based-timestamp/tla/MC_PBT_2C_2F.tla b/spec/consensus/proposer-based-timestamp/tla/MC_PBT_2C_2F.tla
new file mode 100644
index 00000000000..d7de3df73cd
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/MC_PBT_2C_2F.tla
@@ -0,0 +1,68 @@
+----------------------------- MODULE MC_PBT_2C_2F -------------------------------
+CONSTANT
+  \* @type: ROUND -> PROCESS;
+  Proposer
+
+VARIABLES
+  \* @type: PROCESS -> ROUND;
+  round, \* a process round number
+  \* @type: PROCESS -> STEP;
+  step, \* a process step
+  \* @type: PROCESS -> DECISION;
+  decision, \* process decision
+  \* @type: PROCESS -> VALUE;
+  lockedValue, \* a locked value
+  \* @type: PROCESS -> ROUND;
+  lockedRound, \* a locked round
+  \* @type: PROCESS -> PROPOSAL;
+  validValue, \* a valid value
+  \* @type: PROCESS -> ROUND;
+  validRound \* a valid round
+
+\* time-related variables
+VARIABLES
+  \* @type: PROCESS -> TIME;
+  localClock, \* a process local clock: Corr -> Ticks
+  \* @type: TIME;
+  realTime \* a reference Newtonian real time
+
+\* book-keeping variables
+VARIABLES
+  \* @type: ROUND -> Set(PROPMESSAGE);
+  msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages
+  \* @type: ROUND -> Set(PREMESSAGE);
+  msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages
+  \* @type: ROUND -> Set(PREMESSAGE);
+  msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages
+  \* @type: Set(MESSAGE);
+  evidence, \* the messages that were used by the correct processes to make transitions
+  \* @type: ACTION;
+  action, \* we use this variable to see which action was taken
+  \* @type: <<ROUND, PROCESS>> -> TIME;
+  proposalReceptionTime \* used to keep track when a process receives a message
+
+\* Invariant support
+VARIABLES
+  \* @type: <<ROUND, PROCESS>> -> TIME;
+  beginRound \* the minimum of the local clocks at the time any process entered a new round
+
+INSTANCE TendermintPBT WITH
+  Corr <- {"c1", "c2"},
+  Faulty <- {"f3", "f4"},
+  N <- 4,
+  T <- 1,
+  ValidValues <- { "v0", "v1" },
+  InvalidValues <- {"v2"},
+  MaxRound <- 3,
+  MaxTimestamp <- 7,
+  MinTimestamp <- 2,
+  Delay <- 2,
+  Precision <- 2,
+  PreloadAllFaultyMsgs <- TRUE,
+  N_GEN <- 5
+
+\* run Apalache with --cinit=CInit
+CInit == \* the proposer is arbitrary -- works for safety
+  ArbitraryProposer
+
+=============================================================================
diff --git a/spec/consensus/proposer-based-timestamp/tla/MC_PBT_3C_1F.tla b/spec/consensus/proposer-based-timestamp/tla/MC_PBT_3C_1F.tla
new file mode 100644
index 00000000000..aec3d17a913
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/MC_PBT_3C_1F.tla
@@ -0,0 +1,68 @@
+----------------------------- MODULE MC_PBT_3C_1F -------------------------------
+CONSTANT
+  \* @type: ROUND -> PROCESS;
+  Proposer
+
+VARIABLES
+  \* @type: PROCESS -> ROUND;
+  round, \* a process round number
+  \* @type: PROCESS -> STEP;
+  step, \* a process step
+  \* @type: PROCESS -> DECISION;
+  decision, \* process decision
+  \* @type: PROCESS -> VALUE;
+  lockedValue, \* a locked value
+  \* @type: PROCESS -> ROUND;
+  lockedRound, \* a locked round
+  \* @type: PROCESS -> PROPOSAL;
+  validValue, \* a valid value
+  \* @type: PROCESS -> ROUND;
+  validRound \* a valid round
+
+\* time-related variables
+VARIABLES
+  \* @type: PROCESS -> TIME;
+  localClock, \* a process local clock: Corr -> Ticks
+  \* @type: TIME;
+  realTime \* a reference Newtonian real time
+
+\* book-keeping variables
+VARIABLES
+  \* @type: ROUND -> Set(PROPMESSAGE);
+  msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages
+  \* @type: ROUND -> Set(PREMESSAGE);
+  msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages
+  \* @type: ROUND -> Set(PREMESSAGE);
+  msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages
+  \* @type: Set(MESSAGE);
+  evidence, \* the messages that were used by the correct processes to make transitions
+  \* @type:
ACTION;
+  action, \* we use this variable to see which action was taken
+  \* @type: <<ROUND, PROCESS>> -> TIME;
+  proposalReceptionTime \* used to keep track when a process receives a message
+
+\* Invariant support
+VARIABLES
+  \* @type: <<ROUND, PROCESS>> -> TIME;
+  beginRound \* the minimum of the local clocks at the time any process entered a new round
+
+INSTANCE TendermintPBT WITH
+  Corr <- {"c1", "c2", "c3"},
+  Faulty <- {"f4"},
+  N <- 4,
+  T <- 1,
+  ValidValues <- { "v0", "v1" },
+  InvalidValues <- {"v2"},
+  MaxRound <- 3,
+  MaxTimestamp <- 7,
+  MinTimestamp <- 2,
+  Delay <- 2,
+  Precision <- 2,
+  PreloadAllFaultyMsgs <- TRUE,
+  N_GEN <- 5
+
+\* run Apalache with --cinit=CInit
+CInit == \* the proposer is arbitrary -- works for safety
+  ArbitraryProposer
+
+=============================================================================
diff --git a/spec/consensus/proposer-based-timestamp/tla/README.md b/spec/consensus/proposer-based-timestamp/tla/README.md
new file mode 100644
index 00000000000..bca0c48d825
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/README.md
@@ -0,0 +1,46 @@
+# PBTS: TLA+ modeling and verification tests
+
+The TLA+ specification [TendermintPBT.tla](./TendermintPBT.tla) models the
+Tendermint consensus algorithm with added clocks and proposer-based timestamps.
+
+## How to run tests
+
+The script `runApalache.sh` runs Apalache against one of the model files in this repository. This document describes how to use it.
+
+1. Get Apalache, by following [these](https://apalache.informal.systems/docs/apalache/installation/index.html) instructions. Summarized:
+
+   1. `git clone https://github.com/informalsystems/apalache.git`
+   2. `make package`
+
+2. Define an environment variable `APALACHE_HOME` and set it to the directory where you cloned the repository (resp. unpacked the prebuilt release). `$APALACHE_HOME/bin/apalache-mc` should point to the run script.
+
+3. Execute `./runApalache.sh CMD N MC DD` where:
+
+   1. `CMD` is the command. Either "check" or "typecheck". Default: "typecheck"
+   2. `N` is the number of steps if `CMD=check`. Ignored if `CMD=typecheck`. Default: 10
+   3. `MC` is a Boolean flag that controls whether the checked model has a 2/3+ majority of correct processes. Default: true
+      - if `MC` is `true`, the `MC_PBT_3C_1F.tla` is used as input
+      - if `MC` is `false`, the `MC_PBT_2C_2F.tla` is used as input
+   4. `DD` is a Boolean flag that controls Apalache's `--discard-disabled` flag (See [here](https://apalache.informal.systems/docs/apalache/running.html)). Ignored if `CMD=typecheck`. Default: false
+
+The results will be written to `_apalache-out` (see the [Apalache documentation](https://apalache.informal.systems/docs/adr/009adr-outputs.html)).
+
+Example:
+```sh
+./runApalache.sh check 2
+```
+Checks 2 steps of `MC_PBT_3C_1F.tla`, and
+```sh
+./runApalache.sh check 10 false
+```
+checks 10 steps of `MC_PBT_2C_2F.tla`.
+
+## Updating the experiments log
+
+A summary of experiments performed is kept in [`experiment_log.md`](./experiment_log.md).
+
+After running a particularly significant test, copy the raw outputs from
+`_apalache-out` to `experiment_data` and update `experiment_log.md` accordingly.
+See `experiment_data/May2022/` for a suggested directory layout.
+
+Make sure to copy at least the `detailed.log` and `run.txt` files, as well as any counterexample files, if present.
diff --git a/spec/consensus/proposer-based-timestamp/tla/TendermintPBT.tla b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT.tla new file mode 100644 index 00000000000..fe418105fac --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT.tla @@ -0,0 +1,898 @@ +-------------------- MODULE TendermintPBT --------------------------- +(* + A TLA+ specification of a simplified Tendermint consensus algorithm, with added clocks + and proposer-based timestamps. This TLA+ specification extends and modifies + the Tendermint TLA+ specification for fork accountability: + https://github.com/tendermint/spec/blob/master/spec/light-client/accountability/TendermintAcc_004_draft.tla + + * Version 2. A preliminary specification. + + Zarko Milosevic, Igor Konnov, Informal Systems, 2019-2020. + Ilina Stoilkovska, Josef Widder, Informal Systems, 2021. + Jure Kukovec, Informal Systems, 2022. + *) + +EXTENDS Integers, FiniteSets, Apalache, Sequences, typedefs + +(********************* PROTOCOL PARAMETERS **********************************) +\* General protocol parameters +CONSTANTS + \* @type: Set(PROCESS); + Corr, \* the set of correct processes + \* @type: Set(PROCESS); + Faulty, \* the set of Byzantine processes, may be empty + \* @type: Int; + N, \* the total number of processes: correct, defective, and Byzantine + \* @type: Int; + T, \* an upper bound on the number of Byzantine processes + \* @type: Set(VALUE); + ValidValues, \* the set of valid values, proposed both by correct and faulty + \* @type: Set(VALUE); + InvalidValues, \* the set of invalid values, never proposed by the correct ones + \* @type: ROUND; + MaxRound, \* the maximal round number + \* @type: ROUND -> PROCESS; + Proposer \* the proposer function from Rounds to AllProcs + +\* Time-related parameters +CONSTANTS + \* @type: TIME; + MaxTimestamp, \* the maximal value of the clock tick + \* @type: TIME; + MinTimestamp, \* the minimal value of the clock tick + \* @type: TIME; + Delay, \* message delay + \* @type: TIME; + Precision \* clock precision: the maximal difference between two local clocks + +ASSUME(N = Cardinality(Corr \union Faulty)) + +\* Modeling parameter +CONSTANTS + \* @type: Bool; + PreloadAllFaultyMsgs, + \* @type: Int; + N_GEN + +(*************************** DEFINITIONS ************************************) +\* @type: Set(PROCESS); +AllProcs == Corr \union Faulty \* the set of all processes +\* @type: Set(ROUND); +Rounds == 0..MaxRound \* the set of potential rounds +\* @type: Set(TIME); +Timestamps == 0..MaxTimestamp \* the set of clock ticks +\* @type: ROUND; +NilRound == -1 \* a special value to denote a nil round, outside of Rounds +\* @type: TIME; +NilTimestamp == -1 \* a special value to denote a nil timestamp, outside of Ticks +\* @type: Set(ROUND); +RoundsOrNil == Rounds \union {NilRound} +\* @type: Set(VALUE); +Values == ValidValues \union InvalidValues \* the set of all values +\* @type: VALUE; +NilValue == "None" \* a special value to denote a nil value, outside of Values +\* @type: Set(PROPOSAL); +Proposals == Values \X Timestamps \X Rounds +\* @type: PROPOSAL; +NilProposal == <<NilValue, NilTimestamp, NilRound>> +\* @type: Set(VALUE); +ValuesOrNil == Values \union {NilValue} +\* @type: Set(DECISION); +Decisions == Proposals \X Rounds +\* @type: DECISION; +NilDecision == <<NilProposal, NilRound>> + +ArbitraryProposer == Proposer \in [Rounds -> AllProcs] +CorrectProposer == Proposer \in [Rounds -> Corr] +CyclicalProposer == + LET ProcOrder == + LET App(s,e) == Append(s,e) + IN ApaFoldSet(App, <<>>, AllProcs) + IN Proposer = [ r \in Rounds |->
ProcOrder[1 + (r % N)] ] + +ValidProposals == ValidValues \X (MinTimestamp..MaxTimestamp) \X Rounds +\* a value hash is modeled as identity +\* @type: (t) => t; +Id(v) == v + +\* The validity predicate +\* @type: (PROPOSAL) => Bool; +IsValid(p) == p \in ValidProposals + +\* Time validity check. If we want MaxTimestamp = \infty, set ValidTime(t) == TRUE +ValidTime(t) == t < MaxTimestamp + +\* @type: (PROPMESSAGE) => VALUE; +MessageValue(msg) == msg.proposal[1] +\* @type: (PROPMESSAGE) => TIME; +MessageTime(msg) == msg.proposal[2] +\* @type: (PROPMESSAGE) => ROUND; +MessageRound(msg) == msg.proposal[3] + +\* @type: (TIME, TIME) => Bool; +IsTimely(processTime, messageTime) == + /\ processTime >= messageTime - Precision + /\ processTime <= messageTime + Precision + Delay + +\* the two thresholds that are used in the algorithm +\* @type: Int; +THRESHOLD1 == T + 1 \* at least one process is not faulty +\* @type: Int; +THRESHOLD2 == 2 * T + 1 \* a quorum when having N > 3 * T + +\* @type: (TIME, TIME) => TIME; +Min2(a,b) == IF a <= b THEN a ELSE b +\* @type: (Set(TIME)) => TIME; +Min(S) == ApaFoldSet( Min2, MaxTimestamp, S ) +\* Min(S) == CHOOSE x \in S : \A y \in S : x <= y + +\* @type: (TIME, TIME) => TIME; +Max2(a,b) == IF a >= b THEN a ELSE b +\* @type: (Set(TIME)) => TIME; +Max(S) == ApaFoldSet( Max2, NilTimestamp, S ) +\* Max(S) == CHOOSE x \in S : \A y \in S : y <= x + +\* @type: (Set(MESSAGE)) => Int; +Card(S) == + LET + \* @type: (Int, MESSAGE) => Int; + PlusOne(i, m) == i + 1 + IN ApaFoldSet( PlusOne, 0, S ) + +(********************* PROTOCOL STATE VARIABLES ******************************) +VARIABLES + \* @type: PROCESS -> ROUND; + round, \* a process round number + \* @type: PROCESS -> STEP; + step, \* a process step + \* @type: PROCESS -> DECISION; + decision, \* process decision + \* @type: PROCESS -> VALUE; + lockedValue, \* a locked value + \* @type: PROCESS -> ROUND; + lockedRound, \* a locked round + \* @type: PROCESS -> PROPOSAL; + validValue, \* a valid value + \* @type: PROCESS -> ROUND; + validRound \* a valid round + +coreVars == + <<round, step, decision, lockedValue, lockedRound, validValue, validRound>> + +\* time-related variables +VARIABLES + \* @type: PROCESS -> TIME; + localClock, \* a process local clock: Corr -> Ticks + \* @type: TIME; + realTime \* a reference Newtonian real time + +temporalVars == <<localClock, realTime>> + +\* book-keeping variables +VARIABLES + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + \* @type: Set(MESSAGE); + evidence, \* the messages that were used by the correct processes to make transitions + \* @type: ACTION; + action, \* we use this variable to see which action was taken + \* @type: <<ROUND, PROCESS>> -> TIME; + proposalReceptionTime \* used to keep track when a process receives a message + +\* Action is excluded from the tuple, because it always changes +bookkeepingVars == + <<msgsPropose, msgsPrevote, msgsPrecommit, evidence, proposalReceptionTime>> + +\* Invariant support +VARIABLES + \* @type: <<ROUND, PROCESS>> -> TIME; + beginRound \* the minimum of the local clocks at the time any process entered a new round + +(* to see a type invariant, check TendermintAccInv3 *) + +(********************* PROTOCOL INITIALIZATION ******************************) +\* @type: (ROUND) => Set(PROPMESSAGE); +FaultyProposals(r) == + [ + type : {"PROPOSAL"}, + src : Faulty, + round : {r}, + proposal : Proposals, + validRound: RoundsOrNil + ] +
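+ +\* A worked illustration of the IsTimely predicate defined above (an +\* illustration only, plugging in Delay = 2 and Precision = 2, the values +\* used by the MC_PBT_* instances): a proposal carrying timestamp t = 5 +\* satisfies IsTimely(pt, 5) exactly when the receiving process's recorded +\* time pt lies in 3..9, since t - Precision = 3 and +\* t + Precision + Delay = 9. +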
+\* @type: Set(PROPMESSAGE); +AllFaultyProposals == + [ + type : {"PROPOSAL"}, + src : Faulty, + round : Rounds, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: (ROUND) => Set(PREMESSAGE); +FaultyPrevotes(r) == + [ + type : {"PREVOTE"}, + src : Faulty, + round: {r}, + id : Proposals + ] + +\* @type: Set(PREMESSAGE); +AllFaultyPrevotes == + [ + type : {"PREVOTE"}, + src : Faulty, + round: Rounds, + id : Proposals + ] + +\* @type: (ROUND) => Set(PREMESSAGE); +FaultyPrecommits(r) == + [ + type : {"PRECOMMIT"}, + src : Faulty, + round: {r}, + id : Proposals + ] + +\* @type: Set(PREMESSAGE); +AllFaultyPrecommits == + [ + type : {"PRECOMMIT"}, + src : Faulty, + round: Rounds, + id : Proposals + ] + +\* @type: Set(PROPMESSAGE); +AllProposals == + [ + type : {"PROPOSAL"}, + src : AllProcs, + round : Rounds, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: (ROUND) => Set(PROPMESSAGE); +RoundProposals(r) == + [ + type : {"PROPOSAL"}, + src : AllProcs, + round : {r}, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: (ROUND -> Set(MESSAGE)) => Bool; +BenignRoundsInMessages(msgfun) == + \* the message function never contains a message for a wrong round + \A r \in Rounds: + \A m \in msgfun[r]: + r = m.round + +\* @type: (ROUND -> Set(MESSAGE), Set(MESSAGE)) => Bool; +BenignAndSubset(msgfun, set) == + /\ \A r \in Rounds: + \* The generated values belong to SUBSET set + /\ msgfun[r] \subseteq set + \* the message function never contains a message for a wrong round + /\ \A m \in msgfun[r]: r = m.round + +InitGen == + /\ msgsPropose \in [Rounds -> Gen(N_GEN)] + /\ msgsPrevote \in [Rounds -> Gen(N_GEN)] + /\ msgsPrecommit \in [Rounds -> Gen(N_GEN)] + /\ BenignAndSubset(msgsPropose, AllFaultyProposals) + /\ BenignAndSubset(msgsPrevote, AllFaultyPrevotes) + /\ BenignAndSubset(msgsPrecommit, AllFaultyPrecommits) + +InitPreloadAllMsgs == + /\ msgsPropose \in [Rounds -> SUBSET AllFaultyProposals] + /\ msgsPrevote \in [Rounds -> SUBSET AllFaultyPrevotes] + /\ msgsPrecommit \in [Rounds -> SUBSET AllFaultyPrecommits] + /\ BenignRoundsInMessages(msgsPropose) + /\ BenignRoundsInMessages(msgsPrevote) + /\ BenignRoundsInMessages(msgsPrecommit) + +InitMsgs == + \/ /\ PreloadAllFaultyMsgs + \* /\ InitPreloadAllMsgs + /\ InitGen + \/ /\ ~PreloadAllFaultyMsgs + /\ msgsPropose = [r \in Rounds |-> {}] + /\ msgsPrevote = [r \in Rounds |-> {}] + /\ msgsPrecommit = [r \in Rounds |-> {}] + +\* The initial states of the protocol. Some faults can be in the system already. 
+Init == + /\ round = [p \in Corr |-> 0] + /\ localClock \in [Corr -> MinTimestamp..(MinTimestamp + Precision)] + /\ realTime = 0 + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilDecision] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilProposal] + /\ validRound = [p \in Corr |-> NilRound] + /\ InitMsgs + /\ proposalReceptionTime = [r \in Rounds, p \in Corr |-> NilTimestamp] + /\ evidence = {} + /\ action = "Init" + /\ beginRound = + [r \in Rounds, c \in Corr |-> + IF r = 0 + THEN localClock[c] + ELSE MaxTimestamp + ] + +lastBeginRound == [ r \in Rounds |-> + Max({beginRound[r,p] : p \in Corr}) +] + +firstBeginRound == [ r \in Rounds |-> + Min({beginRound[r,p] : p \in Corr}) +] + +\* Faulty processes send messages +FaultyBroadcast == + /\ ~PreloadAllFaultyMsgs + /\ action' = "FaultyBroadcast" + /\ \E r \in Rounds: + \/ \E msgs \in SUBSET FaultyProposals(r): + /\ msgsPropose' = [msgsPropose EXCEPT ![r] = @ \union msgs] + /\ UNCHANGED <<coreVars, temporalVars, beginRound>> + /\ UNCHANGED + <<(*msgsPropose,*) msgsPrevote, msgsPrecommit, + evidence, (*action,*) proposalReceptionTime>> + \/ \E msgs \in SUBSET FaultyPrevotes(r): + /\ msgsPrevote' = [msgsPrevote EXCEPT ![r] = @ \union msgs] + /\ UNCHANGED <<coreVars, temporalVars, beginRound>> + /\ UNCHANGED + <<msgsPropose, (*msgsPrevote,*) msgsPrecommit, + evidence, (*action,*) proposalReceptionTime>> + \/ \E msgs \in SUBSET FaultyPrecommits(r): + /\ msgsPrecommit' = [msgsPrecommit EXCEPT ![r] = @ \union msgs] + /\ UNCHANGED <<coreVars, temporalVars, beginRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, (*msgsPrecommit,*) + evidence, (*action,*) proposalReceptionTime>> + +(************************ MESSAGE PASSING ********************************) +\* @type: (PROCESS, ROUND, PROPOSAL, ROUND) => Bool; +BroadcastProposal(pSrc, pRound, pProposal, pValidRound) == + LET + \* @type: PROPMESSAGE; + newMsg == + [ + type |-> "PROPOSAL", + src |-> pSrc, + round |-> pRound, + proposal |-> pProposal, + validRound |-> pValidRound + ] + IN + /\ msgsPropose' = [msgsPropose EXCEPT ![pRound] = msgsPropose[pRound] \union {newMsg}] + +\* @type: (PROCESS, ROUND, PROPOSAL) => Bool; +BroadcastPrevote(pSrc, pRound, pId) == + LET + \* @type: PREMESSAGE; + newMsg == + [ + type |-> "PREVOTE", + src |-> pSrc, + round |-> pRound, + id |-> pId + ] + IN + /\ msgsPrevote' = [msgsPrevote EXCEPT ![pRound] = msgsPrevote[pRound] \union {newMsg}] + +\* @type: (PROCESS, ROUND, PROPOSAL) => Bool; +BroadcastPrecommit(pSrc, pRound, pId) == + LET + \* @type: PREMESSAGE; + newMsg == + [ + type |-> "PRECOMMIT", + src |-> pSrc, + round |-> pRound, + id |-> pId + ] + IN + /\ msgsPrecommit' = [msgsPrecommit EXCEPT ![pRound] = msgsPrecommit[pRound] \union {newMsg}] + +(***************************** TIME **************************************) + +\* [PBTS-CLOCK-PRECISION.0] +\* @type: Bool; +SynchronizedLocalClocks == + \A p \in Corr : \A q \in Corr : + p /= q => + \/ /\ localClock[p] >= localClock[q] + /\ localClock[p] - localClock[q] < Precision + \/ /\ localClock[p] < localClock[q] + /\ localClock[q] - localClock[p] < Precision + +\* [PBTS-PROPOSE.0] +\* @type: (VALUE, TIME, ROUND) => PROPOSAL; +Proposal(v, t, r) == + <<v, t, r>> + +\* [PBTS-DECISION-ROUND.0] +\* @type: (PROPOSAL, ROUND) => DECISION; +Decision(p, r) == + <<p, r>> + +(**************** MESSAGE PROCESSING TRANSITIONS *************************) +\* lines 12-13 +\* @type: (PROCESS, ROUND) => Bool; +StartRound(p, r) == + /\ step[p] /= "DECIDED" \* a decided process does not participate in consensus + /\ round' = [round EXCEPT ![p] = r] + /\ step' = [step EXCEPT ![p] = "PROPOSE"] + \* We only need to update (last)beginRound[r] once a process enters round `r` + /\ beginRound' = [beginRound EXCEPT ![r,p] = localClock[p]] + +\*
lines 14-19, a proposal may be sent later +\* @type: (PROCESS) => Bool; +InsertProposal(p) == + LET r == round[p] IN + /\ p = Proposer[r] + /\ step[p] = "PROPOSE" + \* if the proposer is sending a proposal, then there are no other proposals + \* by the correct processes for the same round + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \E v \in ValidValues: + LET proposal == + IF validValue[p] /= NilProposal + THEN validValue[p] + ELSE Proposal(v, localClock[p], r) + IN + /\ BroadcastProposal(p, r, proposal, validRound[p]) + /\ UNCHANGED <<coreVars, temporalVars>> + /\ UNCHANGED + <<(*msgsPropose,*) msgsPrevote, msgsPrecommit, + evidence, proposalReceptionTime>> + /\ UNCHANGED beginRound + /\ action' = "InsertProposal" + +\* a new action used to register the proposal and note the reception time. +\* [PBTS-RECEPTION-STEP.0] +\* @type: (PROCESS) => Bool; +ReceiveProposal(p) == + \E v \in Values, t \in Timestamps: + /\ LET r == round[p] IN + LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[round[p]], + round |-> round[p], + proposal |-> Proposal(v, t, r), + validRound |-> NilRound + ] + IN + /\ msg \in msgsPropose[round[p]] + /\ proposalReceptionTime[r,p] = NilTimestamp + /\ proposalReceptionTime' = [proposalReceptionTime EXCEPT ![r,p] = localClock[p]] + /\ UNCHANGED <<coreVars, temporalVars>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, msgsPrecommit, evidence>> + /\ UNCHANGED beginRound + /\ action' = "ReceiveProposal" + +\* lines 22-27 +\* @type: (PROCESS) => Bool; +UponProposalInPropose(p) == + \E v \in Values, t \in Timestamps: + LET + r == round[p] + IN LET + \* @type: PROPOSAL; + prop == Proposal(v,t,r) + IN + /\ step[p] = "PROPOSE" (* line 22 *) + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> NilRound + ] + IN + /\ evidence' = {msg} \union evidence + /\ LET mid == (* line 23 *) + IF + \* Timeliness is checked against the process time as recorded + \* in proposalReceptionTime, not as it is now. + \* In the implementation, if the proposal is not timely, the process + \* prevotes nil. In the natural-language specification, nothing happens. + \* This specification maintains consistency with the implementation.
+ /\ IsTimely( proposalReceptionTime[r, p], t) \* updated line 22 + /\ IsValid(prop) + /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v) + THEN Id(prop) + ELSE NilProposal + IN + BroadcastPrevote(p, r, mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <<temporalVars, beginRound>> + /\ UNCHANGED + <<round, decision, lockedValue, lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrecommit, proposalReceptionTime>> + /\ action' = "UponProposalInPropose" + +\* lines 28-33 +\* [PBTS-ALG-OLD-PREVOTE.0] +\* @type: (PROCESS) => Bool; +UponProposalInProposeAndPrevote(p) == + \E v \in Values, t \in Timestamps, vr \in Rounds, pr \in Rounds: + LET + r == round[p] + IN LET + \* @type: PROPOSAL; + prop == Proposal(v,t,pr) + IN + /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < r \* line 28, the while part + /\ pr <= vr + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> vr + ] + IN + \* Changed from 001: no need to re-check timeliness + /\ msg \in msgsPropose[r] \* line 28 + /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(prop) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 28 + /\ evidence' = PV \union {msg} \union evidence + /\ LET mid == (* line 29 *) + IF IsValid(prop) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v) + THEN Id(prop) + ELSE NilProposal + IN + BroadcastPrevote(p, r, mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <<temporalVars, beginRound>> + /\ UNCHANGED + <<round, decision, lockedValue, lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrecommit, proposalReceptionTime>> + /\ action' = "UponProposalInProposeAndPrevote" + +\* lines 34-35 + lines 61-64 (onTimeoutPrevote) +\* @type: (PROCESS) => Bool; +UponQuorumOfPrevotesAny(p) == + /\ step[p] = "PREVOTE" \* line 34 and 61 + /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]: + \* find the unique voters in the evidence + LET Voters == { m.src: m \in MyEvidence } IN + \* compare the number of the unique voters against the threshold + /\ Cardinality(Voters) >= THRESHOLD2 \* line 34 + /\ evidence' = MyEvidence \union evidence + /\ BroadcastPrecommit(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <<temporalVars, beginRound>> + /\ UNCHANGED + <<round, decision, lockedValue, lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, proposalReceptionTime>> + /\ action' = "UponQuorumOfPrevotesAny" + +\* lines 36-46 +\* [PBTS-ALG-NEW-PREVOTE.0] +\* @type: (PROCESS) => Bool; +UponProposalInPrevoteOrCommitAndPrevote(p) == + \E v \in ValidValues, t \in Timestamps, vr \in RoundsOrNil: + LET + r == round[p] + IN LET + \* @type: PROPOSAL; + prop == Proposal(v,t,r) + IN + /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36 + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> vr + ] + IN + \* Changed from 001: no need to re-check timeliness + /\ msg \in msgsPropose[r] \* line 36 + /\ LET PV == { m \in msgsPrevote[r]: m.id = Id(prop) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union {msg} \union evidence + /\ IF step[p] = "PREVOTE" + THEN \* lines 38-41: + /\ lockedValue' = [lockedValue EXCEPT ![p] = v] + /\ lockedRound' = [lockedRound EXCEPT ![p] = r] + /\ BroadcastPrecommit(p, r, Id(prop)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + ELSE + UNCHANGED <<lockedValue, lockedRound, msgsPrecommit, step>> + \* lines 42-43 + /\ validValue' = [validValue EXCEPT ![p] = prop] + /\ validRound' = [validRound EXCEPT ![p] = r] + /\ UNCHANGED <<temporalVars, beginRound>> + /\ UNCHANGED + <<round, decision>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, proposalReceptionTime>> + /\ action' = "UponProposalInPrevoteOrCommitAndPrevote" + +\* lines 47-48 + 65-67 (onTimeoutPrecommit) +\* @type: (PROCESS) => Bool; +UponQuorumOfPrecommitsAny(p) == + /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]: + \* find the unique committers in the evidence + LET
Committers == { m.src: m \in MyEvidence } IN + \* compare the number of the unique committers against the threshold + /\ Cardinality(Committers) >= THRESHOLD2 \* line 47 + /\ evidence' = MyEvidence \union evidence + /\ round[p] + 1 \in Rounds + /\ StartRound(p, round[p] + 1) + /\ UNCHANGED temporalVars + /\ UNCHANGED + <<(*round, step,*) decision, lockedValue, + lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, msgsPrecommit, proposalReceptionTime>> + /\ action' = "UponQuorumOfPrecommitsAny" + +\* lines 49-54 +\* [PBTS-ALG-DECIDE.0] +\* @type: (PROCESS) => Bool; +UponProposalInPrecommitNoDecision(p) == + /\ decision[p] = NilDecision \* line 49 + /\ \E v \in ValidValues, t \in Timestamps (* line 50*) , r \in Rounds, pr \in Rounds, vr \in RoundsOrNil: + LET + \* @type: PROPOSAL; + prop == Proposal(v,t,pr) + IN + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> vr + ] + IN + /\ msg \in msgsPropose[r] \* line 49 + /\ proposalReceptionTime[r,p] /= NilTimestamp \* Keep? + /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(prop) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 49 + /\ evidence' = PV \union {msg} \union evidence + /\ decision' = [decision EXCEPT ![p] = Decision(prop, r)] \* update the decision, line 51 + \* The original algorithm does not have 'DECIDED', but it increments the height. + \* We introduced 'DECIDED' here to prevent the process from changing its decision. + /\ step' = [step EXCEPT ![p] = "DECIDED"] + /\ UNCHANGED temporalVars + /\ UNCHANGED + <<round, lockedValue, lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, msgsPrecommit, proposalReceptionTime>> + /\ UNCHANGED beginRound + /\ action' = "UponProposalInPrecommitNoDecision" + +\* the actions below are not essential for safety, but added for completeness + +\* lines 20-21 + 57-60 +\* @type: (PROCESS) => Bool; +OnTimeoutPropose(p) == + /\ step[p] = "PROPOSE" + /\ p /= Proposer[round[p]] + /\ BroadcastPrevote(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <<temporalVars, beginRound>> + /\ UNCHANGED + <<round, decision, lockedValue, lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrecommit, evidence, proposalReceptionTime>> + /\ action' = "OnTimeoutPropose" + +\* lines 44-46 +\* @type: (PROCESS) => Bool; +OnQuorumOfNilPrevotes(p) == + /\ step[p] = "PREVOTE" + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilProposal) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union evidence + /\ BroadcastPrecommit(p, round[p], Id(NilProposal)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <<temporalVars, beginRound>> + /\ UNCHANGED + <<round, decision, lockedValue, lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, proposalReceptionTime>> + /\ action' = "OnQuorumOfNilPrevotes" + +\* lines 55-56 +\* @type: (PROCESS) => Bool; +OnRoundCatchup(p) == + \E r \in Rounds: + /\ r > round[p] + /\ LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN + \E MyEvidence \in SUBSET RoundMsgs: + LET Faster == { m.src: m \in MyEvidence } IN + /\ Cardinality(Faster) >= THRESHOLD1 + /\ evidence' = MyEvidence \union evidence + /\ StartRound(p, r) + /\ UNCHANGED temporalVars + /\ UNCHANGED + <<(*round, step,*) decision, lockedValue, + lockedRound, validValue, validRound>> + /\ UNCHANGED + <<msgsPropose, msgsPrevote, msgsPrecommit, proposalReceptionTime>> + /\ action' = "OnRoundCatchup" + + +(********************* PROTOCOL TRANSITIONS ******************************) +\* advance the global clock +\* @type: Bool; +AdvanceRealTime == + /\ ValidTime(realTime) + /\ \E t \in Timestamps: + /\ t > realTime + /\ realTime' = t + /\ localClock' = [p \in Corr |-> localClock[p] + (t - realTime)] + /\ UNCHANGED <<coreVars, bookkeepingVars, beginRound>> + /\ action' = "AdvanceRealTime" + +\* process timely messages +\* @type: (PROCESS) => Bool; +MessageProcessing(p) == + \* start round + \/ InsertProposal(p) + \*
reception step + \/ ReceiveProposal(p) + \* processing step + \/ UponProposalInPropose(p) + \/ UponProposalInProposeAndPrevote(p) + \/ UponQuorumOfPrevotesAny(p) + \/ UponProposalInPrevoteOrCommitAndPrevote(p) + \/ UponQuorumOfPrecommitsAny(p) + \/ UponProposalInPrecommitNoDecision(p) + \* the actions below are not essential for safety, but added for completeness + \/ OnTimeoutPropose(p) + \/ OnQuorumOfNilPrevotes(p) + \/ OnRoundCatchup(p) + +(* + * A system transition. In this specification, the system may eventually deadlock, + * e.g., when all processes decide. This is expected behavior, as we focus on safety. + *) +Next == + \/ AdvanceRealTime + \/ FaultyBroadcast + \/ /\ SynchronizedLocalClocks + /\ \E p \in Corr: MessageProcessing(p) + +----------------------------------------------------------------------------- + +(*************************** INVARIANTS *************************************) + +\* [PBTS-INV-AGREEMENT.0] +AgreementOnValue == + \A p, q \in Corr: + /\ decision[p] /= NilDecision + /\ decision[q] /= NilDecision + => \E v \in ValidValues, t \in Timestamps, pr \in Rounds, r1 \in Rounds, r2 \in Rounds : + LET prop == Proposal(v,t,pr) + IN + /\ decision[p] = Decision(prop, r1) + /\ decision[q] = Decision(prop, r2) + +DisagreementOnValue == + \E p, q \in Corr: + \E p1 \in ValidProposals, p2 \in ValidProposals, r1 \in Rounds, r2 \in Rounds: + /\ p1 /= p2 + /\ decision[p] = Decision(p1, r1) + /\ decision[q] = Decision(p2, r2) + +\* [PBTS-INV-VALID.0] +ConsensusValidValue == + \A p \in Corr: + \* decision[p] = Decision(Proposal(v,t,pr), r) + LET prop == decision[p][1] IN + prop /= NilProposal => prop[1] \in ValidValues + +\* [PBTS-INV-MONOTONICITY.0] +\* TODO: we would need to compare timestamps of blocks from different heights + +\* [PBTS-INV-TIMELY.0] +ConsensusTimeValid == + \A p \in Corr: + \* if a process decides on v and t + \E v \in ValidValues, t \in Timestamps, pr \in Rounds, dr \in Rounds : + \* FIXME: do we need to enforce pr <= dr?
+ decision[p] = Decision(Proposal(v,t,pr), dr) + \* then a process found t timely at its proposal round (pr) + => \E q \in Corr: + LET propRecvTime == proposalReceptionTime[pr, q] IN + ( + /\ beginRound[pr, q] <= propRecvTime + /\ beginRound[pr+1, q] >= propRecvTime + /\ IsTimely(propRecvTime, t) + ) + +IsFirstProposedInRound(prop, src, r) == + \E msg \in msgsPropose[r]: + /\ msg.proposal = prop + /\ msg.src = src + \* If a proposal is reused this changes from Nil to a valid round + /\ msg.validRound = NilRound + +TimeLiveness == +\A r \in Rounds \ {MaxRound}, v \in ValidValues: + LET p == Proposer[r] IN + p \in Corr \* Correct process is proposer in round r + => + \E t \in Timestamps: + LET prop == Proposal(v,t,r) IN + ( + /\ IsFirstProposedInRound(prop, p, r) \* p proposes v with some timestamp t in round r + /\ LET tOffset == t + Delay + Precision IN + /\ firstBeginRound[r] <= t + /\ t <= lastBeginRound[r] + /\ lastBeginRound[r] <= tOffset + /\ tOffset <= firstBeginRound[r+1] + ) => + \A q \in Corr: + \* q eventually decides prop + LET dq == decision[q] IN + dq /= NilDecision => dq[1] = prop + + +\* a conjunction of all invariants +Inv == + /\ AgreementOnValue + /\ ConsensusValidValue + /\ ConsensusTimeValid + /\ TimeLiveness + +============================================================================= diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/MC.out b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/MC.out new file mode 100644 index 00000000000..51e2808a338 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/MC.out @@ -0,0 +1,789 @@ +@!@!@STARTMSG 2262:0 @!@!@ +Created by Apalache on Wed May 18 11:06:20 UTC 2022 +@!@!@ENDMSG 2262 @!@!@ +@!@!@STARTMSG 2110:1 @!@!@ +Invariant is violated. 
+@!@!@ENDMSG 2110 @!@!@ +@!@!@STARTMSG 2121:1 @!@!@ +The behavior up to this point is: +@!@!@ENDMSG 2121 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +1: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "Init" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) +/\ evidence = {} +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, -1>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, -1>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PROPOSE">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +2: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "ReceiveProposal" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) +/\ evidence = {} +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id 
|-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, -1>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PROPOSE">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +3: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "UponProposalInPropose" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) +/\ evidence = {[proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1]} +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, -1>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, 
-1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PREVOTE">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +4: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "ReceiveProposal" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) +/\ evidence = {[proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1]} +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PREVOTE">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +5: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "UponProposalInPrevoteOrCommitAndPrevote" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) +/\ evidence = { [id |-> <<"v0", 3, 0>>, round |-> 0, 
src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2] } +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "v0">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PRECOMMIT">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +6: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "UponProposalInPropose" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) +/\ evidence = { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } +/\ localClock = SetAsFun({ <<"c1", 3>>, 
<<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "v0">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PREVOTE">>, <<"c2", "PRECOMMIT">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +7: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "UponProposalInPrecommitNoDecision" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, <<"c2", <<<<"v0", 3, 0>>, 0>>>> +}) +/\ evidence = { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) +/\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "v0">> }) 
+/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PREVOTE">>, <<"c2", "DECIDED">> }) +/\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) +/\ validValue = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +8: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "UponProposalInPrevoteOrCommitAndPrevote" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, <<"c2", <<<<"v0", 3, 0>>, 0>>>> +}) +/\ evidence = { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ 
<<"c1", 0>>, <<"c2", 0>> }) +/\ lockedValue = SetAsFun({ <<"c1", "v1">>, <<"c2", "v0">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "PRECOMMIT">>, <<"c2", "DECIDED">> }) +/\ validRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ validValue = SetAsFun({ <<"c1", <<"v1", 2, 0>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +@!@!@ENDMSG 2217 @!@!@ +@!@!@STARTMSG 2217:4 @!@!@ +9: +/\ Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) +/\ action = "UponProposalInPrecommitNoDecision" +/\ beginRound = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) +/\ decision = SetAsFun({ <<"c1", <<<<"v1", 2, 0>>, 0>>>>, <<"c2", <<<<"v0", 3, 0>>, 0>>>> }) +/\ evidence = { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + 
round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } +/\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) +/\ lockedRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ lockedValue = SetAsFun({ <<"c1", "v1">>, <<"c2", "v0">> }) +/\ msgsPrecommit = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, round |-> 2, src |-> "f3", type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, round |-> 3, src |-> "f4", type |-> "PRECOMMIT"]} + >> }) +/\ msgsPrevote = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ msgsPropose = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) +/\ proposalReceptionTime = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) +/\ realTime = 0 +/\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ step = SetAsFun({ <<"c1", "DECIDED">>, <<"c2", "DECIDED">> }) +/\ validRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) +/\ validValue = SetAsFun({ <<"c1", <<"v1", 2, 0>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +@!@!@ENDMSG 2217 @!@!@ diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.itf.json b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.itf.json new file mode 100644 index 00000000000..232cc28dbf5 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.itf.json @@ -0,0 +1,5910 @@ +{ + "#meta": { + "format": "ITF", + "format-description": "https://apalache.informal.systems/docs/adr/015adr-trace.html", + "description": "Created by Apalache on Wed May 18 11:06:20 UTC 2022" + }, + "params": [ + "Proposer" + ], + "vars": [ + "decision", + "msgsPrecommit", + "beginRound", + "msgsPrevote", + "action", + "lockedRound", + "msgsPropose", + "validRound", + "step", + "lockedValue", + "validValue", + "realTime", + "round", + "evidence", + "proposalReceptionTime", + "localClock" + ], + "states": [ + { + "#meta": { + "index": 0 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ 
+ 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "Init", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "None" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + 
"PROPOSE" + ], + [ + "c2", + "PROPOSE" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 1 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "ReceiveProposal", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "None" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + 
[ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PROPOSE" + ], + [ + "c2", + "PROPOSE" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 2 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "UponProposalInPropose", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "None" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + 
"#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PROPOSE" + ], + [ + "c2", + "PREVOTE" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 3 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "ReceiveProposal", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "None" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, 
+ "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PROPOSE" + ], + [ + "c2", + "PREVOTE" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + -1 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 4 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "UponProposalInPrevoteOrCommitAndPrevote", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + 
"c1", + -1 + ], + [ + "c2", + 0 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "v0" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PROPOSE" + ], + [ + "c2", + "PRECOMMIT" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + 0 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "v0", + 3, + 0 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 5 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "UponProposalInPropose", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" 
+ ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + 0 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "v0" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + 
} + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PREVOTE" + ], + [ + "c2", + "PRECOMMIT" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + 0 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "v0", + 3, + 0 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 6 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "UponProposalInPrecommitNoDecision", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "v0", + 3, + 0 + ] + }, + 0 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + 0 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "None" + ], + [ + "c2", + "v0" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": 
"f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PREVOTE" + ], + [ + "c2", + "DECIDED" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + -1 + ], + [ + "c2", + 0 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "None", + -1, + -1 + ] + } + ], + [ + "c2", + { + "#tup": [ + "v0", + 3, + 0 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 7 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "UponProposalInPrevoteOrCommitAndPrevote", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "None", + -1, + -1 + ] + }, + -1 + ] + } + ], + [ + "c2", + 
{ + "#tup": [ + { + "#tup": [ + "v0", + 3, + 0 + ] + }, + 0 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "v1" + ], + [ + "c2", + "v0" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] 
+ } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "step": { + "#map": [ + [ + "c1", + "PRECOMMIT" + ], + [ + "c2", + "DECIDED" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "v1", + 2, + 0 + ] + } + ], + [ + "c2", + { + "#tup": [ + "v0", + 3, + 0 + ] + } + ] + ] + } + }, + { + "#meta": { + "index": 8 + }, + "Proposer": { + "#map": [ + [ + 0, + "f4" + ], + [ + 1, + "f4" + ], + [ + 2, + "f4" + ], + [ + 3, + "f4" + ] + ] + }, + "action": "UponProposalInPrecommitNoDecision", + "beginRound": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + 7 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + 7 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + 7 + ] + ] + }, + "decision": { + "#map": [ + [ + "c1", + { + "#tup": [ + { + "#tup": [ + "v1", + 2, + 0 + ] + }, + 0 + ] + } + ], + [ + "c2", + { + "#tup": [ + { + "#tup": [ + "v0", + 3, + 0 + ] + }, + 0 + ] + } + ] + ] + }, + "evidence": { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" 
+ }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + }, + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + }, + "localClock": { + "#map": [ + [ + "c1", + 3 + ], + [ + "c2", + 2 + ] + ] + }, + "lockedRound": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "lockedValue": { + "#map": [ + [ + "c1", + "v1" + ], + [ + "c2", + "v0" + ] + ] + }, + "msgsPrecommit": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PRECOMMIT" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 3, + 2 + ] + }, + "round": 2, + "src": "f3", + "type": "PRECOMMIT" + } + ] + } + ], + [ + 3, + { + "#set": [ + { + "id": { + "#tup": [ + "v2", + 7, + 3 + ] + }, + "round": 3, + "src": "f4", + "type": "PRECOMMIT" + } + ] + } + ] + ] + }, + "msgsPrevote": { + "#map": [ + [ + 0, + { + "#set": [ + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "c2", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "c1", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f3", + "type": "PREVOTE" + }, + { + "id": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PREVOTE" + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "msgsPropose": { + "#map": [ + [ + 0, + { + "#set": [ + { + "proposal": { + "#tup": [ + "v0", + 3, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": 2 + }, + { + "proposal": { + "#tup": [ + "v1", + 2, + 0 + ] + }, + "round": 0, + "src": "f4", + "type": "PROPOSAL", + "validRound": -1 + } + ] + } + ], + [ + 1, + { + "#set": [ + + ] + } + ], + [ + 2, + { + "#set": [ + + ] + } + ], + [ + 3, + { + "#set": [ + + ] + } + ] + ] + }, + "proposalReceptionTime": { + "#map": [ + [ + { + "#tup": [ + 0, + "c1" + ] + }, + 3 + ], + [ + { + "#tup": [ + 2, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c1" + ] + }, + -1 + ], + [ + { + "#tup": [ + 2, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 1, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 3, + "c2" + ] + }, + -1 + ], + [ + { + "#tup": [ + 0, + "c2" + ] + }, + 2 + ], + [ + { + "#tup": [ + 3, + "c1" + ] + }, + -1 + ] + ] + }, + "realTime": 0, + "round": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, 
+ "step": { + "#map": [ + [ + "c1", + "DECIDED" + ], + [ + "c2", + "DECIDED" + ] + ] + }, + "validRound": { + "#map": [ + [ + "c1", + 0 + ], + [ + "c2", + 0 + ] + ] + }, + "validValue": { + "#map": [ + [ + "c1", + { + "#tup": [ + "v1", + 2, + 0 + ] + } + ], + [ + "c2", + { + "#tup": [ + "v0", + 3, + 0 + ] + } + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.json b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.json new file mode 100644 index 00000000000..0314da0e5f0 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.json @@ -0,0 +1,34833 @@ +{ + "name": "ApalacheIR", + "version": "1.0", + "description": "https://apalache.informal.systems/docs/adr/005adr-json.html", + "modules": [ + { + "kind": "TlaModule", + "name": "counterexample", + "declarations": [ + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "ConstInit", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State0", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + 
"value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "Init" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + 
"value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] 
+ } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": 
"ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", 
+ "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + 
"value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": 
"OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": 
{ + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": 
"ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State1", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "ReceiveProposal" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + 
}, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": 
"NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": 
"OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + 
"value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + 
"kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } 
+ ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": 
"Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + 
"kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": 
"OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ 
+ { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State2", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": 
"OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "UponProposalInPropose" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": 
"TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": 
"TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + 
] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { 
+ "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + 
"kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + 
"kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + 
"args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 
+ } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + 
} + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + 
"name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State3", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + 
}, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "ReceiveProposal" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + 
"value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + 
"type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, 
+ { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + 
"kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + 
"value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": 
"TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + 
"value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + 
"oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": 
"Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": 
"TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": 
"Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State4", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "UponProposalInPrevoteOrCommitAndPrevote" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + 
"oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } 
+ }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": 
"PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, 
+ { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + 
"kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + 
"type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": 
"Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + 
} + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + 
"type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + 
"type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + 
"type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": 
"TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + 
"type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State5", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + 
"args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "UponProposalInPropose" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + 
"type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + 
"type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, 
+ { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": 
"Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": 
"Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", 
+ "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + 
} + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + 
"kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": 
"TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + 
"kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": 
"Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + 
"oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + 
"kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State6", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "UponProposalInPrecommitNoDecision" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": 
"<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + 
"kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": 
"TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": 
"OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": 
"Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": 
"TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": 
"TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": 
"Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 
0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] 
+ }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, 
src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": 
"Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": 
"ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "DECIDED" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": 
"ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State7", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + 
"kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "UponProposalInPrevoteOrCommitAndPrevote" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": 
"TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, 
+ { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + 
"type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + 
"kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + 
"value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + 
"kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + 
"args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + 
}, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + 
} + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" 
+ }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", 
+ "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + 
"kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + 
{ + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } 
+ ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "DECIDED" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + }, + { + "type": 
"<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "State8", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Untyped", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "Proposer" + }, + { + "type": "(Int -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "action" + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "UponProposalInPrecommitNoDecision" + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "beginRound" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "(Str -> <<<>, Int>>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, Int>>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + 
"oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + }, + { + "type": "<>, Int>>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "evidence" + }, + { + "type": "Set([id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + 
"value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + 
"kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { 
+ "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": 
"round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "[id: <>, proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": 
"v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "localClock" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": 
"Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "lockedValue" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrecommit" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": 
"OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": 
"OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + 
"type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PRECOMMIT" + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPrevote" + }, + { + "type": "(Int -> Set([id: <>, round: Int, src: Str, type: Str]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + 
}, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + 
"type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f3" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + }, + { + "type": "[id: <>, round: Int, src: Str, type: Str]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "id" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PREVOTE" + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + 
{ + "type": "<>, round: Int, src: Str, type: Str])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([id: <>, round: Int, src: Str, type: Str])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "msgsPropose" + }, + { + "type": "(Int -> Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int]))", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>, round: Int, src: Str, type: Str, validRound: Int])>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "[proposal: <>, round: Int, src: Str, type: Str, validRound: Int]", + "kind": "OperEx", + "oper": "RECORD", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "proposal" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "round" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "src" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + 
"value": "f4" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "type" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "PROPOSAL" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "validRound" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + }, + { + "type": "<>, round: Int, src: Str, type: Str, validRound: Int])>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Set([proposal: <>, round: Int, src: Str, type: Str, validRound: Int])", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "proposalReceptionTime" + }, + { + "type": "(<> -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<<<>, Int>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + 
"value": 2 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 1 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "realTime" + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "round" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "step" + }, + { + "type": "(Str -> Str)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": 
"OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "DECIDED" + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "DECIDED" + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validRound" + }, + { + "type": "(Str -> Int)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Untyped", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "Untyped", + "kind": "NameEx", + "name": "validValue" + }, + { + "type": "(Str -> <>)", + "kind": "OperEx", + "oper": "Apalache!SetAsFun", + "args": [ + { + "type": "Set(<>>>)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 2 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + }, + { + "type": "<>>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + }, + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, + { + "type": "Untyped", + "kind": "TlaOperDecl", + "name": "InvariantViolation", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "Bool", + "kind": "OperEx", + "oper": "Apalache!Skolem", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "EXISTS3", + "args": [ + { + "type": "Str", + "kind": "NameEx", + "name": "p$41" + }, + { + "type": "Set(Str)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Bool", 
+ "kind": "OperEx", + "oper": "Apalache!Skolem", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "EXISTS3", + "args": [ + { + "type": "Str", + "kind": "NameEx", + "name": "q$14" + }, + { + "type": "Set(Str)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c1" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "c2" + } + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "AND", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "NOT", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "FUN_APP", + "args": [ + { + "type": "(Str -> <<<>, Int>>)", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "Str", + "kind": "NameEx", + "name": "p$41" + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "NOT", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "FUN_APP", + "args": [ + { + "type": "(Str -> <<<>, Int>>)", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "Str", + "kind": "NameEx", + "name": "q$14" + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "None" + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": -1 + } + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "FORALL3", + "args": [ + { + "type": "Str", + "kind": "NameEx", + "name": "v$9" + }, + { + "type": "Set(Str)", + "kind": "OperEx", + "oper": "SET_ENUM", + "args": [ + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v0" + } + }, + { + "type": "Str", + "kind": "ValEx", + "value": { + "kind": "TlaStr", + "value": "v1" + } + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "FORALL3", + "args": [ + { + "type": "Int", + "kind": "NameEx", + "name": "t$9" + }, + { + "type": "Set(Int)", + "kind": "OperEx", + "oper": "INT_RANGE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 7 + } + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "FORALL3", + "args": [ + { + "type": "Int", + "kind": "NameEx", + "name": "pr$4" + }, + { + "type": "Set(Int)", + "kind": "OperEx", + 
"oper": "INT_RANGE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "FORALL3", + "args": [ + { + "type": "Int", + "kind": "NameEx", + "name": "r1$2" + }, + { + "type": "Set(Int)", + "kind": "OperEx", + "oper": "INT_RANGE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "FORALL3", + "args": [ + { + "type": "Int", + "kind": "NameEx", + "name": "r2$2" + }, + { + "type": "Set(Int)", + "kind": "OperEx", + "oper": "INT_RANGE", + "args": [ + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 0 + } + }, + { + "type": "Int", + "kind": "ValEx", + "value": { + "kind": "TlaInt", + "value": 3 + } + } + ] + }, + { + "type": "Bool", + "kind": "LetInEx", + "body": { + "type": "Bool", + "kind": "OperEx", + "oper": "OR", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "NOT", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "FUN_APP", + "args": [ + { + "type": "(Str -> <<<>, Int>>)", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "Str", + "kind": "NameEx", + "name": "p$41" + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "OPER_APP", + "args": [ + { + "type": "(() => <>)", + "kind": "NameEx", + "name": "prop$7" + } + ] + }, + { + "type": "Int", + "kind": "NameEx", + "name": "r1$2" + } + ] + } + ] + } + ] + }, + { + "type": "Bool", + "kind": "OperEx", + "oper": "NOT", + "args": [ + { + "type": "Bool", + "kind": "OperEx", + "oper": "EQ", + "args": [ + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "FUN_APP", + "args": [ + { + "type": "(Str -> <<<>, Int>>)", + "kind": "NameEx", + "name": "decision" + }, + { + "type": "Str", + "kind": "NameEx", + "name": "q$14" + } + ] + }, + { + "type": "<<<>, Int>>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "<>", + "kind": "OperEx", + "oper": "OPER_APP", + "args": [ + { + "type": "(() => <>)", + "kind": "NameEx", + "name": "prop$7" + } + ] + }, + { + "type": "Int", + "kind": "NameEx", + "name": "r2$2" + } + ] + } + ] + } + ] + } + ] + }, + "decls": [ + { + "type": "(() => <>)", + "kind": "TlaOperDecl", + "name": "prop$7", + "formalParams": [ + + ], + "isRecursive": false, + "body": { + "type": "<>", + "kind": "OperEx", + "oper": "TUPLE", + "args": [ + { + "type": "Str", + "kind": "NameEx", + "name": "v$9" + }, + { + "type": "Int", + "kind": "NameEx", + "name": "t$9" + }, + { + "type": "Int", + "kind": "NameEx", + "name": "pr$4" + } + ] + } + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + } + ] + } + ] +} \ No newline at end of file diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.tla b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.tla new file mode 100644 index 00000000000..5250a2d544a --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.tla @@ -0,0 
+1,1174 @@ +---------------------------- MODULE counterexample ---------------------------- + +EXTENDS MC_PBT_2C_2F + +(* Constant initialization state *) +ConstInit == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + +(* Initial state *) +State0 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "Init" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) + /\ evidence = {} + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, -1>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, -1>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PROPOSE">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +(* Transition 3 to State1 *) +State1 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "ReceiveProposal" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) + /\ evidence = {} + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + 
src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, -1>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PROPOSE">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +(* Transition 12 to State2 *) +State2 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "UponProposalInPropose" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) + /\ evidence + = {[proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1]} + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { 
[proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, -1>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PREVOTE">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +(* Transition 3 to State3 *) +State3 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "ReceiveProposal" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) + /\ evidence + = {[proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1]} + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "None">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PREVOTE">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", -1>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"None", -1, -1>>>> }) + +(* Transition 9 to State4 *) +State4 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, 
"f4">> }) + /\ action = "UponProposalInPrevoteOrCommitAndPrevote" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) + /\ evidence + = { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2] } + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "v0">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PROPOSE">>, <<"c2", "PRECOMMIT">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +(* Transition 12 to State5 *) +State5 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "UponProposalInPropose" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"None", -1, -1>>, -1>>>> }) + /\ evidence + = { [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> 
"PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "v0">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "c1", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PREVOTE">>, <<"c2", "PRECOMMIT">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +(* Transition 5 to State6 *) +State6 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "UponProposalInPrecommitNoDecision" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"v0", 3, 0>>, 0>>>> }) + /\ evidence + = { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round 
|-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) + /\ lockedValue = SetAsFun({ <<"c1", "None">>, <<"c2", "v0">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "c1", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PREVOTE">>, <<"c2", "DECIDED">> }) + /\ validRound = SetAsFun({ <<"c1", -1>>, <<"c2", 0>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"None", -1, -1>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +(* Transition 9 to State7 *) +State7 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "UponProposalInPrevoteOrCommitAndPrevote" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"None", -1, -1>>, -1>>>>, + <<"c2", <<<<"v0", 3, 0>>, 0>>>> }) + /\ evidence + = { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> 
"PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ lockedValue = SetAsFun({ <<"c1", "v1">>, <<"c2", "v0">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "c1", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "c1", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "PRECOMMIT">>, <<"c2", "DECIDED">> }) + /\ validRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"v1", 2, 0>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +(* Transition 5 to State8 *) +State8 == + Proposer = SetAsFun({ <<0, "f4">>, <<1, "f4">>, <<2, "f4">>, <<3, "f4">> }) + /\ action = "UponProposalInPrecommitNoDecision" + /\ beginRound + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, 7>>, + <<<<1, "c1">>, 7>>, + <<<<2, "c2">>, 7>>, + <<<<1, "c2">>, 7>>, + <<<<3, "c2">>, 7>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, 7>> }) + /\ decision + = SetAsFun({ <<"c1", <<<<"v1", 2, 0>>, 0>>>>, + <<"c2", <<<<"v0", 3, 0>>, 0>>>> }) + /\ evidence + = { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, 
src |-> "c2", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "c1", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f3", type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, round |-> 0, src |-> "f4", type |-> "PREVOTE"], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1], + [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + /\ localClock = SetAsFun({ <<"c1", 3>>, <<"c2", 2>> }) + /\ lockedRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ lockedValue = SetAsFun({ <<"c1", "v1">>, <<"c2", "v0">> }) + /\ msgsPrecommit + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "c1", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PRECOMMIT"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PRECOMMIT"] } + >>, + <<1, {}>>, + << + 2, {[id |-> <<"v2", 3, 2>>, + round |-> 2, + src |-> "f3", + type |-> "PRECOMMIT"]} + >>, + << + 3, {[id |-> <<"v2", 7, 3>>, + round |-> 3, + src |-> "f4", + type |-> "PRECOMMIT"]} + >> }) + /\ msgsPrevote + = SetAsFun({ << + 0, { [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "c2", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "c1", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f3", + type |-> "PREVOTE"], + [id |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PREVOTE"] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ msgsPropose + = SetAsFun({ << + 0, { [proposal |-> <<"v0", 3, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> 2], + [proposal |-> <<"v1", 2, 0>>, + round |-> 0, + src |-> "f4", + type |-> "PROPOSAL", + validRound |-> -1] } + >>, + <<1, {}>>, + <<2, {}>>, + <<3, {}>> }) + /\ proposalReceptionTime + = SetAsFun({ <<<<0, "c1">>, 3>>, + <<<<2, "c1">>, -1>>, + <<<<1, "c1">>, -1>>, + <<<<2, "c2">>, -1>>, + <<<<1, "c2">>, -1>>, + <<<<3, "c2">>, -1>>, + <<<<0, "c2">>, 2>>, + <<<<3, "c1">>, -1>> }) + /\ realTime = 0 + /\ round = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ step = SetAsFun({ <<"c1", "DECIDED">>, <<"c2", "DECIDED">> }) + /\ validRound = SetAsFun({ <<"c1", 0>>, <<"c2", 0>> }) + /\ validValue + = SetAsFun({ <<"c1", <<"v1", 2, 0>>>>, <<"c2", <<"v0", 3, 0>>>> }) + +(* The following formula holds true in the last state and violates the invariant *) +InvariantViolation == + Skolem((\E p$41 \in { "c1", 
"c2" }: + Skolem((\E q$14 \in { "c1", "c2" }: + (~(decision[p$41] = <<<<"None", -1, -1>>, -1>>) + /\ ~(decision[q$14] = <<<<"None", -1, -1>>, -1>>)) + /\ (\A v$9 \in { "v0", "v1" }: + \A t$9 \in 0 .. 7: + \A pr$4 \in 0 .. 3: + \A r1$2 \in 0 .. 3: + \A r2$2 \in 0 .. 3: + LET prop_si_7 == <> IN + ~(decision[p$41] = <<(prop_si_7), r1$2>>) + \/ ~(decision[q$14] = <<(prop_si_7), r2$2>>)))))) + +================================================================================ +(* Created by Apalache on Wed May 18 11:06:20 UTC 2022 *) +(* https://github.com/informalsystems/apalache *) diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/log0.smt b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/log0.smt new file mode 100644 index 00000000000..5c0edf996ed --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/log0.smt @@ -0,0 +1,5 @@ +Logging is disabled (Z3SolverContext.debug = false). Activate with --debug. +;; sat.random_seed = 0 +;; smt.random_seed = 0 +;; fp.spacer.random_seed = 0 +;; sls.random_seed = 0 diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/run.txt b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/run.txt new file mode 100644 index 00000000000..ed35549f316 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_2C_2F.tla/run.txt @@ -0,0 +1 @@ + check --length=8 --inv=Inv --cinit=CInit --discard-disabled=false MC_PBT_2C_2F.tla diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_3C_1F.tla/log0.smt b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_3C_1F.tla/log0.smt new file mode 100644 index 00000000000..5c0edf996ed --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_3C_1F.tla/log0.smt @@ -0,0 +1,5 @@ +Logging is disabled (Z3SolverContext.debug = false). Activate with --debug. 
+;; sat.random_seed = 0 +;; smt.random_seed = 0 +;; fp.spacer.random_seed = 0 +;; sls.random_seed = 0 diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_3C_1F.tla/run.txt b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_3C_1F.tla/run.txt new file mode 100644 index 00000000000..734b5aeac62 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/MC_PBT_3C_1F.tla/run.txt @@ -0,0 +1 @@ + check --length=8 --inv=Inv --cinit=CInit --discard-disabled=false MC_PBT_3C_1F.tla diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/machine.txt b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/machine.txt new file mode 100644 index 00000000000..2352d9db16a --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_data/May2022/machine.txt @@ -0,0 +1,34 @@ +Architecture: x86_64 +CPU op-mode(s): 32-bit, 64-bit +Byte Order: Little Endian +Address sizes: 40 bits physical, 48 bits virtual +CPU(s): 4 +On-line CPU(s) list: 0-3 +Thread(s) per core: 1 +Core(s) per socket: 4 +Socket(s): 1 +NUMA node(s): 1 +Vendor ID: GenuineIntel +CPU family: 6 +Model: 85 +Model name: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz +Stepping: 7 +CPU MHz: 2494.140 +BogoMIPS: 4988.28 +Virtualization: VT-x +Hypervisor vendor: KVM +Virtualization type: full +L1d cache: 128 KiB +L1i cache: 128 KiB +L2 cache: 16 MiB +NUMA node0 CPU(s): 0-3 +Vulnerability Itlb multihit: KVM: Mitigation: Split huge pages +Vulnerability L1tf: Not affected +Vulnerability Mds: Not affected +Vulnerability Meltdown: Not affected +Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp +Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization +Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling +Vulnerability Srbds: Not affected +Vulnerability Tsx async abort: Not affected +Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single ssbd ibrs ibpb ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 pku ospke avx512_vnni arch_capabilities diff --git a/spec/consensus/proposer-based-timestamp/tla/experiment_log.md b/spec/consensus/proposer-based-timestamp/tla/experiment_log.md new file mode 100644 index 00000000000..70d40805cfd --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/experiment_log.md @@ -0,0 +1,9 @@ +# Experiment Log +This file summarizes the history of Apalache experiments for the PBTS specification. 
+ +| Date | Commit | Command | Runtime [h:min:s] | Machine | Notes | +|---|---|---|---|---|---| +| May 11, 2022 | cd48156f662af8fc325a6478dfa34de6be0e36c8 | [run.txt](./experiment_data/May2022/MC_PBT_3C_1F.tla/run.txt) | 74:39:2 | [machine.txt](./experiment_data/May2022/machine.txt) | No invariant violation in 8 steps | +| May 16, 2022 | cd48156f662af8fc325a6478dfa34de6be0e36c8 | [run.txt](./experiment_data/May2022/MC_PBT_2C_2F.tla/run.txt) | 48:31:29 | [machine.txt](./experiment_data/May2022/machine.txt) | [Counterexample](experiment_data/May2022/MC_PBT_2C_2F.tla/counterexample.tla) found (expected) | + + diff --git a/spec/consensus/proposer-based-timestamp/tla/runApalache.sh b/spec/consensus/proposer-based-timestamp/tla/runApalache.sh new file mode 100755 index 00000000000..dfd0a8f07be --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/runApalache.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Works in all cases except running from jar on Windows +EXE=$APALACHE_HOME/bin/apalache-mc + +CMD=${1:-typecheck} +N=${2:-10} +MC=${3:-true} +DD=${4:-false} + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CFG=$SCRIPT_DIR/.apalache.cfg + + +if [[ $CMD == "check" ]]; then + FLAGS="--length=$N --inv=Inv --cinit=CInit --discard-disabled=$DD" +else + FLAGS="" +fi + +if ! [[ -f "$CFG" ]]; then + echo "out-dir: \"$SCRIPT_DIR/_apalache-out\"" >> "$CFG" + echo "write-intermediate: true" >> "$CFG" +fi + +if [[ "$MC" = false ]]; then + # Run 2c2f + $EXE $CMD $FLAGS MC_PBT_2C_2F.tla +else + # Run 3c1f + $EXE $CMD $FLAGS MC_PBT_3C_1F.tla +fi + + + diff --git a/spec/consensus/proposer-based-timestamp/tla/typedefs.tla b/spec/consensus/proposer-based-timestamp/tla/typedefs.tla new file mode 100644 index 00000000000..72e76df54b6 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/typedefs.tla @@ -0,0 +1,39 @@ +-------------------- MODULE typedefs --------------------------- +(* + @typeAlias: PROCESS = Str; + @typeAlias: VALUE = Str; + @typeAlias: STEP = Str; + @typeAlias: ROUND = Int; + @typeAlias: ACTION = Str; + @typeAlias: TRACE = Seq(Str); + @typeAlias: TIME = Int; + @typeAlias: PROPOSAL = <<VALUE, TIME, ROUND>>; + @typeAlias: DECISION = <<PROPOSAL, ROUND>>; + @typeAlias: PROPMESSAGE = + [ + type: STEP, + src: PROCESS, + round: ROUND, + proposal: PROPOSAL, + validRound: ROUND + ]; + @typeAlias: PREMESSAGE = + [ + type: STEP, + src: PROCESS, + round: ROUND, + id: PROPOSAL + ]; + @typeAlias: MESSAGE = + [ + type: STEP, + src: PROCESS, + round: ROUND, + proposal: PROPOSAL, + validRound: ROUND, + id: PROPOSAL + ]; +*) +TypeAliases == TRUE + +============================================================================= \ No newline at end of file diff --git a/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md b/spec/consensus/proposer-based-timestamp/v1/pbts-algorithm_001_draft.md similarity index 95% rename from spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md rename to spec/consensus/proposer-based-timestamp/v1/pbts-algorithm_001_draft.md index b42b3ab2f1f..82a50291a67 100644 --- a/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/v1/pbts-algorithm_001_draft.md @@ -1,4 +1,6 @@ -# Proposer-Based Time - Part II +# PBTS: Protocol Specification (first draft) + +This specification is **OUTDATED**. Please refer to the [new version][algorithm].
## Updated Consensus Algorithm @@ -150,11 +152,11 @@ upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) AND 2f + 1 ⟨PREC **All other rules remains unchanged.** -Back to [main document][main]. - -[main]: ./pbts_001_draft.md - -[arXiv]: https://arxiv.org/abs/1807.04938 +Back to [main document][main_v1]. +[main_v1]: ./pbts_001_draft.md +[algorithm]: ../pbts-algorithm.md +[algorithm_v1]: ./pbts-algorithm_001_draft.md +[arXiv]: https://arxiv.org/abs/1807.04938 diff --git a/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md b/spec/consensus/proposer-based-timestamp/v1/pbts-sysmodel_001_draft.md similarity index 93% rename from spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md rename to spec/consensus/proposer-based-timestamp/v1/pbts-sysmodel_001_draft.md index 8fee14252ee..a6fe7b42201 100644 --- a/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/v1/pbts-sysmodel_001_draft.md @@ -1,4 +1,6 @@ -# Proposer-Based Time - Part I +# PBTS: System Model and Properties (first draft) + +This specification is **OUTDATED**. Please refer to the [new version][sysmodel]. ## System Model @@ -105,7 +107,7 @@ then the time `b.time` in the block `b` that is signed by `c` satisfies - `beginConsensus(k) - PRECISION <= b.time < endConsensus(k) + PRECISION + MSGDELAY`. -> [PBTS-CONSENSUS-TIME-VALID.0] is based on an analysis where the proposer is faulty (and does does not count towards `beginConsensus(k)` and `endConsensus(k)`), and we estimate the times at which correct validators receive and `accept` the `propose` message. If the proposer is correct we obtain +> [PBTS-CONSENSUS-TIME-VALID.0] is based on an analysis where the proposer is faulty (and does not count towards `beginConsensus(k)` and `endConsensus(k)`), and we estimate the times at which correct validators receive and `accept` the `propose` message. If the proposer is correct we obtain #### **[PBTS-CONSENSUS-LIVE-VALID-CORR-PROP.0]** @@ -182,10 +184,14 @@ Let `b` be a block with a valid commit that contains at least one `precommit` me > "triggered the `PRECOMMIT`" implies that the data in `m` and `b` are "matching", that is, `m` proposed the values that are actually stored in `b`. -Back to [main document][main]. +Back to [main document][main_v1]. + +[main_v1]: ./pbts_001_draft.md + +[algorithm_v1]: ./pbts-algorithm_001_draft.md -[main]: ./pbts_001_draft.md +[sysmodel]: ../pbts-sysmodel.md [arXiv]: https://arxiv.org/abs/1807.04938 -[CMBC-FM-2THIRDS-link]: https://github.com/cometbft/cometbft/blob/main/spec/light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 +[CMBC-FM-2THIRDS-link]: ../../../light-client/verification/verification_002_draft.md#cmbc-fm-2thirds1 diff --git a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md b/spec/consensus/proposer-based-timestamp/v1/pbts_001_draft.md similarity index 94% rename from spec/consensus/proposer-based-timestamp/pbts_001_draft.md rename to spec/consensus/proposer-based-timestamp/v1/pbts_001_draft.md index bcb01d73640..bc0a8882493 100644 --- a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/v1/pbts_001_draft.md @@ -1,4 +1,4 @@ -# Proposer-Based Time +# Proposer-Based Time (first draft) ## Current BFTTime @@ -21,7 +21,7 @@ In CometBFT, the first version of how time is computed and stored in a block wor 1. **Liveness.** The liveness of the protocol: 1. does not depend on clock synchronization, 1. depends on bounded message delays. -1. 
**Relation to real time.** There is no clock synchronizaton, which implies that there is **no relation** between the computed block `time` and real time. +1. **Relation to real time.** There is no clock synchronization, which implies that there is **no relation** between the computed block `time` and real time. 1. **Aggregate signatures.** As the `precommit` messages contain the local times, all these `precommit` messages typically differ in the time field, which **prevents** the use of aggregate signatures. ## Suggested Proposer-Based Time @@ -249,21 +249,20 @@ For analyzing real-time safety (Point 5), we use a system parameter `ACCURACY`, This specification describes the changes needed to be done to the Tendermint consensus algorithm as described in the [arXiv paper][arXiv] and the simplified specification in [TLA+][tlatender], and makes precise the underlying assumptions and the required properties. -- [Part I - System Model and Properties][sysmodel] -- [Part II - Protocol specification][algorithm] +- [Part I - System Model and Properties][sysmodel_v1] +- [Part II - Protocol specification][algorithm_v1] - [TLA+ Specification][proposertla] -[arXiv]: https://arxiv.org/abs/1807.04938 +[algorithm_v1]: ./pbts-algorithm_001_draft.md -[tlatender]: ../../light-client/accountability/README.md +[tlatender]: ../../../light-client/accountability/README.md [bfttime]: ../bft-time.md [lcspec]: ../../light-client/README.md -[algorithm]: ./pbts-algorithm_001_draft.md - -[sysmodel]: ./pbts-sysmodel_001_draft.md +[sysmodel_v1]: ./pbts-sysmodel_001_draft.md +[proposertla]: ../tla/TendermintPBT_001_draft.tla -[proposertla]: ./tla/TendermintPBT_001_draft.tla +[arXiv]: https://arxiv.org/abs/1807.04938 diff --git a/spec/consensus/proposer-selection.md b/spec/consensus/proposer-selection.md index f9f0ff4ace1..cc3898de9af 100644 --- a/spec/consensus/proposer-selection.md +++ b/spec/consensus/proposer-selection.md @@ -94,7 +94,7 @@ Each row shows the priority queue and the process place in it. The proposer is t It can be shown that: - At the end of each run k+1 the sum of the priorities is the same as at end of run k. If a new set's priorities are initialized to 0 then the sum of priorities will be 0 at each run while there are no changes. -- The max distance between priorites is (n-1) *P.*[formal proof not finished]* +- The max distance between priorities is (n-1) *P.*[formal proof not finished]* ## Validator Set Changes @@ -108,7 +108,7 @@ Validator | p1 | p2 ----------|----|--- VP | 4 | 3 -Let's also assume that before this change the proposer priorites were as shown in first row (last run). As it can be seen, the selection could run again, without changes, as before. +Let's also assume that before this change the proposer priorities were as shown in first row (last run). As it can be seen, the selection could run again, without changes, as before. | Priority Run | -2 | -1 | 0 | 1 | 2 | Comment | |----------------|----|----|---|----|----|-------------------| @@ -121,7 +121,7 @@ However, when a validator changes power from a high to a low value, some other v As before: - At the end of each run k+1 the sum of the priorities is the same as at run k. -- The max distance between priorites is (n-1) * P. +- The max distance between priorities is (n-1) * P. ### Validator Removal @@ -179,7 +179,7 @@ In order to prevent this, when a new validator is added, its initial priority is where P is the total voting power of the set including V. 
-Curent implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details. +Current implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details. If we consider the validator set where p3 has just been added: @@ -274,7 +274,7 @@ The modified selection algorithm is: Observations: -- With this modification, the maximum distance between priorites becomes 2 * P. +- With this modification, the maximum distance between priorities becomes 2 * P. Note also that even during steady state the priority range may increase beyond 2 * P. The scaling introduced here helps to keep the range bounded. diff --git a/spec/consensus/readme.md b/spec/consensus/readme.md index 9dbee537e11..48aac8c52f1 100644 --- a/spec/consensus/readme.md +++ b/spec/consensus/readme.md @@ -14,8 +14,8 @@ Specification of the consensus protocol implemented in CometBFT. - [Consensus Paper](./consensus-paper) - Latex paper on [arxiv](https://arxiv.org/abs/1807.04938) describing the Tendermint consensus algorithm, adopted in CometBFT, with proofs of safety and termination. -- [BFT Time](./bft-time.md) - How the timestamp in a CometBFT - block header is computed in a Byzantine Fault Tolerant manner +- [Time](./time.md) - How the timestamp in a CometBFT + block header is produced in a Byzantine Fault Tolerant manner - [Creating Proposal](./creating-proposal.md) - How a proposer creates a block proposal for consensus - [Light Client Protocol](./light-client) - A protocol for light weight consensus diff --git a/spec/consensus/signing.md b/spec/consensus/signing.md index ce8b871f772..8304b8e9879 100644 --- a/spec/consensus/signing.md +++ b/spec/consensus/signing.md @@ -30,14 +30,30 @@ All signed messages must correspond to one of these types. ## Timestamp -Timestamp validation is subtle and there are currently no bounds placed on the -timestamp included in a proposal or vote. It is expected that validators will honestly -report their local clock time. The median of all timestamps -included in a commit is used as the timestamp for the next block height. +Both `Proposal` and `Vote` messages include a `Timestamp` field of +[Time](../core/data_structures.md#time) data type. +Timestamp validation is subtle and there are currently no validations on the +timestamp included in a received `Proposal` or `Vote`. +As a general rule, it is expected that validators report in the Timestamp field their +local clock time. Timestamps are expected to be strictly monotonic for a given validator, though -this is not currently enforced. - +this is not enforced. + +Some timestamps, however, are used by the algorithms adopted for computing +[block times](./time.md): + +- [BFT Time](./bft-time.md): the `Timestamp` field of `Precommit` vote messages + is used to compute the `Time` for the next proposed block. + Correct validators are expected to report their local clock time, provided + that the time is higher than the current block's time. + Otherwise, the reported time is the current block's time plus 1ms. + +- [PBTS](./proposer-based-timestamp/README.md): the `Timestamp` field of a + `Proposal` message must match the proposed `Block.Time`. + Otherwise, the `Proposal` will be rejected by correct validators. 
+ There are no requirements for `Vote.Timestamp` values. + ## ChainID ChainID is an unstructured string with a max length of 50-bytes. diff --git a/spec/consensus/time.md b/spec/consensus/time.md new file mode 100644 index 00000000000..c1294100621 --- /dev/null +++ b/spec/consensus/time.md @@ -0,0 +1,89 @@ +--- +order: 2 +--- +# Time + +CometBFT provides a Byzantine fault-tolerant source of time. + +Time in CometBFT is defined with the [`Time`][spec-time] field of the +block [`Header`][spec-header]. + +## Properties + +The Time produced by CometBFT satisfies the following properties: + +- **Time Monotonicity**: time is monotonically increasing. More precisely, given + two block headers `H1` of height `h1` and `H2` of height `h2`, + it is guaranteed that if `h2 > h1` then `H2.Time > H1.Time`. + +- **Byzantine Fault Tolerance**: malicious nodes or nodes with inaccurate clocks should not be able + to arbitrarily increase or decrease the block Time. + In other words, the Time of blocks should be defined by correct nodes. + +In addition, the Time produced by CometBFT is expected, by external observers, to provide: + +- **Relation to real time**: block times bear some resemblance to real time. + In other words, block times should represent, within some reasonable accuracy, + the actual clock time at which blocks were produced. + More formally, let `t` be the clock time at which a block with header `H` + was first proposed. + Then there exists a, possibly unknown but reasonably small, bound `ACCURACY` + so that `|H.Time - t| < ACCURACY`. + +## Implementations + +CometBFT implements two algorithms for computing block times: + +- [BFT Time][bft-time]: the algorithm adopted in versions up to `v0.38.x`; + available, in legacy mode, in version `v1.x`. + +- [Proposer-Based Timestamps (PBTS)][pbts-spec]: introduced in version `v1.x`, + as a replacement for BFT Time. + +Users are strongly encouraged to adopt PBTS in new chains or switch to PBTS +when upgrading existing chains. + +### Comparison + +The table below compares BFT Time and PBTS algorithms in terms of the above enumerated properties: + +| Algorithm | Time Monotonicity | Byzantine Fault Tolerance | Relation to real time | +|-----------|:-----------------:|:---------------------------------:|-----------------------------------------------------------------------------------------------| +| BFT Time | Guaranteed | Tolerates `< 1/3` Byzantine nodes | Best effort and **not guaranteed**. | +| PBTS | Guaranteed | Tolerates `< 2/3` Byzantine nodes | Guaranteed with `ACCURACY` determined by the consensus parameters `PRECISION` and `MSGDELAY`. | + +Note that by Byzantine nodes we consider both malicious nodes, which purposely +try to increase or decrease block times, and nodes that produce or propose +inaccurate block times because they rely on inaccurate local clocks. + +For more details, refer to the specification of [BFT Time][bft-time] and [Proposer-Based Timestamps][pbts-spec]. + +## Adopting PBTS + +The Proposer-Based Timestamp (PBTS) algorithm is the recommended algorithm for +producing block times. + +As of CometBFT `v1.x`, however, PBTS is not enabled by default, either for new +chains using default values for genesis parameters or for chains upgrading to +newer CometBFT versions, for backwards compatibility reasons. + +Enabling PBTS requires configuring some consensus parameters: + +- From `SynchronyParams`, the `Precision` and `MessageDelay` parameters.
+ They correspond, respectively, to the `PRECISION` and `MSGDELAY` parameters + adopted in the PBTS specification. +- From `FeatureParams`, the `PbtsEnableHeight` parameter, which defines the + height from which PBTS will be adopted. + While it is set to `0` (the default), or at heights lower than + `PbtsEnableHeight`, BFT Time is adopted. + +Refer to the [consensus parameters specification][spec-params] for more details, +or to the [PBTS user documentation][pbts-doc] for a more pragmatic description of the +algorithm and recommendations on how to properly configure its parameters. + +[spec-time]: ../core/data_structures.md#time +[spec-header]: ../core/data_structures.md#header +[bft-time]: ./bft-time.md +[pbts-spec]: ./proposer-based-timestamp/README.md +[spec-params]: ../core/data_structures.md#consensusparams +[pbts-doc]: https://github.com/cometbft/cometbft/blob/feature/pbts/docs/explanation/core/proposer-based-timestamps.md diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index 1c2fe23ab7a..489cafdf9e1 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -9,51 +9,57 @@ Here we describe the data structures in the CometBFT blockchain and the rules fo The CometBFT blockchain consists of a short list of data types: - [Data Structures](#data-structures) - - [Block](#block) - - [Execution](#execution) - - [Header](#header) - - [Version](#version) - - [BlockID](#blockid) - - [PartSetHeader](#partsetheader) - - [Part](#part) - - [Time](#time) - - [Data](#data) - - [Commit](#commit) - - [CommitSig](#commitsig) - - [BlockIDFlag](#blockidflag) - - [Vote](#vote) - - [CanonicalVote](#canonicalvote) - - [Proposal](#proposal) - - [SignedMsgType](#signedmsgtype) - - [Signature](#signature) - - [EvidenceList](#evidencelist) - - [Evidence](#evidence) - - [DuplicateVoteEvidence](#duplicatevoteevidence) - - [LightClientAttackEvidence](#lightclientattackevidence) - - [LightBlock](#lightblock) - - [SignedHeader](#signedheader) - - [ValidatorSet](#validatorset) - - [Validator](#validator) - - [Address](#address) - - [ConsensusParams](#consensusparams) - - [BlockParams](#blockparams) - - [EvidenceParams](#evidenceparams) - - [ValidatorParams](#validatorparams) - - [VersionParams](#versionparams) - - [Proof](#proof) + - [Block](#block) + - [Execution](#execution) + - [Header](#header) + - [Version](#version) + - [BlockID](#blockid) + - [PartSetHeader](#partsetheader) + - [Part](#part) + - [Time](#time) + - [Data](#data) + - [Commit](#commit) + - [ExtendedCommit](#extendedcommit) + - [CommitSig](#commitsig) + - [ExtendedCommitSig](#extendedcommitsig) + - [BlockIDFlag](#blockidflag) + - [Vote](#vote) + - [CanonicalVote](#canonicalvote) + - [CanonicalVoteExtension](#canonicalvoteextension) + - [Proposal](#proposal) + - [SignedMsgType](#signedmsgtype) + - [Signature](#signature) + - [EvidenceList](#evidencelist) + - [Evidence](#evidence) + - [DuplicateVoteEvidence](#duplicatevoteevidence) + - [LightClientAttackEvidence](#lightclientattackevidence) + - [LightBlock](#lightblock) + - [SignedHeader](#signedheader) + - [ValidatorSet](#validatorset) + - [Validator](#validator) + - [Address](#address) + - [Proof](#proof) + - [ConsensusParams](#consensusparams) + - [BlockParams](#blockparams) + - [EvidenceParams](#evidenceparams) + - [ValidatorParams](#validatorparams) + - [VersionParams](#versionparams) + - [ABCIParams](#abciparams) + - [FeatureParams](#featureparams) + - [SynchronyParams](#synchronyparams) ## Block A block consists of a header, transactions, votes (the commit),
-and a list of evidence of malfeasance (ie. signing conflicting votes).
+and a list of evidence of misbehavior (i.e. signing conflicting votes).

-| Name | Type | Description | Validation |
-|--------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------|
-| Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header](#header) | Must adhere to the validation rules of [header](#header) |
-| Data | [Data](#data) | Data contains a list of transactions. The contents of the transaction is unknown to CometBFT. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#checktx).
-| Evidence | [EvidenceList](#evidencelist) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidencelist) apply |
-| LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). |
+| Name | Type | Description | Validation |
+|--------|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------|
+| Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header](#header) | Must adhere to the validation rules of [header](#header) |
+| Data | [Data](#data) | Data contains a list of transactions. The contents of the transactions are unknown to CometBFT. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_methods.md#checktx). |
+| Evidence | [EvidenceList](#evidencelist) | Evidence contains a list of evidence of misbehavior committed by validators. | Can be empty, but when populated the validation rules from [evidenceList](#evidencelist) apply |
+| LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). |

## Execution

@@ -72,7 +78,7 @@ set (TODO). Execute is defined as:

 ```go
 func Execute(state State, app ABCIApp, block Block) State {
-    // Fuction ApplyBlock executes block of transactions against the app and returns the new root hash of the app state,
+    // Function ApplyBlock executes block of transactions against the app and returns the new root hash of the app state,
     // modifications to the validator set and the changes of the consensus parameters.
     AppHash, ValidatorChanges, ConsensusParamChanges := app.ApplyBlock(block)

@@ -119,22 +125,22 @@ The steps to validate a new block are:

 A block header contains metadata about the block and about the consensus, as well as commitments to the data in the current block, the previous block, and the results returned by the application:

-| Name | Type | Description | Validation |
-|-------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Version | [Version](#version) | Version defines the application and protocol version being used. | Must adhere to the validation rules of [Version](#version) |
-| ChainID | String | ChainID is the ID of the chain. This must be unique to your chain. | ChainID must be less than 50 bytes. |
-| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 |
-| Time | [Time](#time) | The timestamp is equal to the weighted median of validators present in the last commit. Read more on time in the [BFT-time section](../consensus/bft-time.md). Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. | Time must be >= previous header timestamp + consensus parameters TimeIotaMs. The timestamp of the first block must be equal to the genesis time (since there's no votes to compute the median). |
-| LastBlockID | [BlockID](#blockid) | BlockID of the previous block. | Must adhere to the validation rules of [blockID](#blockid). The first block has `block.Header.LastBlockID == BlockID{}`. |
-| LastCommitHash | slice of bytes (`[]byte`) | MerkleRoot of the lastCommit's signatures. The signatures represent the validators that committed to the last block. The first block has an empty slices of bytes for the hash. | Must be of length 32 |
-| DataHash | slice of bytes (`[]byte`) | MerkleRoot of the hash of transactions. **Note**: The transactions are hashed before being included in the merkle tree, the leaves of the Merkle tree are the hashes, not the transactions themselves. | Must be of length 32 |
-| ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 |
-| NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 |
-| ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 |
-| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and committing the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, CometBFT can not perform validation on it. |
-| LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `ResponseDeliverTx` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. |
-| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behavior included in this block. | Must be of length 32 |
-| ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 |
+| Name | Type | Description | Validation |
+|-------------------|---------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Version | [Version](#version) | Version defines the application and block versions being used. | Must adhere to the validation rules of [Version](#version) |
+| ChainID | String | ChainID is the ID of the chain. This must be unique to your chain. | ChainID must be less than 50 bytes. |
+| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 |
+| Time | [Time](#time) | The timestamp can be computed using [PBTS][pbts] or [BFT Time][bfttime] algorithms. In case of PBTS, it is the time at which the proposer has produced the block (the value of its local clock). In case of BFT Time, it is equal to the weighted median of timestamps present in the previous commit. | Time must be larger than the Time of the previous block header. The timestamp of the first block should not be smaller than the genesis time. When BFT Time is used, it should match the genesis time (since there are no votes to compute the median with). |
+| LastBlockID | [BlockID](#blockid) | BlockID of the previous block. | Must adhere to the validation rules of [blockID](#blockid). The first block has `block.Header.LastBlockID == BlockID{}`. |
+| LastCommitHash | slice of bytes (`[]byte`) | MerkleRoot of the lastCommit's signatures. The signatures represent the validators that committed to the last block. The first block has an empty slice of bytes for the hash. | Must be of length 32 |
+| DataHash | slice of bytes (`[]byte`) | MerkleRoot of the hash of transactions. **Note**: The transactions are hashed before being included in the merkle tree, the leaves of the Merkle tree are the hashes, not the transactions themselves. | Must be of length 32 |
+| ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 |
+| NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 |
+| ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 |
+| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and committing the previous block. It serves as the basis for validating any merkle proofs that come from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `InitChainResponse.app_hash`. | This hash is determined by the application, CometBFT cannot perform validation on it. |
+| LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `DeliverTxResponse` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. |
+| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behavior included in this block. | Must be of length 32 |
+| ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 |

## Version

@@ -142,10 +148,10 @@ NOTE: that this is more specifically the consensus version and doesn't include i

 P2P Version. (TODO: we should write a comprehensive document about versioning that this can refer to)

-| Name | type | Description | Validation |
-|-------|--------|-----------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|
-| Block | uint64 | This number represents the version of the block protocol and must be the same throughout an operational network | Must be equal to protocol version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) |
-| App | uint64 | App version is decided on by the application. Read [here](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_app_requirements.md) | `block.Version.App == state.Version.Consensus.App` |
+| Name | type | Description | Validation |
+|-------|--------|------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------|
+| Block | uint64 | This number represents the block version and must be the same throughout an operational network | Must be equal to block version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) |
+| App | uint64 | App version is decided on by the application. Read [here](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci++_app_requirements.md) | `block.Version.App == state.Version.Consensus.App` |

## BlockID

@@ -171,14 +177,15 @@ Part defines a part of a block. In CometBFT blocks are broken into `parts` for g

 | Name | Type | Description | Validation |
 |-------|-----------------|-----------------------------------|----------------------|
-| index | int32 | Total amount of parts for a block | Must be > 0 |
+| index | int32 | Index of this part within the set of parts for a block | Must be >= 0 |
 | bytes | bytes | MerkleRoot of a serialized block | Must be of length 32 |
 | proof | [Proof](#proof) | MerkleRoot of a serialized block | Must be of length 32 |

## Time

-CometBFT uses the [Google.Protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp)
+CometBFT uses the [Google.Protobuf.Timestamp](https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp)
 format, which uses two integers, one 64 bit integer for Seconds and a 32 bit integer for Nanoseconds.
+Time is aligned with Coordinated Universal Time (UTC).

## Data

@@ -192,12 +199,14 @@ Data is just a wrapper for a list of transactions, where transactions are arbitr

## Commit

 Commit is a simple wrapper for a list of signatures, with one for each validator. It also contains the relevant BlockID, height and round:

-| Name | Type | Description | Validation |
-|------------|----------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|
-| Height | int64 | Height at which this commit was created. | Must be > 0 |
-| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
-| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | Must adhere to the validation rules of [BlockID](#blockid). |
-| Signatures | Array of [CommitSig](#commitsig) | Array of commit signatures that correspond to current validator set. | Length of signatures must be > 0 and adhere to the validation of each individual [Commitsig](#commitsig) |
+| Name | Type | Description | Validation |
+|------------|----------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------|
+| Height | int64 | Height at which this commit was created. | Must be >= 0. |
+| Round | int32 | Round that the commit corresponds to. | Must be >= 0. |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | If Height > 0, then it cannot be the [BlockID](#blockid) of a nil block. |
+| Signatures | Array of [CommitSig](#commitsig) | Array of commit signatures that correspond to current validator set. | If Height > 0, then the length of signatures must be > 0 and adhere to the validation of each individual [Commitsig](#commitsig). |
+
+
 ## ExtendedCommit

@@ -206,8 +215,8 @@ In addition, it contains the verified vote extensions, one for each non-`nil` vo

 | Name | Type | Description | Validation |
 |--------------------|------------------------------------------|-------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
-| Height | int64 | Height at which this commit was created. | Must be > 0 |
-| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
+| Height | int64 | Height at which this commit was created. | Must be >= 0 |
+| Round | int32 | Round that the commit corresponds to. | Must be >= 0 |
 | BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | Must adhere to the validation rules of [BlockID](#blockid). |
 | ExtendedSignatures | Array of [ExtendedCommitSig](#commitsig) | The current validator set's commit signatures, extension, and extension signatures. | Length of signatures must be > 0 and adhere to the validation of each individual [ExtendedCommitSig](#extendedcommitsig) |

@@ -222,7 +231,7 @@ to reconstruct the vote set given the validator set.
 | BlockIDFlag | [BlockIDFlag](#blockidflag) | Represents the validators participation in consensus: its vote was not received, voted for the block that received the majority, or voted for nil | Must be one of the fields in the [BlockIDFlag](#blockidflag) enum |
 | ValidatorAddress | [Address](#address) | Address of the validator | Must be of length 20 |
 | Timestamp | [Time](#time) | This field will vary from `CommitSig` to `CommitSig`. It represents the timestamp of the validator. | [Time](#time) |
-| Signature | [Signature](#signature) | Signature corresponding to the validators participation in consensus. | The length of the signature must be > 0 and < than 64 |
+| Signature | [Signature](#signature) | Signature corresponding to the validator's participation in consensus. | The length of the signature must be > 0 and < 64 for `ed25519` or < 96 for `bls12381` |

 NOTE: `ValidatorAddress` and `Timestamp` fields may be removed in the future
 (see [ADR-25](https://github.com/cometbft/cometbft/blob/main/docs/architecture/adr-025-commit.md)).

@@ -262,18 +271,18 @@

 A vote is a signed message from a validator for a particular block. The vote includes information about the validator signing it. When stored in the blockchain or propagated over the network, votes are encoded in Protobuf.

-| Name | Type | Description | Validation |
-|--------------------|---------------------------------|------------------------------------------------------------------------------------------|------------------------------------------|
-| Type | [SignedMsgType](#signedmsgtype) | The type of message the vote refers to | Must be `PrevoteType` or `PrecommitType` |
-| Height | int64 | Height for which this vote was created for | Must be > 0 |
-| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
-| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | |
-| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. | |
-| ValidatorAddress | bytes | Address of the validator | Length must be equal to 20 |
-| ValidatorIndex | int32 | Index at a specific block height corresponding to the Index of the validator in the set. | Must be > 0 |
-| Signature | bytes | Signature by the validator if they participated in consensus for the associated block. | Length must be > 0 and < 64 |
+| Name | Type | Description | Validation |
+|--------------------|---------------------------------|--------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
+| Type | [SignedMsgType](#signedmsgtype) | The type of message the vote refers to | Must be `PrevoteType` or `PrecommitType` |
+| Height | int64 | Height for which this vote was created | Must be > 0 |
+| Round | int32 | Round that the vote corresponds to. | Must be >= 0 |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | |
+| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. | |
+| ValidatorAddress | bytes | Address of the validator | Length must be equal to 20 |
+| ValidatorIndex | int32 | Index at a specific block height corresponding to the Index of the validator in the set. | Must be > 0 |
+| Signature | bytes | Signature by the validator if they participated in consensus for the associated block. | Length must be > 0 and < 64 for `ed25519` or < 96 for `bls12381` |
 | Extension | bytes | Vote extension provided by the Application running at the validator's node. | Length can be 0 |
-| ExtensionSignature | bytes | Signature for the extension | Length must be > 0 and < 64 |
+| ExtensionSignature | bytes | Signature for the extension | Length must be > 0 and < 64 for `ed25519` or < 96 for `bls12381` |

## CanonicalVote

@@ -284,8 +293,8 @@ and uses a different ordering of the fields.

 | Name | Type | Description | Validation |
 |-----------|---------------------------------|-----------------------------------------|------------------------------------------|
 | Type | [SignedMsgType](#signedmsgtype) | The type of message the vote refers to | Must be `PrevoteType` or `PrecommitType` |
-| Height | int64 | Height in which the vote was provided. | Must be > 0 |
-| Round | int64 | Round in which the vote was provided. | Must be > 0 |
+| Height | int64 | Height in which the vote was provided. | Must be > 0 |
+| Round | int64 | Round in which the vote was provided. | Must be >= 0 |
 | BlockID | string | ID of the block the vote refers to. | |
 | Timestamp | string | Time of the vote. | |
 | ChainID | string | ID of the blockchain running consensus. | |

@@ -318,26 +327,26 @@ This is the structure to marshall in order to obtain the bytes to sign or verify

 | Name | Type | Description | Validation |
 |-----------|--------|---------------------------------------------|----------------------|
 | Extension | bytes | Vote extension provided by the Application. | Can have zero length |
-| Height | int64 | Height in which the extension was provided. | Must be > 0 |
-| Round | int64 | Round in which the extension was provided. | Must be > 0 |
+| Height | int64 | Height in which the extension was provided. | Must be >= 0 |
+| Round | int64 | Round in which the extension was provided. | Must be >= 0 |
 | ChainID | string | ID of the blockchain running consensus. | |

## Proposal

 Proposal contains height and round for which this proposal is made, BlockID as a unique identifier
 of proposed block, timestamp, and POLRound (a so-called Proof-of-Lock (POL) round) that is needed for
-termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that
-is locked in POLRound. The message is signed by the validator private key.
-
-| Name | Type | Description | Validation |
-|-----------|---------------------------------|-----------------------------------------------------------------------------------------|---------------------------------------------------------|
-| Type | [SignedMsgType](#signedmsgtype) | Represents a Proposal [SignedMsgType](#signedmsgtype) | Must be `ProposalType` [signedMsgType](#signedmsgtype) |
-| Height | uint64 | Height for which this vote was created for | Must be > 0 |
-| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
-| POLRound | int64 | Proof of lock | Must be > 0 |
-| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) |
-| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. | [Time](#time) |
-| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 |
+termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that was
+or could have been locked in POLRound. The message is signed by the validator private key.
+
+| Name | Type | Description | Validation |
+|-----------|---------------------------------|-----------------------------------------------------------------------------------------|---------------------------------------------------------------------------------|
+| Type | [SignedMsgType](#signedmsgtype) | Represents a Proposal [SignedMsgType](#signedmsgtype). | Must be `ProposalType` |
+| Height | uint64 | Height for which this proposal was created | Must be >= 0 |
+| Round | int32 | Round that the proposal corresponds to. | Must be >= 0 |
+| POLRound | int64 | Proof of lock round. | Must be >= -1 |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) |
+| Timestamp | [Time](#time) | Timestamp represents the time at which the block was produced. | [Time](#time) |
+| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated block. | Length of signature must be > 0 and < 64 for `ed25519` or < 96 for `bls12381` |

## SignedMsgType

@@ -455,47 +464,106 @@ func SumTruncated(bz []byte) []byte {
 }
 ```

+## Proof
+
+| Name | Type | Description | Field Number |
+|-----------|----------------|-----------------------------------------------|:------------:|
+| total | int64 | Total number of items. | 1 |
+| index | int64 | Index item to prove. | 2 |
+| leaf_hash | bytes | Hash of item value. | 3 |
+| aunts | repeated bytes | Hashes from leaf's sibling to a root's child. | 4 |
+
 ## ConsensusParams

-| Name | Type | Description | Field Number |
-|-----------|-------------------------------------|------------------------------------------------------------------------------|--------------|
-| block | [BlockParams](#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 |
-| evidence | [EvidenceParams](#evidenceparams) | Parameters limiting the validity of evidence of byzantine behavior. | 2 |
-| validator | [ValidatorParams](#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 |
-| version | [BlockParams](#blockparams) | The ABCI application version. | 4 |
+| Name | Type | Description | Field Number |
+|-----------|-------------------------------------|-------------------------------------------------------------------------|:------------:|
+| block | [BlockParams](#blockparams) | Parameters limiting the block and gas. | 1 |
+| evidence | [EvidenceParams](#evidenceparams) | Parameters determining the validity of evidence of Byzantine behavior. | 2 |
+| validator | [ValidatorParams](#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 |
+| version | [VersionParams](#versionparams) | The version of specific components of CometBFT. | 4 |
+| synchrony | [SynchronyParams](#synchronyparams) | Parameters determining the validity of block timestamps. | 6 |
+| feature | [FeatureParams](#featureparams) | Parameters for configuring the height from which features are enabled. | 7 |

### BlockParams

-| Name | Type | Description | Field Number |
-|--------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
-| max_bytes | int64 | Max size of a block, in bytes. | 1 |
-| max_gas | int64 | Max sum of `GasWanted` in a proposed block. NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! | 2 |
+| Name | Type | Description | Field Number |
+|-----------|-------|---------------------------------------------------------|:------------:|
+| max_bytes | int64 | Maximum size of a block, in bytes. | 1 |
+| max_gas | int64 | Maximum gas wanted by transactions included in a block. | 2 |
+
+The `max_bytes` parameter must be greater than or equal to -1, and cannot be greater
+than the hard-coded maximum block size, which is 100MB.
+If set to -1, the limit is the hard-coded maximum block size.
+
+The `max_gas` parameter must be greater than or equal to -1.
+If set to -1, no limit is enforced.
+
+Blocks that violate `max_gas` were potentially proposed by Byzantine validators.
+CometBFT does not enforce the maximum wanted gas for committed blocks.
+It is the responsibility of the application to handle blocks whose wanted gas exceeds
+the configured `max_gas` when processing the block.

### EvidenceParams

-| Name | Type | Description | Field Number |
-|--------------------|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
-| max_age_num_blocks | int64 | Max age of evidence, in blocks. | 1 |
-| max_age_duration | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). | 2 |
-| max_bytes | int64 | maximum size in bytes of total evidence allowed to be entered into a block | 3 |
+| Name | Type | Description | Field Number |
+|--------------------|--------------------------------------------|----------------------------------------------------------------------|:------------:|
+| max_age_num_blocks | int64 | Max age of evidence, in blocks. | 1 |
+| max_age_duration | [google.protobuf.Duration][proto-duration] | Max age of evidence, in time. | 2 |
+| max_bytes | int64 | Maximum size in bytes of evidence allowed to be included in a block. | 3 |
+
+The recommended value of the `max_age_duration` parameter should correspond to
+the application's "unbonding period" or other similar mechanism for handling
+[Nothing-At-Stake attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed).
+
+The recommended formula for calculating `max_age_num_blocks` is `max_age_duration / {average block time}`.

### ValidatorParams

 | Name | Type | Description | Field Number |
-|---------------|-----------------|-----------------------------------------------------------------------|--------------|
+|---------------|-----------------|-----------------------------------------------------------------------|:------------:|
 | pub_key_types | repeated string | List of accepted public key types. Uses same naming as `PubKey.Type`. | 1 |

+The `pub_key_types` parameter uses ABCI public key naming, not Amino names.
+
### VersionParams

-| Name | Type | Description | Field Number |
-|-------------|--------|-------------------------------|--------------|
-| app_version | uint64 | The ABCI application version. | 1 |
+| Name | Type | Description | Field Number |
+|------|--------|-------------------------------|:------------:|
+| app | uint64 | The ABCI application version. | 1 |

-## Proof
+The `app` parameter was named `app_version` in CometBFT 0.34.

-| Name | Type | Description | Field Number |
-|-----------|----------------|-----------------------------------------------|--------------|
-| total | int64 | Total number of items. | 1 |
-| index | int64 | Index item to prove. | 2 |
-| leaf_hash | bytes | Hash of item value. | 3 |
-| aunts | repeated bytes | Hashes from leaf's sibling to a root's child. | 4 |
+### ABCIParams
+
+| Name | Type | Description | Field Number |
+|-------------------------------|-------|---------------------------------------------------|:------------:|
+| vote_extensions_enable_height | int64 | The height where vote extensions will be enabled. | 1 |
+
+The `ABCIParams` type has been **deprecated** as of CometBFT `v1.0`.
+
+### FeatureParams
+
+| Name | Type | Description | Field Number |
+|-------------------------------|-------|-------------------------------------------------------------------|:------------:|
+| vote_extensions_enable_height | int64 | First height during which vote extensions will be enabled. | 1 |
+| pbts_enable_height | int64 | Height at which Proposer-Based Timestamps (PBTS) will be enabled. | 2 |
+
+From the configured height, and for all subsequent heights, the corresponding
+feature will be enabled.
+It cannot be set to a height lower than or equal to the current blockchain height.
+A value of 0 (the default) indicates that the feature is disabled.
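+
+To make the enable-height rule concrete, here is a minimal Go sketch. The struct
+and helpers are illustrative only, mirroring the table above rather than the
+actual CometBFT types:
+
+```go
+// FeatureParams mirrors the table above; illustrative, not the real CometBFT type.
+type FeatureParams struct {
+	VoteExtensionsEnableHeight int64
+	PbtsEnableHeight           int64
+}
+
+// enabledAt reports whether a feature with the given enable height is active
+// at height h: 0 means the feature is disabled, otherwise it is active from
+// the enable height onwards.
+func enabledAt(enableHeight, h int64) bool {
+	return enableHeight != 0 && h >= enableHeight
+}
+
+// validateEnableHeight applies the rule stated above: an enable height cannot
+// be set to a height lower than or equal to the current blockchain height.
+func validateEnableHeight(enableHeight, currentHeight int64) error {
+	if enableHeight != 0 && enableHeight <= currentHeight {
+		return fmt.Errorf("enable height %d must be greater than current height %d",
+			enableHeight, currentHeight)
+	}
+	return nil
+}
+```
+
+For example, with `PbtsEnableHeight = 100`, `enabledAt(100, 99)` is false (BFT Time
+is still used at height 99), while `enabledAt(100, 100)` is true (PBTS is used from
+height 100 onwards).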
+
+### SynchronyParams
+
+| Name | Type | Description | Field Number |
+|---------------|--------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|:------------:|
+| precision | [google.protobuf.Duration][proto-duration] | Bound for how skewed a proposer's clock may be from any validator on the network while still producing valid proposals. | 1 |
+| message_delay | [google.protobuf.Duration][proto-duration] | Bound for how long a proposal message may take to reach all validators on a network and still be considered valid. | 2 |
+
+These parameters are part of the Proposer-Based Timestamps (PBTS) algorithm.
+For more information on the relationship of the synchrony parameters to
+block timestamp validity, refer to the [PBTS specification][pbts].
+
+[pbts]: ../consensus/proposer-based-timestamp/README.md
+[bfttime]: ../consensus/bft-time.md
+[proto-duration]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
diff --git a/spec/core/encoding.md b/spec/core/encoding.md
index 0c2fdb1f63e..0be19158736 100644
--- a/spec/core/encoding.md
+++ b/spec/core/encoding.md
@@ -19,7 +19,7 @@ For details on varints, see the [protobuf
 spec](https://developers.google.com/protocol-buffers/docs/encoding#varints).

 For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`,
-while a byte-array containing 300 entires beginning with `[0xA, 0xB, ...]` would
+while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would
 be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300.

## Hashing

@@ -41,7 +41,7 @@ include details of the private keys beyond their type and name.

### Key Types

-Each type specifies it's own pubkey, address, and signature format.
+Each type specifies its own pubkey, address, and signature format.

#### Ed25519

@@ -59,10 +59,38 @@ CometBFT adopts [zip215](https://zips.z.cash/zip-0215) for verification of ed255

#### Secp256k1

-The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key:
+The address is the RIPEMD160 hash of the SHA256 hash of the raw 33-byte public key:
+
 ```go
-address = SHA256(pubkey)[:20]
+address = RIPEMD160(SHA256(pubkey))
+```
+
+The RIPEMD160 checksum size is 20 bytes.
+
+The public key is comprised of 32 bytes for one field element (the x-coordinate),
+plus one byte for the parity of the y-coordinate. The first byte is 0x02
+if the y-coordinate is the lexicographically largest of the two
+associated with the x-coordinate; otherwise, the first byte is 0x03. This
+prefix is followed by the x-coordinate.
+
+Decoding the RPC response for a secp256k1 pubkey:
+
+```sh
+# "pub_key": {
+#   "type": "tendermint/PubKeySecp256k1",
+#   "value": "AkeI23hsiCXVTf2+k+hGJAj/tuXRlwNRHI/Iv2Cvj3LQ"
+# },
+
+$ echo AkeI23hsiCXVTf2+k+hGJAj/tuXRlwNRHI/Iv2Cvj3LQ | base64 -d | xxd -p -c 33
+024788db786c8825d54dfdbe93e8462408ffb6e5d19703511c8fc8bf60af8f72d0
+```
+
+The first byte plus the two field elements:
+
+```
+02_4788db786c8825d54dfdbe93e8462408ffb6e5d19703511c8fc8bf60af8f72d0
+   4788db786c8825d54dfdbe93e8462408ffb6e5d19703511c8fc8bf60af8f72d0_c47efb012b928018e99e892cfbfa7e8535de85169682346d66676e47da261498
+```

## Other Common Types

@@ -171,7 +199,7 @@ func getSplitPoint(k int) { ...
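+// getSplitPoint is assumed here to return the largest power of two smaller
+// than k, matching the RFC 6962 Merkle tree split point used elsewhere in this spec.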
 }

 func MerkleRoot(items [][]byte) []byte{
 	switch len(items) {
 	case 0:
-		return empthHash()
+		return emptyHash()
 	case 1:
 		return leafHash(items[0])
 	default:
diff --git a/spec/core/genesis.md b/spec/core/genesis.md
index 843c4f699a1..f59654520b8 100644
--- a/spec/core/genesis.md
+++ b/spec/core/genesis.md
@@ -25,7 +25,7 @@ The genesis file is the starting point of a chain. An application will populate

 > Note: For evidence to be considered invalid, evidence must be older than both `max_age_num_blocks` and `max_age_duration`

 - `validator`
-  - `pub_key_types`: Defines which curves are to be accepted as a valid validator consensus key. CometBFT supports ed25519, sr25519 and secp256k1.
+  - `pub_key_types`: Defines which curves are to be accepted as a valid validator consensus key. CometBFT supports ed25519, secp256k1 and bls12381.
 - `version`
   - `app_version`: The version of the application. This is set by the application and is used to identify which version of the app a user should be using in order to operate a node.
diff --git a/spec/core/state.md b/spec/core/state.md
index c19d0096ca1..762df2e75a6 100644
--- a/spec/core/state.md
+++ b/spec/core/state.md
@@ -51,11 +51,11 @@ be found in [data structures](./data_structures.md)

## Execution

-State gets updated at the end of executing a block. Of specific interest is `ResponseEndBlock` and
-`ResponseCommit`
+State gets updated at the end of executing a block. Of specific interest are `EndBlockResponse` and
+`CommitResponse`

 ```go
-type ResponseEndBlock struct {
+type EndBlockResponse struct {
 	ValidatorUpdates      []ValidatorUpdate       `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"`
 	ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"`
 	Events                []Event                 `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"`

@@ -66,15 +66,16 @@ where

 ```go
 type ValidatorUpdate struct {
-	PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"`
-	Power  int64            `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"`
+	Power       int64  `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"`
+	PubKeyBytes []byte `protobuf:"bytes,3,opt,name=pub_key_bytes,json=pubKeyBytes,proto3" json:"pub_key_bytes,omitempty"`
+	PubKeyType  string `protobuf:"bytes,4,opt,name=pub_key_type,json=pubKeyType,proto3" json:"pub_key_type,omitempty"`
 }
 ```

 and

 ```go
-type ResponseCommit struct {
+type CommitResponse struct {
 	// reserve 1
 	Data         []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
 	RetainHeight int64  `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"`

@@ -84,7 +85,7 @@ type ResponseCommit struct {

 `ValidatorUpdates` are used to add and remove validators to the current set as well as update
 validator power. Setting validator power to 0 in `ValidatorUpdate` will cause the validator to be
 removed. `ConsensusParams` are safely copied across (i.e. if a field is nil it gets ignored) and the
-`Data` from the `ResponseCommit` is used as the `AppHash`
+`Data` from the `CommitResponse` is used as the `AppHash`

## Version

@@ -107,8 +108,8 @@ otherwise.

 The Application may set `ConsensusParams.Block.MaxBytes` to -1.
 In that case, the actual block limit is set to 100 MB,
 and CometBFT will provide all transactions in the mempool as part of `PrepareProposal`.
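+
+As the next paragraph notes, the application must return a list of transactions
+that fits within the request's byte budget. A minimal Go sketch of that check
+(the helper and its names are illustrative, not part of the ABCI types):
+
+```go
+// capTxs greedily keeps transactions, in order, until adding one more would
+// exceed maxTxBytes. Illustrative helper only, not part of the CometBFT API.
+func capTxs(txs [][]byte, maxTxBytes int64) [][]byte {
+	var total int64
+	kept := make([][]byte, 0, len(txs))
+	for _, tx := range txs {
+		if total+int64(len(tx)) > maxTxBytes {
+			break
+		}
+		total += int64(len(tx))
+		kept = append(kept, tx)
+	}
+	return kept
+}
+```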
-The application has to be careful to return a list of transactions in `ResponsePrepareProposal`
-whose size is less than or equal to `RequestPrepareProposal.MaxTxBytes`.
+The application has to be careful to return a list of transactions in `PrepareProposalResponse`
+whose size is less than or equal to `PrepareProposalRequest.MaxTxBytes`.

 Blocks should additionally be limited by the amount of "gas" consumed by the
 transactions in the block, though this is not yet implemented.

@@ -127,5 +128,5 @@ implemented to mitigate spam attacks.

## Validator

-Validators from genesis file and `ResponseEndBlock` must have pubkeys of type ∈
+Validators from the genesis file and `EndBlockResponse` must have pubkeys of type ∈
 `ConsensusParams.Validator.PubKeyTypes`.
diff --git a/spec/ivy-proofs/accountable_safety_2.ivy b/spec/ivy-proofs/accountable_safety_2.ivy
index 7fb928909a3..599933ccc13 100644
--- a/spec/ivy-proofs/accountable_safety_2.ivy
+++ b/spec/ivy-proofs/accountable_safety_2.ivy
@@ -6,7 +6,7 @@ include abstract_tendermint

 # Here we prove the second accountability property: no well-behaved node is
 # ever observed to violate the accountability properties.
-# The proof is done in two steps: first we prove the the abstract specification
+# The proof is done in two steps: first we prove the abstract specification
 # satisfies the property, and then we show by refinement that this property
 # also holds in the concrete specification.
diff --git a/spec/ivy-proofs/classic_safety.ivy b/spec/ivy-proofs/classic_safety.ivy
index b422a2c175f..f5315733104 100644
--- a/spec/ivy-proofs/classic_safety.ivy
+++ b/spec/ivy-proofs/classic_safety.ivy
@@ -10,7 +10,7 @@ include abstract_tendermint

 # Here we prove the classic safety property: assuming that every two quorums
 # have a well-behaved node in common, no two well-behaved nodes ever disagree.
-# The proof is done in two steps: first we prove the the abstract specification
+# The proof is done in two steps: first we prove the abstract specification
 # satisfies the property, and then we show by refinement that this property
 # also holds in the concrete specification.
diff --git a/spec/legacy/reactors/README.md b/spec/legacy/reactors/README.md
new file mode 100644
index 00000000000..2dba223c652
--- /dev/null
+++ b/spec/legacy/reactors/README.md
@@ -0,0 +1,15 @@
+---
+cards: true
+---
+
+# Reactors
+
+> NOTE: This is legacy documentation with outdated information.
+> Documents will be removed as they get processed and decommissioned or updated in the new documentation structure.
+
+* [BlockSync](./block_sync/README.md)
+* [Consensus](./consensus/README.md)
+* [Evidence](./evidence/README.md)
+* [Mempool](./mempool/README.md)
+* [PEX](./pex/README.md)
+* [StateSync](./state_sync/README.md)
diff --git a/spec/legacy/reactors/block_sync/README.md b/spec/legacy/reactors/block_sync/README.md
new file mode 100644
index 00000000000..efeff03b2a3
--- /dev/null
+++ b/spec/legacy/reactors/block_sync/README.md
@@ -0,0 +1,310 @@
+# BlockSync Reactor
+
+> NOTE: This is legacy documentation with outdated information.
+
+The BlockSync Reactor's high-level responsibility is to enable peers who are
+far behind the current state of the consensus to quickly catch up by downloading
+many blocks in parallel, verifying their commits, and executing them against the
+ABCI application.
+
+CometBFT full nodes run the BlockSync Reactor as a service to provide blocks
+to new nodes.
+New nodes run the BlockSync Reactor in "block_sync" mode,
+where they actively make requests for more blocks until they sync up.
+Once caught up, "block_sync" mode is disabled and the node switches to
+using (and turns on) the Consensus Reactor.
+
+## Message Types
+
+```go
+const (
+	msgTypeBlockRequest    = byte(0x10)
+	msgTypeBlockResponse   = byte(0x11)
+	msgTypeNoBlockResponse = byte(0x12)
+	msgTypeStatusResponse  = byte(0x20)
+	msgTypeStatusRequest   = byte(0x21)
+)
+```
+
+```go
+type bcBlockRequestMessage struct {
+	Height int64
+}
+
+type bcNoBlockResponseMessage struct {
+	Height int64
+}
+
+type bcBlockResponseMessage struct {
+	Block Block
+}
+
+type bcStatusRequestMessage struct {
+	Height int64
+}
+
+type bcStatusResponseMessage struct {
+	Height int64
+}
+```
+
+## Architecture and algorithm
+
+The BlockSync reactor is organised as a set of concurrent tasks:
+
+- Receive routine of BlockSync Reactor
+- Task for creating Requesters
+- Set of Requester tasks
+- Controller task
+
+![BlockSync Reactor Architecture Diagram](img/bc-reactor.png)
+
+### Data structures
+
+These are the core data structures necessary to provide the BlockSync Reactor logic.
+
+The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a peer with ID equal to `peerID`.
+
+```go
+type Requester struct {
+	mtx    Mutex
+	block  Block
+	height int64
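+	// peerID tracks the peer currently assigned to fetch the block at this height,
+	// and redoChannel receives that peer's ID when the request must be reassigned.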
+	peerID p2p.ID
+	redoChannel chan p2p.ID // redo may be sent multiple times; peerID is used to identify a repeat
+}
+```
+
+Pool is a core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc.
+
+```go
+type Pool struct {
+	mtx             Mutex
+	requesters      map[int64]*Requester
+	height          int64
+	peers           map[p2p.ID]*Peer
+	maxPeerHeight   int64
+	numPending      int32
+	store           BlockStore
+	requestsChannel chan<- BlockRequest
+	errorsChannel   chan<- peerError
+}
+```
+
+The Peer data structure stores, for each peer, the current `height`, the number of pending requests sent to the peer (`numPending`), etc.
+
+```go
+type Peer struct {
+	id         p2p.ID
+	height     int64
+	numPending int32
+	timeout    *time.Timer
+	didTimeout bool
+}
+```
+
+BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`).
+
+```go
+type BlockRequest struct {
+	Height int64
+	PeerID p2p.ID
+}
+```
+
+### Receive routine of BlockSync Reactor
+
+It is executed upon message reception on the BlockchainChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a receive routine of the BlockSync Reactor) executed for each peer. Note that "try to send" does not block (it returns immediately) if the outgoing buffer is full.
+
+```go
+handleMsg(pool, m):
+    upon receiving bcBlockRequestMessage m from peer p:
+      block = load block for height m.Height from pool.store
+      if block != nil then
+        try to send BlockResponseMessage(block) to p
+      else
+        try to send bcNoBlockResponseMessage(m.Height) to p
+
+    upon receiving bcBlockResponseMessage m from peer p:
+      pool.mtx.Lock()
+      requester = pool.requesters[m.Height]
+      if requester == nil then
+        error("peer sent us a block we didn't expect")
+        continue
+
+      if requester.block == nil and requester.peerID == p then
+        requester.block = m.Block
+        pool.numPending -= 1 // atomic decrement
+        peer = pool.peers[p]
+        if peer != nil then
+          peer.numPending--
+          if peer.numPending == 0 then
+            peer.timeout.Stop()
+            // NOTE: we don't send Quit signal to the corresponding requester task!
+          else
+            trigger peer timeout to expire after peerTimeout
+      pool.mtx.Unlock()
+
+    upon receiving bcStatusRequestMessage m from peer p:
+      try to send bcStatusResponseMessage(pool.store.Height)
+
+    upon receiving bcStatusResponseMessage m from peer p:
+      pool.mtx.Lock()
+      peer = pool.peers[p]
+      if peer != nil then
+        peer.height = m.Height
+      else
+        peer = create new Peer data structure with id = p and height = m.Height
+        pool.peers[p] = peer
+
+      if m.Height > pool.maxPeerHeight then
+        pool.maxPeerHeight = m.Height
+      pool.mtx.Unlock()
+
+onTimeout(p):
+  send error message to pool error channel
+  peer = pool.peers[p]
+  peer.didTimeout = true
+```
+
+### Requester tasks
+
+A Requester task is responsible for fetching a single block at position `height`.
+
+```go
+fetchBlock(height, pool):
+  while true do {
+    peerID = nil
+    block = nil
+    peer = pickAvailablePeer(height)
+    peerID = peer.id
+
+    enqueue BlockRequest(height, peerID) to pool.requestsChannel
+    redo = false
+    while !redo do
+      select {
+        upon receiving Quit message do
+          return
+        upon receiving redo message with id on redoChannel do
+          if peerID == id {
+            mtx.Lock()
+            pool.numPending++
+            redo = true
+            mtx.Unlock()
+          }
+      }
+  }
+
+pickAvailablePeer(height):
+  selectedPeer = nil
+  while selectedPeer == nil do
+    pool.mtx.Lock()
+    for each peer in pool.peers do
+      if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
+        peer.numPending++
+        selectedPeer = peer
+        break
+    pool.mtx.Unlock()
+
+    if selectedPeer == nil then
+      sleep requestIntervalMS
+
+  return selectedPeer
+```
+
+### Task for creating Requesters
+
+This task is responsible for continuously creating and starting Requester tasks.
+
+```go
+createRequesters(pool):
+  while true do
+    if !pool.isRunning then break
+    if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
+      pool.mtx.Lock()
+      nextHeight = pool.height + size(pool.requesters)
+      requester = create new requester for height nextHeight
+      pool.requesters[nextHeight] = requester
+      pool.numPending += 1 // atomic increment
+      start requester task
+      pool.mtx.Unlock()
+    else
+      sleep requestIntervalMS
+      pool.mtx.Lock()
+      for each peer in pool.peers do
+        if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
+          send error on pool error channel
+          peer.didTimeout = true
+        if peer.didTimeout then
+          for each requester in pool.requesters do
+            if requester.getPeerID() == peer.id then
+              enqueue msg on requester's redoChannel
+          delete(pool.peers, peer.id)
+      pool.mtx.Unlock()
+```
+
+### Main blockchain reactor controller task
+
+```go
+main(pool):
+  create trySyncTicker with interval trySyncIntervalMS
+  create statusUpdateTicker with interval statusUpdateIntervalSeconds
+  create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
+
+  while true do
+    select {
+      upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
+        try to send bcBlockRequestMessage(Height) to Peer
+
+      upon receiving error(peer) on errorsChannel:
+        stop peer for error
+
+      upon receiving message on statusUpdateTickerChannel:
+        broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine
+
+      upon receiving message on switchToConsensusTickerChannel:
+        pool.mtx.Lock()
+        receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
+        ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
+        haveSomePeers = size of pool.peers > 0
+        pool.mtx.Unlock()
+        if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
+          switch to consensus mode
+
+      upon receiving message on trySyncTickerChannel:
+        for i = 0; i < 10; i++ do
+          pool.mtx.Lock()
+          firstBlock = pool.requesters[pool.height].block
+          secondBlock = pool.requesters[pool.height+1].block
+          if firstBlock == nil or secondBlock == nil then continue
+          pool.mtx.Unlock()
+          verify firstBlock using LastCommit from secondBlock
+          if verification failed
+            pool.mtx.Lock()
+            peerID = pool.requesters[pool.height].peerID
+            redoRequestsForPeer(pool, peerID)
+            delete(pool.peers, peerID)
+            stop peer peerID for error
+            pool.mtx.Unlock()
+          else
+            delete(pool.requesters, pool.height)
+            save firstBlock to store
+            pool.height++
+            execute firstBlock
+    }
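+
+// redoRequestsForPeer re-enqueues, via their redoChannel, all requests currently
+// assigned to the given (faulty or disconnected) peer: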
+redoRequestsForPeer(pool, peerID):
+  for each requester in pool.requesters do
+    if requester.getPeerID() == peerID
+      enqueue msg on redoChannel for requester
+```
+
+## Channels
+
+Defines `maxMsgSize` for the maximum size of incoming messages,
+`SendQueueCapacity` and `RecvBufferCapacity` for maximum sending and
+receiving buffers respectively. These are supposed to prevent amplification
+attacks by setting an upper limit on how much data we can receive & send to
+a peer.
+
+Sending incorrectly encoded data will result in stopping the peer.
diff --git a/spec/legacy/reactors/block_sync/img/bc-reactor-routines.png b/spec/legacy/reactors/block_sync/img/bc-reactor-routines.png
new file mode 100644
index 00000000000..3f574a79b1a
Binary files /dev/null and b/spec/legacy/reactors/block_sync/img/bc-reactor-routines.png differ
diff --git a/spec/legacy/reactors/block_sync/img/bc-reactor.png b/spec/legacy/reactors/block_sync/img/bc-reactor.png
new file mode 100644
index 00000000000..f7fe0f8193d
Binary files /dev/null and b/spec/legacy/reactors/block_sync/img/bc-reactor.png differ
diff --git a/spec/legacy/reactors/block_sync/impl.md b/spec/legacy/reactors/block_sync/impl.md
new file mode 100644
index 00000000000..31bdf14ee5a
--- /dev/null
+++ b/spec/legacy/reactors/block_sync/impl.md
@@ -0,0 +1,43 @@
+# BlockSync Reactor
+
+## Reactor
+
+- coordinates the pool for syncing
+- coordinates the store for persistence
+- coordinates the playing of blocks towards the app using a sm.BlockExecutor
+- handles switching between fastsync and consensus
+- it is a p2p.BaseReactor
+- starts the pool.Start() and its poolRoutine()
+- registers all the concrete types and interfaces for serialisation
+
+### poolRoutine
+
+- listens to these channels:
+  - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends
+    a &bcBlockRequestMessage for a specific height
+  - pool signals timeout of a specific peer by posting to timeoutsCh
+  - switchToConsensusTicker to periodically try and switch to consensus
+  - trySyncTicker to periodically check if we have fallen behind and then catch-up sync
+    - if there aren't any new blocks available on the pool it skips syncing
+- tries to sync the app by taking downloaded blocks from the pool, giving them to the app and storing
+  them on disk
+- implements Receive which is called by the switch/peer
+  - calls AddBlock on the pool when it receives a new block from a peer
+
+## Block Pool
+
+- responsible for downloading blocks from peers
+- makeRequestersRoutine()
+  - removes timeout peers
+  - starts new requesters by calling makeNextRequester()
+- requestRoutine():
+  - picks a peer and sends the request, then blocks until:
+    - pool is stopped by listening to pool.Quit
+    - requester is stopped by listening to Quit
+    - request is redone
+    - we receive a block
+  - gotBlockCh is strange
+
+## Go Routines in BlockSync Reactor
+
+![Go Routines Diagram](img/bc-reactor-routines.png)
diff --git a/spec/legacy/reactors/consensus/README.md b/spec/legacy/reactors/consensus/README.md
new file mode 100644
index 00000000000..83550969c0d
--- /dev/null
+++ b/spec/legacy/reactors/consensus/README.md
@@ -0,0 +1,368 @@
+# Consensus Reactor
+
+> NOTE: This is legacy documentation with outdated information.
+
+Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the Tendermint consensus internal state machine.
+When Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(that is used extensively in gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
+for decoding messages received from a peer and for adequate processing of the message depending on its type and content.
+The processing normally consists of updating the known peer state and, for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of those separate units of execution
+that are part of the Consensus Reactor.
+
+## ConsensusState service
+
+Consensus State handles the execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
+and upon reaching agreement, commits blocks to the chain and executes them against the application.
+The internal state machine receives input from peers, the internal validator and from a timer.
+
+Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
+Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
+by the Receive Routine.
+
+### Receive Routine of the ConsensusState service
+
+The Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
+It is the only routine that updates RoundState, which contains the internal consensus state.
+Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
+It receives messages from peers, internal validators and from the Timeout Ticker,
+and invokes the corresponding handlers, potentially updating the RoundState.
+The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
+discussed in a separate document. For understanding this document
+it is sufficient to know that the Receive Routine manages and updates the RoundState data structure that is
+then extensively used by the gossip routines to determine what information should be sent to peer processes.
+
+## Round State
+
+RoundState defines the internal consensus state. It contains the height, round, round step, current validator set,
+a proposal and proposal block for the current round, the locked round and block (if any block is locked), the set of
+received votes, the last commit and the last validator set.
+
+```go
+type RoundState struct {
+	Height             int64
+	Round              int
+	Step               RoundStepType
+	Validators         ValidatorSet
+	Proposal           Proposal
+	ProposalBlock      Block
+	ProposalBlockParts PartSet
+	LockedRound        int
+	LockedBlock        Block
+	LockedBlockParts   PartSet
+	Votes              HeightVoteSet
+	LastCommit         VoteSet
+	LastValidators     ValidatorSet
+}
+```
+
+Internally, consensus will run as a state machine with the following states:
+
+- RoundStepNewHeight
+- RoundStepNewRound
+- RoundStepPropose
+- RoundStepProposeWait
+- RoundStepPrevote
+- RoundStepPrevoteWait
+- RoundStepPrecommit
+- RoundStepPrecommitWait
+- RoundStepCommit
+
+## Peer Round State
+
+Peer round state contains the known state of a peer. It is updated by the Receive routine of the
+Consensus Reactor and by the gossip routines upon sending a message to the peer.
+
+```go
+type PeerRoundState struct {
+    Height                   int64         // Height peer is at
+    Round                    int           // Round peer is at, -1 if unknown.
+    Step                     RoundStepType // Step peer is at
+    Proposal                 bool          // True if peer has proposal for this round
+    ProposalBlockPartsHeader PartSetHeader
+    ProposalBlockParts       BitArray
+    ProposalPOLRound         int      // Proposal's POL round. -1 if none.
+    ProposalPOL              BitArray // nil until ProposalPOLMessage received.
+    Prevotes                 BitArray // All votes peer has for this round
+    Precommits               BitArray // All precommits peer has for this round
+    LastCommitRound          int      // Round of commit for last height. -1 if none.
+    LastCommit               BitArray // All commit precommits of commit for last height.
+    CatchupCommitRound       int      // Round that we have commit for. Not necessarily unique. -1 if none.
+    CatchupCommit            BitArray // All commit precommits peer has for this height & CatchupCommitRound
+}
+```
+
+## Receive method of Consensus reactor
+
+The entry point of the Consensus reactor is the receive method. When a message is
+received from a peer p, normally the peer round state is updated
+correspondingly, and some messages are passed on for further processing, for
+example to the ConsensusState service. We now specify the processing of messages in
+the receive method of the Consensus reactor for each message type. In the following
+message handlers, `rs` and `prs` denote `RoundState` and `PeerRoundState`,
+respectively.
+
+### NewRoundStepMessage handler
+
+```go
+handleMessage(msg):
+    if msg is from smaller height/round/step then return
+    // Just remember these values.
+    prsHeight = prs.Height
+    prsRound = prs.Round
+    prsCatchupCommitRound = prs.CatchupCommitRound
+    prsCatchupCommit = prs.CatchupCommit
+
+    Update prs with values from msg
+    if prs.Height or prs.Round has been updated then
+        reset Proposal related fields of the peer state
+    if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
+        prs.Precommits = prsCatchupCommit
+    if prs.Height has been updated then
+        if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
+            prs.LastCommitRound = msg.LastCommitRound
+            prs.LastCommit = prs.Precommits
+        else
+            prs.LastCommitRound = msg.LastCommitRound
+            prs.LastCommit = nil
+        Reset prs.CatchupCommitRound and prs.CatchupCommit
+```
+
+### NewValidBlockMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height then return
+
+    if prs.Round != msg.Round && !msg.IsCommit then return
+
+    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+    prs.ProposalBlockParts = msg.BlockParts
+```
+
+The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to
+protect the node against DoS attacks.
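+
+For illustration only, here is a minimal Go sketch of this bound check; the helper name is
+hypothetical and the constant simply mirrors `types.MaxBlockPartsCount`:
+
+```go
+package main
+
+import "fmt"
+
+// maxBlockPartsCount mirrors types.MaxBlockPartsCount.
+const maxBlockPartsCount = 1601
+
+// validateBlockPartsCount is a hypothetical helper: a message advertising more
+// block parts than the cap is rejected before any per-part state (such as a
+// bit array) is allocated for it.
+func validateBlockPartsCount(total int) error {
+	if total <= 0 || total > maxBlockPartsCount {
+		return fmt.Errorf("invalid number of block parts: %d (max %d)", total, maxBlockPartsCount)
+	}
+	return nil
+}
+
+func main() {
+	fmt.Println(validateBlockPartsCount(2000)) // exceeds the cap: returns an error
+}
+```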
+
+### HasVoteMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height == msg.Height then
+        prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
+```
+
+### VoteSetMaj23Message handler
+
+```go
+handleMessage(msg):
+    if prs.Height == msg.Height then
+        Record in rs that a peer claims to have a ⅔ majority for msg.BlockID
+        Send VoteSetBitsMessage showing the votes the node has for that BlockID
+```
+
+### ProposalMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
+    prs.Proposal = true
+    if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler
+        prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+    prs.ProposalPOLRound = msg.POLRound
+    prs.ProposalPOL = nil
+    Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### ProposalPOLMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
+    prs.ProposalPOL = msg.ProposalPOL
+```
+
+The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
+node against DoS attacks.
+
+### BlockPartMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height || prs.Round != msg.Round then return
+    Record in prs that the peer has block part msg.Part.Index
+    Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteMessage handler
+
+```go
+handleMessage(msg):
+    Record in prs that the peer knows the vote with index msg.vote.ValidatorIndex for the given height and round
+    Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteSetBitsMessage handler
+
+```go
+handleMessage(msg):
+    Update prs with the bit-array of votes the peer claims to have for msg.BlockID
+```
+
+The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
+node against DoS attacks.
+
+## Gossip Data Routine
+
+It is used to send `BlockPartMessage`, `ProposalMessage` and
+`ProposalPOLMessage` to the peer on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
+
+```go
+1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
+      Part = pick a random proposal block part the peer does not have
+      Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
+      if send returns true, record that the peer knows the corresponding block Part
+      Continue
+
+1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
+      help the peer catch up using the gossipDataForCatchup function
+      Continue
+
+1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
+      Sleep PeerGossipSleepDuration
+      Continue
+
+// at this point rs.Height == prs.Height and rs.Round == prs.Round
+1d) if (rs.Proposal != nil and !prs.Proposal) then
+      Send ProposalMessage(rs.Proposal) to the peer
+      if send returns true, record that the peer knows Proposal
+      if 0 <= rs.Proposal.POLRound then
+        polRound = rs.Proposal.POLRound
+        prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
+        Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
+      Continue
+
+2) Sleep PeerGossipSleepDuration
+```
+
+### Gossip Data For Catchup
+
+This function is responsible for helping a peer catch up if it is at a lower height (prs.Height < rs.Height).
+The function executes the following logic:
+
+```go
+ if peer does not have all block parts for prs.ProposalBlockParts then
+   blockMeta = Load Block Metadata for height prs.Height from blockStore
+   if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
+     Sleep PeerGossipSleepDuration
+     return
+   Part = pick a random proposal block part the peer does not have
+   Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
+   if send returns true, record that the peer knows the corresponding block Part
+   return
+ else Sleep PeerGossipSleepDuration
+```
+
+## Gossip Votes Routine
+
+It is used to send the following message on the VoteChannel: `VoteMessage`.
+The gossip votes routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
+
+```go
+1a) if rs.Height == prs.Height then
+      if prs.Step == RoundStepNewHeight then
+        vote = random vote from rs.LastCommit the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
+        Prevotes = rs.Votes.Prevotes(prs.Round)
+        vote = random vote from Prevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
+        Precommits = rs.Votes.Precommits(prs.Round)
+        vote = random vote from Precommits the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.ProposalPOLRound != -1 then
+        PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+        vote = random vote from PolPrevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
+      vote = random vote from rs.LastCommit the peer does not have
+      Send VoteMessage(vote) to the peer
+      if send returns true, continue
+
+1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
+      Commit = get commit from BlockStore for prs.Height
+      vote = random vote from Commit the peer does not have
+      Send VoteMessage(vote) to the peer
+      if send returns true, continue
+
+2) Sleep PeerGossipSleepDuration
+```
+
+## QueryMaj23Routine
+
+It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
+BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
+(`prs`). The routine repeats forever the logic shown below.
+
+```go
+1a) if rs.Height == prs.Height then
+      Prevotes = rs.Votes.Prevotes(prs.Round)
+      if there is a ⅔ majority for some blockId in Prevotes then
+        m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1b) if rs.Height == prs.Height then
+      Precommits = rs.Votes.Precommits(prs.Round)
+      if there is a ⅔ majority for some blockId in Precommits then
+        m = VoteSetMaj23Message(prs.Height, prs.Round, Precommit, blockId)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
+      Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+      if there is a ⅔ majority for some blockId in Prevotes then
+        m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockId)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
+    prs.Height <= blockStore.Height() then
+      Commit = LoadCommit(prs.Height)
+      m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.BlockID)
+      Send m to peer
+      Sleep PeerQueryMaj23SleepDuration
+
+2) Sleep PeerQueryMaj23SleepDuration
+```
+
+## Broadcast routine
+
+The Broadcast routine subscribes to an internal event bus to receive new round steps and vote messages,
+and broadcasts messages to peers upon receiving those events.
+It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon a new round state event. Note that
+broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
+Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
+
+## Channels
+
+Defines 4 channels: state, data, vote and vote_set_bits. Each channel
+has `SendQueueCapacity` and `RecvBufferCapacity`, and
+`RecvMessageCapacity` is set to `maxMsgSize`.
+
+Sending incorrectly encoded data will result in stopping the peer.
diff --git a/spec/legacy/reactors/consensus/consensus.md b/spec/legacy/reactors/consensus/consensus.md
new file mode 100644
index 00000000000..60b33bbd667
--- /dev/null
+++ b/spec/legacy/reactors/consensus/consensus.md
@@ -0,0 +1,187 @@
+# Tendermint Consensus Reactor
+
+Tendermint Consensus is a distributed protocol executed by validator processes to agree on
+the next block to be added to the CometBFT blockchain. The protocol proceeds in rounds, where
+each round is an attempt to reach agreement on the next block. A round starts with a dedicated
+process (called the proposer) suggesting to the other processes what the next block should be, with
+the `ProposalMessage`.
+The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
+messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the
+next block should be; a validator might vote with a `VoteMessage` for a different block. If, in some
+round, enough processes vote for the same block, then this block is committed and later
+added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
+validator. The internals of the protocol and how it ensures safety and liveness properties are
+explained in a forthcoming document.
+
+For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
+block, as blocks are large; i.e., they don't embed the block inside `Proposal` and
+`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
+[Blockchain](https://github.com/cometbft/cometbft/blob/master/spec/core/data_structures.md#blockid) section)
+that uniquely identifies each block. The block itself is
+disseminated to validator processes using a peer-to-peer gossiping protocol. It starts by having the
+proposer first split a block into a number of block parts, which are then gossiped between
+processes using `BlockPartMessage`.
+
+Validators in CometBFT communicate via a peer-to-peer gossiping protocol. Each validator is connected
+only to a subset of processes called peers. Through the gossiping protocol, a validator sends its peers
+all the information needed (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can
+reach agreement on some block, and also obtain the content of the chosen block (block parts). As
+part of the gossiping protocol, processes also send auxiliary messages that inform peers about the
+executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and
+also messages that inform peers what votes the process has seen (`HasVoteMessage`,
+`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping
+protocol to determine what messages a process should send to its peers.
+
+We now describe the content of each message exchanged during the Tendermint consensus protocol.
+
+## ProposalMessage
+
+ProposalMessage is sent when a new block is proposed. It is a suggestion of what the
+next block in the blockchain should be.
+
+```go
+type ProposalMessage struct {
+    Proposal Proposal
+}
+```
+
+### Proposal
+
+Proposal contains the height and round for which this proposal is made, the BlockID as a unique identifier
+of the proposed block, a timestamp, and the POLRound (a so-called Proof-of-Lock (POL) round) that is needed for
+termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that
+is locked in POLRound. The message is signed by the validator private key.
+
+```go
+type Proposal struct {
+    Height    int64
+    Round     int
+    POLRound  int
+    BlockID   BlockID
+    Timestamp Time
+    Signature Signature
+}
+```
+
+## VoteMessage
+
+VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the
+current round). Vote is defined in the
+[Blockchain](https://github.com/cometbft/cometbft/blob/master/spec/core/data_structures.md#vote)
+section and contains the validator's
+information (validator address and index), the height and round for which the vote is sent, the vote type,
+the blockID if the process votes for some block (`nil` otherwise) and a timestamp when the vote is sent. The
+message is signed by the validator private key.
+
+```go
+type VoteMessage struct {
+    Vote Vote
+}
+```
+
+## BlockPartMessage
+
+BlockPartMessage is sent when gossiping a piece of the proposed block. It contains the height, round
+and the block part.
+
+```go
+type BlockPartMessage struct {
+    Height int64
+    Round  int
+    Part   Part
+}
+```
+
+## NewRoundStepMessage
+
+NewRoundStepMessage is sent for every step transition during the core consensus algorithm execution.
+It is used in the gossip part of the Tendermint protocol to inform peers about the current
+height/round/step a process is in.
+
+```go
+type NewRoundStepMessage struct {
+    Height                int64
+    Round                 int
+    Step                  RoundStepType
+    SecondsSinceStartTime int
+    LastCommitRound       int
+}
+```
+
+## NewValidBlockMessage
+
+NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
+i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
+It contains the height and round in which the valid block is observed, the block parts header that
+describes the valid block and is used to obtain all
+block parts, and a bit array of the block parts the process currently has, so its peers know which
+parts it is missing and can send them.
+In case the block is also committed, the IsCommit flag is set to true.
+
+```go
+type NewValidBlockMessage struct {
+    Height           int64
+    Round            int
+    BlockPartsHeader PartSetHeader
+    BlockParts       BitArray
+    IsCommit         bool
+}
+```
+
+## ProposalPOLMessage
+
+ProposalPOLMessage is sent when a previous block is re-proposed.
+It is used to inform peers in what round the process learned of this block (ProposalPOLRound),
+and what prevotes for the re-proposed block the process has.
+
+```go
+type ProposalPOLMessage struct {
+    Height           int64
+    ProposalPOLRound int
+    ProposalPOL      BitArray
+}
+```
+
+## HasVoteMessage
+
+HasVoteMessage is sent to indicate that a particular vote has been received. It contains the height,
+round, vote type and the index of the validator that is the originator of the corresponding vote.
+
+```go
+type HasVoteMessage struct {
+    Height int64
+    Round  int
+    Type   byte
+    Index  int
+}
+```
+
+## VoteSetMaj23Message
+
+VoteSetMaj23Message is sent to indicate that a process has seen +2/3 votes for some BlockID.
+It contains the height, round, vote type and the BlockID.
+
+```go
+type VoteSetMaj23Message struct {
+    Height  int64
+    Round   int
+    Type    byte
+    BlockID BlockID
+}
+```
+
+## VoteSetBitsMessage
+
+VoteSetBitsMessage is sent to communicate the bit-array of votes a process has seen for a given
+BlockID. It contains the height, round, vote type, BlockID and a bit array of
+the votes the process has.
+
+```go
+type VoteSetBitsMessage struct {
+    Height  int64
+    Round   int
+    Type    byte
+    BlockID BlockID
+    Votes   BitArray
+}
+```
diff --git a/spec/legacy/reactors/evidence/README.md b/spec/legacy/reactors/evidence/README.md
new file mode 100644
index 00000000000..ba5e7035210
--- /dev/null
+++ b/spec/legacy/reactors/evidence/README.md
@@ -0,0 +1,10 @@
+# Evidence Reactor
+
+> NOTE: This is legacy documentation with outdated information.
+
+## Channels
+
+Sending invalid evidence will result in stopping the peer.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/spec/legacy/reactors/mempool/README.md b/spec/legacy/reactors/mempool/README.md
new file mode 100644
index 00000000000..a9dac2ea75b
--- /dev/null
+++ b/spec/legacy/reactors/mempool/README.md
@@ -0,0 +1,26 @@
+# Mempool Reactor
+
+> NOTE: This is legacy documentation with outdated information.
+
+## Channels
+
+The mempool maintains a cache of the last 10000 transactions to prevent
+replaying old transactions (plus transactions coming from other
+validators, who are continually exchanging transactions). Read "Replay
+Protection" for details.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
+
+`maxMsgSize` equals `MaxBatchBytes` (10MB) + 4 (proto overhead).
+`MaxBatchBytes` is a mempool config parameter, defined locally. The reactor
+sends transactions to the connected peers in batches. The maximum size of one
+batch is `MaxBatchBytes`.
+
+The mempool will not send a tx back to the peer(s) from which it received it.
+
+The reactor assigns a `uint16` number to each peer and maintains a map from
+p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders
+(`[]uint16`). The list is updated every time the mempool receives a transaction it
+has already seen. `uint16` assumes that a node will never have over 65535 active
+peers (0 is reserved for an unknown source, e.g. RPC).
diff --git a/spec/legacy/reactors/mempool/concurrency.md b/spec/legacy/reactors/mempool/concurrency.md
new file mode 100644
index 00000000000..a6870db9bb5
--- /dev/null
+++ b/spec/legacy/reactors/mempool/concurrency.md
@@ -0,0 +1,8 @@
+# Mempool Concurrency
+
+Look at the concurrency model this uses...
+
+- Receiving CheckTx
+- Broadcasting new tx
+- Interfaces with consensus engine, reap/update while checking
+- Calling the ABCI app (ordering. callbacks. how proxy works alongside the blockchain proxy which actually writes blocks)
diff --git a/spec/legacy/reactors/mempool/functionality.md b/spec/legacy/reactors/mempool/functionality.md
new file mode 100644
index 00000000000..416ffad97ee
--- /dev/null
+++ b/spec/legacy/reactors/mempool/functionality.md
@@ -0,0 +1,43 @@
+# Mempool Functionality
+
+The mempool maintains a list of potentially valid transactions,
+both to broadcast to other nodes and to provide to the
+consensus reactor when the node is selected as block proposer.
+
+There are two sides to the mempool state:
+
+- External: get, check, and broadcast new transactions
+- Internal: return valid transactions, update the list after a block commit
+
+## External functionality
+
+External functionality is exposed via network interfaces
+to potentially untrusted actors.
+
+- CheckTx - triggered via RPC or P2P
+- Broadcast - gossip messages after a successful check
+
+## Internal functionality
+
+Internal functionality is exposed via method calls to other
+code compiled into the tendermint binary.
+
+- ReapMaxBytesMaxGas - get txs to propose in the next block. Guarantees that the
+  size of the txs is less than MaxBytes, and gas is less than MaxGas
+- Update - remove txs that were included in the last block
+- ABCI.CheckTx - call the ABCI app to validate the tx
+
+What does it provide the consensus reactor?
+What guarantees does it need from the ABCI app?
+(talk about interleaving processes in concurrency)
+
+## Optimizations
+
+The implementation within this library also implements a tx cache.
+This is so that signatures don't have to be reverified if the tx has
+already been seen before.
+However, we only store valid txs in the cache, not invalid ones.
+This is because invalid txs could become valid later.
+Txs that are included in a block aren't removed from the cache,
+as they may still be received over the p2p network.
+These txs are stored in the cache by their hash, to mitigate memory concerns.
diff --git a/spec/legacy/reactors/mempool/messages.md b/spec/legacy/reactors/mempool/messages.md
new file mode 100644
index 00000000000..a71caf59011
--- /dev/null
+++ b/spec/legacy/reactors/mempool/messages.md
@@ -0,0 +1,52 @@
+# Mempool Messages
+
+## P2P Messages
+
+There is currently only one message that the Mempool broadcasts and receives over
+the p2p gossip network (via the reactor): `TxsMessage`
+
+```go
+// TxsMessage is a MempoolMessage containing a list of transactions.
+type TxsMessage struct {
+    Txs []types.Tx
+}
+```
+
+## RPC Messages
+
+The mempool exposes `CheckTx([]byte)` over the RPC interface.
+
+Transactions can be posted via `broadcast_tx_commit`, `broadcast_tx_sync` or
+`broadcast_tx_async`. They all parse a message with one argument,
+`"tx": "HEX_ENCODED_BINARY"`, and differ only in how long they
+wait before returning (sync makes sure CheckTx passes, commit
+makes sure it was included in a signed block).
+
+Request (`POST http://gaia.zone:26657/`):
+
+```json
+{
+  "id": "",
+  "jsonrpc": "2.0",
+  "method": "broadcast_tx_sync",
+  "params": {
+    "tx": "F012A4BC68..."
+  }
+}
+```
+
+Response:
+
+```json
+{
+  "error": "",
+  "result": {
+    "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52",
+    "log": "",
+    "data": "",
+    "code": 0
+  },
+  "id": "",
+  "jsonrpc": "2.0"
+}
+```
diff --git a/spec/legacy/reactors/pex/README.md b/spec/legacy/reactors/pex/README.md
new file mode 100644
index 00000000000..857995fa778
--- /dev/null
+++ b/spec/legacy/reactors/pex/README.md
@@ -0,0 +1,14 @@
+# PEX Reactor
+
+> NOTE: This is legacy documentation with outdated information.
+
+## Channels
+
+Defines only `SendQueueCapacity`.
+
+Implements rate-limiting by enforcing minimal time between two consecutive
+`pexRequestMessage` requests. If the peer sends us addresses we did not ask for,
+it is stopped.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/spec/legacy/reactors/pex/pex.md b/spec/legacy/reactors/pex/pex.md
new file mode 100644
index 00000000000..5b8071cf639
--- /dev/null
+++ b/spec/legacy/reactors/pex/pex.md
@@ -0,0 +1,164 @@
+# Peer Strategy and Exchange
+
+Here we outline the design of the AddressBook
+and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected
+to good peers and to gossip peers to others.
+
+## Peer Types
+
+Certain peers are special in that they are specified by the user as `persistent`,
+which means we auto-redial them if the connection fails, or if we fail to dial
+them.
+Some peers can be marked as `private`, which means
+we will not put them in the address book or gossip them to others.
+
+All peers except private peers and peers coming from them are tracked using the
+address book.
+
+The rest of our peers are only distinguished by being either
+inbound (they dialed our public address) or outbound (we dialed them).
+
+## Discovery
+
+Peer discovery begins with a list of seeds.
+
+When we don't have enough peers, we
+
+1. ask existing peers
+2. dial seeds if we're not dialing anyone currently
+
+On startup, we will also immediately dial the given list of `persistent_peers`,
+and will attempt to maintain persistent connections with them. If the
+connections die, or we fail to dial, we will redial every 5s for a few minutes,
+then switch to an exponential backoff schedule, and after about a day of
+trying, stop dialing the peer. This is the behavior when `persistent_peers_max_dial_period`
+is configured to zero.
+
+If `persistent_peers_max_dial_period` is set greater than zero, the interval between dials
+to each persistent peer will not exceed `persistent_peers_max_dial_period` during exponential backoff.
+Therefore, `dial_period` = min(`persistent_peers_max_dial_period`, `exponential_backoff_dial_period`),
+and we keep trying regardless of `maxAttemptsToDial`.
+
+As long as we have less than `MaxNumOutboundPeers`, we periodically request
+additional peers from each of our own peers and try the seeds.
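+
+As a rough illustration, the following Go sketch models the dial schedule described above under
+stated assumptions (the function name and the number of fixed-interval retries are hypothetical,
+and the real implementation also adds jitter):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// nextDialDelay sketches the redial schedule: fixed 5s retries at first, then
+// exponential backoff, optionally capped by persistent_peers_max_dial_period.
+func nextDialDelay(attempt int, maxDialPeriod time.Duration) time.Duration {
+	const (
+		base       = 5 * time.Second
+		fixedTries = 36 // roughly "a few minutes" of fixed-interval retries (hypothetical)
+	)
+	if attempt < fixedTries {
+		return base
+	}
+	backoff := base
+	for i := 0; i < attempt-fixedTries && backoff < 24*time.Hour; i++ {
+		backoff *= 2 // exponential backoff
+	}
+	if maxDialPeriod > 0 && backoff > maxDialPeriod {
+		return maxDialPeriod // dial_period = min(persistent_peers_max_dial_period, backoff)
+	}
+	return backoff
+}
+
+func main() {
+	fmt.Println(nextDialDelay(10, 0))              // still in the fixed 5s phase
+	fmt.Println(nextDialDelay(60, 10*time.Minute)) // backoff capped at 10m
+}
+```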
+
+## Listening
+
+Peers listen on a configurable ListenAddr that they self-report in their
+NodeInfo during handshakes with other peers. Peers accept up to
+`MaxNumInboundPeers` incoming peers.
+
+## Address Book
+
+Peers are tracked via their ID (their PubKey.Address()).
+Peers are added to the address book from the PEX when they first connect to us or
+when we hear about them from other peers.
+
+The address book is arranged in sets of buckets, and distinguishes between
+vetted (old) and unvetted (new) peers. It keeps different sets of buckets for
+vetted and unvetted peers. Buckets provide randomization over peer selection.
+Peers are put in buckets according to their IP groups.
+
+An IP group can be a masked IP (e.g. `1.2.0.0` or `2602:100::`), `local` for
+local addresses, or `unroutable` for unroutable addresses. The mask that
+corresponds to a `/16` subnet is used for IPv4, and a `/32` subnet for IPv6.
+Each group has a limited number of buckets to prevent DoS attacks coming from
+that group (e.g. an attacker buying a `/16` block of IPs and launching a DoS
+attack).
+
+[highwayhash](https://arxiv.org/abs/1612.06257) is used as the hashing function
+when calculating a bucket.
+
+When placing a peer into a new bucket:
+
+```md
+hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % buckets_per_group) % num_new_buckets
+```
+
+When placing a peer into an old bucket:
+
+```md
+hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets
+```
+
+where `key` is a random 24-character HEX string, `group` is the IP group of the peer (e.g. `1.2.0.0`),
+`sourcegroup` is the IP group of the sender (the peer who sent us this address; e.g. `174.11.0.0`),
+and `addr` is the string representation of the peer's address (e.g. `174.11.10.2:26656`).
+
+A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and
+each instance of the peer can have a different IP:PORT.
+
+If we're trying to add a new peer but there's no space in its bucket, we'll
+remove the worst peer from that bucket to make room.
+
+## Vetting
+
+When a peer is first added, it is unvetted.
+Marking a peer as vetted is outside the scope of the `p2p` package.
+For CometBFT, a Peer becomes vetted once it has contributed sufficiently
+at the consensus layer; i.e., once it has sent us valid and not-yet-known
+votes and/or block parts for `NumBlocksForVetted` blocks.
+Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
+
+If a peer becomes vetted but there are already too many vetted peers,
+a randomly selected one of the vetted peers becomes unvetted.
+
+If a peer becomes unvetted (either a new peer, or one that was previously vetted),
+a randomly selected one of the unvetted peers is removed from the address book.
+
+More fine-grained tracking of peer behaviour can be done using
+a trust metric (see below), but it's best to start with something simple.
+
+## Select Peers to Dial
+
+When we need more peers, we pick addresses randomly from the addrbook with some
+configurable bias for unvetted peers. The bias should be lower when we have
+fewer peers and can increase as we obtain more, ensuring that our first peers
+are more trustworthy, but always giving us the chance to discover new good
+peers.
+
+We track the last time we dialed a peer and the number of unsuccessful attempts
+we've made. If too many attempts are made, we mark the peer as bad.
+
+Connection attempts are made with exponential backoff (plus jitter). Because
+the selection process happens every `ensurePeersPeriod`, we might not end up
+dialing a peer for much longer than the backoff duration.
+
+If we fail to connect to the peer after 16 tries (with exponential backoff), we
+remove it from the address book completely. For persistent peers, however, we keep
+trying to dial indefinitely, unless `persistent_peers_max_dial_period` is configured to zero.
+
+## Select Peers to Exchange
+
+When we’re asked for peers, we select them as follows:
+
+- select at most `maxGetSelection` peers
+- try to select at least `minGetSelection` peers - if we have fewer than that, select them all.
+- select a random, unbiased `getSelectionPercent` of the peers
+
+Send the selected peers. Note we select peers for sending without bias for vetted/unvetted.
+
+## Preventing Spam
+
+There are various cases where we decide a peer has misbehaved and we disconnect from them.
+When this happens, the peer is removed from the address book and blacklisted for
+some amount of time. We call this "Disconnect and Mark".
+Note that the bad behaviour may be detected outside the PEX reactor itself
+(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
+so it can remove and mark the peer.
+
+In the PEX, if a peer sends us an unsolicited list of peers,
+or if the peer sends a request too soon after another one,
+we Disconnect and MarkBad.
+
+## Trust Metric
+
+The quality of peers can be tracked in more fine-grained detail using a
+Proportional-Integral-Derivative (PID) controller that incorporates
+current, past, and rate-of-change data to inform peer quality.
+
+While a PID trust metric has been implemented, it remains for future work
+to use it in the PEX.
+
+See the [trustmetric](https://github.com/cometbft/cometbft/blob/master/docs/architecture/adr-006-trust-metric.md)
+and [trustmetric usage](https://github.com/cometbft/cometbft/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
+architecture docs for more details.
diff --git a/spec/legacy/reactors/state_sync/README.md b/spec/legacy/reactors/state_sync/README.md
new file mode 100644
index 00000000000..87e36b02e08
--- /dev/null
+++ b/spec/legacy/reactors/state_sync/README.md
@@ -0,0 +1,79 @@
+# State Sync Reactor
+
+> NOTE: This is legacy documentation with outdated information.
+
+State sync allows new nodes to rapidly bootstrap and join the network by discovering, fetching,
+and restoring state machine snapshots. For more information, see the [state sync ABCI section](../../../abci/apps.md#state-sync).
+
+The state sync reactor has two main responsibilities:
+
+* Serving state machine snapshots taken by the local ABCI application to new nodes joining the
+  network.
+
+* Discovering existing snapshots and fetching snapshot chunks for an empty local application
+  being bootstrapped.
+
+The state sync process for bootstrapping a new node is described in detail in the section linked
+above. While technically part of the reactor (see `statesync/syncer.go` and related components),
+this document will only cover the P2P reactor component.
+
+For details on the ABCI methods and data types, see the [ABCI documentation](../../../abci/abci.md).
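+
+As an orientation for the message flow specified below, here is a hedged Go sketch of the
+chunk-fetching retry behaviour (the `peer` handle is a hypothetical simplification, and the message
+structs mirror the ones defined in the next section):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+type chunkRequestMessage struct {
+	Height uint64
+	Format uint32
+	Index  uint32
+}
+
+type chunkResponseMessage struct {
+	Height  uint64
+	Format  uint32
+	Index   uint32
+	Chunk   []byte
+	Missing bool
+}
+
+// peer is a hypothetical stand-in for a p2p peer handle.
+type peer struct {
+	responses chan chunkResponseMessage
+}
+
+func (p *peer) send(req chunkRequestMessage) { /* network send elided */ }
+
+// fetchChunk requests a chunk from each peer in turn; on a timeout or a
+// Missing reply it re-requests the chunk from the next peer.
+func fetchChunk(peers []*peer, req chunkRequestMessage, timeout time.Duration) ([]byte, error) {
+	for _, p := range peers {
+		p.send(req)
+		select {
+		case resp := <-p.responses:
+			if resp.Missing {
+				continue // peer does not have the chunk; try another peer
+			}
+			return resp.Chunk, nil
+		case <-time.After(timeout):
+			continue // no reply in time; re-request from a different peer
+		}
+	}
+	return nil, errors.New("chunk unavailable from all known peers")
+}
+
+func main() {
+	_, err := fetchChunk(nil, chunkRequestMessage{Height: 1, Format: 1, Index: 0}, time.Second)
+	fmt.Println(err) // no peers connected yet: chunk unavailable
+}
+```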
+
+## State Sync P2P Protocol
+
+When a new node begins state syncing, it will ask all peers it encounters whether they have any
+available snapshots:
+
+```go
+type snapshotsRequestMessage struct{}
+```
+
+The receiver will query the local ABCI application via `ListSnapshots`, and send a message
+containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots:
+
+```go
+type snapshotsResponseMessage struct {
+    Height   uint64
+    Format   uint32
+    Chunks   uint32
+    Hash     []byte
+    Metadata []byte
+}
+```
+
+The node running state sync will offer these snapshots to the local ABCI application via
+`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot
+is accepted, the state syncer will request snapshot chunks from appropriate peers:
+
+```go
+type chunkRequestMessage struct {
+    Height uint64
+    Format uint32
+    Index  uint32
+}
+```
+
+The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`,
+and respond with it (limited to 16 MB):
+
+```go
+type chunkResponseMessage struct {
+    Height  uint64
+    Format  uint32
+    Index   uint32
+    Chunk   []byte
+    Missing bool
+}
+```
+
+Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty
+chunk is a valid (although unlikely) response.
+
+The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot
+is restored. If a chunk response is not returned within some time, it will be re-requested,
+possibly from a different peer.
+
+The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
+
+If no state sync is in progress (i.e. during normal operation), any unsolicited response messages
+are discarded.
diff --git a/spec/light-client/accountability/README.md b/spec/light-client/accountability/README.md
index 64b475bec71..75563df708c 100644
--- a/spec/light-client/accountability/README.md
+++ b/spec/light-client/accountability/README.md
@@ -231,7 +231,7 @@ Execution
 
 Consequences:
 
-* The validators in F1 will be detectable by the the fork accountability mechanisms.
+* The validators in F1 will be detectable by the fork accountability mechanisms.
 * The validators in F2 cannot be detected using this mechanism. Only in case they signed something which conflicts with the application this can be used against them. Otherwise they do not do anything incorrect.
 * This case is not covered by the report as it only assumes at most 2/3 of faulty validators.
diff --git a/spec/light-client/accountability/TendermintAccInv_004_draft.tla b/spec/light-client/accountability/TendermintAccInv_004_draft.tla index e23f69cbae7..bbab81369d7 100644 --- a/spec/light-client/accountability/TendermintAccInv_004_draft.tla +++ b/spec/light-client/accountability/TendermintAccInv_004_draft.tla @@ -164,7 +164,7 @@ TwoThirdsPrevotes(vr, v) == Cardinality(PV) >= THRESHOLD2 \* if a process sends a PREVOTE, then there are three possibilities: -\* 1) the process is faulty, 2) the PREVOTE cotains Nil, +\* 1) the process is faulty, 2) the PREVOTE contains Nil, \* 3) there is a proposal in an earlier (valid) round and two thirds of PREVOTES IfSentPrevoteThenReceivedProposalOrTwoThirds(r) == \A mpv \in msgsPrevote[r]: @@ -254,7 +254,7 @@ NoEquivocationByCorrect(r, msgs) == \/ m.src /= p \/ m.id = v -\* a proposer nevers sends two values +\* a proposer never sends two values \* @type: ($round, $round -> Set($proposeMsg)) => Bool; ProposalsByProposer(r, msgs) == \* if the proposer is not faulty, it sends only one value diff --git a/spec/light-client/attacks/Blockchain_003_draft.tla b/spec/light-client/attacks/Blockchain_003_draft.tla index 6b725d83d6b..064a41973af 100644 --- a/spec/light-client/attacks/Blockchain_003_draft.tla +++ b/spec/light-client/attacks/Blockchain_003_draft.tla @@ -35,7 +35,7 @@ BlockHeaders == [ (* in the implementation, only the hashes of V and NextV are stored in a block, as V and NextV are stored in the application state *) VS: SUBSET AllNodes, - \* the validators of this bloc. We store the validators instead of the hash. + \* the validators of this block. We store the validators instead of the hash. NextVS: SUBSET AllNodes \* the validators of the next block. We store the next validators instead of the hash. ] diff --git a/spec/light-client/detection/Blockchain_003_draft.tla b/spec/light-client/detection/Blockchain_003_draft.tla index c57bc469382..ef04dfc4d0b 100644 --- a/spec/light-client/detection/Blockchain_003_draft.tla +++ b/spec/light-client/detection/Blockchain_003_draft.tla @@ -35,7 +35,7 @@ BlockHeaders == [ (* in the implementation, only the hashes of V and NextV are stored in a block, as V and NextV are stored in the application state *) VS: SUBSET AllNodes, - \* the validators of this bloc. We store the validators instead of the hash. + \* the validators of this block. We store the validators instead of the hash. NextVS: SUBSET AllNodes \* the validators of the next block. We store the next validators instead of the hash. ] diff --git a/spec/light-client/detection/LCDetector_003_draft.tla b/spec/light-client/detection/LCDetector_003_draft.tla index cdc492b3661..aed0ea8fcd5 100644 --- a/spec/light-client/detection/LCDetector_003_draft.tla +++ b/spec/light-client/detection/LCDetector_003_draft.tla @@ -11,7 +11,7 @@ * * - the light client has its own local clock that can drift from the reference clock * within the envelope [refClock - CLOCK_DRIFT, refClock + CLOCK_DRIFT]. - * The local clock may increase as well as decrease in the the envelope + * The local clock may increase as well as decrease in the envelope * (similar to clock synchronization). * * - the ratio of the faulty validators is set as the parameter. 
diff --git a/spec/light-client/detection/detection_001_reviewed.md b/spec/light-client/detection/detection_001_reviewed.md index 25d72e10227..aa51844272a 100644 --- a/spec/light-client/detection/detection_001_reviewed.md +++ b/spec/light-client/detection/detection_001_reviewed.md @@ -621,7 +621,7 @@ func AttackDetector(root LightBlock, primary_trace []LightBlock) for each secondary in Secondaries { // we replay the primary trace with the secondary, in // order to generate evidence that we can submit to the - // secodary. We return the evidence + the trace the + // secondary. We return the evidence + the trace the // secondary told us that spans the evidence at its local store EvidenceForSecondary, newroot, secondary_trace, result := diff --git a/spec/light-client/detection/draft-functions.md b/spec/light-client/detection/draft-functions.md index f983fded124..5b643902f3d 100644 --- a/spec/light-client/detection/draft-functions.md +++ b/spec/light-client/detection/draft-functions.md @@ -33,7 +33,7 @@ func checkMisbehaviorAndUpdateState(cs: ClientState, PoF: LightNodeProofOfFork) *0 < i < length(PoF.PrimaryTrace)* - supports(PoF.TrustedBlock, PoF.SecondaryTrace[1], t) - supports(PoF.SecondaryTrace[i], PoF.SecondaryTrace[i+1], t) for - *0 < i < length(PoF.SecondaryTrace)* + *0 < i < length(PoF.SecondaryTrace)* - Expected postcondition - set cs.FrozenHeight to min(cs.FrozenHeight, PoF.TrustedBlock.Header.Height) - Error condition @@ -60,7 +60,7 @@ func QueryHeightsRange(id, from, to) ([]Height) > This function can be used if the relayer has no information about > the IBC component. This allows late-joining relayers to also -> participate in fork dection and the generation in proof of +> participate in fork detection and the generation in proof of > fork. Alternatively, we may also postulate that relayers are not > responsible to detect forks for heights before they started (and > subscribed to the transactions reporting fresh headers being @@ -120,9 +120,9 @@ func SubmitIBCProofOfFork( else { // the ibc component does not have the TrustedBlock and might // even be on yet a different branch. We have to compute a PoF - // that the ibc component can verifiy based on its current + // that the ibc component can verify based on its current // knowledge - + ibcLightBlock, lblock, _, result := commonRoot(lightStore, ibc, PoF.TrustedBlock) if result = Success { @@ -169,7 +169,7 @@ LightBlock) (LightBlock, LightBlock, LightStore, Result) { lblock.Height - 1); // this function does not exist yet. Alternatively, we may // request all transactions that installed headers via CosmosSDK - + for { h, result = max(ibcHeights) @@ -194,7 +194,7 @@ LightBlock) (LightBlock, LightBlock, LightStore, Result) { - a lightBlock b1 from the IBC component, and - a lightBlock b2 from the local lightStore with height less than - lblock.Header.Hight, s.t. b1 supports b2, and + lblock.Header.Height, s.t. 
b1 supports b2, and - a lightstore with the blocks downloaded from the ibc component @@ -237,7 +237,7 @@ func extendPoF (root LightBlock, - let prefix = connector + lightStore.Subtrace(connector.Header.Height, PoF.TrustedBlock.Header.Height-1) + - PoF.TrustedBlock + PoF.TrustedBlock - newPoF.PrimaryTrace = prefix + PoF.PrimaryTrace - newPoF.SecondaryTrace = prefix + PoF.SecondaryTrace @@ -259,7 +259,7 @@ func DetectIBCFork(ibc IBCComponent, lightStore LightStore) (LightNodeProofOfFor lb, result = LightClient.Main(primary, lightStore, cs.Header.Height) // [LCV-FUNC-IBCMAIN.1] **TODO** decide what to do following the outcome of Issue #499 - + // I guess here we have to get into the light client } @@ -281,7 +281,7 @@ func DetectIBCFork(ibc IBCComponent, lightStore LightStore) (LightNodeProofOfFor **TODO:** finish conditions - Implementation remark - - we ask the handler for the lastest check. Cross-check with the + - we ask the handler for the latest check. Cross-check with the chain. In case they deviate we generate PoF. - we assume IBC component is correct. It has verified the consensus state diff --git a/spec/light-client/supervisor/supervisor_001_draft.md b/spec/light-client/supervisor/supervisor_001_draft.md index 8a7cb9f8990..44268672850 100644 --- a/spec/light-client/supervisor/supervisor_001_draft.md +++ b/spec/light-client/supervisor/supervisor_001_draft.md @@ -474,8 +474,8 @@ we want to maintain [LCV-INV-TP.1] from the beginning. > it may increase trust, when one cross-checks the initial light > block. However, if a peer provides a conflicting > lightblock, the question is to distinguish the case of a -> [bogus](https://informal.systems) block (upon which operation should proceed) from a -> [light client attack](https://informal.systems) (upon which operation should stop). In +> bogus block (upon which operation should proceed) from a +> light client attack (upon which operation should stop). In > case of a bogus block, the lightclient might be forced to do > backwards verification until the blocks are out of the trusting > period, to make sure no previous validator set could have generated diff --git a/spec/light-client/verification/Blockchain_002_draft.tla b/spec/light-client/verification/Blockchain_002_draft.tla index df22b346dc4..2a9712decfc 100644 --- a/spec/light-client/verification/Blockchain_002_draft.tla +++ b/spec/light-client/verification/Blockchain_002_draft.tla @@ -35,7 +35,7 @@ BlockHeaders == [ (* in the implementation, only the hashes of V and NextV are stored in a block, as V and NextV are stored in the application state *) VS: SUBSET AllNodes, - \* the validators of this bloc. We store the validators instead of the hash. + \* the validators of this block. We store the validators instead of the hash. NextVS: SUBSET AllNodes \* the validators of the next block. We store the next validators instead of the hash. ] diff --git a/spec/light-client/verification/Blockchain_003_draft.tla b/spec/light-client/verification/Blockchain_003_draft.tla index 3037a0d2d5c..8db2ba71bba 100644 --- a/spec/light-client/verification/Blockchain_003_draft.tla +++ b/spec/light-client/verification/Blockchain_003_draft.tla @@ -41,7 +41,7 @@ BlockHeaders == [ (* in the implementation, only the hashes of V and NextV are stored in a block, as V and NextV are stored in the application state *) VS: SUBSET AllNodes, - \* the validators of this bloc. We store the validators instead of the hash. + \* the validators of this block. We store the validators instead of the hash. 
NextVS: SUBSET AllNodes \* the validators of the next block. We store the next validators instead of the hash. ] diff --git a/spec/light-client/verification/Blockchain_A_1.tla b/spec/light-client/verification/Blockchain_A_1.tla index cf8f45967b2..21d6b539f85 100644 --- a/spec/light-client/verification/Blockchain_A_1.tla +++ b/spec/light-client/verification/Blockchain_A_1.tla @@ -35,7 +35,7 @@ BlockHeaders == [ (* in the implementation, only the hashes of V and NextV are stored in a block, as V and NextV are stored in the application state *) VS: SUBSET AllNodes, - \* the validators of this bloc. We store the validators instead of the hash. + \* the validators of this block. We store the validators instead of the hash. NextVS: SUBSET AllNodes \* the validators of the next block. We store the next validators instead of the hash. ] diff --git a/spec/light-client/verification/Lightclient_002_draft.tla b/spec/light-client/verification/Lightclient_002_draft.tla index 32c807f6e62..1e5b5671abc 100644 --- a/spec/light-client/verification/Lightclient_002_draft.tla +++ b/spec/light-client/verification/Lightclient_002_draft.tla @@ -155,7 +155,7 @@ LCInit == /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"] - \* the latest verified block the the trusted block + \* the latest verified block the trusted block /\ latestVerified = trustedLightBlock /\ InitMonitor(trustedLightBlock, trustedLightBlock, now, "SUCCESS") diff --git a/spec/light-client/verification/Lightclient_003_draft.tla b/spec/light-client/verification/Lightclient_003_draft.tla index 8ec17850bbb..814abdf63f8 100644 --- a/spec/light-client/verification/Lightclient_003_draft.tla +++ b/spec/light-client/verification/Lightclient_003_draft.tla @@ -152,7 +152,7 @@ LCInit == /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"] - \* the latest verified block the the trusted block + \* the latest verified block the trusted block /\ latestVerified = trustedLightBlock /\ InitMonitor(trustedLightBlock, trustedLightBlock, localClock, "SUCCESS") diff --git a/spec/light-client/verification/Lightclient_A_1.tla b/spec/light-client/verification/Lightclient_A_1.tla index 6274632d019..902108b89d5 100644 --- a/spec/light-client/verification/Lightclient_A_1.tla +++ b/spec/light-client/verification/Lightclient_A_1.tla @@ -134,7 +134,7 @@ LCInit == /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"] - \* the latest verified block the the trusted block + \* the latest verified block the trusted block /\ latestVerified = trustedLightBlock \* block should contain a copy of the block from the reference chain, with a matching commit diff --git a/spec/light-client/verification/verification_001_published.md b/spec/light-client/verification/verification_001_published.md index 45123d1a29a..8e6e9fbb306 100644 --- a/spec/light-client/verification/verification_001_published.md +++ b/spec/light-client/verification/verification_001_published.md @@ -29,7 +29,7 @@ formalized in TLA+ and model checked. 
 ## Issues that need to be addressed
 
 As it is part of the larger light node, its data structures and
-functions interact with the fork dectection functionality of the light
+functions interact with the fork detection functionality of the light
 client. As a result of the work on [Pull Request
 479](https://github.com/informalsystems/tendermint-rs/pull/479) we
 established the need for an update in the data structures in [Issue
 499](https://github.com/informalsystems/tendermint-rs/issues/499). This
diff --git a/spec/light-client/verification/verification_002_draft.md b/spec/light-client/verification/verification_002_draft.md
index 4ae3731f825..5744b7166ff 100644
--- a/spec/light-client/verification/verification_002_draft.md
+++ b/spec/light-client/verification/verification_002_draft.md
@@ -32,7 +32,7 @@ formalized in TLA+ and model checked.
 
 ## Issues that are addressed in this revision
 
 As it is part of the larger light node, its data structures and
-functions interact with the attack dectection functionality of the light
+functions interact with the attack detection functionality of the light
 client. As a result of the work on
 
 - [attack detection](https://github.com/tendermint/spec/pull/164) for light nodes
diff --git a/spec/light-client/verification/verification_003_draft.md b/spec/light-client/verification/verification_003_draft.md
index 67f213ea2be..35f44b37bf7 100644
--- a/spec/light-client/verification/verification_003_draft.md
+++ b/spec/light-client/verification/verification_003_draft.md
@@ -29,7 +29,7 @@ height int64, commit *Commit) error {
 			continue
 		}
 
-		// If the vals and commit have a 1-to-1 correspondance we can retrieve
+		// If the vals and commit have a 1-to-1 correspondence we can retrieve
 		// them by index else we need to retrieve them by address
 		if lookUpByIndex {
 			val = vals.Validators[idx]
diff --git a/spec/mempool/gossip/Makefile b/spec/mempool/gossip/Makefile
new file mode 100644
index 00000000000..de0d17ac58d
--- /dev/null
+++ b/spec/mempool/gossip/Makefile
@@ -0,0 +1,9 @@
+mds = p2p.md mempool.md flood.md
+quints = $(mds:.md=.qnt)
+
+%.qnt: %.md
+	lmt $<
+
+all: $(quints)
+.PHONY: all
+ 
\ No newline at end of file
diff --git a/spec/mempool/gossip/README.md b/spec/mempool/gossip/README.md
new file mode 100644
index 00000000000..0afe74fdf33
--- /dev/null
+++ b/spec/mempool/gossip/README.md
@@ -0,0 +1,46 @@
+# Mempool Gossip
+
+This directory contains specifications of gossip protocols used by the mempool to disseminate
+transactions in the network.
+
+## Protocols
+
+- [Flood](flood.md). Currently implemented by CometBFT, Flood is a straightforward gossip protocol
+  with a focus on rapid transaction propagation.
+  - Pros:
+    + Latency: nodes forward transactions to their peers as soon as they receive them, resulting in
+      the minimum possible latency of decentralised P2P networks.
+    + Byzantine Fault Tolerance (BFT): flooding the network with messages ensures malicious actors
+      cannot easily prevent transaction dissemination (i.e., censoring), making it resilient to network disruptions
+      and attacks.
+  - Cons:
+    - Bandwidth: the broadcast nature of Flood results in significant redundancy in message
+      propagation, leading to exponential increases in bandwidth usage.
+
+## Specifications with Quint snippets
+
+These specifications are written in English with code snippets in the [Quint][quint] language,
+following the [literate programming paradigm][lit]. The intention is that Quint snippets can be
+read as pseudo-code. Moreover, we can automatically generate Quint files from the markdown files.
+
+Quint allows specs to be executed, tested, and formally verified. For the moment we use it here just
+to give structure to the spec documentation and to type-check the definitions.
+
+To (re-)generate the Quint files:
+1. install the [lmt tool][lmt] (see the prerequisites [here][lit]), and
+2. run `make`.
+
+The Flood gossip protocol is described in its own [flood](flood.md) spec. It is built on top of
+two other specs, which are not strictly needed to understand the protocol:
+- [mempool](mempool.md) with definitions of common data structures from the mempool, and
+- [p2p](p2p.md) with networking definitions, assumptions, and boilerplate.
+
+Specs dependencies:
+```mermaid
+flowchart TB
+    flood --> mempool --> p2p;
+```
+
+[quint]: https://quint-lang.org/
+[lit]: https://quint-lang.org/docs/literate
+[lmt]: https://github.com/driusan/lmt
diff --git a/spec/mempool/gossip/flood.md b/spec/mempool/gossip/flood.md
new file mode 100644
index 00000000000..dc8903149b0
--- /dev/null
+++ b/spec/mempool/gossip/flood.md
@@ -0,0 +1,280 @@
+# Flood gossip protocol
+
+Flood is a basic _push_ gossip protocol: every time a node receives a transaction, it forwards (or
+"pushes") the transaction to all its peers, except to the peer(s) from which it received the
+transaction.
+
+This protocol is built on top of the [mempool](mempool.md) and [p2p](p2p.md) modules.
+
+**Table of contents**
+  - [Messages](#messages)
+  - [State](#state)
+  - [Initial state](#initial-state)
+  - [State transitions (actions)](#state-transitions-actions)
+    - [Adding transactions to the mempool](#adding-transactions-to-the-mempool)
+      - [Adding first-time transactions](#adding-first-time-transactions)
+      - [Handling duplicate transactions](#handling-duplicate-transactions)
+    - [Handling incoming messages](#handling-incoming-messages)
+    - [Transaction dissemination](#transaction-dissemination)
+  - [Properties](#properties)
+
+> This document was written using the literate programming paradigm. Code snippets are written in
+> [Quint][quint] and can get "tangled" into a Quint file.
+
+## Messages
+
+Nodes communicate using only one type of message, which carries a full transaction.
+```bluespec "messages" +=
+type Message =
+    | TxMsg(TX)
+```
+
+## State
+
+Flood's state consists of the underlying [mempool](mempool.md) state (variable `mempool`) and
+[P2P](p2p.md) state (variables `incomingMsgs` and `peers`).
+
+Additionally, for each transaction in each node's mempool, we keep track of the peer IDs from whom
+the node received the transaction.
+```bluespec "state" +=
+var senders: NodeID -> TxID -> List[NodeID]
+```
+We define the senders as a list instead of a set because the DOG protocol needs to know who is the
+first sender of a transaction.
+
+Note that a transaction won't have a sender when it is in the cache but not in the mempool. Senders
+are only needed for disseminating (valid) transactions that are in the mempool.
+
+<details>
+  <summary>Auxiliary definitions</summary>
+
+```bluespec "auxstate" +=
+def Senders(node) = senders.get(node)
+```
+
+The set of senders of transaction `tx`:
+```bluespec "auxstate" +=
+def sendersOf(node, tx) =
+    node.Senders().mapGetDefault(hash(tx), List()).listToSet()
+```
+
+Function `addSender` adds a sender to `tx`'s list of senders (`_txSenders`), if `optionalSender` has
+a value that's not already in the list.
+```bluespec "auxstate" +=
+pure def addSender(_txSenders, tx, optionalSender) =
+    match optionalSender {
+    | Some(sender) => _txSenders.update(hash(tx), ss =>
+        if (ss.includes(sender)) ss else ss.append(sender))
+    | None => _txSenders
+    }
+```
+</details>
+ +## Initial state + +Flood's initial state is the underlying mempool's initial state (`MP_init`) and an empty mapping of +transactions to senders. +```bluespec "actions" += +action init = all { + MP_init, + senders' = NodeIDs.mapBy(n => Map()), +} +``` + +## State transitions (actions) + +These are the state transitions of the system. Note that generic actions are imported from the +[mempool](mempool.md) and [p2p](p2p.md) specs. The missing implementation details (`tryAddTx`, +`handleMessage`, `mkTargetNodes`) are described in the rest of the section. + +1. User-submitted transactions: when a node receives a transaction from a user, it tries to add it + to the mempool. + ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) + nondet tx = oneOf(AllTxs) + node.receiveTxFromUser(tx, tryAddTx), + ``` + +2. Peer message handling: a node processes messages received from a peer. + ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) + node.receiveFromPeer(handleMessage), + ``` + +3. Transaction dissemination: a node sends a transaction in its mempool to a subset of target nodes. + ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) + all { + node.disseminateNextTx(mkTargetNodes, TxMsg), + senders' = senders, + }, + ``` + +4. A node joins the network. + ```bluespec "steps" += + all { + pickNodeAndJoin, + mempool' = mempool, + senders' = senders, + }, + ``` + +5. A node disconnects from the network. + ```bluespec "steps" += + all { + pickNodeAndDisconnect, + mempool' = mempool, + senders' = senders, + } + ``` + +### Adding transactions to the mempool + +A node attempting to add a transaction to its mempool processes the transaction according to whether +it has seen it before, that is, if the transaction exists in the mempool cache. +- A *first-time* transaction is one that the node does not have in its cache. +- A *duplicate* transaction is one that the node has received multiple times, and thus it's cached. + +```bluespec "actions" += +action tryAddTx(node, _incomingMsgs, optionalSender, tx) = + if (not(hash(tx).in(node.Cache()))) + node.tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx) + else + node.processDuplicateTx(_incomingMsgs, optionalSender, tx) +``` +In this action the sender is optional. When there's a sender, it means that the transaction comes +from a peer; otherwise it comes directly from a user. + +#### Adding first-time transactions + +`tryAddFirstTimeTx` attempts to add a first-time transaction `tx` to a +`node`'s mempool: +1. it caches `tx`, +2. if `tx` is valid, it appends `tx` to `txs`, and +3. updates its senders. +```bluespec "actions" += +action tryAddFirstTimeTx(node, _incomingMsgs, optionalSender, tx) = all { + mempool' = mempool.update(node, st => { + cache: st.cache.join(hash(tx)), + txs: if (valid(tx)) st.txs.append(tx) else st.txs, + ...st }), + senders' = senders.update(node, ss => + if (valid(tx)) ss.addSender(tx, optionalSender) else ss), + incomingMsgs' = _incomingMsgs, + peers' = peers, +} +``` + +#### Handling duplicate transactions + +Action `processDuplicateTx` processes a duplicate transaction `tx` by updating the list of senders, +only if `tx` is already in the mempool (`txs`). 
+```bluespec "actions" += +action processDuplicateTx(node, _incomingMsgs, optionalSender, tx) = all { + senders' = senders.update(node, ss => + if (node.Txs().includes(tx)) ss.addSender(tx, optionalSender) else ss), + mempool' = mempool, + incomingMsgs' = _incomingMsgs, + peers' = peers, +} +``` + +### Handling incoming messages + +Upon receiving a message with transaction `tx` from a peer (i.e., the `sender`), the `node` attempts +to add `tx` to its mempool. +```bluespec "actions" += +action handleMessage(node, _incomingMsgs, sender, msg) = + match msg { + | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) + } +``` +> The argument `_incomingMsgs` is passed only to update the queues of incoming messages, when +applicable (Flood does not reply with any message, but DOG does). + +### Transaction dissemination + +In Flood, a node sends a transaction to all its peers, except those that previously sent it. + +`mkTargetNodes` defines the set of peers to whom `node` will send `tx`. It is passed as an argument +to the generic transaction dissemination action. +```bluespec "actions" += +def mkTargetNodes(node, tx) = + node.Peers().exclude(node.sendersOf(tx)) +``` + +## Properties + +Function `txInAllMempools` returns `true` if the given transaction `tx` is in the mempool of all +nodes. +```bluespec "properties" += +def txInAllMempools(tx) = + NodeIDs.forall(n => n.Txs().includes(tx)) +``` + +_**Property**_ If a transaction is in the mempool of any node, then eventually the transaction will +reach the mempool of all nodes (possibly more than once, and assuming transactions are not removed from +mempools). +```bluespec "properties" += +temporal txInPoolGetsDisseminated = + AllTxs.forall(tx => + NodeIDs.exists(node => + node.Txs().includes(tx) implies eventually(txInAllMempools(tx)))) +``` + +_**Invariant**_ If node A sent a transaction `tx` to node B (A is in B's list of `tx`'s senders), +then B does not send `tx` to A (the message won't be in A's incoming messages). +```bluespec "properties" += +val dontSendBackToSender = + NodeIDs.forall(nodeA => + NodeIDs.forall(nodeB => + AllTxs.forall(tx => + nodeB.sendersOf(tx).contains(nodeA) + implies + not(nodeA.IncomingMsgs().includes((nodeB, TxMsg(tx)))) + ))) +``` + + + +[quint]: https://quint-lang.org/ diff --git a/spec/mempool/gossip/mempool.md b/spec/mempool/gossip/mempool.md new file mode 100644 index 00000000000..9184e3f8f38 --- /dev/null +++ b/spec/mempool/gossip/mempool.md @@ -0,0 +1,237 @@ +# Mempool + +This specification of a mempool defines essential types and data structures needed to keep a list of +pending transactions ("the mempool"), as well as generic actions to disseminate transactions. Those +generic actions are then instantiated with specific functions that define the behaviour of the +gossip protocols. + +The mempool is built on top of a [P2P layer](p2p.md), which declares many of the definitions used here. + +## Types + +### Transactions + +A transaction is uniquely identified by a string, which represents its content (typically +implemented as an array of bytes). +```bluespec "types" += +type TX = str +``` + +Transactions are validated by an external entity. The validation function must be deterministic. In +the actual implementation, the mempool makes a CheckTx ABCI call to the application, which validates +the transaction. +```bluespec "types" += +pure def valid(tx) = true +``` + +In this simplified specification we model all transactions as valid.
To model invalid transactions, +`valid` should be declared as a model parameter (a `const`) and instantiated with a deterministic +function of type `(TX) => bool`. + +### Transaction IDs + +A transaction identifier, computed as the hash of the transaction (typically a short array of +bytes). +```bluespec "types" += +type TxID = str +pure def hash(tx: TX): TxID = tx +``` + +## Parameters + +The set of all possible transactions. +```bluespec "params" += +const AllTxs: Set[TX] +``` + +## State + +Each node has a mempool state. +```bluespec "state" += +var mempool: NodeID -> MempoolState +``` + +We define `MempoolState` as a data structure with the following fields. + +#### Cache of already received transaction IDs + +We assume the cache never overflows, i.e., it can grow indefinitely. +```bluespec "mempoolstate" += +cache: Set[TxID], +``` + +#### List of uncommitted or pending transactions ("the mempool") + +This list is used for storing transactions and for picking transactions to disseminate to peers. +```bluespec "mempoolstate" += +txs: List[TX], +``` + +We make the following assumptions about the mempool: +- It does not have a maximum capacity. +- New entries are only appended. We do not model when entries are removed. + +A transaction that is in the `txs` list must also be in `cache` (assuming an infinite cache), but +not necessarily the converse. A transaction may be in `cache` but not in `txs` +because: +- the transaction was initially invalid and never got into `txs`, +- the transaction became invalid after it got into `txs` and thus got evicted when it was revalidated, + or +- the transaction was committed to a block and got removed from `txs`. + +None of these scenarios is modeled here, so `cache` and `txs` always have the same content, +and one of the two is actually redundant in this spec. + +#### Index to the next transaction to disseminate + +A mempool iterator traverses the entries in `txs` one at a time. +```bluespec "mempoolstate" += +txsIndex: int, +``` +We model transaction dissemination using one dissemination process (`disseminateNextTx`) that +iterates over the list of transactions, reading one entry per step, and atomically multicasts one +transaction message to all connected peers. + +In the implementation, there is one dissemination process per peer, each with its own iterator (and +thus a separate index per iterator) with a `next()` method to retrieve the next entry in the `txs` +list. If an iterator reaches the end of the list, it blocks until a new entry is added. All iterators read +concurrently from `txs`. +
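+The containment between `txs` and `cache` described above can be stated as an invariant. The following sketch is illustrative only (it is not part of the tangled spec): +```bluespec +// Illustrative invariant: every transaction in txs is, by its ID, also in +// cache (assuming the infinite cache described above). +val txsAreCached = + NodeIDs.forall(node => + mempool.get(node).txs.listToSet().map(tx => hash(tx)).subseteq(mempool.get(node).cache)) +``` +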
+ Auxiliary definitions + +```bluespec "auxstate" += +def Cache(node) = mempool.get(node).cache +def Txs(node) = mempool.get(node).txs +def TxsIndex(node) = mempool.get(node).txsIndex +``` +
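+For instance, these accessors let us state a basic sanity property of the dissemination index as an invariant (illustrative only, not part of the tangled spec): the index never runs past the end of the transaction list. +```bluespec +// Illustrative invariant: the dissemination index stays within bounds. +val txsIndexInBounds = + NodeIDs.forall(node => node.TxsIndex() <= node.Txs().length()) +``` +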
+ +## Initial state + +The initial state of a mempool: +```bluespec "actions" += +action MP_init = all { + P2P_init, + mempool' = NodeIDs.mapBy(n => initialMempoolState), +} +``` +where: +```bluespec "actions" += +val initialMempoolState = { + cache: Set(), + txs: List(), + txsIndex: 0, +} +``` + +## State transitions (actions) + +### Handling incoming transactions + +Users create transactions and send them to one of the nodes in the network. Nodes receive +transactions either directly from users or in messages from peers. Transactions from users have no +sender. + +Action `receiveTxFromUser` models a `node` receiving transaction `tx` from a user. +```bluespec "actions" += +action receiveTxFromUser(node, tx, _tryAddTx) = + node._tryAddTx(incomingMsgs, None, tx) +``` +The function parameter `_tryAddTx(incomingMsgs, optionalSender, tx)` defines how transactions are +added to the mempool. + +Typically, users send (full) transactions to the node via an RPC endpoint. Users are allowed to +submit the same transaction more than once and to multiple nodes. + +This action is enabled only if the transaction is not in the mempool. In the actual mempool +implementation, the cache prevents this scenario. + +### Transaction dissemination + +Action `disseminateNextTx` models a `node` traversing the `txs` list while sending transactions to +its peers. It takes the transaction pointed to by `txsIndex` and atomically sends it to a set of target +peers. + +The following function parameters define to whom `node` will send transactions: +- `_mkTargetNodes(node, tx)` returns the set of peers to which `node` + will send `tx`. +- `_mkTxMsg(tx)` is a wrapper function that returns the specific message + type used by the gossip protocol. +```bluespec "actions" += +action disseminateNextTx(node, _mkTargetNodes, _mkTxMsg) = all { + // Check that the current index is within bounds. + require(node.TxsIndex() < node.Txs().length()), + // Get from the mempool the next transaction to disseminate. + val tx = node.Txs()[node.TxsIndex()] + all { + // Wrap transaction in a message and send it to the target nodes. + incomingMsgs' = + node.multiSend(incomingMsgs, _mkTargetNodes(node, tx), _mkTxMsg(tx)), + // Increase index. + mempool' = mempool.update(node, st => { txsIndex: st.txsIndex + 1, ...st }), + peers' = peers, + } +} +``` + +The index must not exceed the length of `txs`. This pre-condition models the iterator being at the +end of the list, blocked waiting for a new entry to be appended. + +In the actual implementation, there is a separate goroutine for each peer, so not all transactions +are sent at the same time. + +## Properties + +_**Invariant**_ Transaction lists do not have repeated entries. +```bluespec "properties" += +val uniqueTxsInMempool = + NodeIDs.forall(node => size(node.Txs().listToSet()) == length(node.Txs())) +``` + + diff --git a/spec/mempool/gossip/p2p.md b/spec/mempool/gossip/p2p.md new file mode 100644 index 00000000000..e0d020cf1a2 --- /dev/null +++ b/spec/mempool/gossip/p2p.md @@ -0,0 +1,208 @@ +# P2P + +This module specifies a P2P layer as needed for the gossip protocols. It includes definitions of +nodes, peers, the network topology, message sending, and nodes joining and leaving the network. + +## Types + +Nodes are identified by a string. +```bluespec "types" +type NodeID = str +``` + +## Parameters + +The set of all possible node IDs, even those that are not initially connected to the network.
+```bluespec "params" +const NodeIDs: Set[NodeID] +``` + +Initial network topology. A topology is defined by the set of peers each node has. +```bluespec "params" += +const InitialPeers: NodeID -> Set[NodeID] +``` + +## State + +To model network communication, each node has a queue (a list) of incoming messages. Node A sends a +message to node B by appending the message to B's queue. We use queues to model that messages +arrive in order, as we assume this is guaranteed by the transport layer. Messages have a sender (a +node ID). + +The type variable `msg` can be instantiated with the message types of different protocols. + +```bluespec "state" +var incomingMsgs: NodeID -> List[(NodeID, msg)] +``` + +In the actual implementation, transaction messages are transmitted on the `Mempool` data channel of +the P2P layer. Control messages are usually transmitted on other channels with different priorities. +Here we model a single, reliable channel. + +The dynamic network topology. Each node has a set of peers that is updated when nodes join or leave +the network. + +```bluespec "state" += +var peers: NodeID -> Set[NodeID] +``` + +
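+As an illustration of the topology shape shared by `InitialPeers` and `peers` (hypothetical values; not part of the tangled spec), a fully connected network of three nodes would look as follows: +```bluespec +// A fully connected three-node topology: each node has the other two as peers. +pure val exampleTopology: NodeID -> Set[NodeID] = Map( + "A" -> Set("B", "C"), + "B" -> Set("A", "C"), + "C" -> Set("A", "B") +) +``` +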
+ Auxiliary definitions + +```bluespec "auxstate" += +def IncomingMsgs(node) = incomingMsgs.get(node) +def Peers(node) = peers.get(node) +``` + +Function `multiSend` sends message `msg` from `node` to a set of `targetNodes`, by appending `(node, msg)` +to each target's queue in the map of incoming messages `_incomingMsgs`. `targetNodes` can be empty, in +which case `_incomingMsgs` stays the same. +```bluespec "state" += +pure def multiSend(node, _incomingMsgs, targetNodes, msg) = + _incomingMsgs.updateMultiple(targetNodes, ms => ms.append((node, msg))) +``` + +A node is in the network if it has peers: +```bluespec "auxstate" += +val nodesInNetwork = NodeIDs.filter(node => node.Peers().nonEmpty()) +val nodesNotInNetwork = NodeIDs.exclude(nodesInNetwork) +``` +
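+A small usage sketch of `multiSend` (hypothetical values; not part of the tangled spec): +```bluespec +// Node "A" sends "hello" to "B" and "C": the pair (sender, message) is +// appended to the incoming queue of each target node. +pure val _queues: NodeID -> List[(NodeID, str)] = Map("B" -> List(), "C" -> List()) +pure val _afterSend = "A".multiSend(_queues, Set("B", "C"), "hello") +// _afterSend == Map("B" -> List(("A", "hello")), "C" -> List(("A", "hello"))) +``` +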
+ +## Initial state + +The initial state of the P2P layer: +```bluespec "actions" += +action P2P_init = all { + incomingMsgs' = NodeIDs.mapBy(_ => List()), + peers' = NodeIDs.mapBy(n => InitialPeers.get(n)), +} +``` + +## State transitions (actions) + +A node takes the first of its incoming messages, sent by some peer, and handles it according to its type. +```bluespec "actions" += +action receiveFromPeer(node, handleMessage) = all { + require(length(node.IncomingMsgs()) > 0), + // We model receiving of a message as taking the head of the list of + // incoming messages and leaving the tail. + val someMsg = node.IncomingMsgs().head() + val sender = someMsg._1 + val msg = someMsg._2 + val _incomingMsgs = incomingMsgs.update(node, tail) + handleMessage(node, _incomingMsgs, sender, msg) +} +``` + +A node joins the network by connecting to a given set of peers. All those peers add the new node to +their list of peers. +```bluespec "actions" += +action joinNetwork(node, peerSet) = all { + // The node must not be connected to the network. + require(node.Peers().isEmpty()), + peers' = peers + // Assign to node the set of new peers. + .put(node, peerSet) + // Add node as a new peer to the set of connecting peers. + .updateMultiple(peerSet, ps => ps.join(node)), + incomingMsgs' = incomingMsgs, +} +``` + +Non-deterministically pick a node and its peers to join the network. +```bluespec "actions" += +action pickNodeAndJoin = all { + // Pick a node that is not connected to the network. + require(NodeIDs.exclude(nodesInNetwork).nonEmpty()), + nondet node = oneOf(NodeIDs.exclude(nodesInNetwork)) + // Pick a non-empty set of nodes in the network to be the node's peers. + nondet peerSet = oneOf(nodesInNetwork.powerset().exclude(Set())) + node.joinNetwork(peerSet), +} +``` + +A node gets disconnected from the network. All its peers are immediately aware that the node is no +longer one of their peers, so their state is updated accordingly. +```bluespec "actions" += +// TODO: the network must not become disconnected; we don't want to model that. +action disconnectNetwork(nodeToDisconnect, _incomingMsgs) = all { + peers' = peers + // Clean node's state and remove all its peers. + .put(nodeToDisconnect, Set()) + // Remove node from other peers' state. + .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(nodeToDisconnect))), + incomingMsgs' = _incomingMsgs, +} +``` + +Non-deterministically pick a node to disconnect from the network. +```bluespec "actions" += +action pickNodeAndDisconnect = all { + // Pick a node that is not the only node in the network. + require(size(nodesInNetwork) > 1), + nondet nodeToDisconnect = oneOf(nodesInNetwork) + disconnectNetwork(nodeToDisconnect, incomingMsgs), +} +``` + +## Properties + +_**Invariant**_ Peer relationships are bidirectional or symmetrical: if node A has B as a peer, then B +has A as a peer. +```bluespec "properties" += +val bidirectionalNetwork = + NodeIDs.forall(nodeA => + nodeA.Peers().forall(nodeB => nodeA.in(nodeB.Peers()))) +``` + +_**Property**_ Eventually all messages are delivered (there are no incoming messages). +```bluespec "properties" += +temporal allMsgsDelivered = + eventually(NodeIDs.forall(node => length(node.IncomingMsgs()) == 0)) +``` + +```bluespec "properties" += +// TODO: Invariant: all nodes in the network are always connected.
+``` + + diff --git a/spec/mempool/gossip/quint/flood.qnt b/spec/mempool/gossip/quint/flood.qnt new file mode 100644 index 00000000000..911326969e7 --- /dev/null +++ b/spec/mempool/gossip/quint/flood.qnt @@ -0,0 +1,109 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. + +module flood { + import spells.* from "./spells" + import mempool.* from "./mempool" + export mempool.* + + //-------------------------------------------------------------------------- + // Messages + //-------------------------------------------------------------------------- + type Message = + | TxMsg(TX) + + //-------------------------------------------------------------------------- + // State + //-------------------------------------------------------------------------- + var senders: NodeID -> TxID -> List[NodeID] + + // Auxiliary definitions + def Senders(node) = senders.get(node) + def sendersOf(node, tx) = + node.Senders().mapGetDefault(hash(tx), List()).listToSet() + pure def addSender(_txSenders, tx, optionalSender) = + match optionalSender { + | Some(sender) => _txSenders.update(hash(tx), ss => + if (ss.includes(sender)) ss else ss.append(sender)) + | None => _txSenders + } + + //-------------------------------------------------------------------------- + // Actions + //-------------------------------------------------------------------------- + action init = all { + MP_init, + senders' = NodeIDs.mapBy(n => Map()), + } + action tryAddTx(node, _incomingMsgs, optionalSender, tx) = + if (not(hash(tx).in(node.Cache()))) + node.tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx) + else + node.processDuplicateTx(_incomingMsgs, optionalSender, tx) + action tryAddFirstTimeTx(node, _incomingMsgs, optionalSender, tx) = all { + mempool' = mempool.update(node, st => { + cache: st.cache.join(hash(tx)), + txs: if (valid(tx)) st.txs.append(tx) else st.txs, + ...st }), + senders' = senders.update(node, ss => + if (valid(tx)) ss.addSender(tx, optionalSender) else ss), + incomingMsgs' = _incomingMsgs, + peers' = peers, + } + action processDuplicateTx(node, _incomingMsgs, optionalSender, tx) = all { + senders' = senders.update(node, ss => + if (node.Txs().includes(tx)) ss.addSender(tx, optionalSender) else ss), + mempool' = mempool, + incomingMsgs' = _incomingMsgs, + peers' = peers, + } + action handleMessage(node, _incomingMsgs, sender, msg) = + match msg { + | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) + } + def mkTargetNodes(node, tx) = + node.Peers().exclude(node.sendersOf(tx)) + + action step = any { + nondet node = oneOf(nodesInNetwork) + nondet tx = oneOf(AllTxs) + node.receiveTxFromUser(tx, tryAddTx), + nondet node = oneOf(nodesInNetwork) + node.receiveFromPeer(handleMessage), + nondet node = oneOf(nodesInNetwork) + all { + node.disseminateNextTx(mkTargetNodes, TxMsg), + senders' = senders, + }, + all { + pickNodeAndJoin, + mempool' = mempool, + senders' = senders, + }, + all { + pickNodeAndDisconnect, + mempool' = mempool, + senders' = senders, + } + } + + //-------------------------------------------------------------------------- + // Properties + //-------------------------------------------------------------------------- + def txInAllMempools(tx) = + NodeIDs.forall(n => n.Txs().includes(tx)) + temporal txInPoolGetsDisseminated = + AllTxs.forall(tx => + NodeIDs.exists(node => + node.Txs().includes(tx) implies eventually(txInAllMempools(tx)))) + val dontSendBackToSender = + NodeIDs.forall(nodeA => + NodeIDs.forall(nodeB => + 
AllTxs.forall(tx => + nodeB.sendersOf(tx).contains(nodeA) + implies + not(nodeA.IncomingMsgs().includes((nodeB, TxMsg(tx)))) + ))) + +} diff --git a/spec/mempool/gossip/quint/mempool.qnt b/spec/mempool/gossip/quint/mempool.qnt new file mode 100644 index 00000000000..0fe3eabeeb4 --- /dev/null +++ b/spec/mempool/gossip/quint/mempool.qnt @@ -0,0 +1,74 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. + +module mempool { + import spells.* from "./spells" + import p2p.* from "./p2p" + export p2p.* + + //-------------------------------------------------------------------------- + // Types + //-------------------------------------------------------------------------- + type TX = str + pure def valid(tx) = true + type TxID = str + pure def hash(tx: TX): TxID = tx + + //-------------------------------------------------------------------------- + // Parameters + //-------------------------------------------------------------------------- + const AllTxs: Set[TX] + + //-------------------------------------------------------------------------- + // State + //-------------------------------------------------------------------------- + var mempool: NodeID -> MempoolState + + type MempoolState = { + cache: Set[TxID], + txs: List[TX], + txsIndex: int, + } + + // Auxiliary definitions + def Cache(node) = mempool.get(node).cache + def Txs(node) = mempool.get(node).txs + def TxsIndex(node) = mempool.get(node).txsIndex + + //-------------------------------------------------------------------------- + // Actions + //-------------------------------------------------------------------------- + action MP_init = all { + P2P_init, + mempool' = NodeIDs.mapBy(n => initialMempoolState), + } + val initialMempoolState = { + cache: Set(), + txs: List(), + txsIndex: 0, + } + action receiveTxFromUser(node, tx, _tryAddTx) = + node._tryAddTx(incomingMsgs, None, tx) + action disseminateNextTx(node, _mkTargetNodes, _mkTxMsg) = all { + // Check that the current index is within bounds. + require(node.TxsIndex() < node.Txs().length()), + // Get from the mempool the next transaction to disseminate. + val tx = node.Txs()[node.TxsIndex()] + all { + // Wrap transaction in a message and send it to the target nodes. + incomingMsgs' = + node.multiSend(incomingMsgs, _mkTargetNodes(node, tx), _mkTxMsg(tx)), + // Increase index. + mempool' = mempool.update(node, st => { txsIndex: st.txsIndex + 1, ...st }), + peers' = peers, + } + } + + //-------------------------------------------------------------------------- + // Properties + //-------------------------------------------------------------------------- + val uniqueTxsInMempool = + NodeIDs.forall(node => size(node.Txs().listToSet()) == length(node.Txs())) + +} diff --git a/spec/mempool/gossip/quint/p2p.qnt b/spec/mempool/gossip/quint/p2p.qnt new file mode 100644 index 00000000000..5247c027f4a --- /dev/null +++ b/spec/mempool/gossip/quint/p2p.qnt @@ -0,0 +1,94 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. 
+ +module p2p { + import spells.* from "./spells" + + //-------------------------------------------------------------------------- + // Types + //-------------------------------------------------------------------------- + type NodeID = str + + //-------------------------------------------------------------------------- + // Parameters + //-------------------------------------------------------------------------- + const NodeIDs: Set[NodeID] + const InitialPeers: NodeID -> Set[NodeID] + + //-------------------------------------------------------------------------- + // State + //-------------------------------------------------------------------------- + var incomingMsgs: NodeID -> List[(NodeID, msg)] + var peers: NodeID -> Set[NodeID] + pure def multiSend(node, _incomingMsgs, targetNodes, msg) = + _incomingMsgs.updateMultiple(targetNodes, ms => ms.append((node, msg))) + + // Auxiliary definitions + def IncomingMsgs(node) = incomingMsgs.get(node) + def Peers(node) = peers.get(node) + val nodesInNetwork = NodeIDs.filter(node => node.Peers().nonEmpty()) + val nodesNotInNetwork = NodeIDs.exclude(nodesInNetwork) + + //-------------------------------------------------------------------------- + // Actions + //-------------------------------------------------------------------------- + action P2P_init = all { + incomingMsgs' = NodeIDs.mapBy(_ => List()), + peers' = NodeIDs.mapBy(n => InitialPeers.get(n)), + } + action receiveFromPeer(node, handleMessage) = all { + require(length(node.IncomingMsgs()) > 0), + // We model receiving of a message as taking the head of the list of + // incoming messages and leaving the tail. + val someMsg = node.IncomingMsgs().head() + val sender = someMsg._1 + val msg = someMsg._2 + val _incomingMsgs = incomingMsgs.update(node, tail) + handleMessage(node, _incomingMsgs, sender, msg) + } + action joinNetwork(node, peerSet) = all { + // The node must not be connected to the network. + require(node.Peers().isEmpty()), + peers' = peers + // Assign to node the set of new peers. + .put(node, peerSet) + // Add node as a new peer to the set of connecting peers. + .updateMultiple(peerSet, ps => ps.join(node)), + incomingMsgs' = incomingMsgs, + } + action pickNodeAndJoin = all { + // Pick a node that is not connected to the network. + require(NodeIDs.exclude(nodesInNetwork).nonEmpty()), + nondet node = oneOf(NodeIDs.exclude(nodesInNetwork)) + // Pick a non-empty set of nodes in the network to be the node's peers. + nondet peerSet = oneOf(nodesInNetwork.powerset().exclude(Set())) + node.joinNetwork(peerSet), + } + // TODO: the network must not become disconnected; we don't want to model that. + action disconnectNetwork(nodeToDisconnect, _incomingMsgs) = all { + peers' = peers + // Clean node's state and remove all its peers. + .put(nodeToDisconnect, Set()) + // Remove node from other peers' state. + .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(nodeToDisconnect))), + incomingMsgs' = _incomingMsgs, + } + action pickNodeAndDisconnect = all { + // Pick a node that is not the only node in the network. 
+ require(size(nodesInNetwork) > 1), + nondet nodeToDisconnect = oneOf(nodesInNetwork) + disconnectNetwork(nodeToDisconnect, incomingMsgs), + } + + //-------------------------------------------------------------------------- + // Properties + //-------------------------------------------------------------------------- + val bidirectionalNetwork = + NodeIDs.forall(nodeA => + nodeA.Peers().forall(nodeB => nodeA.in(nodeB.Peers()))) + temporal allMsgsDelivered = + eventually(NodeIDs.forall(node => length(node.IncomingMsgs()) == 0)) + // TODO: Invariant: all nodes in the network are always connected. + +} diff --git a/spec/mempool/gossip/quint/spells.qnt b/spec/mempool/gossip/quint/spells.qnt new file mode 100644 index 00000000000..313654cc294 --- /dev/null +++ b/spec/mempool/gossip/quint/spells.qnt @@ -0,0 +1,135 @@ +// -*- mode: Bluespec; -*- +module spells { + + //-------------------------------------------------------------------------- + // Basic + //-------------------------------------------------------------------------- + + /// An annotation for writing preconditions. + pure def require(__cond: bool): bool = __cond + + //-------------------------------------------------------------------------- + // Arithmetic + //-------------------------------------------------------------------------- + + pure def min(x,y) = if (x < y) x else y + pure def max(x,y) = if (x > y) x else y + + //-------------------------------------------------------------------------- + // Options + //-------------------------------------------------------------------------- + + type Option[a] = + | Some(a) + | None + + pure def isSome(__opt) = + match __opt { + | Some(_) => true + | None => false + } + + pure def isNone(__opt) = not(isSome(__opt)) + + pure def optionMap(__opt: Option[a], __f: a => b): Option[b] = + match __opt { + | Some(a) => Some(__f(a)) + | None => None + } + + pure def optionFlatten(__opt: Option[Option[a]]): Option[a] = + match __opt { + | Some(o) => o + | None => None + } + + pure def optionGetDefault(__opt: Option[a], __default: a): a = + match __opt { + | Some(o) => o + | None => __default + } + + pure def optionToSet(__opt: Option[a]): Set[a] = + match __opt { + | Some(o) => Set(o) + | None => Set() + } + + //-------------------------------------------------------------------------- + // Sets + //-------------------------------------------------------------------------- + + pure def join(__set: Set[a], __elem: a): Set[a] = + __set.union(Set(__elem)) + + pure def isEmpty(__set: Set[a]): bool = + __set == Set() + + pure def nonEmpty(__set: Set[a]): bool = + __set != Set() + + pure def except(__set: Set[a], __elem: a): Set[a] = + __set.exclude(Set(__elem)) + + //-------------------------------------------------------------------------- + // Maps + //-------------------------------------------------------------------------- + + /// Update a map entry using the previous value. + /// + /// @param __map the map to update + /// @param __key the key to search for + /// @param __f a function that returns the new value for __key + /// when applied to __key's old value + /// @returns a new map equal to __map except that __key maps + /// to __f applied to __key's old value + pure def update(__map: a -> b, __key: a, __f: b => b): (a -> b) = + __map.put(__key, __f(__map.get(__key))) + + /// Update multiple entries in a map. 
+ /// + /// @param __map the map to update + /// @param __keys the set of keys to be updated in __map + /// @param __values a function from __map values to new values + /// @returns a new map equal to __map except that each key k in __keys + /// maps to __values applied to k's old value + pure def updateMultiple(__map: a -> b, __keys: Set[a], __values: b => b): (a -> b) = + __map.keys().union(__keys).mapBy(k => + if (k.in(__keys)) __values(__map.get(k)) else __map.get(k) + ) + + pure def mapRemoveMultiple(__map: a -> b, __keys: Set[a]): (a -> b) = + __map.keys().filter(k => not(k.in(__keys))).mapBy(k => __map.get(k)) + + pure def mapRemove(__map: a -> b, __key: a): (a -> b) = + mapRemoveMultiple(__map, Set(__key)) + + pure def mapGet(__map: a -> b, x: a): Option[b] = + if (__map.keys().contains(x)) Some(__map.get(x)) else None + + pure def mapGetDefault(__map: a -> b, __x: a, __default: b): b = + if (__map.keys().contains(__x)) __map.get(__x) else __default + + //-------------------------------------------------------------------------- + // Lists + //-------------------------------------------------------------------------- + + pure def listIsEmpty(__list: List[a]): bool = + __list == List() + + pure def listNonEmpty(__list: List[a]): bool = + __list != List() + + pure def listToSet(__list: List[a]): Set[a] = + __list.foldl(Set(), (__s, __x) => __s.join(__x)) + + pure def setToList(__set: Set[a]): List[a] = + __set.fold(List(), (__l, __x) => __l.append(__x)) + + pure def includes(__list: List[a], x: a): bool = + __list.listToSet().contains(x) + + pure def headOption(__list: List[a]): Option[a] = + if (__list.length() > 0) Some(__list.head()) else None + +} diff --git a/spec/p2p/README.md b/spec/p2p/README.md index 29efd8ecadf..1ca494055ff 100644 --- a/spec/p2p/README.md +++ b/spec/p2p/README.md @@ -34,13 +34,8 @@ The current content is organized as follows: p2p layer to the protocol layer, through the `Reactor` abstraction. This is a high-level specification (i.e., it should not be implementation-specific) of the p2p layer API, covering item 3. from the list above. -- [`legacy-docs`](./legacy-docs/): We keep older documentation in - the `legacy-docs` directory, as overall, it contains useful information. - However, part of this content is redundant, - being more comprehensively covered in more recent documents, - and some implementation details might be outdated - (see [issue #981](https://github.com/cometbft/cometbft/issues/981)). In addition to this content, some unfinished, work in progress, and auxiliary material can be found in the -[knowledge-base](https://github.com/cometbft/knowledge-base/tree/main/p2p) repository. +[knowledge-base](https://github.com/cometbft/knowledge-base/tree/main/p2p) repository, +and in the `legacy-docs` directory on GitHub (see [issue #981](https://github.com/cometbft/cometbft/issues/981)). diff --git a/spec/p2p/implementation/configuration.md b/spec/p2p/implementation/configuration.md deleted file mode 100644 index 9f172c22c81..00000000000 --- a/spec/p2p/implementation/configuration.md +++ /dev/null @@ -1,49 +0,0 @@ -# CometBFT p2p configuration - -This document contains configurable parameters a node operator can use to tune the p2p behaviour.
- -| Parameter| Default| Description | -| --- | --- | ---| -| ListenAddress | "tcp://0.0.0.0:26656" | Address to listen for incoming connections (0.0.0.0:0 means any interface, any port) | -| ExternalAddress | "" | Address to advertise to peers for them to dial | -| [Seeds](./pex-protocol.md#seed-nodes) | empty | Comma separated list of seed nodes to connect to (ID@host:port )| -| [Persistent peers](./peer_manager.md#persistent-peers) | empty | Comma separated list of nodes to keep persistent connections to (ID@host:port ) | -| [AddrBook](./addressbook.md) | defaultAddrBookPath | Path do address book | -| AddrBookStrict | true | Set true for strict address routability rules and false for private or local networks | -| [MaxNumInboundPeers](./switch.md#accepting-peers) | 40 | Maximum number of inbound peers | -| [MaxNumOutboundPeers](./peer_manager.md#ensure-peers) | 10 | Maximum number of outbound peers to connect to, excluding persistent peers | -| [UnconditionalPeers](./switch.md#accepting-peers) | empty | These are IDs of the peers which are allowed to be (re)connected as both inbound or outbound regardless of whether the node reached `max_num_inbound_peers` or `max_num_outbound_peers` or not. | -| PersistentPeersMaxDialPeriod| 0 * time.Second | Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) | -| FlushThrottleTimeout |100 * time.Millisecond| Time to wait before flushing messages out on the connection | -| MaxPacketMsgPayloadSize | 1024 | Maximum size of a message packet payload, in bytes | -| SendRate | 5120000 (5 mB/s) | Rate at which packets can be sent, in bytes/second | -| RecvRate | 5120000 (5 mB/s) | Rate at which packets can be received, in bytes/second| -| [PexReactor](./pex.md) | true | Set true to enable the peer-exchange reactor | -| SeedMode | false | Seed mode, in which node constantly crawls the network and looks for. Does not work if the peer-exchange reactor is disabled. | -| PrivatePeerIDs | empty | Comma separated list of peer IDsthat we do not add to the address book or gossip to other peers. They stay private to us. | -| AllowDuplicateIP | false | Toggle to disable guard against peers connecting from the same ip.| -| [HandshakeTimeout](./transport.md#connection-upgrade) | 20 * time.Second | Timeout for handshake completion between peers | -| [DialTimeout](./switch.md#dialing-peers) | 3 * time.Second | Timeout for dialing a peer | - - -These parameters can be set using the `$CMTHOME/config/config.toml` file. 
A subset of them can also be changed via command line using the following command line flags: - -| Parameter | Flag | Example | -| --- | --- | --- | -| Listen address| `p2p.laddr` | "tcp://0.0.0.0:26656" | -| Seed nodes | `p2p.seeds` | `--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` | -| Persistent peers | `p2p.persistent_peers` | `--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` | -| Unconditional peers | `p2p.unconditional_peer_ids` | `--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` | -| PexReactor | `p2p.pex` | `--p2p.pex` | -| Seed mode | `p2p.seed_mode` | `--p2p.seed_mode` | -| Private peer ids | `p2p.private_peer_ids` | `--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` | - - **Note on persistent peers** - - If `persistent_peers_max_dial_period` is set greater than zero, the -pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period` -during exponential backoff and we keep trying again without giving up. - -If `seeds` and `persistent_peers` intersect, -the user will be warned that seeds may auto-close connections -and that the node may not be able to keep the connection persistent. diff --git a/spec/p2p/implementation/peer_manager.md b/spec/p2p/implementation/peer_manager.md index 2deb82a4dc6..b842e3d4059 100644 --- a/spec/p2p/implementation/peer_manager.md +++ b/spec/p2p/implementation/peer_manager.md @@ -85,16 +85,9 @@ To avoid this delay, which can be particularly relevant when the node has no peers, a node immediately attempts to dial peer addresses when they are received from a peer that is locally configured as a seed node. -> FIXME: The current logic was introduced in [#3762](https://github.com/tendermint/tendermint/pull/3762). -> Although it fix the issue, the delay between receiving an address and dialing -> the peer, it does not impose and limit on how many addresses are dialed in this -> scenario. -> So, all addresses received from a seed node are dialed, regardless of the -> current number of outbound peers, the number of dialing routines, or the -> `MaxNumOutboundPeers` parameter. -> -> Issue [#9548](https://github.com/tendermint/tendermint/issues/9548) was -> created to handle this situation. +> This was implemented in a rough way, leading to inconsistencies described in +> this [issue](https://github.com/cometbft/cometbft/issues/486), +> fixed by this [PR](https://github.com/cometbft/cometbft/pull/3360). ### First round diff --git a/spec/p2p/implementation/pex.md b/spec/p2p/implementation/pex.md index 8f49e84af74..22ade2791ba 100644 --- a/spec/p2p/implementation/pex.md +++ b/spec/p2p/implementation/pex.md @@ -62,9 +62,9 @@ The `OnStop` method implements `BaseService` and stops the PEX reactor. The address book routine that periodically saves its content to disk is stopped. -## GetChannels +## StreamDescriptors -The `GetChannels` method, from the `Reactor` interface, returns the descriptor +The `StreamDescriptors` method, from the `Reactor` interface, returns the descriptor of the channel used by the PEX protocol. 
The channel ID is `PexChannel` (0), with priority `1`, send queue capacity of diff --git a/spec/p2p/implementation/switch.md b/spec/p2p/implementation/switch.md index 4497fef96e2..17ad9c8fc9d 100644 --- a/spec/p2p/implementation/switch.md +++ b/spec/p2p/implementation/switch.md @@ -104,8 +104,7 @@ The exponentially increasing dialing interval is adjusted as well by a random jitter up to `dialRandomizerIntervalMilliseconds`. At most `reconnectBackOffAttempts`, hard-coded to 10, are made using this approach. -> Note: the first sleep interval, to which a random jitter is applied, is 1, -> not `reconnectBackOffBaseSeconds`, as the first exponent is `0`... +> Note: the intervals don't work as expected, see [#3519](https://github.com/cometbft/cometbft/issues/3519). ## Accepting peers diff --git a/spec/p2p/implementation/types.md b/spec/p2p/implementation/types.md index cef2632936b..19a15ae36be 100644 --- a/spec/p2p/implementation/types.md +++ b/spec/p2p/implementation/types.md @@ -116,7 +116,7 @@ Interface `IPeerSet` offers methods to access a table of [`Peer`](#peergo) insta Type `PeerSet` implements a thread-safe table of [`Peer`](#peergo) instances, used by the [switch](#switchgo). -The switch provides limited access to this table by returing a `IPeerSet` +The switch provides limited access to this table by returning a `IPeerSet` instance, used by the [PEX reactor](#pex_reactorgo). ### `switch.go` diff --git a/spec/p2p/legacy-docs/config.md b/spec/p2p/legacy-docs/config.md deleted file mode 100644 index a087f8e1d53..00000000000 --- a/spec/p2p/legacy-docs/config.md +++ /dev/null @@ -1,49 +0,0 @@ -# P2P Config - -Here we describe configuration options around the Peer Exchange. -These can be set using flags or via the `$CMTHOME/config/config.toml` file. - -## Seed Mode - -`--p2p.seed_mode` - -The node operates in seed mode. In seed mode, a node continuously crawls the network for peers, -and upon incoming connection shares some peers and disconnects. - -## Seeds - -`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` - -Dials these seeds when we need more peers. They should return a list of peers and then disconnect. -If we already have enough peers in the address book, we may never need to dial them. - -## Persistent Peers - -`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` - -Dial these peers and auto-redial them if the connection fails. -These are intended to be trusted persistent peers that can help -anchor us in the p2p network. The auto-redial uses exponential -backoff and will give up after a day of trying to connect. - -But If `persistent_peers_max_dial_period` is set greater than zero, -pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period` -during exponential backoff and we keep trying again without giving up - -**Note:** If `seeds` and `persistent_peers` intersect, -the user will be warned that seeds may auto-close connections -and that the node may not be able to keep the connection persistent. - -## Private Peers - -`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` - -These are IDs of the peers that we do not add to the address book or gossip to -other peers. They stay private to us. 
- -## Unconditional Peers - -`--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` - -These are IDs of the peers which are allowed to be connected by both inbound or outbound regardless of -`max_num_inbound_peers` or `max_num_outbound_peers` of user's node reached or not. diff --git a/spec/p2p/legacy-docs/connection.md b/spec/p2p/legacy-docs/connection.md index 158d9d4fa5b..21d14ed3e22 100644 --- a/spec/p2p/legacy-docs/connection.md +++ b/spec/p2p/legacy-docs/connection.md @@ -85,7 +85,7 @@ incoming messages are received on the reactor. // Declare a MyReactor reactor that handles messages on MyChannelID. type MyReactor struct{} -func (reactor MyReactor) GetChannels() []*ChannelDescriptor { +func (reactor MyReactor) StreamDescriptors() []*ChannelDescriptor { return []*ChannelDescriptor{ChannelDescriptor{ID:MyChannelID, Priority: 1}} } @@ -103,7 +103,7 @@ switch := NewSwitch([]Reactor{MyReactor{}}) ... // Send a random message to all outbound connections -for _, peer := range switch.Peers().List() { +for _, peer := range switch.Peers().Copy() { if peer.IsOutbound() { peer.Send(MyChannelID, "Here's a random message") } diff --git a/spec/p2p/legacy-docs/peer.md b/spec/p2p/legacy-docs/peer.md index 995babaf879..d6c85e98163 100644 --- a/spec/p2p/legacy-docs/peer.md +++ b/spec/p2p/legacy-docs/peer.md @@ -74,8 +74,8 @@ Before continuing, we check if the new peer has the same ID as ourselves or an existing peer. If so, we disconnect. We also check the peer's address and public key against -an optional whitelist which can be managed through the ABCI app - -if the whitelist is enabled and the peer does not qualify, the connection is +an optional allowlist which can be managed through the ABCI app - +if the allowlist is enabled and the peer does not qualify, the connection is terminated. ### CometBFT Version Handshake diff --git a/spec/p2p/reactor-api/p2p-api.md b/spec/p2p/reactor-api/p2p-api.md index 927e416c72b..86a7a7b5d82 100644 --- a/spec/p2p/reactor-api/p2p-api.md +++ b/spec/p2p/reactor-api/p2p-api.md @@ -45,19 +45,20 @@ the p2p layer: the set of connected peers. func (sw *Switch) Peers() IPeerSet -The `Peers()` method returns the current set of connected peers. -The returned `IPeerSet` is an immutable concurrency-safe copy of this set. -Observe that the `Peer` handlers returned by this method were previously -[added to the reactor][reactor-addpeer] via the `InitPeer(Peer)` method, -but not yet removed via the `RemovePeer(Peer)` method. +The `Peers()` method returns the current set of connected peers. The returned +`IPeerSet` is concurrency-safe. Observe that the `Peer` handlers returned by +this method were previously [added to the reactor][reactor-addpeer] via the +`InitPeer(Peer)` method, but not yet removed via the `RemovePeer(Peer)` method. Thus, a priori, reactors should already have this information. func (sw *Switch) NumPeers() (outbound, inbound, dialing int) The `NumPeers()` method returns the current number of connected peers, -distinguished between `outbound` and `inbound` peers. -An `outbound` peer is a peer the node has dialed to, while an `inbound` peer is -a peer the node has accepted a connection from. +distinguished between `outbound` and `inbound` peers. An `outbound` peer is a +peer the node has dialed to, while an `inbound` peer is a peer the node has +accepted a connection from. Note that `unconditional` peers are not being +counted here. 
+ The third field `dialing` reports the number of peers to which the node is currently attempting to connect, so not (yet) connected peers. @@ -90,7 +91,7 @@ returned channel, which is closed when all operations are completed. > part of the `Peer.Send(Envelope)` helper method, that is, once per > connected peer. > - The return value of the broadcast method is not considered by any of the -> standard reactors that employ the method. One of the reasons is that is is +> standard reactors that employ the method. One of the reasons is that it is > not possible to associate each of the boolean outputs added to the > returned channel to a peer. @@ -181,15 +182,16 @@ From this point, reactors can use the methods of the new `Peer` instance. The table below summarizes the interaction of the standard reactors with connected peers, with the `Peer` methods used by them: -| `Peer` API method | consensus | block sync | state sync | mempool | evidence | PEX | -|--------------------------------------------|-----------|------------|------------|---------|-----------|-------| -| `ID() ID` | x | x | x | x | x | x | -| `IsRunning() bool` | x | | | x | x | | -| `Quit() <-chan struct{}` | | | | x | x | | -| `Get(string) interface{}` | x | | | x | x | | -| `Set(string, interface{})` | x | | | | | | -| `Send(Envelope) bool` | x | x | x | x | x | x | -| `TrySend(Envelope) bool` | x | x | | | | | +| `Peer` API method | consensus | block sync | state sync | mempool | evidence | PEX | +|----------------------------|-----------|------------|------------|---------|----------|-----| +| `ID() ID` | x | x | x | x | x | x | +| `IsRunning() bool` | x | | | x | x | | +| `Quit() <-chan struct{}` | | | | x | x | | +| `Get(string) interface{}` | x | | | x | x | | +| `Set(string, interface{})` | x | | | | | | +| `HasChannel(byte) bool` | x | | | x | x | | +| `Send(Envelope) bool` | x | x | x | x | x | x | +| `TrySend(Envelope) bool` | x | x | | | | | The above list is not exhaustive as it does not include all the `Peer` methods invoked by the PEX reactor, a special component that should be considered part @@ -265,8 +267,10 @@ Finally, a `Peer` instance allows a reactor to send messages to companion reactors running at that peer. This is ultimately the goal of the switch when it provides `Peer` instances to the registered reactors. -There are two methods for sending messages: +There are two methods for sending messages, and one auxiliary method to check +whether the peer supports a given channel: + + func (p Peer) HasChannel(chID byte) bool func (p Peer) Send(e Envelope) bool func (p Peer) TrySend(e Envelope) bool @@ -275,6 +279,9 @@ set as follows: - `ChannelID`: the channel the message should be sent through, which defines the reactor that will process the message; + - The auxiliary `HasChannel()` method allows testing whether the remote peer + implements a channel; if it does not, both message-sending methods will + immediately return `false`, as sending always fails. - `Src`: this field represents the source of an incoming message, which is irrelevant for outgoing messages; - `Message`: the actual message's payload, which is marshalled using protocol buffers.
diff --git a/spec/p2p/reactor-api/reactor.md b/spec/p2p/reactor-api/reactor.md index 9d85e7ccd0c..50ad283256c 100644 --- a/spec/p2p/reactor-api/reactor.md +++ b/spec/p2p/reactor-api/reactor.md @@ -42,7 +42,7 @@ producing events associated to a different peer: ```abnf start = registration on-start *peer-management on-stop -registration = get-channels set-switch +registration = stream-descriptors set-switch ; Refers to a single peer, a reactor must support multiple concurrent peers peer-management = init-peer start-peer stop-peer @@ -51,15 +51,15 @@ connected-peer = add-peer *receive stop-peer = [peer-error] remove-peer ; Service interface -on-start = %s"OnStart()" -on-stop = %s"OnStop()" +on-start = %s"OnStart()" +on-stop = %s"OnStop()" ; Reactor interface -get-channels = %s"GetChannels()" -set-switch = %s"SetSwitch(*Switch)" -init-peer = %s"InitPeer(Peer)" -add-peer = %s"AddPeer(Peer)" -remove-peer = %s"RemovePeer(Peer, reason)" -receive = %s"Receive(Envelope)" +stream-descriptors = %s"StreamDescriptors()" +set-switch = %s"SetSwitch(*Switch)" +init-peer = %s"InitPeer(Peer)" +add-peer = %s"AddPeer(Peer)" +remove-peer = %s"RemovePeer(Peer, reason)" +receive = %s"Receive(Envelope)" ; Errors, for reference start-error = %s"log(Error starting peer)" @@ -69,7 +69,7 @@ peer-error = %s"log(Stopping peer for error)" The grammar is written in case-sensitive Augmented Backus–Naur form (ABNF, specified in [IETF RFC 7405](https://datatracker.ietf.org/doc/html/rfc7405)). It is inspired on the grammar produced to specify the interaction of CometBFT -with an ABCI++ application, available [here](../../abci/abci%2B%2B_comet_expected_behavior.md). +with an ABCI application, available [here](../../abci/abci%2B%2B_comet_expected_behavior.md). ## Registration @@ -85,11 +85,11 @@ In other words, there is no support for registering a reactor on a running node: reactors must be registered as part of the setup of a node. ```abnf -registration = get-channels set-switch +registration = stream-descriptors set-switch ``` The p2p layer retrieves from the reactor a list of channels the reactor is -responsible for, using the `GetChannels()` method. +responsible for, using the `StreamDescriptors()` method. The reactor implementation should thereafter expect the delivery of every message received by the p2p layer in the informed channels. diff --git a/spec/p2p/reactor-api/reactor.qnt b/spec/p2p/reactor-api/reactor.qnt index 002c57023af..4a10d11f212 100644 --- a/spec/p2p/reactor-api/reactor.qnt +++ b/spec/p2p/reactor-api/reactor.qnt @@ -1,7 +1,7 @@ // -*- mode: Bluespec; -*- /* * Reactor is responsible for handling incoming messages on one or more - * Channel. Switch calls GetChannels when reactor is added to it. When a new + * Channel. Switch calls ChannelDescriptors when reactor is added to it. When a new * peer joins our node, InitPeer and AddPeer are called. RemovePeer is called * when the peer is stopped. Receive is called when a message is received on a * channel associated with this reactor. @@ -77,10 +77,10 @@ module reactor { // Pure definitions below represent the `p2p.Reactor` interface methods: /* - * GetChannels returns the list of MConnection.ChannelDescriptor. Make sure + * ChannelDescriptors returns the list of MConnection.ChannelDescriptor. Make sure * that each ID is unique across all the reactors added to the switch. 
*/ - pure def GetChannels(s: ReactorState): Set[ChannelDescriptor] = { + pure def ChannelDescriptors(s: ReactorState): Set[ChannelDescriptor] = { s.channels // Static list, configured at initialization. } @@ -171,7 +171,7 @@ module reactor { reactor.state == "init", // Assign the reactor as responsible for its channel IDs, which // should not be already assigned to another reactor. - val chIDs = reactor.GetChannels().map(c => c.ID) + val chIDs = reactor.ChannelDescriptors().map(c => c.ID) all { size(chIDs.intersect(reactorsByCh.keys())) == 0, reactorsByCh' = reactorsByCh.keys().union(chIDs). @@ -222,7 +222,7 @@ module reactor { // Reactor is assigned to the message's channel ID e.ChannelID.in(reactorsByCh.keys()), reactorsByCh.get(e.ChannelID) == reactor.name, - reactor.GetChannels().exists(c => c.ID == e.ChannelID), + reactor.ChannelDescriptors().exists(c => c.ID == e.ChannelID), updateReactorTo(reactor.Receive(e)) } diff --git a/state/compatibility_test.go b/state/compatibility_test.go new file mode 100644 index 00000000000..01a5a81d6e3 --- /dev/null +++ b/state/compatibility_test.go @@ -0,0 +1,654 @@ +package state_test + +import ( + "fmt" + "testing" + "time" + + gogo "github.com/cosmos/gogoproto/types" + "github.com/stretchr/testify/require" + + dbm "github.com/cometbft/cometbft-db" + abciv1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" + abciv1beta1 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta1" + abciv1beta2 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta2" + abciv1beta3 "github.com/cometbft/cometbft/api/cometbft/abci/v1beta3" + cryptov1 "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + statev1 "github.com/cometbft/cometbft/api/cometbft/state/v1" + statev1beta2 "github.com/cometbft/cometbft/api/cometbft/state/v1beta2" + statev1beta3 "github.com/cometbft/cometbft/api/cometbft/state/v1beta3" + typesv1 "github.com/cometbft/cometbft/api/cometbft/types/v1" + typesv1beta1 "github.com/cometbft/cometbft/api/cometbft/types/v1beta1" + typesv1beta2 "github.com/cometbft/cometbft/api/cometbft/types/v1beta2" + "github.com/cometbft/cometbft/crypto/ed25519" + sm "github.com/cometbft/cometbft/state" +) + +// Compatibility test across different state proto versions + +func calcABCIResponsesKey(height int64) []byte { + return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) +} + +var lastABCIResponseKey = []byte("lastABCIResponseKey") + +var ( + _ sm.Store = (*MultiStore)(nil) + _ LegacyStore = (*MultiStore)(nil) +) + +// MultiStore represents a state store that implements the Store interface +// and contains additional store and database options. +// +// Fields: +// - Store (sm.Store): The store instance used by the MultiStore. +// - db (dbm.DB): The database instance used by the MultiStore. +// - StoreOptions (sm.StoreOptions): The options for the MultiStore. +type MultiStore struct { + sm.Store + db dbm.DB + sm.StoreOptions +} + +// NewMultiStore initializes a new instance of MultiStore with the provided parameters. +// It sets the store, db, and StoreOptions fields of the MultiStore struct. +// +// Parameters: +// - db (dbm.DB): The database instance to be used by the MultiStore. +// - options (sm.StoreOptions): The store options to be used by the MultiStore. +// - store (sm.Store): The store instance to be used by the MultiStore. +// +// Returns: +// - *MultiStore: A pointer to the newly created MultiStore instance. 
+func NewMultiStore(db dbm.DB, options sm.StoreOptions, store sm.Store) *MultiStore { + return &MultiStore{ + Store: store, + db: db, + StoreOptions: options, + } +} + +// LegacyStore represents a legacy data store. +// Example usage: +// +// _ LegacyStore = (*MultiStore)(nil) +type LegacyStore interface { + SaveABCIResponses(height int64, abciResponses *statev1beta2.ABCIResponses) error +} + +// SaveABCIResponses saves the ABCIResponses for a given height in the MultiStore. +// It strips out any nil values from the DeliverTxs field, and saves the ABCIResponses to +// disk if the DiscardABCIResponses flag is set to false. It also saves the last ABCI response +// for crash recovery, overwriting the previously saved response. +// +// Parameters: +// - height (int64): The height at which the ABCIResponses are being saved. +// - abciResponses (ABCIResponses): The ABCIResponses to be saved. +// +// Returns: +// - error: An error if there was a problem saving the ABCIResponses. +// +// NOTE: The MultiStore must be properly configured with the StoreOptions and db before calling this method. +func (multi MultiStore) SaveABCIResponses(height int64, abciResponses *statev1beta2.ABCIResponses) error { + var dtxs []*abciv1beta2.ResponseDeliverTx + // strip nil values, + for _, tx := range abciResponses.DeliverTxs { + if tx != nil { + dtxs = append(dtxs, tx) + } + } + abciResponses.DeliverTxs = dtxs + + // If the flag is false then we save the ABCIResponse. This can be used for the /BlockResults + // query or to reindex an event using the command line. + if !multi.StoreOptions.DiscardABCIResponses { + bz, err := abciResponses.Marshal() + if err != nil { + return err + } + if err := multi.db.Set(calcABCIResponsesKey(height), bz); err != nil { + return err + } + } + + // We always save the last ABCI response for crash recovery. + // This overwrites the previous saved ABCI Response. + response := &statev1beta2.ABCIResponsesInfo{ + AbciResponses: abciResponses, + Height: height, + } + bz, err := response.Marshal() + if err != nil { + return err + } + + return multi.db.SetSync(lastABCIResponseKey, bz) +} + +// TestSaveLegacyAndLoadFinalizeBlock tests saving and loading of ABCIResponses +// using the multiStore. It verifies that the loaded ABCIResponses match the +// original ones and that missing fields are correctly handled. +// This test is important for the LoadFinalizeBlockResponse method in the state store. 
+func TestSaveLegacyAndLoadFinalizeBlock(t *testing.T) { + tearDown, stateDB, _, store := setupTestCaseWithStore(t) + defer tearDown(t) + options := sm.StoreOptions{ + DiscardABCIResponses: false, + } + + height := int64(1) + multiStore := NewMultiStore(stateDB, options, store) + + // try with a complete ABCI Response + v1beta2ABCIResponses := newV1Beta2ABCIResponses() + err := multiStore.SaveABCIResponses(height, &v1beta2ABCIResponses) + require.NoError(t, err) + require.Equal(t, 1, len(v1beta2ABCIResponses.DeliverTxs)) + require.Equal(t, 1, len(v1beta2ABCIResponses.BeginBlock.Events)) + require.Equal(t, 1, len(v1beta2ABCIResponses.EndBlock.Events)) + + finalizeBlockResponse, err := multiStore.LoadFinalizeBlockResponse(height) + require.NoError(t, err) + + // Test for not nil + require.NotNil(t, finalizeBlockResponse.TxResults) + require.NotNil(t, finalizeBlockResponse.Events) + require.NotNil(t, finalizeBlockResponse.ValidatorUpdates) + require.NotNil(t, finalizeBlockResponse.ConsensusParamUpdates) + require.Nil(t, finalizeBlockResponse.AppHash) + + // Test for equality + require.Equal(t, 1, len(finalizeBlockResponse.TxResults)) + require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs), len(finalizeBlockResponse.TxResults)) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Code, finalizeBlockResponse.TxResults[0].Code) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Data, finalizeBlockResponse.TxResults[0].Data) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Log, finalizeBlockResponse.TxResults[0].Log) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].GasWanted, finalizeBlockResponse.TxResults[0].GasWanted) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].GasUsed, finalizeBlockResponse.TxResults[0].GasUsed) + require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs[0].Events), len(finalizeBlockResponse.TxResults[0].Events)) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Type, finalizeBlockResponse.TxResults[0].Events[0].Type) + require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes), len(finalizeBlockResponse.TxResults[0].Events[0].Attributes)) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Key, finalizeBlockResponse.TxResults[0].Events[0].Attributes[0].Key) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Value, finalizeBlockResponse.TxResults[0].Events[0].Attributes[0].Value) + require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Codespace, finalizeBlockResponse.TxResults[0].Codespace) + + require.Equal(t, 2, len(finalizeBlockResponse.Events)) + require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events)+len(v1beta2ABCIResponses.EndBlock.Events), len(finalizeBlockResponse.Events)) + + require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Type, finalizeBlockResponse.Events[0].Type) + require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events[0].Attributes)+1, len(finalizeBlockResponse.Events[0].Attributes)) // +1 for inject 'mode' attribute + require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Attributes[0].Key, finalizeBlockResponse.Events[0].Attributes[0].Key) + require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Attributes[0].Value, finalizeBlockResponse.Events[0].Attributes[0].Value) + + require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Block.MaxBytes, finalizeBlockResponse.ConsensusParamUpdates.Block.MaxBytes) + require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Block.MaxGas, 
+	require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Evidence.MaxAgeNumBlocks, finalizeBlockResponse.ConsensusParamUpdates.Evidence.MaxAgeNumBlocks)
+	require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Evidence.MaxAgeDuration, finalizeBlockResponse.ConsensusParamUpdates.Evidence.MaxAgeDuration)
+	require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Evidence.MaxBytes, finalizeBlockResponse.ConsensusParamUpdates.Evidence.MaxBytes)
+	require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Validator.PubKeyTypes, finalizeBlockResponse.ConsensusParamUpdates.Validator.PubKeyTypes)
+	require.Equal(t, v1beta2ABCIResponses.EndBlock.ConsensusParamUpdates.Version.App, finalizeBlockResponse.ConsensusParamUpdates.Version.App)
+
+	require.Nil(t, finalizeBlockResponse.ConsensusParamUpdates.Abci)
+	require.Nil(t, finalizeBlockResponse.ConsensusParamUpdates.Synchrony)
+	require.Nil(t, finalizeBlockResponse.ConsensusParamUpdates.Feature)
+	require.Nil(t, finalizeBlockResponse.AppHash)
+
+	require.Equal(t, finalizeBlockResponse.NextBlockDelay, time.Duration(0))
+
+	require.Equal(t, len(v1beta2ABCIResponses.EndBlock.ValidatorUpdates), len(finalizeBlockResponse.ValidatorUpdates))
+	require.Equal(t, v1beta2ABCIResponses.EndBlock.ValidatorUpdates[0].Power, finalizeBlockResponse.ValidatorUpdates[0].Power)
+
+	// skip until an equivalency test is possible
+	// require.NotNil(t, finalizeBlockResponse.ValidatorUpdates[0].PubKeyBytes)
+	// require.NotEmpty(t, finalizeBlockResponse.ValidatorUpdates[0].PubKeyType)
+	// require.Equal(t, v1beta2ABCIResponses.ValidatorUpdates[0].PubKey.GetEd25519(), finalizeBlockResponse.ValidatorUpdates[0].PubKeyBytes)
+
+	// try with an ABCI Response missing fields
+	height = int64(2)
+	v1beta2ABCIResponses = newV1Beta2ABCIResponsesWithNullFields()
+	require.Equal(t, 1, len(v1beta2ABCIResponses.DeliverTxs))
+	require.Equal(t, 1, len(v1beta2ABCIResponses.BeginBlock.Events))
+	require.Nil(t, v1beta2ABCIResponses.EndBlock)
+	err = multiStore.SaveABCIResponses(height, &v1beta2ABCIResponses)
+	require.NoError(t, err)
+	finalizeBlockResponse, err = multiStore.LoadFinalizeBlockResponse(height)
+	require.NoError(t, err)
+
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs), len(finalizeBlockResponse.TxResults))
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].String(), finalizeBlockResponse.TxResults[0].String())
+	require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events), len(finalizeBlockResponse.Events))
+}
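The "+1 for the injected 'mode' attribute" assertion above reflects how the migration folds BeginBlock and EndBlock events into a single FinalizeBlock event list. A minimal sketch of that folding, assuming the v1beta2 event types used in this file; the attribute values shown are illustrative, and the real conversion lives in the state store:

```go
// mergeLegacyEvents sketches how BeginBlock and EndBlock events can be
// merged into one FinalizeBlock event list, tagging each event with an
// extra "mode" attribute so the two phases stay distinguishable.
func mergeLegacyEvents(begin, end []abciv1beta2.Event) []abciv1beta2.Event {
	merged := make([]abciv1beta2.Event, 0, len(begin)+len(end))
	for _, e := range begin {
		e.Attributes = append(e.Attributes, abciv1beta2.EventAttribute{Key: "mode", Value: "BeginBlock"})
		merged = append(merged, e)
	}
	for _, e := range end {
		e.Attributes = append(e.Attributes, abciv1beta2.EventAttribute{Key: "mode", Value: "EndBlock"})
		merged = append(merged, e)
	}
	return merged
}
```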
+
+// This test unmarshals a v1beta2.ABCIResponses as a statev1.LegacyABCIResponses.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion should not fail because the two types are compatible.
+func TestStateV1Beta2ABCIResponsesAsStateV1LegacyABCIResponse(t *testing.T) {
+	v1beta2ABCIResponses := newV1Beta2ABCIResponses()
+
+	v1b2Resp, err := v1beta2ABCIResponses.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, v1b2Resp)
+
+	// unmarshal a v1beta2 ABCI Response as a LegacyABCIResponse
+	legacyABCIResponses := new(statev1.LegacyABCIResponses)
+	err = legacyABCIResponses.Unmarshal(v1b2Resp)
+	require.NoError(t, err)
+
+	// ensure not nil
+	require.NotNil(t, legacyABCIResponses.DeliverTxs)
+	require.NotNil(t, legacyABCIResponses.EndBlock)
+	require.NotNil(t, legacyABCIResponses.BeginBlock)
+
+	// ensure equality
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs), len(legacyABCIResponses.DeliverTxs))
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Code, legacyABCIResponses.DeliverTxs[0].Code)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Data, legacyABCIResponses.DeliverTxs[0].Data)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Log, legacyABCIResponses.DeliverTxs[0].Log)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].GasWanted, legacyABCIResponses.DeliverTxs[0].GasWanted)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].GasUsed, legacyABCIResponses.DeliverTxs[0].GasUsed)
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs[0].Events), len(legacyABCIResponses.DeliverTxs[0].Events))
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes), len(legacyABCIResponses.DeliverTxs[0].Events[0].Attributes))
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Key, legacyABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Key)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Value, legacyABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Value)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Codespace, legacyABCIResponses.DeliverTxs[0].Codespace)
+
+	require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events), len(legacyABCIResponses.BeginBlock.Events))
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Type, legacyABCIResponses.BeginBlock.Events[0].Type)
+	require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events[0].Attributes), len(legacyABCIResponses.BeginBlock.Events[0].Attributes))
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Attributes[0].Key, legacyABCIResponses.BeginBlock.Events[0].Attributes[0].Key)
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Attributes[0].Value, legacyABCIResponses.BeginBlock.Events[0].Attributes[0].Value)
+}
+
+// This test unmarshals a v1beta2.ABCIResponses as a v1beta3.ResponseFinalizeBlock.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion should fail because the two types are not compatible.
+func TestStateV1Beta2ABCIResponsesAsV1Beta3ResponseFinalizeBlock(t *testing.T) {
+	v1beta2ABCIResponses := newV1Beta2ABCIResponses()
+	data, err := v1beta2ABCIResponses.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, data)
+
+	// This cannot work since the schemas differ; a wrong wireType error is generated
+	responseFinalizeBlock := new(abciv1beta3.ResponseFinalizeBlock)
+	err = responseFinalizeBlock.Unmarshal(data)
+	require.Error(t, err)
+	require.ErrorContains(t, err, "unexpected EOF")
+}
+
+// This test unmarshals a v1beta2.ABCIResponses as a v1.FinalizeBlockResponse.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion should fail because the two types are not compatible.
+func TestStateV1Beta2ABCIResponsesAsV1FinalizeBlockResponse(t *testing.T) {
+	v1beta2ABCIResponses := newV1Beta2ABCIResponses()
+	data, err := v1beta2ABCIResponses.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, data)
+
+	// This cannot work since the schemas differ; a wrong wireType error is generated
+	finalizeBlockResponse := new(abciv1.FinalizeBlockResponse)
+	err = finalizeBlockResponse.Unmarshal(data)
+	require.Error(t, err)
+	require.ErrorContains(t, err, "unexpected EOF")
+}
+
+// This test unmarshals a v1beta2.ABCIResponses as a v1beta3.ResponseFinalizeBlock.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion doesn't fail because no error is returned, but the types are NOT compatible.
+func TestStateV1Beta2ABCIResponsesWithNullAsV1Beta3ResponseFinalizeBlock(t *testing.T) {
+	v1beta2ABCIResponsesWithNull := newV1Beta2ABCIResponsesWithNullFields()
+	data, err := v1beta2ABCIResponsesWithNull.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, data)
+
+	// This should not work since they have different schemas,
+	// but no error is returned, so an ABCIResponse is deserialized
+	// on top of a FinalizeBlockResponse, giving the false impression they are the same.
+	// Because it doesn't error out, the fields in finalizeBlockResponse keep
+	// their zero values (e.g. nil, 0, "").
+	finalizeBlockResponse := new(abciv1beta3.ResponseFinalizeBlock)
+	err = finalizeBlockResponse.Unmarshal(data)
+	require.NoError(t, err)
+	require.Nil(t, finalizeBlockResponse.AppHash)
+	require.Nil(t, finalizeBlockResponse.TxResults)
+}
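Taken together, the tests above motivate a decode strategy of "try the new schema, sanity-check the result, fall back to the legacy schema". A hedged sketch of that pattern follows; the real logic lives in LoadFinalizeBlockResponse, and the `TxResults != nil` check is an illustrative heuristic, not the store's actual validation:

```go
// decodeFinalizeBlockOrLegacy sketches the fallback decode. Protobuf can
// "successfully" unmarshal bytes into the wrong message (see the null-fields
// test above), so a nil error alone does not prove the new schema matched.
func decodeFinalizeBlockOrLegacy(bz []byte) (*abciv1beta3.ResponseFinalizeBlock, *statev1beta3.LegacyABCIResponses, error) {
	resp := new(abciv1beta3.ResponseFinalizeBlock)
	if err := resp.Unmarshal(bz); err == nil && resp.TxResults != nil {
		// Decoded cleanly as the current schema.
		return resp, nil, nil
	}
	// Retry as the legacy BeginBlock/DeliverTx/EndBlock layout.
	legacy := new(statev1beta3.LegacyABCIResponses)
	if err := legacy.Unmarshal(bz); err != nil {
		// Neither schema fits: corrupted data or an ABCI spec change.
		return nil, nil, err
	}
	return nil, legacy, nil
}
```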
+
+// This test unmarshals a v1beta2.ABCIResponses as a statev1beta3.LegacyABCIResponses.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion should work because the two types are compatible.
+func TestStateV1Beta2ABCIResponsesAsStateV1Beta3LegacyABCIResponse(t *testing.T) {
+	v1beta2ABCIResponses := newV1Beta2ABCIResponses()
+
+	data, err := v1beta2ABCIResponses.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, data)
+
+	// This works because they are equivalent protos and the fields are populated
+	legacyABCIResponses := new(statev1beta3.LegacyABCIResponses)
+	err = legacyABCIResponses.Unmarshal(data)
+	require.NoError(t, err)
+
+	// ensure not nil
+	require.NotNil(t, legacyABCIResponses.DeliverTxs)
+	require.NotNil(t, legacyABCIResponses.EndBlock)
+	require.NotNil(t, legacyABCIResponses.BeginBlock)
+
+	// ensure equality
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs), len(legacyABCIResponses.DeliverTxs))
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Code, legacyABCIResponses.DeliverTxs[0].Code)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Data, legacyABCIResponses.DeliverTxs[0].Data)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Log, legacyABCIResponses.DeliverTxs[0].Log)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].GasWanted, legacyABCIResponses.DeliverTxs[0].GasWanted)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].GasUsed, legacyABCIResponses.DeliverTxs[0].GasUsed)
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs[0].Events), len(legacyABCIResponses.DeliverTxs[0].Events))
+	require.Equal(t, len(v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes), len(legacyABCIResponses.DeliverTxs[0].Events[0].Attributes))
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Key, legacyABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Key)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Value, legacyABCIResponses.DeliverTxs[0].Events[0].Attributes[0].Value)
+	require.Equal(t, v1beta2ABCIResponses.DeliverTxs[0].Codespace, legacyABCIResponses.DeliverTxs[0].Codespace)
+
+	require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events), len(legacyABCIResponses.BeginBlock.Events))
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events, legacyABCIResponses.BeginBlock.Events)
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Type, legacyABCIResponses.BeginBlock.Events[0].Type)
+	require.Equal(t, len(v1beta2ABCIResponses.BeginBlock.Events[0].Attributes), len(legacyABCIResponses.BeginBlock.Events[0].Attributes))
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Attributes[0].Key, legacyABCIResponses.BeginBlock.Events[0].Attributes[0].Key)
+	require.Equal(t, v1beta2ABCIResponses.BeginBlock.Events[0].Attributes[0].Value, legacyABCIResponses.BeginBlock.Events[0].Attributes[0].Value)
+}
+
+// This test unmarshals a v1beta2.ABCIResponses with null fields as a statev1beta3.LegacyABCIResponses.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion should work because the types are compatible, even when the fields being converted are null.
+func TestStateV1Beta2ABCIResponsesWithNullAsStateV1Beta3LegacyABCIResponse(t *testing.T) {
+	v1beta2ABCIResponsesWithNull := newV1Beta2ABCIResponsesWithNullFields()
+	data, err := v1beta2ABCIResponsesWithNull.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, data)
+
+	// This works because they are equivalent protos and the fields are populated;
+	// even a null field is converted properly
+	legacyResponseWithNull := new(statev1beta3.LegacyABCIResponses)
+	err = legacyResponseWithNull.Unmarshal(data)
+	require.NoError(t, err)
+	require.NotNil(t, legacyResponseWithNull.DeliverTxs)
+	require.Nil(t, legacyResponseWithNull.EndBlock)
+	require.NotNil(t, legacyResponseWithNull.BeginBlock)
+
+	require.Equal(t, len(v1beta2ABCIResponsesWithNull.BeginBlock.Events), len(legacyResponseWithNull.BeginBlock.Events))
+	require.Equal(t, v1beta2ABCIResponsesWithNull.BeginBlock.Events, legacyResponseWithNull.BeginBlock.Events)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.BeginBlock.Events[0].Type, legacyResponseWithNull.BeginBlock.Events[0].Type)
+	require.Equal(t, len(v1beta2ABCIResponsesWithNull.BeginBlock.Events[0].Attributes), len(legacyResponseWithNull.BeginBlock.Events[0].Attributes))
+	require.Equal(t, v1beta2ABCIResponsesWithNull.BeginBlock.Events[0].Attributes[0].Key, legacyResponseWithNull.BeginBlock.Events[0].Attributes[0].Key)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.BeginBlock.Events[0].Attributes[0].Value, legacyResponseWithNull.BeginBlock.Events[0].Attributes[0].Value)
+
+	require.Equal(t, len(v1beta2ABCIResponsesWithNull.DeliverTxs), len(legacyResponseWithNull.DeliverTxs))
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].Code, legacyResponseWithNull.DeliverTxs[0].Code)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].Data, legacyResponseWithNull.DeliverTxs[0].Data)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].Log, legacyResponseWithNull.DeliverTxs[0].Log)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].GasWanted, legacyResponseWithNull.DeliverTxs[0].GasWanted)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].GasUsed, legacyResponseWithNull.DeliverTxs[0].GasUsed)
+	require.Equal(t, len(v1beta2ABCIResponsesWithNull.DeliverTxs[0].Events), len(legacyResponseWithNull.DeliverTxs[0].Events))
+	require.Equal(t, len(v1beta2ABCIResponsesWithNull.DeliverTxs[0].Events[0].Attributes), len(legacyResponseWithNull.DeliverTxs[0].Events[0].Attributes))
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].Events[0].Attributes[0].Key, legacyResponseWithNull.DeliverTxs[0].Events[0].Attributes[0].Key)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].Events[0].Attributes[0].Value, legacyResponseWithNull.DeliverTxs[0].Events[0].Attributes[0].Value)
+	require.Equal(t, v1beta2ABCIResponsesWithNull.DeliverTxs[0].Codespace, legacyResponseWithNull.DeliverTxs[0].Codespace)
+}
+
+// This test unmarshals a v1beta3.ResponseFinalizeBlock as an abciv1.FinalizeBlockResponse.
+// This logic is important for the LoadFinalizeBlockResponse method in the state store.
+// The conversion should work because the two types are compatible.
+func TestStateV1Beta3ResponsesFinalizeBlockAsV1FinalizeBlockResponse(t *testing.T) { + v1beta3ResponseFinalizeBlock := newV1Beta3ResponsesFinalizeBlock() + + data, err := v1beta3ResponseFinalizeBlock.Marshal() + require.NoError(t, err) + require.NotNil(t, data) + + // This works because they are equivalent protos and the fields are populated + finalizeBlockResponse := new(abciv1.FinalizeBlockResponse) + err = finalizeBlockResponse.Unmarshal(data) + require.NoError(t, err) + + // Test for not nil + require.NotNil(t, finalizeBlockResponse.TxResults) + require.NotNil(t, finalizeBlockResponse.Events) + require.NotNil(t, finalizeBlockResponse.ValidatorUpdates) + require.NotNil(t, finalizeBlockResponse.ConsensusParamUpdates) + require.NotNil(t, finalizeBlockResponse.AppHash) + + // Test for equality + require.Equal(t, len(v1beta3ResponseFinalizeBlock.TxResults), len(finalizeBlockResponse.TxResults)) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Code, finalizeBlockResponse.TxResults[0].Code) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Data, finalizeBlockResponse.TxResults[0].Data) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Log, finalizeBlockResponse.TxResults[0].Log) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].GasWanted, finalizeBlockResponse.TxResults[0].GasWanted) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].GasUsed, finalizeBlockResponse.TxResults[0].GasUsed) + require.Equal(t, len(v1beta3ResponseFinalizeBlock.TxResults[0].Events), len(finalizeBlockResponse.TxResults[0].Events)) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Events[0].Type, finalizeBlockResponse.TxResults[0].Events[0].Type) + require.Equal(t, len(v1beta3ResponseFinalizeBlock.TxResults[0].Events[0].Attributes), len(finalizeBlockResponse.TxResults[0].Events[0].Attributes)) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Events[0].Attributes[0].Key, finalizeBlockResponse.TxResults[0].Events[0].Attributes[0].Key) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Events[0].Attributes[0].Value, finalizeBlockResponse.TxResults[0].Events[0].Attributes[0].Value) + require.Equal(t, v1beta3ResponseFinalizeBlock.TxResults[0].Codespace, finalizeBlockResponse.TxResults[0].Codespace) + + require.Equal(t, len(v1beta3ResponseFinalizeBlock.Events), len(finalizeBlockResponse.Events)) + require.Equal(t, v1beta3ResponseFinalizeBlock.Events[0].Type, finalizeBlockResponse.Events[0].Type) + require.Equal(t, len(v1beta3ResponseFinalizeBlock.Events[0].Attributes), len(finalizeBlockResponse.Events[0].Attributes)) + require.Equal(t, v1beta3ResponseFinalizeBlock.Events[0].Attributes[0].Key, finalizeBlockResponse.Events[0].Attributes[0].Key) + require.Equal(t, v1beta3ResponseFinalizeBlock.Events[0].Attributes[0].Value, finalizeBlockResponse.Events[0].Attributes[0].Value) + + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates, finalizeBlockResponse.ConsensusParamUpdates) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Block.MaxBytes, finalizeBlockResponse.ConsensusParamUpdates.Block.MaxBytes) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Block.MaxGas, finalizeBlockResponse.ConsensusParamUpdates.Block.MaxGas) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Evidence.MaxAgeNumBlocks, finalizeBlockResponse.ConsensusParamUpdates.Evidence.MaxAgeNumBlocks) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Evidence.MaxAgeDuration, 
finalizeBlockResponse.ConsensusParamUpdates.Evidence.MaxAgeDuration) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Evidence.MaxBytes, finalizeBlockResponse.ConsensusParamUpdates.Evidence.MaxBytes) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Validator.PubKeyTypes, finalizeBlockResponse.ConsensusParamUpdates.Validator.PubKeyTypes) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Version.App, finalizeBlockResponse.ConsensusParamUpdates.Version.App) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Synchrony.Precision, finalizeBlockResponse.ConsensusParamUpdates.Synchrony.Precision) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Synchrony.MessageDelay, finalizeBlockResponse.ConsensusParamUpdates.Synchrony.MessageDelay) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Feature.VoteExtensionsEnableHeight.Value, finalizeBlockResponse.ConsensusParamUpdates.Feature.VoteExtensionsEnableHeight.Value) + require.Equal(t, v1beta3ResponseFinalizeBlock.ConsensusParamUpdates.Feature.PbtsEnableHeight.Value, finalizeBlockResponse.ConsensusParamUpdates.Feature.PbtsEnableHeight.Value) + + require.Equal(t, v1beta3ResponseFinalizeBlock.AppHash, finalizeBlockResponse.AppHash) + + require.Equal(t, len(v1beta3ResponseFinalizeBlock.ValidatorUpdates), len(finalizeBlockResponse.ValidatorUpdates)) + require.Equal(t, v1beta3ResponseFinalizeBlock.ValidatorUpdates[0].Power, finalizeBlockResponse.ValidatorUpdates[0].Power) + + // skip until an equivalency test is possible + // require.NotNil(t, finalizeBlockResponse.ValidatorUpdates[0].PubKeyBytes) + // require.NotEmpty(t, finalizeBlockResponse.ValidatorUpdates[0].PubKeyType) + // require.Equal(t, v1beta3ResponseFinalizeBlock.ValidatorUpdates[0].PubKey.GetEd25519(), finalizeBlockResponse.ValidatorUpdates[0].PubKeyBytes) +} + +// Generate a v1beta2 ABCIResponses with data for all fields. 
+func newV1Beta2ABCIResponses() statev1beta2.ABCIResponses { + eventAttr := abciv1beta2.EventAttribute{ + Key: "key", + Value: "value", + } + + deliverTxEvent := abciv1beta2.Event{ + Type: "deliver_tx_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + responseDeliverTx := abciv1beta2.ResponseDeliverTx{ + Code: abciv1beta1.CodeTypeOK, + Data: []byte("result tx data"), + Log: "tx committed successfully", + Info: "tx processing info", + Events: []abciv1beta2.Event{deliverTxEvent}, + } + + validatorUpdates := []abciv1beta1.ValidatorUpdate{{ + PubKey: cryptov1.PublicKey{Sum: &cryptov1.PublicKey_Ed25519{Ed25519: make([]byte, 1)}}, + Power: int64(10), + }} + + consensusParams := &typesv1beta2.ConsensusParams{ + Block: &typesv1beta2.BlockParams{ + MaxBytes: int64(100000), + MaxGas: int64(10000), + }, + Evidence: &typesv1beta1.EvidenceParams{ + MaxAgeNumBlocks: int64(10), + MaxAgeDuration: time.Duration(1000), + MaxBytes: int64(10000), + }, + Validator: &typesv1beta1.ValidatorParams{ + PubKeyTypes: []string{"ed25519"}, + }, + Version: &typesv1beta1.VersionParams{ + App: uint64(10), + }, + } + + endBlockEvent := abciv1beta2.Event{ + Type: "end_block_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + beginBlockEvent := abciv1beta2.Event{ + Type: "begin_block_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + // v1beta2 ABCI Responses + v1beta2ABCIResponses := statev1beta2.ABCIResponses{ + BeginBlock: &abciv1beta2.ResponseBeginBlock{ + Events: []abciv1beta2.Event{beginBlockEvent}, + }, + DeliverTxs: []*abciv1beta2.ResponseDeliverTx{ + &responseDeliverTx, + }, + EndBlock: &abciv1beta2.ResponseEndBlock{ + ValidatorUpdates: validatorUpdates, + ConsensusParamUpdates: consensusParams, + Events: []abciv1beta2.Event{endBlockEvent}, + }, + } + return v1beta2ABCIResponses +} + +// Generate a v1beta2 ABCIResponses with fields missing data (nil). +func newV1Beta2ABCIResponsesWithNullFields() statev1beta2.ABCIResponses { + eventAttr := abciv1beta2.EventAttribute{ + Key: "key", + Value: "value", + } + + deliverTxEvent := abciv1beta2.Event{ + Type: "deliver_tx_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + responseDeliverTx := abciv1beta2.ResponseDeliverTx{ + Code: abciv1beta1.CodeTypeOK, + Events: []abciv1beta2.Event{deliverTxEvent}, + } + + beginBlockEvent := abciv1beta2.Event{ + Type: "begin_block_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + // v1beta2 ABCI Responses + v1beta2ABCIResponses := statev1beta2.ABCIResponses{ + BeginBlock: &abciv1beta2.ResponseBeginBlock{ + Events: []abciv1beta2.Event{beginBlockEvent}, + }, + DeliverTxs: []*abciv1beta2.ResponseDeliverTx{ + &responseDeliverTx, + }, + } + return v1beta2ABCIResponses +} + +// Generate a v1beta3 Response Finalize Block with data for all fields. 
+func newV1Beta3ResponsesFinalizeBlock() abciv1beta3.ResponseFinalizeBlock { + eventAttr := abciv1beta2.EventAttribute{ + Key: "key", + Value: "value", + } + + txEvent := abciv1beta2.Event{ + Type: "tx_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + oneEvent := abciv1beta2.Event{ + Type: "one_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + twoEvent := abciv1beta2.Event{ + Type: "two_event", + Attributes: []abciv1beta2.EventAttribute{eventAttr}, + } + + events := make([]abciv1beta2.Event, 0) + events = append(events, txEvent) + events = append(events, oneEvent) + events = append(events, twoEvent) + + txResults := []*abciv1beta3.ExecTxResult{{ + Code: 0, + Data: []byte("result tx data"), + Log: "tx committed successfully", + Info: "tx processing info", + GasWanted: 15, + GasUsed: 10, + Events: []abciv1beta2.Event{txEvent}, + Codespace: "01", + }} + + validatorUpdates := []abciv1beta1.ValidatorUpdate{{ + PubKey: cryptov1.PublicKey{Sum: &cryptov1.PublicKey_Ed25519{Ed25519: make([]byte, ed25519.PubKeySize)}}, + Power: int64(10), + }} + + consensusParams := &typesv1.ConsensusParams{ + Block: &typesv1.BlockParams{ + MaxBytes: int64(100000), + MaxGas: int64(10000), + }, + Evidence: &typesv1.EvidenceParams{ + MaxAgeNumBlocks: int64(10), + MaxAgeDuration: time.Duration(1000), + MaxBytes: int64(10000), + }, + Validator: &typesv1.ValidatorParams{ + PubKeyTypes: []string{ed25519.KeyType}, + }, + Version: &typesv1.VersionParams{ + App: uint64(10), + }, + Synchrony: &typesv1.SynchronyParams{ + Precision: durationPtr(time.Second * 2), + MessageDelay: durationPtr(time.Second * 4), + }, + Feature: &typesv1.FeatureParams{ + VoteExtensionsEnableHeight: &gogo.Int64Value{ + Value: 10, + }, + PbtsEnableHeight: &gogo.Int64Value{ + Value: 10, + }, + }, + } + + // v1beta3 FinalizeBlock Response + v1beta3FinalizeBlock := abciv1beta3.ResponseFinalizeBlock{ + Events: events, + TxResults: txResults, + ValidatorUpdates: validatorUpdates, + ConsensusParamUpdates: consensusParams, + AppHash: make([]byte, 32), + } + return v1beta3FinalizeBlock +} + +func durationPtr(t time.Duration) *time.Duration { + return &t +} diff --git a/state/doc.go b/state/doc.go new file mode 100644 index 00000000000..9a22fa92ec3 --- /dev/null +++ b/state/doc.go @@ -0,0 +1,3 @@ +// XXX: This package may be internalized (made private) in future +// releases. 
+package state
diff --git a/state/errors.go b/state/errors.go
index debf26dfa66..40131641bf7 100644
--- a/state/errors.go
+++ b/state/errors.go
@@ -5,6 +5,12 @@ import (
 	"fmt"
 )
 
+var (
+	ErrFinalizeBlockResponsesNotPersisted = errors.New("node is not persisting finalize block responses")
+	ErrPrunerCannotLowerRetainHeight      = errors.New("cannot set a height lower than previously requested - heights might have already been pruned")
+	ErrInvalidRetainHeight                = errors.New("retain height cannot be less than or equal to 0")
+)
+
 type (
 	ErrInvalidBlock error
 	ErrProxyAppConn error
@@ -74,6 +80,15 @@ type (
 	ErrCannotLoadState struct {
 		Err error
 	}
+
+	ErrABCIResponseResponseUnmarshalForHeight struct {
+		Height int64
+	}
+
+	ErrABCIResponseCorruptedOrSpecChangeForHeight struct {
+		Err    error
+		Height int64
+	}
 )
 
 func (e ErrUnknownBlock) Error() string {
@@ -126,6 +141,18 @@ func (e ErrNoABCIResponsesForHeight) Error() string {
 	return fmt.Sprintf("could not find results for height #%d", e.Height)
 }
 
+func (e ErrABCIResponseResponseUnmarshalForHeight) Error() string {
+	return fmt.Sprintf("could not decode results for height %d", e.Height)
+}
+
+func (e ErrABCIResponseCorruptedOrSpecChangeForHeight) Error() string {
+	return fmt.Sprintf("failed to unmarshal FinalizeBlockResponse (also tried as legacy ABCI response) for height %d", e.Height)
+}
+
+func (e ErrABCIResponseCorruptedOrSpecChangeForHeight) Unwrap() error {
+	return e.Err
+}
+
 func (e ErrPrunerFailedToGetRetainHeight) Error() string {
 	return fmt.Sprintf("pruner failed to get existing %s retain height: %s", e.Which, e.Err.Error())
 }
@@ -135,7 +162,7 @@ func (e ErrPrunerFailedToGetRetainHeight) Unwrap() error {
 }
 
 func (e ErrPrunerFailedToLoadState) Error() string {
-	return fmt.Sprintf("failed to load state, cannot prune: %s", e.Err.Error())
+	return "failed to load state, cannot prune: " + e.Err.Error()
 }
 
 func (e ErrPrunerFailedToLoadState) Unwrap() error {
@@ -158,12 +185,6 @@ func (e ErrFailedToPruneStates) Unwrap() error {
 	return e.Err
 }
 
-var (
-	ErrFinalizeBlockResponsesNotPersisted = errors.New("node is not persisting finalize block responses")
-	ErrPrunerCannotLowerRetainHeight      = errors.New("cannot set a height lower than previously requested - heights might have already been pruned")
-	ErrInvalidRetainHeight                = errors.New("retain height cannot be less or equal than 0")
-)
-
 func (e ErrCannotLoadState) Error() string {
 	return fmt.Sprintf("cannot load state: %v", e.Err)
 }
diff --git a/state/execution.go b/state/execution.go
index 4699c36a63a..4fff875c1c8 100644
--- a/state/execution.go
+++ b/state/execution.go
@@ -3,20 +3,21 @@ package state
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"time"
 
 	abci "github.com/cometbft/cometbft/abci/types"
-	cryptoenc "github.com/cometbft/cometbft/crypto/encoding"
-	"github.com/cometbft/cometbft/libs/fail"
+	cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1"
+	"github.com/cometbft/cometbft/internal/fail"
 	"github.com/cometbft/cometbft/libs/log"
 	"github.com/cometbft/cometbft/mempool"
-	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
 	"github.com/cometbft/cometbft/proxy"
 	"github.com/cometbft/cometbft/types"
+	cmttime "github.com/cometbft/cometbft/types/time"
 )
 
-//-----------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
 
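Because the error types added to state/errors.go above implement Unwrap, callers can match them with the standard errors package. A small hedged sketch; `handleLoadError` and its surrounding package are hypothetical, only the error types and sentinel come from this diff:

```go
package example

import (
	"errors"
	"fmt"

	sm "github.com/cometbft/cometbft/state"
)

// handleLoadError shows how a (hypothetical) caller could distinguish the
// new state-store errors.
func handleLoadError(err error) {
	// Sentinel: the node was configured to discard ABCI responses.
	if errors.Is(err, sm.ErrFinalizeBlockResponsesNotPersisted) {
		return
	}
	// Wrapped type: corrupted data or an ABCI spec change at some height.
	var corrupted sm.ErrABCIResponseCorruptedOrSpecChangeForHeight
	if errors.As(err, &corrupted) {
		// Unwrap yields the underlying unmarshal error.
		fmt.Println("height:", corrupted.Height, "cause:", errors.Unwrap(corrupted))
	}
}
```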
 // BlockExecutor handles block execution and state updates.
 // It exposes ApplyBlock(), which validates & executes the block, updates state w/ ABCI responses,
 // then commits and updates the mempool atomically, then saves state.
@@ -42,6 +43,9 @@ type BlockExecutor struct {
 	mempool mempool.Mempool
 	evpool  EvidencePool
 
+	// 1-element cache of validated blocks
+	lastValidatedBlock *types.Block
+
 	logger log.Logger
 
 	metrics *Metrics
@@ -135,10 +139,10 @@ func (blockExec *BlockExecutor) CreateProposalBlock(
 	block := state.MakeBlock(height, txs, commit, evidence, proposerAddr)
 	rpp, err := blockExec.proxyApp.PrepareProposal(
 		ctx,
-		&abci.RequestPrepareProposal{
+		&abci.PrepareProposalRequest{
 			MaxTxBytes:         maxDataBytes,
 			Txs:                block.Txs.ToSliceOfBytes(),
-			LocalLastCommit:    buildExtendedCommitInfoFromStore(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.ABCI),
+			LocalLastCommit:    buildExtendedCommitInfoFromStore(lastExtCommit, blockExec.store, state.InitialHeight, state.ConsensusParams.Feature),
 			Misbehavior:        block.Evidence.Evidence.ToABCI(),
 			Height:             block.Height,
 			Time:               block.Time,
@@ -170,7 +174,7 @@ func (blockExec *BlockExecutor) ProcessProposal(
 	block *types.Block,
 	state State,
 ) (bool, error) {
-	resp, err := blockExec.proxyApp.ProcessProposal(context.TODO(), &abci.RequestProcessProposal{
+	resp, err := blockExec.proxyApp.ProcessProposal(context.TODO(), &abci.ProcessProposalRequest{
 		Hash:               block.Header.Hash(),
 		Height:             block.Header.Height,
 		Time:               block.Header.Time,
@@ -184,7 +188,7 @@ func (blockExec *BlockExecutor) ProcessProposal(
 		return false, err
 	}
 	if resp.IsStatusUnknown() {
-		panic(fmt.Sprintf("ProcessProposal responded with status %s", resp.Status.String()))
+		panic("ProcessProposal responded with status " + resp.Status.String())
 	}
 
 	return resp.IsAccepted(), nil
@@ -195,13 +199,22 @@ func (blockExec *BlockExecutor) ProcessProposal(
 // Validation does not mutate state, but does require historical information from the stateDB,
 // ie. to verify evidence from a validator at an old height.
 func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error {
-	err := validateBlock(state, block)
-	if err != nil {
-		return err
+	if !blockExec.lastValidatedBlock.HashesTo(block.Hash()) {
+		if err := validateBlock(state, block); err != nil {
+			return err
+		}
+		blockExec.lastValidatedBlock = block
 	}
 	return blockExec.evpool.CheckEvidence(block.Evidence.Evidence)
 }
 
+// ApplyVerifiedBlock does the same as `ApplyBlock`, but skips verification.
+func (blockExec *BlockExecutor) ApplyVerifiedBlock(
+	state State, blockID types.BlockID, block *types.Block, syncingToHeight int64,
+) (State, error) {
+	return blockExec.applyBlock(state, blockID, block, syncingToHeight)
+}
+
 // ApplyBlock validates the block against the state, executes it against the app,
 // fires the relevant events, commits the app, and saves the new state and responses.
 // It returns the new state.
@@ -209,14 +222,19 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e
 // from outside this package to process and commit an entire block.
 // It takes a blockID to avoid recomputing the parts hash.
func (blockExec *BlockExecutor) ApplyBlock( - state State, blockID types.BlockID, block *types.Block, + state State, blockID types.BlockID, block *types.Block, syncingToHeight int64, ) (State, error) { - if err := validateBlock(state, block); err != nil { - return state, ErrInvalidBlock(err) + if !blockExec.lastValidatedBlock.HashesTo(block.Hash()) { + if err := validateBlock(state, block); err != nil { + return state, ErrInvalidBlock(err) + } + blockExec.lastValidatedBlock = block } + return blockExec.applyBlock(state, blockID, block, syncingToHeight) +} - startTime := time.Now().UnixNano() - abciResponse, err := blockExec.proxyApp.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ +func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, block *types.Block, syncingToHeight int64) (State, error) { + abciResponse, err := blockExec.proxyApp.FinalizeBlock(context.TODO(), &abci.FinalizeBlockRequest{ Hash: block.Hash(), NextValidatorsHash: block.NextValidatorsHash, ProposerAddress: block.ProposerAddress, @@ -225,20 +243,20 @@ func (blockExec *BlockExecutor) ApplyBlock( DecidedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), Txs: block.Txs.ToSliceOfBytes(), + SyncingToHeight: syncingToHeight, }) - endTime := time.Now().UnixNano() - blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { - blockExec.logger.Error("error in proxyAppConn.FinalizeBlock", "err", err) + blockExec.logger.Error("Error in proxyAppConn.FinalizeBlock", "err", err) return state, err } blockExec.logger.Info( - "finalized block", + "Finalized block", "height", block.Height, "num_txs_res", len(abciResponse.TxResults), "num_val_updates", len(abciResponse.ValidatorUpdates), "block_app_hash", fmt.Sprintf("%X", abciResponse.AppHash), + "syncing_to_height", syncingToHeight, ) // Assert that the application correctly returned tx results for each of the transactions provided in the block @@ -246,8 +264,6 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, fmt.Errorf("expected tx results length to match size of transactions in block. Expected %d, got %d", len(block.Data.Txs), len(abciResponse.TxResults)) } - blockExec.logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", abciResponse.AppHash)) - fail.Fail() // XXX // Save the results before we commit. 
@@ -260,7 +276,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // validate the validator updates and convert to CometBFT types err = validateValidatorUpdates(abciResponse.ValidatorUpdates, state.ConsensusParams.Validator) if err != nil { - return state, fmt.Errorf("error in validator updates: %v", err) + return state, fmt.Errorf("error in validator updates: %w", err) } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponse.ValidatorUpdates) @@ -268,23 +284,28 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, err } if len(validatorUpdates) > 0 { - blockExec.logger.Info("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Info("Updates to validators", "updates", types.ValidatorListString(validatorUpdates)) blockExec.metrics.ValidatorSetUpdates.Add(1) } if abciResponse.ConsensusParamUpdates != nil { blockExec.metrics.ConsensusParamUpdates.Add(1) } + err = validateNextBlockDelay(abciResponse.NextBlockDelay) + if err != nil { + return state, fmt.Errorf("error in next block delay: %w", err) + } + // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponse, validatorUpdates) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %w", err) } // Lock mempool, commit app state, update mempoool. retainHeight, err := blockExec.Commit(state, block, abciResponse) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %w", err) } // Update evpool with the latest state. @@ -309,8 +330,8 @@ func (blockExec *BlockExecutor) ApplyBlock( } // Events are fired after everything else. - // NOTE: if we crash between Commit and Save, events wont be fired during replay - fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, abciResponse, validatorUpdates) + // NOTE: if we crash between Commit and Save, events won't be fired during replay + fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, abciResponse, validatorUpdates, blockExec.metrics) return state, nil } @@ -327,8 +348,7 @@ func (blockExec *BlockExecutor) ExtendVote( if vote.Height != block.Height { panic(fmt.Sprintf("vote's and block's heights do not match %d!=%d", block.Height, vote.Height)) } - - req := abci.RequestExtendVote{ + req := abci.ExtendVoteRequest{ Hash: vote.BlockID.Hash, Height: vote.Height, Time: block.Time, @@ -347,7 +367,7 @@ func (blockExec *BlockExecutor) ExtendVote( } func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *types.Vote) error { - req := abci.RequestVerifyVoteExtension{ + req := abci.VerifyVoteExtensionRequest{ Hash: vote.BlockID.Hash, ValidatorAddress: vote.ValidatorAddress, Height: vote.Height, @@ -359,7 +379,7 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t panic(fmt.Errorf("VerifyVoteExtension call failed: %w", err)) } if resp.IsStatusUnknown() { - panic(fmt.Sprintf("VerifyVoteExtension responded with status %s", resp.Status.String())) + panic("VerifyVoteExtension responded with status " + resp.Status.String()) } if !resp.IsAccepted() { @@ -368,58 +388,84 @@ func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *t return nil } -// Commit locks the mempool, runs the ABCI Commit message, and updates the +// Commit locks the mempool, runs the ABCI Commit message, and asynchronously starts updating the // mempool. 
-// It returns the result of calling abci.Commit which is the height to retain (if any)).
+// Commit returns the result of calling abci.Commit, which is the height to retain (if any).
 // The application is expected to have persisted its state (if any) before returning
 // from the ABCI Commit call. This is the only place where the application should
 // persist its state.
 // The Mempool must be locked during commit and update because state is
 // typically reset on Commit and old txs must be replayed against committed
 // state before new txs are run in the mempool, lest they be invalid.
+// The mempool is unlocked when the Update routine completes, which is
+// asynchronous from Commit.
 func (blockExec *BlockExecutor) Commit(
 	state State,
 	block *types.Block,
-	abciResponse *abci.ResponseFinalizeBlock,
+	abciResponse *abci.FinalizeBlockResponse,
 ) (int64, error) {
+	blockExec.mempool.PreUpdate()
 	blockExec.mempool.Lock()
-	defer blockExec.mempool.Unlock()
+	unlockMempool := func() { blockExec.mempool.Unlock() }
 
 	// while mempool is Locked, flush to ensure all async requests have completed
 	// in the ABCI app before Commit.
 	err := blockExec.mempool.FlushAppConn()
 	if err != nil {
-		blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err)
+		unlockMempool()
+		blockExec.logger.Error("Client error during mempool.FlushAppConn, flushing mempool", "err", err)
 		return 0, err
 	}
 
 	// Commit block, get hash back
 	res, err := blockExec.proxyApp.Commit(context.TODO())
 	if err != nil {
-		blockExec.logger.Error("client error during proxyAppConn.CommitSync", "err", err)
+		unlockMempool()
+		blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err)
 		return 0, err
 	}
 
 	// ResponseCommit has no error code - just data
 	blockExec.logger.Info(
-		"committed state",
+		"Committed state",
 		"height", block.Height,
 		"block_app_hash", fmt.Sprintf("%X", block.AppHash),
 	)
 
 	// Update mempool.
-	err = blockExec.mempool.Update(
+	go blockExec.asyncUpdateMempool(unlockMempool, block, state.Copy(), abciResponse)
+
+	return res.RetainHeight, nil
+}
+
+// updates the mempool with the latest state asynchronously.
+func (blockExec *BlockExecutor) asyncUpdateMempool(
+	unlockMempool func(),
+	block *types.Block,
+	state State,
+	abciResponse *abci.FinalizeBlockResponse,
+) {
+	defer unlockMempool()
+
+	err := blockExec.mempool.Update(
 		block.Height,
 		block.Txs,
 		abciResponse.TxResults,
 		TxPreCheck(state),
 		TxPostCheck(state),
 	)
-
-	return res.RetainHeight, err
+	if err != nil {
+		// We panic in this case, out of legacy behavior. Before we made the mempool
+		// update complete asynchronously from Commit, we would panic if the mempool
+		// update failed. This is because we panic on any error within commit.
+		// We should consider changing this behavior in the future, as there is no
+		// need to panic if the mempool update failed. The most severe thing we
+		// would need to do is dump the mempool and restart it.
+		panic(fmt.Sprintf("client error during mempool.Update; error %v", err))
+	}
 }
 
-//---------------------------------------------------------
+// ---------------------------------------------------------
 // Helper functions for executing blocks and updating state
 
 func buildLastCommitInfoFromStore(block *types.Block, store Store, initialHeight int64) abci.CommitInfo {
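Commit now hands mempool unlocking to the asynchronous update goroutine instead of deferring it. The shape of that hand-off, reduced to a hedged toy example (this is not CometBFT code; all names are illustrative):

```go
package example

import "sync"

// commitThenUpdateAsync mirrors the Commit/asyncUpdateMempool hand-off above:
// the caller takes the lock, and ownership of the unlock moves into the
// goroutine, which releases it only after the update finishes. Early error
// paths must release the lock inline before returning.
func commitThenUpdateAsync(mu *sync.Mutex, flush, update func() error) error {
	mu.Lock()
	unlock := func() { mu.Unlock() }

	if err := flush(); err != nil {
		unlock() // early exit: release before surfacing the error
		return err
	}
	go func() {
		defer unlock() // released only once the async update completes
		if err := update(); err != nil {
			panic(err) // mirrors the legacy panic-on-mempool-update failure
		}
	}()
	return nil
}
```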
@@ -485,7 +531,7 @@ func BuildLastCommitInfo(block *types.Block, lastValSet *types.ValidatorSet, ini
 // data, it returns an empty record.
 //
 // Assumes that the commit signatures are sorted according to validator index.
-func buildExtendedCommitInfoFromStore(ec *types.ExtendedCommit, store Store, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo {
+func buildExtendedCommitInfoFromStore(ec *types.ExtendedCommit, store Store, initialHeight int64, fp types.FeatureParams) abci.ExtendedCommitInfo {
 	if ec.Height < initialHeight {
 		// There are no extended commits for heights below the initial height.
 		return abci.ExtendedCommitInfo{}
@@ -496,13 +542,13 @@ func buildExtendedCommitInfoFromStore(ec *types.ExtendedCommit, store Store, ini
 		panic(fmt.Errorf("failed to load validator set at height %d, initial height %d: %w", ec.Height, initialHeight, err))
 	}
 
-	return BuildExtendedCommitInfo(ec, valSet, initialHeight, ap)
+	return BuildExtendedCommitInfo(ec, valSet, initialHeight, fp)
 }
 
 // BuildExtendedCommitInfo builds an ExtendedCommitInfo from the given block and validator set.
 // If you want to load the validator set from the store instead of providing it,
 // use buildExtendedCommitInfoFromStore.
-func BuildExtendedCommitInfo(ec *types.ExtendedCommit, valSet *types.ValidatorSet, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo {
+func BuildExtendedCommitInfo(ec *types.ExtendedCommit, valSet *types.ValidatorSet, initialHeight int64, fp types.FeatureParams) abci.ExtendedCommitInfo {
 	if ec.Height < initialHeight {
 		// There are no extended commits for heights below the initial height.
 		return abci.ExtendedCommitInfo{}
@@ -539,7 +585,7 @@ func BuildExtendedCommitInfo(ec *types.ExtendedCommit, valSet *types.ValidatorSe
 	// during that height, we ensure they are present and deliver the data to
 	// the proposer. If they were not enabled during this previous height, we
 	// will not deliver extension data.
-	if err := ecs.EnsureExtension(ap.VoteExtensionsEnabled(ec.Height)); err != nil {
+	if err := ecs.EnsureExtension(fp.VoteExtensionsEnabled(ec.Height)); err != nil {
 		panic(fmt.Errorf("commit at height %d has problems with vote extension data; err %w", ec.Height, err))
 	}
 
@@ -561,24 +607,24 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate,
 	params types.ValidatorParams,
 ) error {
 	for _, valUpdate := range abciUpdates {
-		if valUpdate.GetPower() < 0 {
-			return fmt.Errorf("voting power can't be negative %v", valUpdate)
-		} else if valUpdate.GetPower() == 0 {
-			// continue, since this is deleting the validator, and thus there is no
-			// pubkey to check
-			continue
+		if valUpdate.Power < 0 {
+			return fmt.Errorf("voting power of %X can't be negative", valUpdate.PubKeyBytes)
 		}
 
-		// Check if validator's pubkey matches an ABCI type in the consensus params
-		pk, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey)
-		if err != nil {
-			return err
+		// Check if validator's pubkey matches an ABCI type in the consensus params.
+		if !types.IsValidPubkeyType(params, valUpdate.PubKeyType) {
+			return fmt.Errorf("validator %X is using pubkey %s, which is unsupported for consensus",
+				valUpdate.PubKeyBytes, valUpdate.PubKeyType)
 		}
-		if !types.IsValidPubkeyType(params, pk.Type()) {
-			return fmt.Errorf("validator %v is using pubkey %s, which is unsupported for consensus",
-				valUpdate, pk.Type())
-		}
+		// XXX: PubKeyBytes will be checked in PB2TM.ValidatorUpdates
+	}
+	return nil
+}
+
+func validateNextBlockDelay(nextBlockDelay time.Duration) error {
+	if nextBlockDelay < 0 {
+		return errors.New("negative duration")
 	}
 	return nil
 }
@@ -588,7 +634,7 @@ func updateState(
 	state State,
 	blockID types.BlockID,
 	header *types.Header,
-	abciResponse *abci.ResponseFinalizeBlock,
+	abciResponse *abci.FinalizeBlockResponse,
 	validatorUpdates []*types.Validator,
 ) (State, error) {
 	// Copy the valset so we can apply changes from EndBlock
@@ -602,7 +648,7 @@ func updateState(
 	if err != nil {
 		return state, fmt.Errorf("changing validator set: %w", err)
 	}
-		// Change results from this height but only applies to the next next height.
+		// Changes from this height take effect only at height + 2.
 		lastHeightValsChanged = header.Height + 1 + 1
 	}
 
@@ -650,6 +696,7 @@ func updateState(
 		LastHeightConsensusParamsChanged: lastHeightParamsChanged,
 		LastResultsHash:                  TxResultsHash(abciResponse.TxResults),
 		AppHash:                          nil,
+		NextBlockDelay:                   abciResponse.NextBlockDelay,
 	}, nil
 }
 
@@ -661,21 +708,26 @@ func fireEvents(
 	eventBus types.BlockEventPublisher,
 	block *types.Block,
 	blockID types.BlockID,
-	abciResponse *abci.ResponseFinalizeBlock,
+	abciResponse *abci.FinalizeBlockResponse,
 	validatorUpdates []*types.Validator,
+	metrics *Metrics,
 ) {
+	defer func(start time.Time) {
+		metrics.FireBlockEventsDelaySeconds.Set(cmttime.Since(start).Seconds())
+	}(cmttime.Now())
+
 	if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{
 		Block:               block,
 		BlockID:             blockID,
 		ResultFinalizeBlock: *abciResponse,
 	}); err != nil {
-		logger.Error("failed publishing new block", "err", err)
+		logger.Error("Failed publishing new block", "err", err)
 	}
 
 	if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
 		Header: block.Header,
 	}); err != nil {
-		logger.Error("failed publishing new block header", "err", err)
+		logger.Error("Failed publishing new block header", "err", err)
 	}
 
 	if err := eventBus.PublishEventNewBlockEvents(types.EventDataNewBlockEvents{
@@ -683,7 +735,7 @@ func fireEvents(
 		Events: abciResponse.Events,
 		NumTxs: int64(len(block.Txs)),
 	}); err != nil {
-		logger.Error("failed publishing new block events", "err", err)
+		logger.Error("Failed publishing new block events", "err", err)
 	}
 
 	if len(block.Evidence.Evidence) != 0 {
@@ -692,7 +744,7 @@ func fireEvents(
 			Evidence: ev,
 			Height:   block.Height,
 		}); err != nil {
-			logger.Error("failed publishing new evidence", "err", err)
+			logger.Error("Failed publishing new evidence", "err", err)
 		}
 	}
 }
@@ -704,19 +756,19 @@ func fireEvents(
 			Tx:     tx,
 			Result: *(abciResponse.TxResults[i]),
 		}}); err != nil {
-			logger.Error("failed publishing event TX", "err", err)
+			logger.Error("Failed publishing event TX", "err", err)
 		}
 	}
 
 	if len(validatorUpdates) > 0 {
 		if err := eventBus.PublishEventValidatorSetUpdates(
 			types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil {
-			logger.Error("failed publishing event", "err", err)
+			logger.Error("Failed publishing event", "err", err)
 		}
 	}
 }
 
-//----------------------------------------------------------------------------------------------------
+// 
---------------------------------------------------------------------------------------------------- // Execute block without state. TODO: eliminate // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. @@ -726,11 +778,11 @@ func ExecCommitBlock( block *types.Block, logger log.Logger, store Store, - initialHeight int64, + initialHeight, finalHeight int64, ) ([]byte, error) { commitInfo := buildLastCommitInfoFromStore(block, store, initialHeight) - resp, err := appConnConsensus.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ + resp, err := appConnConsensus.FinalizeBlock(context.TODO(), &abci.FinalizeBlockRequest{ Hash: block.Hash(), NextValidatorsHash: block.NextValidatorsHash, ProposerAddress: block.ProposerAddress, @@ -739,9 +791,10 @@ func ExecCommitBlock( DecidedLastCommit: commitInfo, Misbehavior: block.Evidence.Evidence.ToABCI(), Txs: block.Txs.ToSliceOfBytes(), + SyncingToHeight: finalHeight, }) if err != nil { - logger.Error("error in proxyAppConn.FinalizeBlock", "err", err) + logger.Error("Error in proxyAppConn.FinalizeBlock", "err", err) return nil, err } @@ -750,12 +803,12 @@ func ExecCommitBlock( return nil, fmt.Errorf("expected tx results length to match size of transactions in block. Expected %d, got %d", len(block.Data.Txs), len(resp.TxResults)) } - logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", resp.AppHash)) + logger.Info("Executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", resp.AppHash)) // Commit block _, err = appConnConsensus.Commit(context.TODO()) if err != nil { - logger.Error("client error during proxyAppConn.Commit", "err", err) + logger.Error("Client error during proxyAppConn.Commit", "err", err) return nil, err } diff --git a/state/execution_test.go b/state/execution_test.go index 20533936b4b..4c0143f4933 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -11,19 +11,17 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abciclientmocks "github.com/cometbft/cometbft/abci/client/mocks" abci "github.com/cometbft/cometbft/abci/types" abcimocks "github.com/cometbft/cometbft/abci/types/mocks" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" mpmocks "github.com/cometbft/cometbft/mempool/mocks" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" "github.com/cometbft/cometbft/proxy" pmocks "github.com/cometbft/cometbft/proxy/mocks" sm "github.com/cometbft/cometbft/state" @@ -36,7 +34,7 @@ import ( var ( chainID = "execution_chain" - testPartSize uint32 = 65536 + testPartSize uint32 = types.BlockPartSizeBytes ) func TestApplyBlock(t *testing.T) { @@ -44,10 +42,10 @@ func TestApplyBlock(t *testing.T) { cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() - require.Nil(t, err) + require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(1, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ 
DiscardABCIResponses: false, }) @@ -56,6 +54,7 @@ func TestApplyBlock(t *testing.T) { mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -72,8 +71,8 @@ func TestApplyBlock(t *testing.T) { require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - state, err = blockExec.ApplyBlock(state, blockID, block) - require.Nil(t, err) + state, err = blockExec.ApplyBlock(state, blockID, block, block.Height) + require.NoError(t, err) // TODO check state and mempool assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") @@ -85,14 +84,14 @@ func TestApplyBlock(t *testing.T) { // block. func TestFinalizeBlockDecidedLastCommit(t *testing.T) { app := &testApp{} - baseTime := time.Now() + baseTime := cmttime.Now() cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(7, 1) + state, stateDB, privVals := makeState(7, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -117,6 +116,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -145,7 +145,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - _, err = blockExec.ApplyBlock(state, blockID, block) + _, err = blockExec.ApplyBlock(state, blockID, block, block.Height) require.NoError(t, err) require.True(t, app.LastTime.After(baseTime)) @@ -167,7 +167,7 @@ func TestFinalizeBlockValidators(t *testing.T) { require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // no need to check error again - state, stateDB, _ := makeState(2, 2) + state, stateDB, _ := makeState(2, 2, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -223,7 +223,7 @@ func TestFinalizeBlockValidators(t *testing.T) { // block for height 2 block := makeBlock(state, 2, lastCommit.ToCommit()) - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1, 2) require.NoError(t, err, tc.desc) require.True(t, !tc.shouldHaveTime || @@ -236,11 +236,10 @@ func TestFinalizeBlockValidators(t *testing.T) { for i, v := range app.CommitVotes { if ctr < len(tc.expectedAbsentValidators) && tc.expectedAbsentValidators[ctr] == i { - - assert.Equal(t, v.BlockIdFlag, cmtproto.BlockIDFlagAbsent) + assert.Equal(t, cmtproto.BlockIDFlagAbsent, v.BlockIdFlag) ctr++ } else { - assert.NotEqual(t, v.BlockIdFlag, cmtproto.BlockIDFlagAbsent) + assert.NotEqual(t, cmtproto.BlockIDFlagAbsent, v.BlockIdFlag) } } } @@ -255,7 +254,7 @@ func TestFinalizeBlockMisbehavior(t *testing.T) { require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(1, 1) + state, stateDB, privVals := makeState(1, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -311,14 +310,14 @@ func 
TestFinalizeBlockMisbehavior(t *testing.T) { abciMb := []abci.Misbehavior{ { - Type: abci.MisbehaviorType_DUPLICATE_VOTE, + Type: abci.MISBEHAVIOR_TYPE_DUPLICATE_VOTE, Height: 3, Time: defaultEvidenceTime, Validator: types.TM2PB.Validator(state.Validators.Validators[0]), TotalVotingPower: 10, }, { - Type: abci.MisbehaviorType_LIGHT_CLIENT_ATTACK, + Type: abci.MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK, Height: 8, Time: defaultEvidenceTime, Validator: types.TM2PB.Validator(state.Validators.Validators[0]), @@ -333,6 +332,7 @@ func TestFinalizeBlockMisbehavior(t *testing.T) { mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -355,7 +355,7 @@ func TestFinalizeBlockMisbehavior(t *testing.T) { blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - _, err = blockExec.ApplyBlock(state, blockID, block) + _, err = blockExec.ApplyBlock(state, blockID, block, block.Height) require.NoError(t, err) // TODO check state and mempool @@ -368,7 +368,7 @@ func TestProcessProposal(t *testing.T) { logger := log.NewNopLogger() app := &abcimocks.Application{} - app.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + app.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil) cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) @@ -376,7 +376,7 @@ func TestProcessProposal(t *testing.T) { require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(1, height) + state, stateDB, privVals := makeState(1, height, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -404,7 +404,7 @@ func TestProcessProposal(t *testing.T) { pk, err := privVal.GetPubKey() require.NoError(t, err) idx, _ := state.Validators.GetByAddress(pk.Address()) - vote := types.MakeVoteNoError(t, privVal, block0.Header.ChainID, idx, height-1, 0, 2, blockID, time.Now()) + vote := types.MakeVoteNoError(t, privVal, block0.Header.ChainID, idx, height-1, 0, 2, blockID, cmttime.Now()) addr := pk.Address() voteInfos = append(voteInfos, abci.VoteInfo{ @@ -424,7 +424,7 @@ func TestProcessProposal(t *testing.T) { block1.Txs = txs - expectedRpp := &abci.RequestProcessProposal{ + expectedRpp := &abci.ProcessProposalRequest{ Txs: block1.Txs.ToSliceOfBytes(), Hash: block1.Hash(), Height: block1.Header.Height, @@ -448,10 +448,6 @@ func TestProcessProposal(t *testing.T) { func TestValidateValidatorUpdates(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey() - pk1, err := cryptoenc.PubKeyToProto(pubkey1) - assert.NoError(t, err) - pk2, err := cryptoenc.PubKeyToProto(pubkey2) - assert.NoError(t, err) defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} @@ -465,38 +461,37 @@ func TestValidateValidatorUpdates(t *testing.T) { }{ { "adding a validator is OK", - []abci.ValidatorUpdate{{PubKey: pk2, Power: 20}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey2, 20)}, defaultValidatorParams, false, }, { "updating a validator is OK", - []abci.ValidatorUpdate{{PubKey: pk1, Power: 20}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey1, 20)}, defaultValidatorParams, false, }, { "removing a validator is OK", - 
[]abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey2, 0)}, defaultValidatorParams, false, }, { "adding a validator with negative power results in error", - []abci.ValidatorUpdate{{PubKey: pk2, Power: -100}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey2, -100)}, defaultValidatorParams, true, }, } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { err := sm.ValidateValidatorUpdates(tc.abciUpdates, tc.validatorParams) if tc.shouldErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } @@ -508,11 +503,6 @@ func TestUpdateValidators(t *testing.T) { pubkey2 := ed25519.GenPrivKey().PubKey() val2 := types.NewValidator(pubkey2, 20) - pk, err := cryptoenc.PubKeyToProto(pubkey1) - require.NoError(t, err) - pk2, err := cryptoenc.PubKeyToProto(pubkey2) - require.NoError(t, err) - testCases := []struct { name string @@ -525,43 +515,42 @@ func TestUpdateValidators(t *testing.T) { { "adding a validator is OK", types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: pk2, Power: 20}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey2, 20)}, types.NewValidatorSet([]*types.Validator{val1, val2}), false, }, { "updating a validator is OK", types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: pk, Power: 20}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey1, 20)}, types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}), false, }, { "removing a validator is OK", types.NewValidatorSet([]*types.Validator{val1, val2}), - []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey2, 0)}, types.NewValidatorSet([]*types.Validator{val1}), false, }, { "removing a non-existing validator results in error", types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, + []abci.ValidatorUpdate{abci.NewValidatorUpdate(pubkey2, 0)}, types.NewValidatorSet([]*types.Validator{val1}), true, }, } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { updates, err := types.PB2TM.ValidatorUpdates(tc.abciUpdates) - assert.NoError(t, err) + require.NoError(t, err) err = tc.currentSet.UpdateWithChangeSet(updates) if tc.shouldErr { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, tc.resultingSet.Size(), tc.currentSet.Size()) assert.Equal(t, tc.resultingSet.TotalVotingPower(), tc.currentSet.TotalVotingPower()) @@ -584,13 +573,14 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(1, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -631,13 +621,11 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} pubkey := ed25519.GenPrivKey().PubKey() - pk, err := cryptoenc.PubKeyToProto(pubkey) - require.NoError(t, err) app.ValidatorUpdates = []abci.ValidatorUpdate{ - {PubKey: pk, Power: 10}, + abci.NewValidatorUpdate(pubkey, 10), } - state, err = 
blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block, block.Height) require.NoError(t, err) // test new validator was added to NextValidators if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { @@ -664,7 +652,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { } // TestFinalizeBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that -// would result in empty set causes no panic, an error is raised and NextValidators is not updated +// would result in an empty set causes no panic; an error is raised and NextValidators is not updated. func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { app := &testApp{} cc := proxy.NewLocalClientCreator(app) @@ -673,7 +661,7 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { err := proxyApp.Start() require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(1, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -692,15 +680,15 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey) + pk := state.Validators.Validators[0].PubKey require.NoError(t, err) // Remove the only validator app.ValidatorUpdates = []abci.ValidatorUpdate{ - {PubKey: vp, Power: 0}, + abci.NewValidatorUpdate(pk, 0), } - assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) - assert.Error(t, err) + assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block, block.Height) }) + require.Error(t, err) assert.NotEmpty(t, state.NextValidators.Validators) } @@ -716,13 +704,14 @@ func TestEmptyPrepareProposal(t *testing.T) { require.NoError(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(1, height) + state, stateDB, privVals := makeState(1, height, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -743,7 +732,7 @@ func TestEmptyPrepareProposal(t *testing.T) { blockStore, ) pa, _ := state.Validators.GetByIndex(0) - commit, _, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) require.NoError(t, err) _, err = blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) @@ -756,7 +745,7 @@ func TestPrepareProposalTxsAllIncluded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - state, stateDB, privVals := makeState(1, height) + state, stateDB, privVals := makeState(1, height, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -769,7 +758,7 @@ func TestPrepareProposalTxsAllIncluded(t *testing.T) { mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(txs[2:]) app := &abcimocks.Application{} - app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + app.On("PrepareProposal", mock.Anything,
mock.Anything).Return(&abci.PrepareProposalResponse{ Txs: txs.ToSliceOfBytes(), }, nil) cc := proxy.NewLocalClientCreator(app) @@ -788,7 +777,7 @@ func TestPrepareProposalTxsAllIncluded(t *testing.T) { blockStore, ) pa, _ := state.Validators.GetByIndex(0) - commit, _, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) require.NoError(t, err) block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) @@ -807,7 +796,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - state, stateDB, privVals := makeState(1, height) + state, stateDB, privVals := makeState(1, height, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -823,7 +812,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) { txs = append(txs[len(txs)/2:], txs[:len(txs)/2]...) app := &abcimocks.Application{} - app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{ Txs: txs.ToSliceOfBytes(), }, nil) @@ -843,7 +832,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) { blockStore, ) pa, _ := state.Validators.GetByIndex(0) - commit, _, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) require.NoError(t, err) block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.NoError(t, err) @@ -861,7 +850,7 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - state, stateDB, privVals := makeState(1, height) + state, stateDB, privVals := makeState(1, height, chainID) // limit max block size state.ConsensusParams.Block.MaxBytes = 60 * 1024 stateStore := sm.NewStore(stateDB, sm.StoreOptions{ @@ -879,7 +868,64 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(txs) app := &abcimocks.Application{} - app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{ + Txs: txs.ToSliceOfBytes(), + }, nil) + + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + blockStore := store.NewBlockStore(dbm.NewMemDB()) + blockExec := sm.NewBlockExecutor( + stateStore, + log.NewNopLogger(), + proxyApp.Consensus(), + mp, + evpool, + blockStore, + ) + pa, _ := state.Validators.GetByIndex(0) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + require.NoError(t, err) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) + require.Nil(t, block) + require.ErrorContains(t, err, "transaction data size exceeds maximum") + + mp.AssertExpectations(t) +} + +// TestPrepareProposalCountSerializationOverhead tests that the block creation logic returns +// an error if the PrepareProposalResponse returned from the application is at the limit of +// its size and will go beyond the limit upon serialization.
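The sizing arithmetic in the test that follows is worth spelling out. A minimal sketch of the intent, assuming (as the test's own calls imply) that types.MaxDataBytes(maxBytes, evidenceBytes, valsCount) subtracts a fixed per-block overhead from the byte limit; this standalone program only restates the test's setup and is not part of the patch:

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/types"
)

func main() {
	const (
		bytesPerTx  int64 = 4
		nValidators       = 1
	)
	// The per-block overhead (header, last commit, ...) does not depend on
	// the limit itself, so probing with an arbitrary limit (5000) recovers it.
	overhead := 5000 - types.MaxDataBytes(5000, 0, nValidators)
	maxBytes := bytesPerTx*1024 + overhead
	maxDataBytes := types.MaxDataBytes(maxBytes, 0, nValidators) // == bytesPerTx * 1024
	// maxDataBytes/bytesPerTx four-byte transactions fill the data budget
	// exactly, so the extra serialization overhead of the proposal must push
	// the block past maxBytes, and CreateProposalBlock is expected to fail.
	fmt.Println(maxBytes, maxDataBytes, maxDataBytes/bytesPerTx)
}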
+func TestPrepareProposalCountSerializationOverhead(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + state, stateDB, privVals := makeState(1, height, chainID) + // limit max block size + var bytesPerTx int64 = 4 + const nValidators = 1 + nonDataSize := 5000 - types.MaxDataBytes(5000, 0, nValidators) + state.ConsensusParams.Block.MaxBytes = bytesPerTx*1024 + nonDataSize + maxDataBytes := types.MaxDataBytes(state.ConsensusParams.Block.MaxBytes, 0, nValidators) + + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := test.MakeNTxs(height, maxDataBytes/bytesPerTx) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(txs) + + app := &abcimocks.Application{} + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{ Txs: txs.ToSliceOfBytes(), }, nil) @@ -899,7 +945,7 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { blockStore, ) pa, _ := state.Validators.GetByIndex(0) - commit, _, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) require.NoError(t, err) block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.Nil(t, block) @@ -915,7 +961,7 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - state, stateDB, privVals := makeState(1, height) + state, stateDB, privVals := makeState(1, height, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -953,7 +999,7 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { blockStore, ) pa, _ := state.Validators.GetByIndex(0) - commit, _, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) require.NoError(t, err) block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, pa) require.Nil(t, block) @@ -962,7 +1008,7 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { mp.AssertExpectations(t) } -// TestCreateProposalBlockPanicOnAbsentVoteExtensions ensures that the CreateProposalBlock +// TestCreateProposalAbsentVoteExtensions ensures that the CreateProposalBlock // call correctly panics when the vote extension data is missing from the extended commit // data that the method receives. 
func TestCreateProposalAbsentVoteExtensions(t *testing.T) { @@ -1007,18 +1053,18 @@ func TestCreateProposalAbsentVoteExtensions(t *testing.T) { app := abcimocks.NewApplication(t) if !testCase.expectPanic { - app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil) } cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) - state, stateDB, privVals := makeState(1, int(testCase.height-1)) + state, stateDB, privVals := makeState(1, int(testCase.height-1), chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) - state.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testCase.extensionEnableHeight + state.ConsensusParams.Feature.VoteExtensionsEnableHeight = testCase.extensionEnableHeight mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() @@ -1047,7 +1093,7 @@ func TestCreateProposalAbsentVoteExtensions(t *testing.T) { require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} pa, _ := state.Validators.GetByIndex(0) - lastCommit, _, _ := makeValidCommit(testCase.height-1, blockID, state.Validators, privVals) + lastCommit, _ := makeValidCommit(testCase.height-1, blockID, state.Validators, privVals) stripSignatures(lastCommit) if testCase.expectPanic { require.Panics(t, func() { diff --git a/state/export_test.go b/state/export_test.go index aa2085bea45..4e7b8d6d6cf 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -2,7 +2,6 @@ package state import ( dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/types" ) @@ -26,7 +25,7 @@ func UpdateState( state State, blockID types.BlockID, header *types.Header, - resp *abci.ResponseFinalizeBlock, + resp *abci.FinalizeBlockResponse, validatorUpdates []*types.Validator, ) (State, error) { return updateState(state, blockID, header, resp, validatorUpdates) @@ -40,14 +39,30 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.V // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. -func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { - stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}} - return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) +func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet, keyLayoutVersion string) error { + var keyLayout KeyLayout + switch keyLayoutVersion { + case "v1", "": + keyLayout = v1LegacyLayout{} + case "v2": + keyLayout = v2Layout{} + } + stateStore := dbStore{db, keyLayout, StoreOptions{DiscardABCIResponses: false, Metrics: NopMetrics()}} + batch := stateStore.db.NewBatch() + err := stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet, batch) + if err != nil { + return err + } + err = batch.WriteSync() + if err != nil { + return err + } + return nil } // FindMinBlockRetainHeight is an alias for the private // findMinBlockRetainHeight method in pruner.go, exported exclusively and -// expicitly for testing. +// explicitly for testing. 
func (p *Pruner) FindMinRetainHeight() int64 { return p.findMinBlockRetainHeight() } @@ -64,6 +79,10 @@ func (p *Pruner) PruneBlockIndexerToRetainHeight(lastRetainHeight int64) int64 { return p.pruneBlockIndexerToRetainHeight(lastRetainHeight) } +func (p *Pruner) PruneBlocksToHeight(height int64) (uint64, int64, error) { + return p.pruneBlocksToHeight(height) +} + func Int64ToBytes(val int64) []byte { return int64ToBytes(val) } diff --git a/state/helpers_test.go b/state/helpers_test.go index f094b79ab85..b9f52cbdc83 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -3,16 +3,14 @@ package state_test import ( "bytes" "context" - "fmt" "time" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/internal/test" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" @@ -46,7 +44,7 @@ func makeAndCommitGoodBlock( } // Simulate a lastCommit for this block from all validators for the next height - commit, _, err := makeValidCommit(height, blockID, state.Validators, privVals) + commit, err := makeValidCommit(height, blockID, state.Validators, privVals) if err != nil { return state, types.BlockID{}, nil, err } @@ -69,7 +67,7 @@ func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commi Hash: block.Hash(), PartSetHeader: partSet.Header(), } - state, err = blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block, block.Height) if err != nil { return state, types.BlockID{}, err } @@ -91,7 +89,7 @@ func makeValidCommit( blockID types.BlockID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator, -) (*types.ExtendedCommit, []*types.Vote, error) { +) (*types.ExtendedCommit, error) { sigs := make([]types.ExtendedCommitSig, vals.Size()) votes := make([]*types.Vote, vals.Size()) for i := 0; i < vals.Size(); i++ { @@ -102,12 +100,12 @@ func makeValidCommit( int32(i), height, 0, - cmtproto.PrecommitType, + types.PrecommitType, blockID, - time.Now(), + cmttime.Now(), ) if err != nil { - return nil, nil, err + return nil, err } sigs[i] = vote.ExtendedCommitSig() votes[i] = vote @@ -116,47 +114,7 @@ func makeValidCommit( Height: height, BlockID: blockID, ExtendedSignatures: sigs, - }, votes, nil -} - -func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { - vals := make([]types.GenesisValidator, nVals) - privVals := make(map[string]types.PrivValidator, nVals) - for i := 0; i < nVals; i++ { - secret := []byte(fmt.Sprintf("test%d", i)) - pk := ed25519.GenPrivKeyFromSecret(secret) - valAddr := pk.PubKey().Address() - vals[i] = types.GenesisValidator{ - Address: valAddr, - PubKey: pk.PubKey(), - Power: 1000, - Name: fmt.Sprintf("test%d", i), - } - privVals[valAddr.String()] = types.NewMockPVWithParams(pk, false, false) - } - s, _ := sm.MakeGenesisState(&types.GenesisDoc{ - ChainID: chainID, - Validators: vals, - AppHash: nil, - }) - - stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB, sm.StoreOptions{ - DiscardABCIResponses: false, - }) - if err := stateStore.Save(s); err != nil { - panic(err) - } - - for i := 1; i < height; i++ { - s.LastBlockHeight++ - s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - 
} - } - - return s, stateDB, privVals + }, nil } func genValSet(size int) *types.ValidatorSet { @@ -170,15 +128,15 @@ func genValSet(size int) *types.ValidatorSet { func makeHeaderPartsResponsesValPubKeyChange( state sm.State, pubkey crypto.PubKey, -) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { +) (types.Header, types.BlockID, *abci.FinalizeBlockResponse) { block := makeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { abciResponses.ValidatorUpdates = []abci.ValidatorUpdate{ - types.TM2PB.NewValidatorUpdate(val.PubKey, 0), - types.TM2PB.NewValidatorUpdate(pubkey, 10), + abci.NewValidatorUpdate(val.PubKey, 0), + abci.NewValidatorUpdate(pubkey, 10), } } @@ -188,16 +146,14 @@ func makeHeaderPartsResponsesValPubKeyChange( func makeHeaderPartsResponsesValPowerChange( state sm.State, power int64, -) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { +) (types.Header, types.BlockID, *abci.FinalizeBlockResponse) { block := makeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { - abciResponses.ValidatorUpdates = []abci.ValidatorUpdate{ - types.TM2PB.NewValidatorUpdate(val.PubKey, power), - } + abciResponses.ValidatorUpdates = []abci.ValidatorUpdate{abci.NewValidatorUpdate(val.PubKey, power)} } return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses @@ -206,9 +162,9 @@ func makeHeaderPartsResponsesValPowerChange( func makeHeaderPartsResponsesParams( state sm.State, params cmtproto.ConsensusParams, -) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { +) (types.Header, types.BlockID, *abci.FinalizeBlockResponse) { block := makeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - abciResponses := &abci.ResponseFinalizeBlock{ + abciResponses := &abci.FinalizeBlockResponse{ ConsensusParamUpdates: ¶ms, } return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses @@ -231,7 +187,7 @@ func randomGenesisDoc() *types.GenesisDoc { } } -//---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- type testApp struct { abci.BaseApplication @@ -245,7 +201,7 @@ type testApp struct { var _ abci.Application = (*testApp)(nil) -func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (app *testApp) FinalizeBlock(_ context.Context, req *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) { app.CommitVotes = req.DecidedLastCommit.Votes app.Misbehavior = req.Misbehavior app.LastTime = req.Time @@ -256,7 +212,7 @@ func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBl } } - return &abci.ResponseFinalizeBlock{ + return &abci.FinalizeBlockResponse{ ValidatorUpdates: app.ValidatorUpdates, ConsensusParamUpdates: &cmtproto.ConsensusParams{ Version: &cmtproto.VersionParams{ @@ -268,14 +224,14 @@ func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBl }, nil } -func 
(app *testApp) Commit(_ context.Context, _ *abci.RequestCommit) (*abci.ResponseCommit, error) { - return &abci.ResponseCommit{RetainHeight: 1}, nil +func (*testApp) Commit(_ context.Context, _ *abci.CommitRequest) (*abci.CommitResponse, error) { + return &abci.CommitResponse{RetainHeight: 1}, nil } -func (app *testApp) PrepareProposal( +func (*testApp) PrepareProposal( _ context.Context, - req *abci.RequestPrepareProposal, -) (*abci.ResponsePrepareProposal, error) { + req *abci.PrepareProposalRequest, +) (*abci.PrepareProposalResponse, error) { txs := make([][]byte, 0, len(req.Txs)) var totalBytes int64 for _, tx := range req.Txs { @@ -288,17 +244,50 @@ func (app *testApp) PrepareProposal( } txs = append(txs, tx) } - return &abci.ResponsePrepareProposal{Txs: txs}, nil + return &abci.PrepareProposalResponse{Txs: txs}, nil } -func (app *testApp) ProcessProposal( +func (*testApp) ProcessProposal( _ context.Context, - req *abci.RequestProcessProposal, -) (*abci.ResponseProcessProposal, error) { + req *abci.ProcessProposalRequest, +) (*abci.ProcessProposalResponse, error) { for _, tx := range req.Txs { if len(tx) == 0 { - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil + } + } + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil +} + +func makeStateWithParams(nVals, height int, params *types.ConsensusParams, chainID string) (sm.State, dbm.DB, map[string]types.PrivValidator) { + vals, privVals := test.GenesisValidatorSet(nVals) + + s, _ := sm.MakeGenesisState(&types.GenesisDoc{ + ChainID: chainID, + Validators: vals, + AppHash: nil, + ConsensusParams: params, + }) + + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + if err := stateStore.Save(s); err != nil { + panic(err) + } + + for i := 1; i < height; i++ { + s.LastBlockHeight++ + s.LastValidators = s.Validators.Copy() + if err := stateStore.Save(s); err != nil { + panic(err) } } - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil + + return s, stateDB, privVals +} + +func makeState(nVals, height int, chainID string) (sm.State, dbm.DB, map[string]types.PrivValidator) { + return makeStateWithParams(nVals, height, test.ConsensusParams(), chainID) } diff --git a/state/indexer/block.go b/state/indexer/block.go index 844de3077e2..d8573e4561a 100644 --- a/state/indexer/block.go +++ b/state/indexer/block.go @@ -17,7 +17,7 @@ type BlockIndexer interface { Has(height int64) (bool, error) // Index indexes FinalizeBlock events for a given block by its height. - Index(types.EventDataNewBlockEvents) error + Index(events types.EventDataNewBlockEvents) error // Search performs a query for block heights that match a given FinalizeBlock // event search criteria. 
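A note on the pervasive renames running through these test diffs: every abci.Request*/abci.Response* pair becomes abci.*Request/abci.*Response (abci.RequestProcessProposal is now abci.ProcessProposalRequest), and enum values become flat SCREAMING_CASE constants (abci.ResponseProcessProposal_ACCEPT is now abci.PROCESS_PROPOSAL_STATUS_ACCEPT). A minimal sketch of an application method under the renamed API, modeled directly on the testApp above (illustrative only, not part of the patch):

package main

import (
	"context"

	abci "github.com/cometbft/cometbft/abci/types"
)

// echoApp rejects proposals containing empty transactions, mirroring the
// ProcessProposal handler of testApp in helpers_test.go.
type echoApp struct {
	abci.BaseApplication
}

func (*echoApp) ProcessProposal(
	_ context.Context,
	req *abci.ProcessProposalRequest,
) (*abci.ProcessProposalResponse, error) {
	for _, tx := range req.Txs {
		if len(tx) == 0 {
			return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil
		}
	}
	return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil
}

func main() {
	// Compile-time check that the embedded BaseApplication plus the override
	// still satisfies the ABCI application interface.
	var app abci.Application = &echoApp{}
	_ = app
}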
diff --git a/state/indexer/block/indexer.go b/state/indexer/block/indexer.go index b489e022daf..48a38e57dc1 100644 --- a/state/indexer/block/indexer.go +++ b/state/indexer/block/indexer.go @@ -5,7 +5,6 @@ import ( "fmt" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/state/indexer" blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" @@ -16,32 +15,55 @@ import ( "github.com/cometbft/cometbft/state/txindex/null" ) -// EventSinksFromConfig constructs a slice of indexer.EventSink using the provided +// IndexerFromConfig constructs a transaction indexer and a block indexer from the provided // configuration. -// -//nolint:lll -func IndexerFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) (txindex.TxIndexer, indexer.BlockIndexer, error) { +func IndexerFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) ( + txIdx txindex.TxIndexer, blockIdx indexer.BlockIndexer, allIndexersDisabled bool, err error, +) { switch cfg.TxIndex.Indexer { case "kv": store, err := dbProvider(&config.DBContext{ID: "tx_index", Config: cfg}) if err != nil { - return nil, nil, err + return nil, nil, false, err } - return kv.NewTxIndex(store), blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))), nil + return kv.NewTxIndex(store), + blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")), + blockidxkv.WithCompaction(cfg.Storage.Compact, cfg.Storage.CompactionInterval)), + false, + nil case "psql": conn := cfg.TxIndex.PsqlConn if conn == "" { - return nil, nil, errors.New("the psql connection settings cannot be empty") + return nil, nil, false, errors.New("the psql connection settings cannot be empty") + } + opts := []psql.EventSinkOption{} + + txIndexCfg := cfg.TxIndex + if txIndexCfg.TableBlocks != "" { + opts = append(opts, psql.WithTableBlocks(txIndexCfg.TableBlocks)) + } + + if txIndexCfg.TableTxResults != "" { + opts = append(opts, psql.WithTableTxResults(txIndexCfg.TableTxResults)) } - es, err := psql.NewEventSink(cfg.TxIndex.PsqlConn, chainID) + + if txIndexCfg.TableEvents != "" { + opts = append(opts, psql.WithTableEvents(txIndexCfg.TableEvents)) + } + + if txIndexCfg.TableAttributes != "" { + opts = append(opts, psql.WithTableAttributes(txIndexCfg.TableAttributes)) + } + + es, err := psql.NewEventSink(cfg.TxIndex.PsqlConn, chainID, opts...)
if err != nil { - return nil, nil, fmt.Errorf("creating psql indexer: %w", err) + return nil, nil, false, fmt.Errorf("creating psql indexer: %w", err) } - return es.TxIndexer(), es.BlockIndexer(), nil + return es.TxIndexer(), es.BlockIndexer(), false, nil default: - return &null.TxIndex{}, &blockidxnull.BlockerIndexer{}, nil + return &null.TxIndex{}, &blockidxnull.BlockerIndexer{}, true, nil } } diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index 049f63dac99..9f1ec4d3d81 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -10,16 +10,15 @@ import ( "strconv" "strings" - "github.com/cometbft/cometbft/state" "github.com/google/orderedcode" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" idxutil "github.com/cometbft/cometbft/internal/indexer" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/libs/pubsub/query/syntax" + "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/indexer" "github.com/cometbft/cometbft/types" ) @@ -40,12 +39,31 @@ type BlockerIndexer struct { // Matching will be done both on height AND eventSeq eventSeq int64 log log.Logger + + compact bool + compactionInterval int64 + lastPruned int64 +} +type IndexerOption func(*BlockerIndexer) + +// WithCompaction sets the compaction parameters. +func WithCompaction(compact bool, compactionInterval int64) IndexerOption { + return func(idx *BlockerIndexer) { + idx.compact = compact + idx.compactionInterval = compactionInterval + } } -func New(store dbm.DB) *BlockerIndexer { - return &BlockerIndexer{ +func New(store dbm.DB, options ...IndexerOption) *BlockerIndexer { + bsIndexer := &BlockerIndexer{ store: store, } + + for _, option := range options { + option(bsIndexer) + } + + return bsIndexer } func (idx *BlockerIndexer) SetLogger(l log.Logger) { @@ -67,7 +85,7 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { // The following is indexed: // // primary key: encode(block.height | height) => encode(height) -// FinalizeBlock events: encode(eventType.eventAttr|eventValue|height|finalize_block|eventSeq) => encode(height) +// FinalizeBlock events: encode(eventType.eventAttr|eventValue|height|finalize_block|eventSeq) => encode(height). func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockEvents) error { batch := idx.store.NewBatch() defer batch.Close() @@ -98,12 +116,15 @@ func getKeys(indexer BlockerIndexer) [][]byte { panic(err) } for ; itr.Valid(); itr.Next() { - keys = append(keys, itr.Key()) + key := make([]byte, len(itr.Key())) + copy(key, itr.Key()) + + keys = append(keys, key) } return keys } -func (idx *BlockerIndexer) Prune(retainHeight int64) (int64, int64, error) { +func (idx *BlockerIndexer) Prune(retainHeight int64) (numPruned int64, newRetainHeight int64, err error) { // Returns numPruned, newRetainHeight, err // numPruned: the number of heights pruned or 0 in case of error. e.g.
if heights {1, 3, 7} were pruned and there was no error, numPruned == 3 // newRetainHeight: new retain height after pruning or lastRetainHeight in case of error @@ -142,7 +163,7 @@ func (idx *BlockerIndexer) Prune(retainHeight int64) (int64, int64, error) { if err != nil { return 0, lastRetainHeight, err } - deleted := 0 + deleted := int64(0) affectedHeights := make(map[int64]struct{}) for ; itr.Valid(); itr.Next() { if keyBelongsToHeightRange(itr.Key(), lastRetainHeight, retainHeight) { @@ -173,6 +194,12 @@ func (idx *BlockerIndexer) Prune(retainHeight int64) (int64, int64, error) { return 0, lastRetainHeight, errWriteBatch } + if idx.compact && idx.lastPruned+deleted >= idx.compactionInterval { + err = idx.store.Compact(nil, nil) + idx.lastPruned = 0 + } + idx.lastPruned += deleted + return int64(len(affectedHeights)), retainHeight, err } @@ -197,7 +224,7 @@ func (idx *BlockerIndexer) GetRetainHeight() (int64, error) { return height, nil } -func (idx *BlockerIndexer) setLastRetainHeight(height int64, batch dbm.Batch) error { +func (*BlockerIndexer) setLastRetainHeight(height int64, batch dbm.Batch) error { return batch.Set(LastBlockIndexerRetainHeightKey, int64ToBytes(height)) } @@ -287,7 +314,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, // additional constraint on events) continue - } prefix, err := orderedcode.Append(nil, qr.Key) if err != nil { @@ -351,6 +377,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, // fetch matching heights results = make([]int64, 0, len(filteredHeights)) resultMap := make(map[int64]struct{}) +FOR_LOOP: for _, hBz := range filteredHeights { h := int64FromBytes(hBz) @@ -367,8 +394,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, select { case <-ctx.Done(): - break - + break FOR_LOOP default: } } @@ -461,17 +487,14 @@ LOOP: } if err != nil { idx.log.Error("failed to parse bounds:", err) - } else { - if withinBounds { - idx.setTmpHeights(tmpHeights, it) - } + } else if withinBounds { + idx.setTmpHeights(tmpHeights, it) } } select { case <-ctx.Done(): - break - + break LOOP default: } } @@ -493,6 +516,7 @@ LOOP: // Remove/reduce matches in filteredHashes that were not found in this // match (tmpHashes). +FOR_LOOP: for k, v := range filteredHeights { tmpHeight := tmpHeights[k] @@ -503,8 +527,7 @@ LOOP: select { case <-ctx.Done(): - break - + break FOR_LOOP default: } } @@ -513,13 +536,20 @@ LOOP: return filteredHeights, nil } -func (idx *BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.Iterator) { +func (*BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.Iterator) { // If we return attributes that occur within the same events, then store the event sequence in the // result map as well eventSeq, _ := parseEventSeqFromEventKey(it.Key()) - retVal := it.Value() - tmpHeights[string(retVal)+strconv.FormatInt(eventSeq, 10)] = it.Value() + // value comes from cometbft-db Iterator interface Value() API. + // Therefore, we must make a copy before storing references to it. 
+ var ( + value = it.Value() + valueCp = make([]byte, len(value)) + ) + copy(valueCp, value) + + tmpHeights[string(valueCp)+strconv.FormatInt(eventSeq, 10)] = valueCp } // match returns all matching heights that meet a given query condition and start @@ -553,7 +583,6 @@ func (idx *BlockerIndexer) match( defer it.Close() for ; it.Valid(); it.Next() { - keyHeight, err := parseHeightFromEventKey(it.Key()) if err != nil { idx.log.Error("failure to parse height from key:", err) @@ -591,8 +620,8 @@ func (idx *BlockerIndexer) match( } defer it.Close() + LOOP_EXISTS: for ; it.Valid(); it.Next() { - keyHeight, err := parseHeightFromEventKey(it.Key()) if err != nil { idx.log.Error("failure to parse height from key:", err) @@ -611,7 +640,7 @@ func (idx *BlockerIndexer) match( select { case <-ctx.Done(): - break + break LOOP_EXISTS default: } @@ -633,6 +662,7 @@ func (idx *BlockerIndexer) match( } defer it.Close() + LOOP_CONTAINS: for ; it.Valid(); it.Next() { eventValue, err := parseValueFromEventKey(it.Key()) if err != nil { @@ -658,7 +688,7 @@ func (idx *BlockerIndexer) match( select { case <-ctx.Done(): - break + break LOOP_CONTAINS default: } @@ -684,6 +714,7 @@ func (idx *BlockerIndexer) match( // Remove/reduce matches in filteredHeights that were not found in this // match (tmpHeights). +FOR_LOOP: for k, v := range filteredHeights { tmpHeight := tmpHeights[k] if tmpHeight == nil || !bytes.Equal(tmpHeight, v) { @@ -691,8 +722,7 @@ func (idx *BlockerIndexer) match( select { case <-ctx.Done(): - break - + break FOR_LOOP default: } } @@ -705,7 +735,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, hei heightBz := int64ToBytes(height) for _, event := range events { - idx.eventSeq = idx.eventSeq + 1 + idx.eventSeq++ // only index events with a non-empty type if len(event.Type) == 0 { continue @@ -717,7 +747,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, hei } // index iff the event specified index:true and it's not a reserved event - compositeKey := fmt.Sprintf("%s.%s", event.Type, attr.Key) + compositeKey := event.Type + "." 
+ attr.Key if compositeKey == types.BlockHeightKey { return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) } diff --git a/state/indexer/block/kv/kv_test.go b/state/indexer/block/kv/kv_test.go index 67f5e1ef605..8dfa236f798 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/state/indexer/block/kv/kv_test.go @@ -5,19 +5,19 @@ import ( "context" "fmt" "os" + "strconv" "testing" "time" - "github.com/cometbft/cometbft/internal/test" - blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" - "github.com/cometbft/cometbft/state/txindex/kv" "github.com/stretchr/testify/require" "golang.org/x/exp/slices" db "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/pubsub/query" + blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" + "github.com/cometbft/cometbft/state/txindex/kv" "github.com/cometbft/cometbft/types" ) @@ -66,7 +66,7 @@ func BenchmarkBlockerIndexer_Prune(_ *testing.B) { } }() - store, err := db.NewDB("block", db.GoLevelDBBackend, config.DBDir()) + store, err := db.NewDB("block", db.PebbleDBBackend, config.DBDir()) if err != nil { panic(err) } @@ -143,7 +143,7 @@ func TestBlockIndexer(t *testing.T) { Attributes: []abci.EventAttribute{ { Key: "foo", - Value: fmt.Sprintf("%d", i), + Value: strconv.Itoa(i), Index: index, }, }, @@ -207,7 +207,6 @@ func TestBlockIndexer(t *testing.T) { } for name, tc := range testCases { - tc := tc t.Run(name, func(t *testing.T) { results, err := indexer.Search(context.Background(), tc.q) require.NoError(t, err) @@ -298,7 +297,6 @@ func TestBlockIndexerMulti(t *testing.T) { q *query.Query results []int64 }{ - "query return all events from a height - exact": { q: query.MustCompile("block.height = 1"), results: []int64{1}, @@ -366,7 +364,6 @@ func TestBlockIndexerMulti(t *testing.T) { } for name, tc := range testCases { - tc := tc t.Run(name, func(t *testing.T) { results, err := indexer.Search(context.Background(), tc.q) require.NoError(t, err) @@ -376,7 +373,6 @@ func TestBlockIndexerMulti(t *testing.T) { } func TestBigInt(t *testing.T) { - bigInt := "10000000000000000000" bigFloat := bigInt + ".76" bigFloatLower := bigInt + ".1" @@ -436,7 +432,6 @@ func TestBigInt(t *testing.T) { q *query.Query results []int64 }{ - "query return all events from a height - exact": { q: query.MustCompile("block.height = 1"), results: []int64{1}, @@ -503,7 +498,6 @@ func TestBigInt(t *testing.T) { }, } for name, tc := range testCases { - tc := tc t.Run(name, func(t *testing.T) { results, err := indexer.Search(context.Background(), tc.q) require.NoError(t, err) diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index 8fd3b90edf3..7a81542b261 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -2,6 +2,7 @@ package kv import ( "encoding/binary" + "errors" "fmt" "math/big" "strconv" @@ -95,7 +96,7 @@ func getHeightFromKey(key []byte) int64 { if len(remaining) == 0 && blockHeightKeyPrefix == types.BlockHeightKey { return possibleHeight } - panic(fmt.Errorf("key must be either heightKey or eventKey")) + panic(errors.New("key must be either heightKey or eventKey")) } func eventKey(compositeKey, eventValue string, height int64, eventSeq int64) ([]byte, error) { @@ -174,7 +175,7 @@ func parseEventSeqFromEventKey(key []byte) (int64, error) { // function_type = 'being_block_event' | 'end_block_event' if len(remaining) == 0 { // The event was 
not properly indexed - return 0, fmt.Errorf("failed to parse event sequence, invalid event format") + return 0, errors.New("failed to parse event sequence, invalid event format") } var typ string remaining2, err := orderedcode.Parse(remaining, &typ) // Check if we have scenarios 2. or 3. (described above). @@ -197,7 +198,7 @@ func parseEventSeqFromEventKey(key []byte) (int64, error) { // Remove all occurrences of height equality queries except one. While we are traversing the conditions, check whether the only condition in // addition to match events is the height equality or height range query. At the same time, if we do have a height range condition // ignore the height equality condition. If a height equality exists, place the condition index in the query and the desired height -// into the heightInfo struct +// into the heightInfo struct. func dedupHeight(conditions []syntax.Condition) (dedupConditions []syntax.Condition, heightInfo HeightInfo, found bool) { heightInfo.heightEqIdx = -1 heightRangeExists := false @@ -249,10 +250,9 @@ func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) (bool, error) if err != nil || !withinBounds { return false, err } - } else { - if heightInfo.height != 0 && keyHeight != heightInfo.height { - return false, nil - } + } else if heightInfo.height != 0 && keyHeight != heightInfo.height { + return false, nil } + return true, nil } diff --git a/state/indexer/block/null/null.go b/state/indexer/block/null/null.go index 3918340b2df..b206447811e 100644 --- a/state/indexer/block/null/null.go +++ b/state/indexer/block/null/null.go @@ -15,29 +15,29 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // TxIndex implements a no-op block indexer. type BlockerIndexer struct{} -func (idx *BlockerIndexer) SetRetainHeight(_ int64) error { +func (*BlockerIndexer) SetRetainHeight(_ int64) error { return nil } -func (idx *BlockerIndexer) GetRetainHeight() (int64, error) { +func (*BlockerIndexer) GetRetainHeight() (int64, error) { return 0, nil } -func (idx *BlockerIndexer) Prune(_ int64) (int64, int64, error) { +func (*BlockerIndexer) Prune(_ int64) (numPruned, newRetainHeight int64, err error) { return 0, 0, nil } -func (idx *BlockerIndexer) Has(int64) (bool, error) { +func (*BlockerIndexer) Has(int64) (bool, error) { return false, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } -func (idx *BlockerIndexer) Index(types.EventDataNewBlockEvents) error { +func (*BlockerIndexer) Index(types.EventDataNewBlockEvents) error { return nil } -func (idx *BlockerIndexer) Search(context.Context, *query.Query) ([]int64, error) { +func (*BlockerIndexer) Search(context.Context, *query.Query) ([]int64, error) { return []int64{}, nil } -func (idx *BlockerIndexer) SetLogger(log.Logger) { +func (*BlockerIndexer) SetLogger(log.Logger) { } diff --git a/state/indexer/mocks/block_indexer.go b/state/indexer/mocks/block_indexer.go index a92057cb9ed..de8e9be65cd 100644 --- a/state/indexer/mocks/block_indexer.go +++ b/state/indexer/mocks/block_indexer.go @@ -23,6 +23,10 @@ type BlockIndexer struct { func (_m *BlockIndexer) GetRetainHeight() (int64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetRetainHeight") + } + var r0 int64 var r1 error if rf, ok := ret.Get(0).(func() (int64, error)); ok { @@ -47,6 +51,10 @@ func (_m *BlockIndexer) GetRetainHeight() (int64, error) { func (_m *BlockIndexer) Has(height int64) (bool, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for Has") + } 
+ var r0 bool var r1 error if rf, ok := ret.Get(0).(func(int64) (bool, error)); ok { @@ -67,13 +75,17 @@ func (_m *BlockIndexer) Has(height int64) (bool, error) { return r0, r1 } -// Index provides a mock function with given fields: _a0 -func (_m *BlockIndexer) Index(_a0 types.EventDataNewBlockEvents) error { - ret := _m.Called(_a0) +// Index provides a mock function with given fields: events +func (_m *BlockIndexer) Index(events types.EventDataNewBlockEvents) error { + ret := _m.Called(events) + + if len(ret) == 0 { + panic("no return value specified for Index") + } var r0 error if rf, ok := ret.Get(0).(func(types.EventDataNewBlockEvents) error); ok { - r0 = rf(_a0) + r0 = rf(events) } else { r0 = ret.Error(0) } @@ -85,6 +97,10 @@ func (_m *BlockIndexer) Index(_a0 types.EventDataNewBlockEvents) error { func (_m *BlockIndexer) Prune(retainHeight int64) (int64, int64, error) { ret := _m.Called(retainHeight) + if len(ret) == 0 { + panic("no return value specified for Prune") + } + var r0 int64 var r1 int64 var r2 error @@ -116,6 +132,10 @@ func (_m *BlockIndexer) Prune(retainHeight int64) (int64, int64, error) { func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { ret := _m.Called(ctx, q) + if len(ret) == 0 { + panic("no return value specified for Search") + } + var r0 []int64 var r1 error if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]int64, error)); ok { @@ -147,6 +167,10 @@ func (_m *BlockIndexer) SetLogger(l log.Logger) { func (_m *BlockIndexer) SetRetainHeight(retainHeight int64) error { ret := _m.Called(retainHeight) + if len(ret) == 0 { + panic("no return value specified for SetRetainHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(retainHeight) diff --git a/state/indexer/query_range.go b/state/indexer/query_range.go index eb85b9bfee8..3718044d903 100644 --- a/state/indexer/query_range.go +++ b/state/indexer/query_range.go @@ -10,20 +10,20 @@ import ( // QueryRanges defines a mapping between a composite event key and a QueryRange. // -// e.g.account.number => queryRange{lowerBound: 1, upperBound: 5} +// e.g.account.number => queryRange{lowerBound: 1, upperBound: 5}. type QueryRanges map[string]QueryRange // QueryRange defines a range within a query condition. type QueryRange struct { - LowerBound interface{} // int || time.Time - UpperBound interface{} // int || time.Time + LowerBound any // int || time.Time + UpperBound any // int || time.Time Key string IncludeLowerBound bool IncludeUpperBound bool } // AnyBound returns either the lower bound if non-nil, otherwise the upper bound. -func (qr QueryRange) AnyBound() interface{} { +func (qr QueryRange) AnyBound() any { if qr.LowerBound != nil { return qr.LowerBound } @@ -33,7 +33,7 @@ func (qr QueryRange) AnyBound() interface{} { // LowerBoundValue returns the value for the lower bound. If the lower bound is // nil, nil will be returned. -func (qr QueryRange) LowerBoundValue() interface{} { +func (qr QueryRange) LowerBoundValue() any { if qr.LowerBound == nil { return nil } @@ -66,7 +66,7 @@ func (qr QueryRange) LowerBoundValue() interface{} { // UpperBoundValue returns the value for the upper bound. If the upper bound is // nil, nil will be returned. 
-func (qr QueryRange) UpperBoundValue() interface{} { +func (qr QueryRange) UpperBoundValue() any { if qr.UpperBound == nil { return nil } @@ -144,7 +144,7 @@ func LookForRangesWithHeight(conditions []syntax.Condition) (queryRange QueryRan return queryRange, indexes, heightRange } -// Deprecated: This function is not used anymore and will be replaced with LookForRangesWithHeight +// Deprecated: This function is not used anymore and will be replaced with LookForRangesWithHeight. func LookForRanges(conditions []syntax.Condition) (ranges QueryRanges, indexes []int) { ranges = make(QueryRanges) for i, c := range conditions { @@ -190,7 +190,7 @@ func IsRangeOperation(op syntax.Token) bool { } } -func conditionArg(c syntax.Condition) interface{} { +func conditionArg(c syntax.Condition) any { if c.Arg == nil { return nil } diff --git a/state/indexer/sink/psql/backport.go b/state/indexer/sink/psql/backport.go index c7f2c1699dc..80de300e4a3 100644 --- a/state/indexer/sink/psql/backport.go +++ b/state/indexer/sink/psql/backport.go @@ -17,18 +17,13 @@ import ( "context" "errors" - "github.com/cometbft/cometbft/libs/log" - abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/types" ) -const ( - eventTypeFinalizeBlock = "finalize_block" -) - // TxIndexer returns a bridge from es to the CometBFT v0.34 transaction indexer. func (es *EventSink) TxIndexer() BackportTxIndexer { return BackportTxIndexer{psql: es} @@ -38,15 +33,15 @@ func (es *EventSink) TxIndexer() BackportTxIndexer { // indexing operations to an underlying PostgreSQL event sink. type BackportTxIndexer struct{ psql *EventSink } -func (b BackportTxIndexer) GetRetainHeight() (int64, error) { +func (BackportTxIndexer) GetRetainHeight() (int64, error) { return 0, nil } -func (b BackportTxIndexer) SetRetainHeight(_ int64) error { +func (BackportTxIndexer) SetRetainHeight(_ int64) error { return nil } -func (b BackportTxIndexer) Prune(_ int64) (int64, int64, error) { +func (BackportTxIndexer) Prune(_ int64) (numPruned, newRetainHeight int64, err error) { // Not implemented return 0, 0, nil } @@ -69,8 +64,8 @@ func (BackportTxIndexer) Get([]byte) (*abci.TxResult, error) { // Search is implemented to satisfy the TxIndexer interface, but it is not // supported by the psql event sink and reports an error for all inputs. -func (BackportTxIndexer) Search(context.Context, *query.Query) ([]*abci.TxResult, error) { - return nil, errors.New("the TxIndexer.Search method is not supported") +func (BackportTxIndexer) Search(context.Context, *query.Query, txindex.Pagination) ([]*abci.TxResult, int, error) { + return nil, 0, errors.New("the TxIndexer.Search method is not supported") } func (BackportTxIndexer) SetLogger(log.Logger) {} @@ -85,15 +80,15 @@ func (es *EventSink) BlockIndexer() BackportBlockIndexer { // delegating indexing operations to an underlying PostgreSQL event sink. 
type BackportBlockIndexer struct{ psql *EventSink } -func (b BackportBlockIndexer) SetRetainHeight(_ int64) error { +func (BackportBlockIndexer) SetRetainHeight(_ int64) error { return nil } -func (b BackportBlockIndexer) GetRetainHeight() (int64, error) { +func (BackportBlockIndexer) GetRetainHeight() (int64, error) { return 0, nil } -func (b BackportBlockIndexer) Prune(_ int64) (int64, int64, error) { +func (BackportBlockIndexer) Prune(_ int64) (numPruned, newRetainHeight int64, err error) { // Not implemented return 0, 0, nil } diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go index e383c7aa289..c972e3684c1 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -6,45 +6,96 @@ import ( "database/sql" "errors" "fmt" + "strconv" "strings" "time" "github.com/cosmos/gogoproto/proto" + "github.com/lib/pq" abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/types" ) const ( - tableBlocks = "blocks" - tableTxResults = "tx_results" - tableEvents = "events" - tableAttributes = "attributes" - driverName = "postgres" + defaultTableBlocks = "blocks" + defaultTableTxResults = "tx_results" + defaultTableEvents = "events" + defaultTableAttributes = "attributes" + driverName = "postgres" ) // EventSink is an indexer backend providing the tx/block index services. This // implementation stores records in a PostgreSQL database using the schema // defined in state/indexer/sink/psql/schema.sql. type EventSink struct { - store *sql.DB - chainID string + store *sql.DB + chainID string + tableBlocks string + tableTxResults string + tableEvents string + tableAttributes string } +type EventSinkOption func(*EventSink) + // NewEventSink constructs an event sink associated with the PostgreSQL // database specified by connStr. Events written to the sink are attributed to // the specified chainID. -func NewEventSink(connStr, chainID string) (*EventSink, error) { - db, err := sql.Open(driverName, connStr) - if err != nil { - return nil, err +func NewEventSink(connStr, chainID string, opts ...EventSinkOption) (*EventSink, error) { + es := &EventSink{ + chainID: chainID, + tableBlocks: defaultTableBlocks, + tableTxResults: defaultTableTxResults, + tableEvents: defaultTableEvents, + tableAttributes: defaultTableAttributes, + } + + for _, opt := range opts { + opt(es) + } + + if es.store == nil && connStr != "" { + db, err := sql.Open(driverName, connStr) + if err != nil { + return nil, err + } + es.store = db + } + + return es, nil +} + +func WithStore(store *sql.DB) EventSinkOption { + return func(es *EventSink) { + es.store = store + } +} + +func WithTableBlocks(tableBlocks string) EventSinkOption { + return func(es *EventSink) { + es.tableBlocks = tableBlocks + } +} + +func WithTableTxResults(tableTxResults string) EventSinkOption { + return func(es *EventSink) { + es.tableTxResults = tableTxResults + } +} + +func WithTableEvents(tableEvents string) EventSinkOption { + return func(es *EventSink) { + es.tableEvents = tableEvents } +} - return &EventSink{ - store: db, - chainID: chainID, - }, nil +func WithTableAttributes(tableAttributes string) EventSinkOption { + return func(es *EventSink) { + es.tableAttributes = tableAttributes + } } // DB returns the underlying Postgres connection used by the sink. 
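The psql event sink now takes functional options, so the table names (previously package-level constants) and, for tests, the underlying *sql.DB via WithStore can be supplied by the caller. A usage sketch built only from the options introduced above; the connection string, chain ID, and table names are placeholders:

package main

import (
	"log"

	"github.com/cometbft/cometbft/state/indexer/sink/psql"
)

func main() {
	es, err := psql.NewEventSink(
		"postgres://comet:secret@localhost:5432/comet?sslmode=disable", // placeholder DSN
		"my-chain-id", // placeholder chain ID
		psql.WithTableBlocks("custom_blocks"),
		psql.WithTableTxResults("custom_tx_results"),
		psql.WithTableEvents("custom_events"),
		psql.WithTableAttributes("custom_attributes"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// DB returns the underlying Postgres connection used by the sink.
	defer es.DB().Close()
}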
@@ -67,66 +118,57 @@ func runInTransaction(db *sql.DB, query func(*sql.Tx) error) error { return dbtx.Commit() } -// queryWithID executes the specified SQL query with the given arguments, -// expecting a single-row, single-column result containing an ID. If the query -// succeeds, the ID from the result is returned. -func queryWithID(tx *sql.Tx, query string, args ...interface{}) (uint32, error) { - var id uint32 - if err := tx.QueryRow(query, args...).Scan(&id); err != nil { - return 0, err - } - return id, nil +func runBulkInsert(db *sql.DB, tableName string, columns []string, inserts [][]any) error { + return runInTransaction(db, func(tx *sql.Tx) error { + stmt, err := tx.Prepare(pq.CopyIn(tableName, columns...)) + if err != nil { + return fmt.Errorf("preparing bulk insert statement: %w", err) + } + defer stmt.Close() + for _, insert := range inserts { + if _, err := stmt.Exec(insert...); err != nil { + return fmt.Errorf("executing insert statement: %w", err) + } + } + if _, err := stmt.Exec(); err != nil { + return fmt.Errorf("flushing bulk insert: %w", err) + } + return nil + }) +} + +func randomBigserial() int64 { + return rand.Int63() } -// insertEvents inserts a slice of events and any indexed attributes of those -// events into the database associated with dbtx. -// -// If txID > 0, the event is attributed to the transaction with that -// ID; otherwise it is recorded as a block event. -func insertEvents(dbtx *sql.Tx, blockID, txID uint32, evts []abci.Event) error { +var ( + txrInsertColumns = []string{"rowid", "block_id", "index", "created_at", "tx_hash", "tx_result"} + eventInsertColumns = []string{"rowid", "block_id", "tx_id", "type"} + attrInsertColumns = []string{"event_id", "key", "composite_key", "value"} +) + +func bulkInsertEvents(blockID, txID int64, events []abci.Event) (eventInserts, attrInserts [][]any) { // Populate the transaction ID field iff one is defined (> 0). - var txIDArg interface{} + var txIDArg any if txID > 0 { txIDArg = txID } - - const ( - insertEventQuery = ` - INSERT INTO ` + tableEvents + ` (block_id, tx_id, type) - VALUES ($1, $2, $3) - RETURNING rowid; - ` - insertAttributeQuery = ` - INSERT INTO ` + tableAttributes + ` (event_id, key, composite_key, value) - VALUES ($1, $2, $3, $4); - ` - ) - - // Add each event to the events table, and retrieve its row ID to use when - // adding any attributes the event provides. - for _, evt := range evts { + for _, event := range events { // Skip events with an empty type. - if evt.Type == "" { + if event.Type == "" { continue } - - eid, err := queryWithID(dbtx, insertEventQuery, blockID, txIDArg, evt.Type) - if err != nil { - return err - } - - // Add any attributes flagged for indexing. - for _, attr := range evt.Attributes { + eventID := randomBigserial() + eventInserts = append(eventInserts, []any{eventID, blockID, txIDArg, event.Type}) + for _, attr := range event.Attributes { if !attr.Index { continue } - compositeKey := evt.Type + "." + attr.Key - if _, err := dbtx.Exec(insertAttributeQuery, eid, attr.Key, compositeKey, attr.Value); err != nil { - return err - } + compositeKey := event.Type + "." 
+ attr.Key + attrInserts = append(attrInserts, []any{eventID, attr.Key, compositeKey, attr.Value}) } } - return nil + return eventInserts, attrInserts } // makeIndexedEvent constructs an event from the specified composite key and @@ -148,107 +190,133 @@ func makeIndexedEvent(compositeKey, value string) abci.Event { func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockEvents) error { ts := time.Now().UTC() - return runInTransaction(es.store, func(dbtx *sql.Tx) error { - // Add the block to the blocks table and report back its row ID for use - // in indexing the events for the block. - blockID, err := queryWithID(dbtx, ` -INSERT INTO `+tableBlocks+` (height, chain_id, created_at) + // Add the block to the blocks table and report back its row ID for use + // in indexing the events for the block. + var blockID int64 + //nolint:execinquery + err := es.store.QueryRow(` +INSERT INTO `+es.tableBlocks+` (height, chain_id, created_at) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING RETURNING rowid; -`, h.Height, es.chainID, ts) - if err == sql.ErrNoRows { - return nil // we already saw this block; quietly succeed - } else if err != nil { - return fmt.Errorf("indexing block header: %w", err) - } +`, h.Height, es.chainID, ts).Scan(&blockID) + if errors.Is(err, sql.ErrNoRows) { + return nil // we already saw this block; quietly succeed + } else if err != nil { + return fmt.Errorf("indexing block header: %w", err) + } - // Insert the special block meta-event for height. - if err := insertEvents(dbtx, blockID, 0, []abci.Event{ - makeIndexedEvent(types.BlockHeightKey, fmt.Sprint(h.Height)), - }); err != nil { - return fmt.Errorf("block meta-events: %w", err) - } - // Insert all the block events. Order is important here, - if err := insertEvents(dbtx, blockID, 0, h.Events); err != nil { - return fmt.Errorf("finalizeblock events: %w", err) - } - return nil - }) + // Insert the special block meta-event for height. + events := append([]abci.Event{makeIndexedEvent(types.BlockHeightKey, strconv.FormatInt(h.Height, 10))}, h.Events...) + // Insert all the block events. Order is important here, + eventInserts, attrInserts := bulkInsertEvents(blockID, 0, events) + if err := runBulkInsert(es.store, es.tableEvents, eventInsertColumns, eventInserts); err != nil { + return fmt.Errorf("failed bulk insert of events: %w", err) + } + if err := runBulkInsert(es.store, es.tableAttributes, attrInsertColumns, attrInserts); err != nil { + return fmt.Errorf("failed bulk insert of attributes: %w", err) + } + return nil +} + +// getBlockIDs returns corresponding block ids for the provided heights. 
+func (es *EventSink) getBlockIDs(heights []int64) ([]int64, error) { + var blockIDs pq.Int64Array + if err := es.store.QueryRow(` +SELECT array_agg(( + SELECT rowid FROM `+es.tableBlocks+` WHERE height = txr.height AND chain_id = $1 +)) FROM unnest($2::bigint[]) AS txr(height);`, + es.chainID, pq.Array(heights)).Scan(&blockIDs); err != nil { + return nil, fmt.Errorf("getting block ids for txs from sql: %w", err) + } + return blockIDs, nil +} + +func prefetchTxrExistence(db *sql.DB, blockIDs []int64, indexes []uint32, txResultsTable string) ([]bool, error) { + var existence []bool + if err := db.QueryRow(` +SELECT array_agg(( + SELECT EXISTS(SELECT 1 FROM `+txResultsTable+` WHERE block_id = txr.block_id AND index = txr.index) +)) FROM UNNEST($1::bigint[], $2::integer[]) as txr(block_id, index);`, + pq.Array(blockIDs), pq.Array(indexes)).Scan((*pq.BoolArray)(&existence)); err != nil { + return nil, fmt.Errorf("fetching already indexed txrs: %w", err) + } + return existence, nil } func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error { ts := time.Now().UTC() - - for _, txr := range txrs { + heights := make([]int64, len(txrs)) + indexes := make([]uint32, len(txrs)) + for i, txr := range txrs { + heights[i] = txr.Height + indexes[i] = txr.Index + } + // prefetch blockIDs for all txrs. Every block header must have been indexed + // prior to the transactions belonging to it. + blockIDs, err := es.getBlockIDs(heights) + if err != nil { + return fmt.Errorf("getting block ids for txs: %w", err) + } + alreadyIndexed, err := prefetchTxrExistence(es.store, blockIDs, indexes, es.tableTxResults) + if err != nil { + return fmt.Errorf("failed to prefetch which txrs were already indexed: %w", err) + } + txrInserts, attrInserts, eventInserts := make([][]any, 0, len(txrs)), make([][]any, 0, len(txrs)), make([][]any, 0, len(txrs)) + for i, txr := range txrs { + if alreadyIndexed[i] { + continue + } // Encode the result message in protobuf wire format for indexing. resultData, err := proto.Marshal(txr) if err != nil { return fmt.Errorf("marshaling tx_result: %w", err) } - // Index the hash of the underlying transaction as a hex string. txHash := fmt.Sprintf("%X", types.Tx(txr.Tx).Hash()) - - if err := runInTransaction(es.store, func(dbtx *sql.Tx) error { - // Find the block associated with this transaction. The block header - // must have been indexed prior to the transactions belonging to it. - blockID, err := queryWithID(dbtx, ` -SELECT rowid FROM `+tableBlocks+` WHERE height = $1 AND chain_id = $2; -`, txr.Height, es.chainID) - if err != nil { - return fmt.Errorf("finding block ID: %w", err) - } - - // Insert a record for this tx_result and capture its ID for indexing events. - txID, err := queryWithID(dbtx, ` -INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT DO NOTHING - RETURNING rowid; -`, blockID, txr.Index, ts, txHash, resultData) - if err == sql.ErrNoRows { - return nil // we already saw this transaction; quietly succeed - } else if err != nil { - return fmt.Errorf("indexing tx_result: %w", err) - } - - // Insert the special transaction meta-events for hash and height. - if err := insertEvents(dbtx, blockID, txID, []abci.Event{ - makeIndexedEvent(types.TxHashKey, txHash), - makeIndexedEvent(types.TxHeightKey, fmt.Sprint(txr.Height)), - }); err != nil { - return fmt.Errorf("indexing transaction meta-events: %w", err) - } - // Index any events packaged with the transaction. 
- if err := insertEvents(dbtx, blockID, txID, txr.Result.Events); err != nil { - return fmt.Errorf("indexing transaction events: %w", err) - } - return nil - }); err != nil { - return err - } + // Generate random ID for this tx_result and insert a record for it + txID := randomBigserial() + txrInserts = append(txrInserts, []any{txID, blockIDs[i], txr.Index, ts, txHash, resultData}) + // Insert the special transaction meta-events for hash and height. + events := append([]abci.Event{ + makeIndexedEvent(types.TxHashKey, txHash), + makeIndexedEvent(types.TxHeightKey, strconv.FormatInt(txr.Height, 10)), + }, + txr.Result.Events..., + ) + newEventInserts, newAttrInserts := bulkInsertEvents(blockIDs[i], txID, events) + eventInserts = append(eventInserts, newEventInserts...) + attrInserts = append(attrInserts, newAttrInserts...) + } + if err := runBulkInsert(es.store, es.tableTxResults, txrInsertColumns, txrInserts); err != nil { + return fmt.Errorf("bulk inserting txrs: %w", err) + } + if err := runBulkInsert(es.store, es.tableEvents, eventInsertColumns, eventInserts); err != nil { + return fmt.Errorf("bulk inserting events: %w", err) + } + if err := runBulkInsert(es.store, es.tableAttributes, attrInsertColumns, attrInserts); err != nil { + return fmt.Errorf("bulk inserting attributes: %w", err) } return nil } // SearchBlockEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchBlockEvents(_ context.Context, _ *query.Query) ([]int64, error) { +func (*EventSink) SearchBlockEvents(_ context.Context, _ *query.Query) ([]int64, error) { return nil, errors.New("block search is not supported via the postgres event sink") } // SearchTxEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchTxEvents(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { +func (*EventSink) SearchTxEvents(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { return nil, errors.New("tx search is not supported via the postgres event sink") } // GetTxByHash is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) GetTxByHash(_ []byte) (*abci.TxResult, error) { +func (*EventSink) GetTxByHash(_ []byte) (*abci.TxResult, error) { return nil, errors.New("getTxByHash is not supported via the postgres event sink") } // HasBlock is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) HasBlock(_ int64) (bool, error) { +func (*EventSink) HasBlock(_ int64) (bool, error) { return false, errors.New("hasBlock is not supported via the postgres event sink") } diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go index 04bc704e84e..3daeb144745 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/state/indexer/sink/psql/psql_test.go @@ -3,6 +3,7 @@ package psql import ( "context" "database/sql" + "errors" "flag" "fmt" "log" @@ -18,13 +19,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + _ "github.com/lib/pq" + abci "github.com/cometbft/cometbft/abci/types" tmlog "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/types" - - // Register the Postgres database driver. 
- _ "github.com/lib/pq" ) var ( @@ -46,6 +46,8 @@ const ( viewBlockEvents = "block_events" viewTxEvents = "tx_events" + + eventTypeFinalizeBlock = "finalize_block" ) func TestMain(m *testing.M) { @@ -140,11 +142,12 @@ func TestMain(m *testing.M) { func TestIndexing(t *testing.T) { t.Run("IndexBlockEvents", func(t *testing.T) { - indexer := &EventSink{store: testDB(), chainID: chainID} + indexer, err := NewEventSink("", chainID, WithStore(testDB())) + require.Nil(t, err, "event sink creation") require.NoError(t, indexer.IndexBlockEvents(newTestBlockEvents())) - verifyBlock(t, 1) - verifyBlock(t, 2) + verifyBlock(t, indexer, 1) + verifyBlock(t, indexer, 2) verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) }) verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) }) @@ -154,14 +157,15 @@ func TestIndexing(t *testing.T) { return v != nil, err }) - require.NoError(t, verifyTimeStamp(tableBlocks)) + require.NoError(t, verifyTimeStamp(indexer.tableBlocks)) // Attempting to reindex the same events should gracefully succeed. require.NoError(t, indexer.IndexBlockEvents(newTestBlockEvents())) }) t.Run("IndexTxEvents", func(t *testing.T) { - indexer := &EventSink{store: testDB(), chainID: chainID} + indexer, err := NewEventSink("", chainID, WithStore(testDB())) + require.Nil(t, err, "event sink creation") txResult := txResultWithEvents([]abci.Event{ makeIndexedEvent("account.number", "1"), @@ -178,11 +182,11 @@ func TestIndexing(t *testing.T) { }) require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult})) - txr, err := loadTxResult(types.Tx(txResult.Tx).Hash()) + txr, err := loadTxResult(indexer, types.Tx(txResult.Tx).Hash()) require.NoError(t, err) assert.Equal(t, txResult, txr) - require.NoError(t, verifyTimeStamp(tableTxResults)) + require.NoError(t, verifyTimeStamp(indexer.tableTxResults)) require.NoError(t, verifyTimeStamp(viewTxEvents)) verifyNotImplemented(t, "getTxByHash", func() (bool, error) { @@ -200,11 +204,12 @@ func TestIndexing(t *testing.T) { }) t.Run("IndexerService", func(t *testing.T) { - indexer := &EventSink{store: testDB(), chainID: chainID} + indexer, err := NewEventSink("", chainID, WithStore(testDB())) + require.Nil(t, err, "event sink creation") // event bus eventBus := types.NewEventBus() - err := eventBus.Start() + err = eventBus.Start() require.NoError(t, err) t.Cleanup(func() { if err := eventBus.Stop(); err != nil { @@ -269,7 +274,7 @@ func newTestBlockEvents() types.EventDataNewBlockEvents { } } -// readSchema loads the indexing database schema file +// readSchema loads the indexing database schema file. 
func readSchema() ([]*schema.Migration, error) { const filename = "schema.sql" contents, err := os.ReadFile(filename) @@ -312,11 +317,11 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } } -func loadTxResult(hash []byte) (*abci.TxResult, error) { +func loadTxResult(indexer *EventSink, hash []byte) (*abci.TxResult, error) { hashString := fmt.Sprintf("%X", hash) var resultData []byte - if err := testDB().QueryRow(` -SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; + if err := indexer.store.QueryRow(` +SELECT tx_result FROM `+indexer.tableTxResults+` WHERE tx_hash = $1; `, hashString).Scan(&resultData); err != nil { return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err) } @@ -337,21 +342,22 @@ SELECT DISTINCT %[1]s.created_at `, tableName), time.Now().Add(-2*time.Second)).Err() } -func verifyBlock(t *testing.T, height int64) { +func verifyBlock(t *testing.T, indexer *EventSink, height int64) { + t.Helper() // Check that the blocks table contains an entry for this height. - if err := testDB().QueryRow(` -SELECT height FROM `+tableBlocks+` WHERE height = $1; -`, height).Err(); err == sql.ErrNoRows { + if err := indexer.store.QueryRow(` +SELECT height FROM `+indexer.tableBlocks+` WHERE height = $1; +`, height).Err(); errors.Is(err, sql.ErrNoRows) { t.Errorf("No block found for height=%d", height) } else if err != nil { t.Fatalf("Database query failed: %v", err) } // Verify the presence of begin_block and end_block events. - if err := testDB().QueryRow(` + if err := indexer.store.QueryRow(` SELECT type, height, chain_id FROM `+viewBlockEvents+` WHERE height = $1 AND type = $2 AND chain_id = $3; -`, height, eventTypeFinalizeBlock, chainID).Err(); err == sql.ErrNoRows { +`, height, eventTypeFinalizeBlock, chainID).Err(); errors.Is(err, sql.ErrNoRows) { t.Errorf("No %q event found for height=%d", eventTypeFinalizeBlock, height) } else if err != nil { t.Fatalf("Database query failed: %v", err) @@ -368,7 +374,7 @@ func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) { want := label + " is not supported via the postgres event sink" ok, err := f() assert.False(t, ok) - require.NotNil(t, err) + require.Error(t, err) assert.Equal(t, want, err.Error()) } diff --git a/state/metrics.gen.go b/state/metrics.gen.go index e57cf8efff6..4df2a237ffb 100644 --- a/state/metrics.gen.go +++ b/state/metrics.gen.go @@ -3,8 +3,8 @@ package state import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -14,14 +14,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels = append(labels, labelsAndValues[i]) } return &Metrics{ - BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_processing_time", - Help: "Time spent processing FinalizeBlock", - - Buckets: stdprometheus.LinearBuckets(1, 10, 10), - }, labels).With(labelsAndValues...), ConsensusParamUpdates: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -88,12 +80,25 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "block_indexer_base_height", Help: "BlockIndexerBaseHeight shows the first height at which block indices are available", 
}, labels).With(labelsAndValues...), + StoreAccessDurationSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "store_access_duration_seconds", + Help: "The duration of accesses to the state store labeled by which method was called on the store.", + + Buckets: stdprometheus.ExponentialBuckets(0.0002, 10, 5), + }, append(labels, "method")).With(labelsAndValues...), + FireBlockEventsDelaySeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "fire_block_events_delay_seconds", + Help: "The duration of event firing related to a new block", + }, labels).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - BlockProcessingTime: discard.NewHistogram(), ConsensusParamUpdates: discard.NewCounter(), ValidatorSetUpdates: discard.NewCounter(), PruningServiceBlockRetainHeight: discard.NewGauge(), @@ -105,5 +110,7 @@ func NopMetrics() *Metrics { ABCIResultsBaseHeight: discard.NewGauge(), TxIndexerBaseHeight: discard.NewGauge(), BlockIndexerBaseHeight: discard.NewGauge(), + StoreAccessDurationSeconds: discard.NewHistogram(), + FireBlockEventsDelaySeconds: discard.NewGauge(), } } diff --git a/state/metrics.go b/state/metrics.go index 0547daf99ec..9b269c34e4b 100644 --- a/state/metrics.go +++ b/state/metrics.go @@ -1,7 +1,7 @@ package state import ( - "github.com/go-kit/kit/metrics" + "github.com/cometbft/cometbft/libs/metrics" ) const ( @@ -14,17 +14,14 @@ const ( // Metrics contains metrics exposed by this package. type Metrics struct { - // Time spent processing FinalizeBlock - BlockProcessingTime metrics.Histogram `metrics_buckettype:"lin" metrics_bucketsizes:"1, 10, 10"` - // ConsensusParamUpdates is the total number of times the application has // updated the consensus params since process start. - //metrics:Number of consensus parameter updates returned by the application since process start. + // metrics:Number of consensus parameter updates returned by the application since process start. ConsensusParamUpdates metrics.Counter // ValidatorSetUpdates is the total number of times the application has // updated the validator set since process start. - //metrics:Number of validator set updates returned by the application since process start. + // metrics:Number of validator set updates returned by the application since process start. ValidatorSetUpdates metrics.Counter // PruningServiceBlockRetainHeight is the accepted block @@ -62,4 +59,11 @@ type Metrics struct { // BlockIndexerBaseHeight shows the first height at which // block indices are available BlockIndexerBaseHeight metrics.Gauge + + // The duration of accesses to the state store labeled by which method + // was called on the store. 
+ StoreAccessDurationSeconds metrics.Histogram `metrics_bucketsizes:"0.0002, 10, 5" metrics_buckettype:"exp" metrics_labels:"method"` + + // The duration of event firing related to a new block + FireBlockEventsDelaySeconds metrics.Gauge } diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index c48fc813cc6..eea2d0599de 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -18,6 +18,10 @@ type BlockStore struct { func (_m *BlockStore) Base() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Base") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -32,6 +36,10 @@ func (_m *BlockStore) Base() int64 { func (_m *BlockStore) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -46,6 +54,10 @@ func (_m *BlockStore) Close() error { func (_m *BlockStore) DeleteLatestBlock() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for DeleteLatestBlock") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -60,6 +72,10 @@ func (_m *BlockStore) DeleteLatestBlock() error { func (_m *BlockStore) Height() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Height") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -74,6 +90,10 @@ func (_m *BlockStore) Height() int64 { func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LoadBaseMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok { r0 = rf() @@ -87,10 +107,18 @@ func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { } // LoadBlock provides a mock function with given fields: height -func (_m *BlockStore) LoadBlock(height int64) *types.Block { +func (_m *BlockStore) LoadBlock(height int64) (*types.Block, *types.BlockMeta) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlock") + } + var r0 *types.Block + var r1 *types.BlockMeta + if rf, ok := ret.Get(0).(func(int64) (*types.Block, *types.BlockMeta)); ok { + return rf(height) + } if rf, ok := ret.Get(0).(func(int64) *types.Block); ok { r0 = rf(height) } else { @@ -99,14 +127,30 @@ func (_m *BlockStore) LoadBlock(height int64) *types.Block { } } - return r0 + if rf, ok := ret.Get(1).(func(int64) *types.BlockMeta); ok { + r1 = rf(height) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*types.BlockMeta) + } + } + + return r0, r1 } // LoadBlockByHash provides a mock function with given fields: hash -func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { +func (_m *BlockStore) LoadBlockByHash(hash []byte) (*types.Block, *types.BlockMeta) { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for LoadBlockByHash") + } + var r0 *types.Block + var r1 *types.BlockMeta + if rf, ok := ret.Get(0).(func([]byte) (*types.Block, *types.BlockMeta)); ok { + return rf(hash) + } if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok { r0 = rf(hash) } else { @@ -115,13 +159,25 @@ func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { } } - return r0 + if rf, ok := ret.Get(1).(func([]byte) *types.BlockMeta); ok { + r1 = rf(hash) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*types.BlockMeta) + } + } + + return r0, r1 } // LoadBlockCommit provides a mock function with 
given fields: height func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -138,6 +194,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockExtendedCommit") + } + var r0 *types.ExtendedCommit if rf, ok := ret.Get(0).(func(int64) *types.ExtendedCommit); ok { r0 = rf(height) @@ -154,6 +214,10 @@ func (_m *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommi func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { r0 = rf(height) @@ -170,6 +234,10 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMetaByHash") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func([]byte) *types.BlockMeta); ok { r0 = rf(hash) @@ -186,6 +254,10 @@ func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { ret := _m.Called(height, index) + if len(ret) == 0 { + panic("no return value specified for LoadBlockPart") + } + var r0 *types.Part if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok { r0 = rf(height, index) @@ -202,6 +274,10 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadSeenCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -218,6 +294,10 @@ func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { func (_m *BlockStore) PruneBlocks(height int64, _a1 state.State) (uint64, int64, error) { ret := _m.Called(height, _a1) + if len(ret) == 0 { + panic("no return value specified for PruneBlocks") + } + var r0 uint64 var r1 int64 var r2 error @@ -259,6 +339,10 @@ func (_m *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts func (_m *BlockStore) Size() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index 124a05d2a90..8e00beb7864 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -14,13 +14,17 @@ type EvidencePool struct { mock.Mock } -// AddEvidence provides a mock function with given fields: _a0 -func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { - ret := _m.Called(_a0) +// AddEvidence provides a mock function with given fields: ev +func (_m *EvidencePool) AddEvidence(ev types.Evidence) error { + ret := _m.Called(ev) + + if len(ret) == 0 { + panic("no return value specified for AddEvidence") + } var r0 error if rf, ok := ret.Get(0).(func(types.Evidence) error); ok { - r0 = rf(_a0) + r0 = rf(ev) } 
else { r0 = ret.Error(0) } @@ -28,13 +32,17 @@ func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { return r0 } -// CheckEvidence provides a mock function with given fields: _a0 -func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { - ret := _m.Called(_a0) +// CheckEvidence provides a mock function with given fields: evList +func (_m *EvidencePool) CheckEvidence(evList types.EvidenceList) error { + ret := _m.Called(evList) + + if len(ret) == 0 { + panic("no return value specified for CheckEvidence") + } var r0 error if rf, ok := ret.Get(0).(func(types.EvidenceList) error); ok { - r0 = rf(_a0) + r0 = rf(evList) } else { r0 = ret.Error(0) } @@ -46,6 +54,10 @@ func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { ret := _m.Called(maxBytes) + if len(ret) == 0 { + panic("no return value specified for PendingEvidence") + } + var r0 []types.Evidence var r1 int64 if rf, ok := ret.Get(0).(func(int64) ([]types.Evidence, int64)); ok { @@ -68,9 +80,9 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64 return r0, r1 } -// Update provides a mock function with given fields: _a0, _a1 -func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { - _m.Called(_a0, _a1) +// Update provides a mock function with given fields: _a0, evList +func (_m *EvidencePool) Update(_a0 state.State, evList types.EvidenceList) { + _m.Called(_a0, evList) } // NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/state/mocks/store.go b/state/mocks/store.go index 7d94c70a72f..3831a21cdde 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -3,12 +3,12 @@ package mocks import ( - abcitypes "github.com/cometbft/cometbft/abci/types" - mock "github.com/stretchr/testify/mock" - state "github.com/cometbft/cometbft/state" + mock "github.com/stretchr/testify/mock" types "github.com/cometbft/cometbft/types" + + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // Store is an autogenerated mock type for the Store type @@ -20,6 +20,10 @@ type Store struct { func (_m *Store) Bootstrap(_a0 state.State) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Bootstrap") + } + var r0 error if rf, ok := ret.Get(0).(func(state.State) error); ok { r0 = rf(_a0) @@ -34,6 +38,10 @@ func (_m *Store) Bootstrap(_a0 state.State) error { func (_m *Store) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -48,6 +56,10 @@ func (_m *Store) Close() error { func (_m *Store) GetABCIResRetainHeight() (int64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetABCIResRetainHeight") + } + var r0 int64 var r1 error if rf, ok := ret.Get(0).(func() (int64, error)); ok { @@ -72,6 +84,10 @@ func (_m *Store) GetABCIResRetainHeight() (int64, error) { func (_m *Store) GetApplicationRetainHeight() (int64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetApplicationRetainHeight") + } + var r0 int64 var r1 error if rf, ok := ret.Get(0).(func() (int64, error)); ok { @@ -96,6 +112,10 @@ func (_m *Store) GetApplicationRetainHeight() (int64, error) { func (_m *Store) GetCompanionBlockRetainHeight() (int64, error) { ret := 
_m.Called() + if len(ret) == 0 { + panic("no return value specified for GetCompanionBlockRetainHeight") + } + var r0 int64 var r1 error if rf, ok := ret.Get(0).(func() (int64, error)); ok { @@ -120,6 +140,10 @@ func (_m *Store) GetCompanionBlockRetainHeight() (int64, error) { func (_m *Store) GetOfflineStateSyncHeight() (int64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetOfflineStateSyncHeight") + } + var r0 int64 var r1 error if rf, ok := ret.Get(0).(func() (int64, error)); ok { @@ -144,6 +168,10 @@ func (_m *Store) GetOfflineStateSyncHeight() (int64, error) { func (_m *Store) Load() (state.State, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Load") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func() (state.State, error)); ok { @@ -168,6 +196,10 @@ func (_m *Store) Load() (state.State, error) { func (_m *Store) LoadConsensusParams(height int64) (types.ConsensusParams, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadConsensusParams") + } + var r0 types.ConsensusParams var r1 error if rf, ok := ret.Get(0).(func(int64) (types.ConsensusParams, error)); ok { @@ -189,19 +221,23 @@ func (_m *Store) LoadConsensusParams(height int64) (types.ConsensusParams, error } // LoadFinalizeBlockResponse provides a mock function with given fields: height -func (_m *Store) LoadFinalizeBlockResponse(height int64) (*abcitypes.ResponseFinalizeBlock, error) { +func (_m *Store) LoadFinalizeBlockResponse(height int64) (*v1.FinalizeBlockResponse, error) { ret := _m.Called(height) - var r0 *abcitypes.ResponseFinalizeBlock + if len(ret) == 0 { + panic("no return value specified for LoadFinalizeBlockResponse") + } + + var r0 *v1.FinalizeBlockResponse var r1 error - if rf, ok := ret.Get(0).(func(int64) (*abcitypes.ResponseFinalizeBlock, error)); ok { + if rf, ok := ret.Get(0).(func(int64) (*v1.FinalizeBlockResponse, error)); ok { return rf(height) } - if rf, ok := ret.Get(0).(func(int64) *abcitypes.ResponseFinalizeBlock); ok { + if rf, ok := ret.Get(0).(func(int64) *v1.FinalizeBlockResponse); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcitypes.ResponseFinalizeBlock) + r0 = ret.Get(0).(*v1.FinalizeBlockResponse) } } @@ -214,23 +250,27 @@ func (_m *Store) LoadFinalizeBlockResponse(height int64) (*abcitypes.ResponseFin return r0, r1 } -// LoadFromDBOrGenesisDoc provides a mock function with given fields: _a0 -func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *types.GenesisDoc) (state.State, error) { - ret := _m.Called(_a0) +// LoadFromDBOrGenesisDoc provides a mock function with given fields: doc +func (_m *Store) LoadFromDBOrGenesisDoc(doc *types.GenesisDoc) (state.State, error) { + ret := _m.Called(doc) + + if len(ret) == 0 { + panic("no return value specified for LoadFromDBOrGenesisDoc") + } var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(*types.GenesisDoc) (state.State, error)); ok { - return rf(_a0) + return rf(doc) } if rf, ok := ret.Get(0).(func(*types.GenesisDoc) state.State); ok { - r0 = rf(_a0) + r0 = rf(doc) } else { r0 = ret.Get(0).(state.State) } if rf, ok := ret.Get(1).(func(*types.GenesisDoc) error); ok { - r1 = rf(_a0) + r1 = rf(doc) } else { r1 = ret.Error(1) } @@ -238,23 +278,27 @@ func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *types.GenesisDoc) (state.State, err return r0, r1 } -// LoadFromDBOrGenesisFile provides a mock function with given fields: _a0 -func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) 
(state.State, error) { - ret := _m.Called(_a0) +// LoadFromDBOrGenesisFile provides a mock function with given fields: filepath +func (_m *Store) LoadFromDBOrGenesisFile(filepath string) (state.State, error) { + ret := _m.Called(filepath) + + if len(ret) == 0 { + panic("no return value specified for LoadFromDBOrGenesisFile") + } var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(string) (state.State, error)); ok { - return rf(_a0) + return rf(filepath) } if rf, ok := ret.Get(0).(func(string) state.State); ok { - r0 = rf(_a0) + r0 = rf(filepath) } else { r0 = ret.Get(0).(state.State) } if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) + r1 = rf(filepath) } else { r1 = ret.Error(1) } @@ -263,19 +307,23 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { } // LoadLastFinalizeBlockResponse provides a mock function with given fields: height -func (_m *Store) LoadLastFinalizeBlockResponse(height int64) (*abcitypes.ResponseFinalizeBlock, error) { +func (_m *Store) LoadLastFinalizeBlockResponse(height int64) (*v1.FinalizeBlockResponse, error) { ret := _m.Called(height) - var r0 *abcitypes.ResponseFinalizeBlock + if len(ret) == 0 { + panic("no return value specified for LoadLastFinalizeBlockResponse") + } + + var r0 *v1.FinalizeBlockResponse var r1 error - if rf, ok := ret.Get(0).(func(int64) (*abcitypes.ResponseFinalizeBlock, error)); ok { + if rf, ok := ret.Get(0).(func(int64) (*v1.FinalizeBlockResponse, error)); ok { return rf(height) } - if rf, ok := ret.Get(0).(func(int64) *abcitypes.ResponseFinalizeBlock); ok { + if rf, ok := ret.Get(0).(func(int64) *v1.FinalizeBlockResponse); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcitypes.ResponseFinalizeBlock) + r0 = ret.Get(0).(*v1.FinalizeBlockResponse) } } @@ -292,6 +340,10 @@ func (_m *Store) LoadLastFinalizeBlockResponse(height int64) (*abcitypes.Respons func (_m *Store) LoadValidators(height int64) (*types.ValidatorSet, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadValidators") + } + var r0 *types.ValidatorSet var r1 error if rf, ok := ret.Get(0).(func(int64) (*types.ValidatorSet, error)); ok { @@ -314,30 +366,34 @@ func (_m *Store) LoadValidators(height int64) (*types.ValidatorSet, error) { return r0, r1 } -// PruneABCIResponses provides a mock function with given fields: targetRetainHeight -func (_m *Store) PruneABCIResponses(targetRetainHeight int64) (int64, int64, error) { - ret := _m.Called(targetRetainHeight) +// PruneABCIResponses provides a mock function with given fields: targetRetainHeight, forceCompact +func (_m *Store) PruneABCIResponses(targetRetainHeight int64, forceCompact bool) (int64, int64, error) { + ret := _m.Called(targetRetainHeight, forceCompact) + + if len(ret) == 0 { + panic("no return value specified for PruneABCIResponses") + } var r0 int64 var r1 int64 var r2 error - if rf, ok := ret.Get(0).(func(int64) (int64, int64, error)); ok { - return rf(targetRetainHeight) + if rf, ok := ret.Get(0).(func(int64, bool) (int64, int64, error)); ok { + return rf(targetRetainHeight, forceCompact) } - if rf, ok := ret.Get(0).(func(int64) int64); ok { - r0 = rf(targetRetainHeight) + if rf, ok := ret.Get(0).(func(int64, bool) int64); ok { + r0 = rf(targetRetainHeight, forceCompact) } else { r0 = ret.Get(0).(int64) } - if rf, ok := ret.Get(1).(func(int64) int64); ok { - r1 = rf(targetRetainHeight) + if rf, ok := ret.Get(1).(func(int64, bool) int64); ok { + r1 = rf(targetRetainHeight, forceCompact) } else 
{ r1 = ret.Get(1).(int64) } - if rf, ok := ret.Get(2).(func(int64) error); ok { - r2 = rf(targetRetainHeight) + if rf, ok := ret.Get(2).(func(int64, bool) error); ok { + r2 = rf(targetRetainHeight, forceCompact) } else { r2 = ret.Error(2) } @@ -345,24 +401,42 @@ func (_m *Store) PruneABCIResponses(targetRetainHeight int64) (int64, int64, err return r0, r1, r2 } -// PruneStates provides a mock function with given fields: fromHeight, toHeight, evidenceThresholdHeight -func (_m *Store) PruneStates(fromHeight int64, toHeight int64, evidenceThresholdHeight int64) error { - ret := _m.Called(fromHeight, toHeight, evidenceThresholdHeight) +// PruneStates provides a mock function with given fields: fromHeight, toHeight, evidenceThresholdHeight, previouslyPrunedStates +func (_m *Store) PruneStates(fromHeight int64, toHeight int64, evidenceThresholdHeight int64, previouslyPrunedStates uint64) (uint64, error) { + ret := _m.Called(fromHeight, toHeight, evidenceThresholdHeight, previouslyPrunedStates) - var r0 error - if rf, ok := ret.Get(0).(func(int64, int64, int64) error); ok { - r0 = rf(fromHeight, toHeight, evidenceThresholdHeight) + if len(ret) == 0 { + panic("no return value specified for PruneStates") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(int64, int64, int64, uint64) (uint64, error)); ok { + return rf(fromHeight, toHeight, evidenceThresholdHeight, previouslyPrunedStates) + } + if rf, ok := ret.Get(0).(func(int64, int64, int64, uint64) uint64); ok { + r0 = rf(fromHeight, toHeight, evidenceThresholdHeight, previouslyPrunedStates) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(uint64) } - return r0 + if rf, ok := ret.Get(1).(func(int64, int64, int64, uint64) error); ok { + r1 = rf(fromHeight, toHeight, evidenceThresholdHeight, previouslyPrunedStates) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // Save provides a mock function with given fields: _a0 func (_m *Store) Save(_a0 state.State) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Save") + } + var r0 error if rf, ok := ret.Get(0).(func(state.State) error); ok { r0 = rf(_a0) @@ -377,6 +451,10 @@ func (_m *Store) Save(_a0 state.State) error { func (_m *Store) SaveABCIResRetainHeight(height int64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for SaveABCIResRetainHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(height) @@ -391,6 +469,10 @@ func (_m *Store) SaveABCIResRetainHeight(height int64) error { func (_m *Store) SaveApplicationRetainHeight(height int64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for SaveApplicationRetainHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(height) @@ -405,6 +487,10 @@ func (_m *Store) SaveApplicationRetainHeight(height int64) error { func (_m *Store) SaveCompanionBlockRetainHeight(height int64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for SaveCompanionBlockRetainHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(height) @@ -416,11 +502,15 @@ func (_m *Store) SaveCompanionBlockRetainHeight(height int64) error { } // SaveFinalizeBlockResponse provides a mock function with given fields: height, res -func (_m *Store) SaveFinalizeBlockResponse(height int64, res *abcitypes.ResponseFinalizeBlock) error { +func (_m *Store) SaveFinalizeBlockResponse(height int64, res *v1.FinalizeBlockResponse) 
error { ret := _m.Called(height, res) + if len(ret) == 0 { + panic("no return value specified for SaveFinalizeBlockResponse") + } + var r0 error - if rf, ok := ret.Get(0).(func(int64, *abcitypes.ResponseFinalizeBlock) error); ok { + if rf, ok := ret.Get(0).(func(int64, *v1.FinalizeBlockResponse) error); ok { r0 = rf(height, res) } else { r0 = ret.Error(0) @@ -433,6 +523,10 @@ func (_m *Store) SaveFinalizeBlockResponse(height int64, res *abcitypes.Response func (_m *Store) SetOfflineStateSyncHeight(height int64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for SetOfflineStateSyncHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(height) diff --git a/state/pruner.go b/state/pruner.go index 9ce2b11e6db..f4b304b1e9f 100644 --- a/state/pruner.go +++ b/state/pruner.go @@ -38,6 +38,10 @@ type Pruner struct { interval time.Duration observer PrunerObserver metrics *Metrics + + // Preserve the number of state entries pruned. + // Used to correctly calculate when to trigger compactions. + prunedStates uint64 } type prunerConfig struct { @@ -292,7 +296,7 @@ func (p *Pruner) GetABCIResRetainHeight() (int64, error) { } // GetTxIndexerRetainHeight is a convenience method for accessing the -// GetTxIndexerRetainHeight method of the underlying indexer +// GetTxIndexerRetainHeight method of the underlying indexer. func (p *Pruner) GetTxIndexerRetainHeight() (int64, error) { return p.txIndexer.GetRetainHeight() } @@ -446,10 +450,15 @@ func (p *Pruner) pruneABCIResToRetainHeight(lastRetainHeight int64) int64 { return lastRetainHeight } + // If the block retain height is 0, pruning of the block and state stores might be disabled. + // This should not prevent Comet from pruning ABCI results if needed. + // We could by default always compact when pruning the responses, but if the state store + // is being compacted at the same time, the added overhead might cause performance penalties. + forceCompact := p.findMinBlockRetainHeight() == 0 // newRetainHeight is the height just after that which we have successfully // pruned. In case of an error it will be 0, but then it will also be // ignored. 
- numPruned, newRetainHeight, err := p.stateStore.PruneABCIResponses(targetRetainHeight) + numPruned, newRetainHeight, err := p.stateStore.PruneABCIResponses(targetRetainHeight, forceCompact) if err != nil { p.logger.Error("Failed to prune ABCI responses", "err", err, "targetRetainHeight", targetRetainHeight) return lastRetainHeight @@ -500,8 +509,12 @@ func (p *Pruner) pruneBlocksToHeight(height int64) (uint64, int64, error) { if err != nil { return 0, 0, ErrFailedToPruneBlocks{Height: height, Err: err} } - if err := p.stateStore.PruneStates(base, height, evRetainHeight); err != nil { - return 0, 0, ErrFailedToPruneStates{Height: height, Err: err} + if pruned > 0 { + prunedStates, err := p.stateStore.PruneStates(base, height, evRetainHeight, p.prunedStates) + p.prunedStates += prunedStates + if err != nil { + return 0, 0, ErrFailedToPruneStates{Height: height, Err: err} + } } return pruned, evRetainHeight, err } diff --git a/state/pruner_test.go b/state/pruner_test.go index 30180e53a4d..c5b2f1437ff 100644 --- a/state/pruner_test.go +++ b/state/pruner_test.go @@ -5,6 +5,10 @@ import ( "fmt" "os" "testing" + "time" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" db "github.com/cometbft/cometbft-db" abci "github.com/cometbft/cometbft/abci/types" @@ -13,15 +17,14 @@ import ( "github.com/cometbft/cometbft/libs/pubsub/query" sm "github.com/cometbft/cometbft/state" blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" + "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/state/txindex/kv" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" - "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" ) func TestPruneBlockIndexerToRetainHeight(t *testing.T) { - pruner, _, blockIndexer, _ := createTestSetup(t) + pruner, _, blockIndexer := createTestSetup(t) for height := int64(1); height <= 4; height++ { events, _, _ := getEventsAndResults(height) @@ -36,14 +39,14 @@ func TestPruneBlockIndexerToRetainHeight(t *testing.T) { heights, err := blockIndexer.Search(context.Background(), query.MustCompile("block.height <= 2")) require.NoError(t, err) - require.Equal(t, heights, []int64{1, 2}) + require.Equal(t, []int64{1, 2}, heights) newRetainHeight := pruner.PruneBlockIndexerToRetainHeight(0) require.Equal(t, int64(2), newRetainHeight) heights, err = blockIndexer.Search(context.Background(), query.MustCompile("block.height <= 2")) require.NoError(t, err) - require.Equal(t, heights, []int64{2}) + require.Equal(t, []int64{2}, heights) err = pruner.SetBlockIndexerRetainHeight(int64(4)) require.NoError(t, err) @@ -53,13 +56,13 @@ func TestPruneBlockIndexerToRetainHeight(t *testing.T) { heights, err = blockIndexer.Search(context.Background(), query.MustCompile("block.height <= 4")) require.NoError(t, err) - require.Equal(t, heights, []int64{2, 3, 4}) + require.Equal(t, []int64{2, 3, 4}, heights) pruner.PruneBlockIndexerToRetainHeight(2) heights, err = blockIndexer.Search(context.Background(), query.MustCompile("block.height <= 4")) require.NoError(t, err) - require.Equal(t, heights, []int64{4}) + require.Equal(t, []int64{4}, heights) events, _, _ := getEventsAndResults(1) @@ -68,17 +71,17 @@ func TestPruneBlockIndexerToRetainHeight(t *testing.T) { heights, err = blockIndexer.Search(context.Background(), query.MustCompile("block.height <= 4")) require.NoError(t, err) - require.Equal(t, heights, []int64{1, 4}) + require.Equal(t, []int64{1, 4}, heights) pruner.PruneBlockIndexerToRetainHeight(4) heights, err = 
blockIndexer.Search(context.Background(), query.MustCompile("block.height <= 4")) require.NoError(t, err) - require.Equal(t, heights, []int64{1, 4}) + require.Equal(t, []int64{1, 4}, heights) } func TestPruneTxIndexerToRetainHeight(t *testing.T) { - pruner, txIndexer, _, _ := createTestSetup(t) + pruner, txIndexer, _ := createTestSetup(t) for height := int64(1); height <= 4; height++ { _, txResult1, txResult2 := getEventsAndResults(height) @@ -94,16 +97,16 @@ func TestPruneTxIndexerToRetainHeight(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), actual) - results, err := txIndexer.Search(context.Background(), query.MustCompile("tx.height < 2")) + results, _, err := txIndexer.Search(context.Background(), query.MustCompile("tx.height < 2"), txindex.Pagination{}) require.NoError(t, err) require.True(t, containsAllTxs(results, []string{"foo1", "bar1"})) newRetainHeight := pruner.PruneTxIndexerToRetainHeight(0) require.Equal(t, int64(2), newRetainHeight) - results, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height < 2")) + results, _, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height < 2"), txindex.Pagination{}) require.NoError(t, err) - require.Equal(t, 0, len(results)) + require.Empty(t, results) err = pruner.SetTxIndexerRetainHeight(int64(4)) require.NoError(t, err) @@ -111,15 +114,15 @@ func TestPruneTxIndexerToRetainHeight(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(4), actual) - results, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height < 4")) + results, _, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height < 4"), txindex.Pagination{}) require.NoError(t, err) require.True(t, containsAllTxs(results, []string{"foo2", "bar2", "foo3", "bar3"})) pruner.PruneTxIndexerToRetainHeight(2) - results, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height < 4")) + results, _, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height < 4"), txindex.Pagination{}) require.NoError(t, err) - require.Equal(t, 0, len(results)) + require.Empty(t, results) _, txResult1, txResult2 := getEventsAndResults(1) err = txIndexer.Index(txResult1) @@ -127,13 +130,13 @@ func TestPruneTxIndexerToRetainHeight(t *testing.T) { err = txIndexer.Index(txResult2) require.NoError(t, err) - results, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height <= 4")) + results, _, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height <= 4"), txindex.Pagination{}) require.NoError(t, err) require.True(t, containsAllTxs(results, []string{"foo1", "bar1", "foo4", "bar4"})) pruner.PruneTxIndexerToRetainHeight(4) - results, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height <= 4")) + results, _, err = txIndexer.Search(context.Background(), query.MustCompile("tx.height <= 4"), txindex.Pagination{}) require.NoError(t, err) require.True(t, containsAllTxs(results, []string{"foo1", "bar1", "foo4", "bar4"})) } @@ -149,7 +152,8 @@ func containsAllTxs(results []*abci.TxResult, txs []string) bool { return true } -func createTestSetup(t *testing.T) (*sm.Pruner, *kv.TxIndex, blockidxkv.BlockerIndexer, *types.EventBus) { +func createTestSetup(t *testing.T) (*sm.Pruner, *kv.TxIndex, blockidxkv.BlockerIndexer) { + t.Helper() config := test.ResetTestRoot("pruner_test") t.Cleanup(func() { err := os.RemoveAll(config.RootDir) @@ -186,7 +190,7 @@ func createTestSetup(t *testing.T) (*sm.Pruner, *kv.TxIndex, blockidxkv.BlockerI bs := 
store.NewBlockStore(blockDB) pruner := sm.NewPruner(stateStore, bs, blockIndexer, txIndexer, log.TestingLogger()) - return pruner, txIndexer, *blockIndexer, eventBus + return pruner, txIndexer, *blockIndexer } func getEventsAndResults(height int64) (types.EventDataNewBlockEvents, *abci.TxResult, *abci.TxResult) { @@ -220,3 +224,56 @@ func getEventsAndResults(height int64) (types.EventDataNewBlockEvents, *abci.TxR } return events, txResult1, txResult2 } + +// When trying to prune the only block in the store it should not succeed +// State should also not be pruned. +func TestPruningWithHeight1(t *testing.T) { + config := test.ResetTestRoot("blockchain_reactor_pruning_test") + defer os.RemoveAll(config.RootDir) + state, bs, txIndexer, blockIndexer, cleanup, stateStore := makeStateAndBlockStoreAndIndexers() + defer cleanup() + require.EqualValues(t, 0, bs.Base()) + require.EqualValues(t, 0, bs.Height()) + require.EqualValues(t, 0, bs.Size()) + + err := initStateStoreRetainHeights(stateStore) + require.NoError(t, err) + + obs := newPrunerObserver(1) + + pruner := sm.NewPruner( + stateStore, + bs, + blockIndexer, + txIndexer, + log.TestingLogger(), + sm.WithPrunerInterval(time.Second*1), + sm.WithPrunerObserver(obs), + sm.WithPrunerCompanionEnabled(), + ) + + err = pruner.SetApplicationBlockRetainHeight(1) + require.Error(t, err) + err = pruner.SetApplicationBlockRetainHeight(0) + require.NoError(t, err) + + block := state.MakeBlock(1, test.MakeNTxs(1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + + bs.SaveBlock(block, partSet, &types.Commit{Height: 1}) + require.EqualValues(t, 1, bs.Base()) + require.EqualValues(t, 1, bs.Height()) + + err = stateStore.Save(state) + require.NoError(t, err) + + err = pruner.SetApplicationBlockRetainHeight(1) + require.NoError(t, err) + err = pruner.SetCompanionBlockRetainHeight(1) + require.NoError(t, err) + + pruned, _, err := pruner.PruneBlocksToHeight(1) + require.Equal(t, uint64(0), pruned) + require.NoError(t, err) +} diff --git a/state/rollback.go b/state/rollback.go index 6420192cf72..05ea03d2af5 100644 --- a/state/rollback.go +++ b/state/rollback.go @@ -4,8 +4,8 @@ import ( "errors" "fmt" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/version" ) @@ -65,10 +65,11 @@ func Rollback(bs BlockStore, ss Store, removeBlock bool) (int64, []byte, error) return -1, nil, err } + nextHeight := rollbackHeight + 1 valChangeHeight := invalidState.LastHeightValidatorsChanged // this can only happen if the validator set changed since the last block - if valChangeHeight > rollbackHeight { - valChangeHeight = rollbackHeight + 1 + if valChangeHeight > nextHeight+1 { + valChangeHeight = nextHeight + 1 } paramsChangeHeight := invalidState.LastHeightConsensusParamsChanged @@ -84,7 +85,7 @@ func Rollback(bs BlockStore, ss Store, removeBlock bool) (int64, []byte, error) Block: version.BlockProtocol, App: previousParams.Version.App, }, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, }, // immutable fields ChainID: invalidState.ChainID, diff --git a/state/rollback_test.go b/state/rollback_test.go index 9495cb4649f..21376dc575a 100644 --- a/state/rollback_test.go +++ b/state/rollback_test.go @@ -8,15 +8,15 @@ import ( 
"github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/mocks" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) @@ -124,7 +124,7 @@ func TestRollbackHard(t *testing.T) { currState := state.State{ Version: cmtstate.Version{ Consensus: block.Header.Version, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, }, LastBlockHeight: block.Height, LastBlockTime: block.Time, @@ -181,7 +181,7 @@ func TestRollbackHard(t *testing.T) { nextState := state.State{ Version: cmtstate.Version{ Consensus: block.Header.Version, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, }, LastBlockHeight: nextBlock.Height, LastBlockTime: nextBlock.Time, @@ -235,10 +235,11 @@ func TestRollbackDifferentStateHeight(t *testing.T) { _, _, err := state.Rollback(blockStore, stateStore, false) require.Error(t, err) - require.Equal(t, err.Error(), "statestore height (100) is not one below or equal to blockstore height (102)") + require.Equal(t, "statestore height (100) is not one below or equal to blockstore height (102)", err.Error()) } func setupStateStore(t *testing.T, height int64) state.Store { + t.Helper() stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false}) valSet, _ := types.RandValidatorSet(5, 10) @@ -251,7 +252,7 @@ func setupStateStore(t *testing.T, height int64) state.Store { Block: version.BlockProtocol, App: 10, }, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, }, ChainID: "test-chain", InitialHeight: 10, @@ -259,11 +260,11 @@ func setupStateStore(t *testing.T, height int64) state.Store { AppHash: tmhash.Sum([]byte("app_hash")), LastResultsHash: tmhash.Sum([]byte("last_results_hash")), LastBlockHeight: height, - LastBlockTime: time.Now(), + LastBlockTime: cmttime.Now(), LastValidators: valSet, Validators: valSet.CopyIncrementProposerPriority(1), NextValidators: valSet.CopyIncrementProposerPriority(2), - LastHeightValidatorsChanged: height + 1, + LastHeightValidatorsChanged: height + 1 + 1, ConsensusParams: *params, LastHeightConsensusParamsChanged: height + 1, } diff --git a/state/services.go b/state/services.go index 280a945668f..7a1d5d32dca 100644 --- a/state/services.go +++ b/state/services.go @@ -4,12 +4,12 @@ import ( "github.com/cometbft/cometbft/types" ) -//------------------------------------------------------ +// ------------------------------------------------------ // blockchain services types // NOTE: Interfaces used by RPC must be thread safe! 
-//------------------------------------------------------ +// ------------------------------------------------------ -//------------------------------------------------------ +// ------------------------------------------------------ // blockstore //go:generate ../scripts/mockery_generate.sh BlockStore @@ -22,14 +22,14 @@ type BlockStore interface { LoadBaseMeta() *types.BlockMeta LoadBlockMeta(height int64) *types.BlockMeta - LoadBlock(height int64) *types.Block + LoadBlock(height int64) (*types.Block, *types.BlockMeta) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) PruneBlocks(height int64, state State) (uint64, int64, error) - LoadBlockByHash(hash []byte) *types.Block + LoadBlockByHash(hash []byte) (*types.Block, *types.BlockMeta) LoadBlockMetaByHash(hash []byte) *types.BlockMeta LoadBlockPart(height int64, index int) *types.Part @@ -42,7 +42,7 @@ type BlockStore interface { Close() error } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // evidence pool //go:generate ../scripts/mockery_generate.sh EvidencePool @@ -50,13 +50,13 @@ type BlockStore interface { // EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) - AddEvidence(types.Evidence) error - Update(State, types.EvidenceList) - CheckEvidence(types.EvidenceList) error + AddEvidence(ev types.Evidence) error + Update(state State, evList types.EvidenceList) + CheckEvidence(evList types.EvidenceList) error } // EmptyEvidencePool is an empty implementation of EvidencePool, useful for testing. It also complies -// to the consensus evidence pool interface +// to the consensus evidence pool interface. type EmptyEvidencePool struct{} func (EmptyEvidencePool) PendingEvidence(int64) (ev []types.Evidence, size int64) { diff --git a/state/state.go b/state/state.go index 15fb8e5e62b..2eadaaf6006 100644 --- a/state/state.go +++ b/state/state.go @@ -9,19 +9,19 @@ import ( "github.com/cosmos/gogoproto/proto" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) -// database keys +// database keys. var ( stateKey = []byte("stateKey") ) -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // InitStateVersion sets the Consensus.Block and Software versions, // but leaves the Consensus.App version blank. @@ -32,10 +32,10 @@ var InitStateVersion = cmtstate.Version{ Block: version.BlockProtocol, App: 0, }, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // State is a short description of the latest committed block of the consensus protocol. 
// It keeps all information necessary to validate new blocks, @@ -77,11 +77,14 @@ type State struct { // the latest AppHash we've received from calling abci.Commit() AppHash []byte + + // delay between the time when this block is committed and the next height is started. + // previously `timeout_commit` in config.toml + NextBlockDelay time.Duration } // Copy makes a copy of the State for mutating. func (state State) Copy() State { - return State{ Version: state.Version, ChainID: state.ChainID, @@ -102,6 +105,8 @@ func (state State) Copy() State { AppHash: state.AppHash, LastResultsHash: state.LastResultsHash, + + NextBlockDelay: state.NextBlockDelay, } } @@ -130,7 +135,7 @@ func (state State) IsEmpty() bool { return state.Validators == nil // XXX can't compare to Empty } -// ToProto takes the local state type and returns the equivalent proto type +// ToProto takes the local state type and returns the equivalent proto type. func (state *State) ToProto() (*cmtstate.State, error) { if state == nil { return nil, errors.New("state is nil") @@ -170,11 +175,12 @@ func (state *State) ToProto() (*cmtstate.State, error) { sm.LastHeightConsensusParamsChanged = state.LastHeightConsensusParamsChanged sm.LastResultsHash = state.LastResultsHash sm.AppHash = state.AppHash + sm.NextBlockDelay = state.NextBlockDelay return sm, nil } -// FromProto takes a state proto message & returns the local state type +// FromProto takes a state proto message & returns the local state type. func FromProto(pb *cmtstate.State) (*State, error) { //nolint:golint if pb == nil { return nil, errors.New("nil State") @@ -221,11 +227,12 @@ func FromProto(pb *cmtstate.State) (*State, error) { //nolint:golint state.LastHeightConsensusParamsChanged = pb.LastHeightConsensusParamsChanged state.LastResultsHash = pb.LastResultsHash state.AppHash = pb.AppHash + state.NextBlockDelay = pb.NextBlockDelay return state, nil } -//------------------------------------------------------------------------ +// ------------------------------------------------------------------------ // Create a block from the latest state // MakeBlock builds a block from the current state with the given txs, commit, @@ -238,16 +245,18 @@ func (state State) MakeBlock( evidence []types.Evidence, proposerAddress []byte, ) *types.Block { - // Build base block with block data. block := types.MakeBlock(height, txs, lastCommit, evidence) // Set time. var timestamp time.Time - if height == state.InitialHeight { + switch { + case state.ConsensusParams.Feature.PbtsEnabled(height): + timestamp = cmttime.Now() + case height == state.InitialHeight: timestamp = state.LastBlockTime // genesis time - } else { - timestamp = MedianTime(lastCommit, state.LastValidators) + default: + timestamp = lastCommit.MedianTime(state.LastValidators) } // Fill rest of header with state data. @@ -262,30 +271,7 @@ func (state State) MakeBlock( return block } -// MedianTime computes a median time for a given Commit (based on Timestamp field of votes messages) and the -// corresponding validator set. The computed time is always between timestamps of -// the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the -// computed value. 
-func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { - weightedTimes := make([]*cmttime.WeightedTime, len(commit.Signatures)) - totalVotingPower := int64(0) - - for i, commitSig := range commit.Signatures { - if commitSig.BlockIDFlag == types.BlockIDFlagAbsent { - continue - } - _, validator := validators.GetByAddress(commitSig.ValidatorAddress) - // If there's no condition, TestValidateBlockCommit panics; not needed normally. - if validator != nil { - totalVotingPower += validator.VotingPower - weightedTimes[i] = cmttime.NewWeightedTime(commitSig.Timestamp, validator.VotingPower) - } - } - - return cmttime.WeightedMedian(weightedTimes, totalVotingPower) -} - -//------------------------------------------------------------------------ +// ------------------------------------------------------------------------ // Genesis // MakeGenesisStateFromFile reads and unmarshals state from the given @@ -304,11 +290,11 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { genDocJSON, err := os.ReadFile(genDocFile) if err != nil { - return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) + return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } genDoc, err := types.GenesisDocFromJSON(genDocJSON) if err != nil { - return nil, fmt.Errorf("error reading GenesisDoc: %v", err) + return nil, fmt.Errorf("error reading GenesisDoc: %w", err) } return genDoc, nil } @@ -351,5 +337,8 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { LastHeightConsensusParamsChanged: genDoc.InitialHeight, AppHash: genDoc.AppHash, + + // NextBlockDelay is set to 0 because the genesis block is committed. + NextBlockDelay: 0, }, nil } diff --git a/state/state_test.go b/state/state_test.go index 69e5f33ae3c..1e426617886 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -6,24 +6,31 @@ import ( "math" "math/big" "os" + "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto/ed25519" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" - cmtrand "github.com/cometbft/cometbft/libs/rand" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" ) // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { + t.Helper() + tearDown, stateDB, state, _ := setupTestCaseWithStore(t) + return tearDown, stateDB, state +} + +// setupTestCaseWithStore does setup common to all test cases and additionally returns the state store.
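Taken together, the MakeBlock hunk above and the removal of the package-level MedianTime leave three timestamp sources. A condensed sketch of the selection rule, restating the switch in MakeBlock rather than adding new logic; it also shows why the tests now use cmttime.Now, which yields a canonicalized UTC time:

    package example

    import (
    	"time"

    	"github.com/cometbft/cometbft/state"
    	"github.com/cometbft/cometbft/types"
    	cmttime "github.com/cometbft/cometbft/types/time"
    )

    // blockTimestamp restates the timestamp selection in State.MakeBlock.
    func blockTimestamp(s state.State, height int64, lastCommit *types.Commit) time.Time {
    	switch {
    	case s.ConsensusParams.Feature.PbtsEnabled(height):
    		// PBTS: the proposer reads its own (canonical, UTC) clock.
    		return cmttime.Now()
    	case height == s.InitialHeight:
    		// First block: reuse the genesis time.
    		return s.LastBlockTime
    	default:
    		// BFT Time: weighted median of the previous commit's vote times,
    		// now a method on Commit rather than a helper in this package.
    		return lastCommit.MedianTime(s.LastValidators)
    	}
    }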
+func setupTestCaseWithStore(t *testing.T) (func(t *testing.T), dbm.DB, sm.State, sm.Store) { + t.Helper() config := test.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) @@ -32,17 +39,21 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { }) require.NoError(t, err) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) - assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") + require.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } + tearDown := func(t *testing.T) { + t.Helper() + os.RemoveAll(config.RootDir) + } - return tearDown, stateDB, state + return tearDown, stateDB, state, stateStore } // TestStateCopy tests the correct copying behavior of State. func TestStateCopy(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) assert := assert.New(t) @@ -65,11 +76,11 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { ChainID: "dummy", Validators: nil, } - require.Nil(t, doc.ValidateAndComplete()) + require.NoError(t, doc.ValidateAndComplete()) state, err := sm.MakeGenesisState(&doc) - require.Nil(t, err) - require.Equal(t, 0, len(state.Validators.Validators)) - require.Equal(t, 0, len(state.NextValidators.Validators)) + require.NoError(t, err) + require.Empty(t, state.Validators.Validators) + require.Empty(t, state.NextValidators.Validators) } // TestStateSaveLoad tests saving and loading State from a db. @@ -107,20 +118,22 @@ func TestFinalizeBlockResponsesSaveLoad1(t *testing.T) { // Build mock responses. block := makeBlock(state, 2, new(types.Commit)) - abciResponses := new(abci.ResponseFinalizeBlock) + abciResponses := new(abci.FinalizeBlockResponse) dtxs := make([]*abci.ExecTxResult, 2) abciResponses.TxResults = dtxs abciResponses.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} abciResponses.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} abciResponses.ValidatorUpdates = []abci.ValidatorUpdate{ - types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), + abci.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), } + abciResponses.AppHash = make([]byte, 1) + err := stateStore.SaveFinalizeBlockResponse(block.Height, abciResponses) require.NoError(t, err) loadedABCIResponses, err := stateStore.LoadFinalizeBlockResponse(block.Height) - assert.NoError(err) + require.NoError(t, err) assert.Equal(abciResponses, loadedABCIResponses) } @@ -185,15 +198,15 @@ func TestFinalizeBlockResponsesSaveLoad2(t *testing.T) { for i := range cases { h := int64(i + 1) res, err := stateStore.LoadFinalizeBlockResponse(h) - assert.Error(err, "%d: %#v", i, res) + require.Error(t, err, "%d: %#v", i, res) } // Add all cases.
for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save - responses := &abci.ResponseFinalizeBlock{ + responses := &abci.FinalizeBlockResponse{ TxResults: tc.added, - AppHash: []byte(fmt.Sprintf("%d", h)), + AppHash: []byte(strconv.FormatInt(h, 10)), } err := stateStore.SaveFinalizeBlockResponse(h, responses) require.NoError(t, err) @@ -203,11 +216,11 @@ func TestFinalizeBlockResponsesSaveLoad2(t *testing.T) { for i, tc := range cases { h := int64(i + 1) res, err := stateStore.LoadFinalizeBlockResponse(h) - if assert.NoError(err, "%d", i) { + if assert.NoError(err, "%d", i) { //nolint:testifylint // require.Error doesn't work with the conditional here t.Log(res) - responses := &abci.ResponseFinalizeBlock{ + responses := &abci.FinalizeBlockResponse{ TxResults: tc.expected, - AppHash: []byte(fmt.Sprintf("%d", h)), + AppHash: []byte(strconv.FormatInt(h, 10)), } assert.Equal(sm.TxResultsHash(responses.TxResults), sm.TxResultsHash(res.TxResults), "%d", i) } @@ -230,12 +243,12 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { // Should be able to load for height 1. v, err := statestore.LoadValidators(1) - assert.Nil(err, "expected no err at height 1") + require.NoError(t, err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. v, err = statestore.LoadValidators(2) - assert.Nil(err, "expected no err at height 2") + require.NoError(t, err, "expected no err at height 2") assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. @@ -244,14 +257,14 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { err = statestore.Save(state) require.NoError(t, err) vp0, err := statestore.LoadValidators(nextHeight + 0) - assert.Nil(err, "expected no err") + require.NoError(t, err) vp1, err := statestore.LoadValidators(nextHeight + 1) - assert.Nil(err, "expected no err") + require.NoError(t, err) assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") } -// TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. +// TestOneValidatorChangesSaveLoad tests saving and loading a validator set with changes. func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) @@ -261,11 +274,11 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} - N := len(changeHeights) + n := len(changeHeights) // Build the validator history by running updateState // with the right validator set for each height. - highestHeight := changeHeights[N-1] + 5 + highestHeight := changeHeights[n-1] + 5 changeIndex := 0 _, val := state.Validators.GetByIndex(0) power := val.VotingPower @@ -302,8 +315,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { for i, power := range testCases { v, err := stateStore.LoadValidators(int64(i + 1 + 1)) // +1 because vset changes delayed by 1 block. 
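A recurring change in these hunks is assert.NoError/assert.Nil giving way to require.NoError. A small sketch, under assumed names (load and result are stand-ins, not from the patch), of why that matters:

    package example

    import (
    	"testing"

    	"github.com/stretchr/testify/assert"
    	"github.com/stretchr/testify/require"
    )

    type result struct{ Field int }

    func load() (*result, error) { return &result{Field: 42}, nil }

    func TestLoad(t *testing.T) {
    	v, err := load()
    	// require stops the test on failure; with assert the test would keep
    	// running and the next line could dereference a nil pointer.
    	require.NoError(t, err)
    	assert.Equal(t, 42, v.Field)
    }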
- assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) - assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) + require.NoError(t, err, "expected no err at height %d", i) + assert.Equal(t, 1, v.Size(), "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) assert.Equal(t, val.VotingPower, power, fmt.Sprintf(`unexpected powerat @@ -363,10 +376,10 @@ func TestProposerFrequency(t *testing.T) { maxPower := 1000 nTestCases := 5 for i := 0; i < nTestCases; i++ { - N := cmtrand.Int()%maxVals + 1 - vals := make([]*types.Validator, N) + n := cmtrand.Int()%maxVals + 1 + vals := make([]*types.Validator, n) totalVotePower := int64(0) - for j := 0; j < N; j++ { + for j := 0; j < n; j++ { // make sure votePower > 0 votePower := int64(cmtrand.Int()%maxPower) + 1 totalVotePower += votePower @@ -383,7 +396,7 @@ func TestProposerFrequency(t *testing.T) { } } -// new val set with given powers and random initial priorities +// new val set with given powers and random initial priorities. func genValSetWithPowers(powers []int64) *types.ValidatorSet { size := len(powers) vals := make([]*types.Validator, size) @@ -399,15 +412,16 @@ func genValSetWithPowers(powers []int64) *types.ValidatorSet { return valSet } -// test a proposer appears as frequently as expected +// test a proposer appears as frequently as expected. func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { - N := valSet.Size() + t.Helper() + n := valSet.Size() totalPower := valSet.TotalVotingPower() // run the proposer selection and track frequencies runMult := 1 runs := int(totalPower) * runMult - freqs := make([]int, N) + freqs := make([]int, n) for i := 0; i < runs; i++ { prop := valSet.GetProposer() idx, _ := valSet.GetByAddress(prop.Address) @@ -426,11 +440,11 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { // to be 1 for the 2 validator case in // https://github.com/cwgoes/tm-proposer-idris // and inferred to generalize to N-1 - bound := N - 1 - require.True( + bound := n - 1 + require.LessOrEqual( t, - abs <= bound, - fmt.Sprintf("Case %d val %d (%d): got %d, expected %d", caseNum, i, N, gotFreq, expectFreq), + abs, bound, + fmt.Sprintf("Case %d val %d (%d): got %d, expected %d", caseNum, i, n, gotFreq, expectFreq), ) } } @@ -454,11 +468,11 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) curTotal := val1VotingPower // one increment step and one validator: 0 + power - total_power == 0 assert.Equal(t, 0+val1VotingPower-curTotal, updatedState.NextValidators.Validators[0].ProposerPriority) @@ -466,16 +480,14 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // add a validator val2PubKey := ed25519.GenPrivKey().PubKey() val2VotingPower := int64(100) - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) - require.NoError(t, err) - updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} + updateAddVal := abci.NewValidatorUpdate(val2PubKey, val2VotingPower) validatorUpdates, err = 
types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) - assert.NoError(t, err) + require.NoError(t, err) updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) - require.Equal(t, len(updatedState2.NextValidators.Validators), 2) + require.Len(t, updatedState2.NextValidators.Validators, 2) _, updatedVal1 := updatedState2.NextValidators.GetByAddress(val1PubKey.Address()) _, addedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address()) @@ -505,23 +517,23 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // Updating a validator does not reset the ProposerPriority to zero: // 1. Add - Val2 VotingPower change to 1 => updatedVotingPowVal2 := int64(1) - updateVal := abci.ValidatorUpdate{PubKey: fvp, Power: updatedVotingPowVal2} + updateVal := abci.NewValidatorUpdate(val2PubKey, updatedVotingPowVal2) validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal}) - assert.NoError(t, err) + require.NoError(t, err) // this will cause the diff of priorities (77) // to be larger than threshold == 2*totalVotingPower (22): updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) - require.Equal(t, len(updatedState3.NextValidators.Validators), 2) + require.Len(t, updatedState3.NextValidators.Validators, 2) _, prevVal1 := updatedState3.Validators.GetByAddress(val1PubKey.Address()) _, prevVal2 := updatedState3.Validators.GetByAddress(val2PubKey.Address()) _, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address()) _, updatedVal2 := updatedState3.NextValidators.GetByAddress(val2PubKey.Address()) // 2. Scale // old prios: v1(10):-38, v2(1):39 wantVal1Prio = prevVal1.ProposerPriority wantVal2Prio = prevVal2.ProposerPriority // scale to diffMax = 22 = 2 * tvp, diff=39-(-38)=77 @@ -530,14 +542,14 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { dist := wantVal2Prio - wantVal1Prio // ratio := (dist + 2*totalPower - 1) / 2*totalPower = 98/22 = 4 ratio := (dist + 2*totalPower - 1) / (2 * totalPower) // v1(10):-38/4, v2(1):39/4 wantVal1Prio /= ratio // -9 wantVal2Prio /= ratio // 9 // 3. Center - noop // 4.
IncrementProposerPriority() -> - // v1(10):-9+10, v2(1):9+1 -> v2 proposer so subsract tvp(11) - // v1(10):1, v2(1):-1 + // v1(10):-9+10, v2(1):9+1 -> v2 proposer so subtract tvp(11) + // v1(10):1, v2(1):-1 wantVal2Prio += updatedVal2.VotingPower // 10 -> prop wantVal1Prio += updatedVal1.VotingPower // 1 wantVal2Prio -= totalPower // -1 @@ -568,12 +580,12 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} // no updates: - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) // 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10 totalPower := val1VotingPower @@ -583,16 +595,14 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // add a validator with the same voting power as the first val2PubKey := ed25519.GenPrivKey().PubKey() - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) - require.NoError(t, err) - updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower} + updateAddVal := abci.NewValidatorUpdate(val2PubKey, val1VotingPower) validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) - assert.NoError(t, err) + require.NoError(t, err) updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) - require.Equal(t, len(updatedState2.NextValidators.Validators), 2) + require.Len(t, updatedState2.NextValidators.Validators, 2) assert.Equal(t, updatedState2.Validators, updatedState.NextValidators) // val1 will still be proposer as val2 just got added: @@ -633,7 +643,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { require.NoError(t, err) updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address) @@ -668,12 +678,12 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // no changes in voting power and both validators have same voting power // -> proposers should alternate: oldState := updatedState3 - abciResponses = &abci.ResponseFinalizeBlock{} + abciResponses = &abci.FinalizeBlockResponse{} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) expectedVal1Prio2 = 1 expectedVal2Prio2 = -1 expectedVal1Prio = -9 @@ -681,12 +691,12 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { for i := 0; i < 1000; i++ { // no validator updates: - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) - assert.NoError(t, err) + require.NoError(t, err) // alternate (and cyclic priorities): assert.NotEqual( t, @@ -729,14 +739,14 @@ func
TestLargeGenesisValidator(t *testing.T) { // reset state validators to above validator state.Validators = types.NewValidatorSet([]*types.Validator{genesisVal}) state.NextValidators = state.Validators - require.True(t, len(state.Validators.Validators) == 1) + require.Len(t, state.Validators.Validators, 1) // update state a few times with no validator updates // asserts that the single validator's ProposerPrio stays the same oldState := state for i := 0; i < 10; i++ { // no updates: - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) @@ -762,12 +772,10 @@ func TestLargeGenesisValidator(t *testing.T) { // see: https://github.com/tendermint/tendermint/issues/2960 firstAddedValPubKey := ed25519.GenPrivKey().PubKey() firstAddedValVotingPower := int64(10) - fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey) - require.NoError(t, err) - firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} + firstAddedVal := abci.NewValidatorUpdate(firstAddedValPubKey, firstAddedValVotingPower) validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) - assert.NoError(t, err) - abciResponses := &abci.ResponseFinalizeBlock{ + require.NoError(t, err) + abciResponses := &abci.FinalizeBlockResponse{ ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}, } block := makeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) @@ -782,7 +790,7 @@ func TestLargeGenesisValidator(t *testing.T) { lastState := updatedState for i := 0; i < 200; i++ { // no updates: - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) @@ -807,19 +815,17 @@ func TestLargeGenesisValidator(t *testing.T) { _, addedOldVal := oldState.NextValidators.GetByAddress(firstAddedValPubKey.Address()) _, addedNewVal := state.NextValidators.GetByAddress(firstAddedValPubKey.Address()) // expect large negative proposer priority for both (genesis validator decreased, 2nd validator increased): - assert.True(t, oldGenesisVal.ProposerPriority > newGenesisVal.ProposerPriority) - assert.True(t, addedOldVal.ProposerPriority < addedNewVal.ProposerPriority) + assert.Greater(t, oldGenesisVal.ProposerPriority, newGenesisVal.ProposerPriority) + assert.Less(t, addedOldVal.ProposerPriority, addedNewVal.ProposerPriority) // add 10 validators with the same voting power as the one added directly after genesis: for i := 0; i < 10; i++ { addedPubKey := ed25519.GenPrivKey().PubKey() - ap, err := cryptoenc.PubKeyToProto(addedPubKey) - require.NoError(t, err) - addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower} + addedVal := abci.NewValidatorUpdate(addedPubKey, firstAddedValVotingPower) validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) - assert.NoError(t, err) + require.NoError(t, err) - abciResponses := &abci.ResponseFinalizeBlock{ + abciResponses := &abci.FinalizeBlockResponse{ ValidatorUpdates: []abci.ValidatorUpdate{addedVal}, } block := makeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) @@ -830,13 +836,11 @@ func TestLargeGenesisValidator(t *testing.T) { state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) } - require.Equal(t, 10+2, 
len(state.NextValidators.Validators)) + require.Len(t, state.NextValidators.Validators, 10+2) // remove genesis validator: - gp, err := cryptoenc.PubKeyToProto(genesisPubKey) - require.NoError(t, err) - removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} - abciResponses = &abci.ResponseFinalizeBlock{ + removeGenesisVal := abci.NewValidatorUpdate(genesisPubKey, 0) + abciResponses = &abci.FinalizeBlockResponse{ ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}, } @@ -852,7 +856,7 @@ func TestLargeGenesisValidator(t *testing.T) { updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) // only the first added val (not the genesis val) should be left - assert.Equal(t, 11, len(updatedState.NextValidators.Validators)) + require.Len(t, updatedState.NextValidators.Validators, 11) // call update state until the effect for the 3rd added validator // being proposer for a long time after the genesis validator left wears off: @@ -860,7 +864,7 @@ func TestLargeGenesisValidator(t *testing.T) { count := 0 isProposerUnchanged := true for isProposerUnchanged { - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) block = makeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) @@ -885,7 +889,7 @@ func TestLargeGenesisValidator(t *testing.T) { proposers := make([]*types.Validator, numVals) for i := 0; i < 100; i++ { // no updates: - abciResponses := &abci.ResponseFinalizeBlock{} + abciResponses := &abci.FinalizeBlockResponse{} validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.ValidatorUpdates) require.NoError(t, err) @@ -923,17 +927,17 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { nextHeight := state.LastBlockHeight + 1 v0, err := stateStore.LoadValidators(nextHeight) - assert.Nil(t, err) + require.NoError(t, err) acc0 := v0.Validators[0].ProposerPriority v1, err := stateStore.LoadValidators(nextHeight + 1) - assert.Nil(t, err) + require.NoError(t, err) acc1 := v1.Validators[0].ProposerPriority assert.NotEqual(t, acc1, acc0, "expected ProposerPriority value to change between heights") } -// TestValidatorChangesSaveLoad tests saving and loading a validator set with +// TestManyValidatorChangesSaveLoad tests saving and loading a validator set with // changes. func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 @@ -960,14 +964,14 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) - require.Nil(t, err) + require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 err = stateStore.Save(state) require.NoError(t, err) // Load nextheight, it should be the oldpubkey. v0, err := stateStore.LoadValidators(nextHeight) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByAddress(pubkeyOld.Address()) assert.NotNil(t, val) @@ -977,7 +981,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Load nextheight+1, it should be the new pubkey. 
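The priority walkthroughs in the test comments above (every validator's priority grows by its voting power, then the elected proposer pays the total power back) reduce to a few lines. A sketch, not the actual types.ValidatorSet implementation, which also scales and centers priorities; with powers {10, 1} and priorities {-9, 9} it reproduces the commented result {1, -1} with v2 as proposer:

    // incrementPriorities applies one round of proposer selection.
    func incrementPriorities(prios, powers []int64) (proposer int) {
    	var total int64
    	for _, p := range powers {
    		total += p
    	}
    	for i := range prios {
    		prios[i] += powers[i] // every validator gains its voting power
    		if prios[i] > prios[proposer] {
    			proposer = i
    		}
    	}
    	prios[proposer] -= total // the proposer pays the total power back
    	return proposer
    }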
v1, err := stateStore.LoadValidators(nextHeight + 1) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByAddress(pubkey.Address()) assert.NotNil(t, val) @@ -1011,21 +1015,22 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} - N := len(changeHeights) + n := len(changeHeights) // Each valset is just one validator. // create list of them. - params := make([]types.ConsensusParams, N+1) + params := make([]types.ConsensusParams, n+1) params[0] = state.ConsensusParams - for i := 1; i < N+1; i++ { + for i := 1; i < n+1; i++ { params[i] = *types.DefaultConsensusParams() + // FIXME: shouldn't PBTS be enabled by default? + params[i].Feature.PbtsEnableHeight = 1 params[i].Block.MaxBytes += int64(i) - } // Build the params history by running updateState // with the right params set for each height. - highestHeight := changeHeights[N-1] + 5 + highestHeight := changeHeights[n-1] + 5 changeIndex := 0 cp := params[changeIndex] var err error @@ -1062,7 +1067,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { for _, testCase := range testCases { p, err := stateStore.LoadConsensusParams(testCase.height) - assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) + require.NoError(t, err, "expected no err at height %d", testCase.height) assert.EqualValues(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) } @@ -1084,12 +1089,11 @@ func TestStateProto(t *testing.T) { } for _, tt := range tc { - tt := tt pbs, err := tt.state.ToProto() if !tt.expPass1 { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err, tt.testName) + require.NoError(t, err, tt.testName) } smt, err := sm.FromProto(pbs) diff --git a/state/store.go b/state/store.go index 5eec25adac8..4372ac47c67 100644 --- a/state/store.go +++ b/state/store.go @@ -4,16 +4,20 @@ import ( "encoding/binary" "errors" "fmt" + "strconv" + "time" "github.com/cosmos/gogoproto/proto" + "github.com/google/orderedcode" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + cmtos "github.com/cometbft/cometbft/internal/os" + "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtos "github.com/cometbft/cometbft/libs/os" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/libs/metrics" "github.com/cometbft/cometbft/types" ) @@ -21,7 +25,7 @@ const ( // persist validators every valSetCheckpointInterval blocks to avoid // LoadValidators taking too much time. // https://github.com/tendermint/tendermint/pull/3438 - // 100000 results in ~ 100ms to get 100 validators (see BenchmarkLoadValidators) + // 100000 results in ~ 100ms to get 100 validators (see BenchmarkLoadValidators). valSetCheckpointInterval = 100000 ) @@ -30,61 +34,153 @@ var ( ErrInvalidHeightValue = errors.New("invalid height value") ) -//------------------------------------------------------------------------ +// ------------------------------------------------------------------------. 
+type KeyLayout interface { + CalcValidatorsKey(height int64) []byte + + CalcConsensusParamsKey(height int64) []byte -func calcValidatorsKey(height int64) []byte { - return []byte(fmt.Sprintf("validatorsKey:%v", height)) + CalcABCIResponsesKey(height int64) []byte } -func calcConsensusParamsKey(height int64) []byte { - return []byte(fmt.Sprintf("consensusParamsKey:%v", height)) +// v1LegacyLayout is a legacy implementation of KeyLayout, kept for backwards +// compatibility. Newer code should use [v2Layout]. +type v1LegacyLayout struct{} + +// In the following [v1LegacyLayout] methods, we preallocate the key's slice to speed +// up append operations and avoid extra allocations. +// The size of the slice is the length of the prefix plus the length of the string +// representation of a 64-bit integer. Namely, the longest 64-bit int has 19 digits, +// therefore its string representation is 20 bytes long (19 digits + 1 byte for the +// sign). + +// CalcABCIResponsesKey implements KeyLayout. +// It returns a database key of the form "abciResponsesKey:<height>" to store/ +// retrieve the response of FinalizeBlock (i.e., the results of executing a block) +// for the block at the given height to/from +// the database. +func (v1LegacyLayout) CalcABCIResponsesKey(height int64) []byte { + const ( + prefix = "abciResponsesKey:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key +} + +// CalcConsensusParamsKey implements KeyLayout. +// It returns a database key of the form "consensusParamsKey:<height>" to store/ +// retrieve the consensus parameters at the given height to/from the database. +func (v1LegacyLayout) CalcConsensusParamsKey(height int64) []byte { + const ( + prefix = "consensusParamsKey:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key } -func calcABCIResponsesKey(height int64) []byte { - return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) +// CalcValidatorsKey implements KeyLayout. +// It returns a database key of the form "validatorsKey:<height>" to store/retrieve +// the validators set at the given height to/from the database. +func (v1LegacyLayout) CalcValidatorsKey(height int64) []byte { + const ( + prefix = "validatorsKey:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key } -//---------------------- +var _ KeyLayout = (*v1LegacyLayout)(nil) + +// ---------------------- var ( - lastABCIResponseKey = []byte("lastABCIResponseKey") + lastABCIResponseKey = []byte("lastABCIResponseKey") // DEPRECATED lastABCIResponsesRetainHeightKey = []byte("lastABCIResponsesRetainHeight") offlineStateSyncHeight = []byte("offlineStateSyncHeightKey") ) +var ( + // prefixes must be unique across all DBs. + prefixValidators = int64(6) + prefixConsensusParams = int64(7) + prefixABCIResponses = int64(8) +) + +type v2Layout struct{} + +func (v2Layout) encodeKey(prefix, height int64) []byte { + res, err := orderedcode.Append(nil, prefix, height) + if err != nil { + panic(err) + } + return res +} + +// CalcABCIResponsesKey implements KeyLayout. +func (v2l v2Layout) CalcABCIResponsesKey(height int64) []byte { + return v2l.encodeKey(prefixABCIResponses, height) +} + +// CalcConsensusParamsKey implements KeyLayout.
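A sketch contrasting the two layouts above (not part of the patch; it reuses the validators prefix for illustration). The v1 string keys sort lexicographically, so height 10 sorts before height 9, while the orderedcode keys used by v2 preserve numeric order, which is what keeps height-range iteration and pruning well-behaved:

    package main

    import (
    	"bytes"
    	"fmt"
    	"strconv"

    	"github.com/google/orderedcode"
    )

    func v1Key(height int64) []byte {
    	key := make([]byte, 0, len("validatorsKey:")+20) // 19 digits + sign
    	key = append(key, "validatorsKey:"...)
    	return strconv.AppendInt(key, height, 10)
    }

    func v2Key(height int64) []byte {
    	key, err := orderedcode.Append(nil, int64(6) /* prefixValidators */, height)
    	if err != nil {
    		panic(err)
    	}
    	return key
    }

    func main() {
    	fmt.Println(bytes.Compare(v1Key(9), v1Key(10)) < 0) // false: ":9" sorts after ":10"
    	fmt.Println(bytes.Compare(v2Key(9), v2Key(10)) < 0) // true: byte order matches numeric order
    }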
+func (v2l v2Layout) CalcConsensusParamsKey(height int64) []byte { + return v2l.encodeKey(prefixConsensusParams, height) +} + +// CalcValidatorsKey implements KeyLayout. +func (v2l v2Layout) CalcValidatorsKey(height int64) []byte { + return v2l.encodeKey(prefixValidators, height) +} + +var _ KeyLayout = (*v2Layout)(nil) + //go:generate ../scripts/mockery_generate.sh Store // Store defines the state store interface // // It is used to retrieve current state and save and load ABCI responses, -// validators and consensus parameters +// validators and consensus parameters. type Store interface { // LoadFromDBOrGenesisFile loads the most recent state. // If the chain is new it will use the genesis file from the provided genesis file path as the current state. - LoadFromDBOrGenesisFile(string) (State, error) + LoadFromDBOrGenesisFile(filepath string) (State, error) // LoadFromDBOrGenesisDoc loads the most recent state. // If the chain is new it will use the genesis doc as the current state. - LoadFromDBOrGenesisDoc(*types.GenesisDoc) (State, error) + LoadFromDBOrGenesisDoc(doc *types.GenesisDoc) (State, error) // Load loads the current state of the blockchain Load() (State, error) // LoadValidators loads the validator set at a given height LoadValidators(height int64) (*types.ValidatorSet, error) // LoadFinalizeBlockResponse loads the abciResponse for a given height - LoadFinalizeBlockResponse(height int64) (*abci.ResponseFinalizeBlock, error) - // LoadLastABCIResponse loads the last abciResponse for a given height - LoadLastFinalizeBlockResponse(height int64) (*abci.ResponseFinalizeBlock, error) + LoadFinalizeBlockResponse(height int64) (*abci.FinalizeBlockResponse, error) + // LoadLastFinalizeBlockResponse loads the last abciResponse for a given height + LoadLastFinalizeBlockResponse(height int64) (*abci.FinalizeBlockResponse, error) // LoadConsensusParams loads the consensus params for a given height LoadConsensusParams(height int64) (types.ConsensusParams, error) // Save overwrites the previous state with the updated one Save(state State) error // SaveFinalizeBlockResponse saves ABCIResponses for a given height - SaveFinalizeBlockResponse(height int64, res *abci.ResponseFinalizeBlock) error + SaveFinalizeBlockResponse(height int64, res *abci.FinalizeBlockResponse) error // Bootstrap is used for bootstrapping state when not starting from a initial height. Bootstrap(state State) error // PruneStates takes the height from which to start pruning and which height stop at - PruneStates(fromHeight, toHeight, evidenceThresholdHeight int64) error + PruneStates(fromHeight, toHeight, evidenceThresholdHeight int64, previouslyPrunedStates uint64) (uint64, error) // PruneABCIResponses will prune all ABCI responses below the given height. - PruneABCIResponses(targetRetainHeight int64) (int64, int64, error) + PruneABCIResponses(targetRetainHeight int64, forceCompact bool) (int64, int64, error) // SaveApplicationRetainHeight persists the application retain height from the application SaveApplicationRetainHeight(height int64) error // GetApplicationRetainHeight returns the retain height set by the application @@ -105,10 +201,12 @@ type Store interface { Close() error } -// dbStore wraps a db (github.com/cometbft/cometbft-db) +// dbStore wraps a db (github.com/cometbft/cometbft-db). type dbStore struct { db dbm.DB + + DBKeyLayout KeyLayout + StoreOptions } @@ -118,6 +216,18 @@ type StoreOptions struct { // the store will maintain only the response object from the latest // height.
DiscardABCIResponses bool + + Compact bool + + CompactionInterval int64 + + // Metrics defines the metrics collector to use for the state store. + // If none is specified, a NopMetrics collector is used. + Metrics *Metrics + + Logger log.Logger + + DBKeyLayout string } var _ Store = (*dbStore)(nil) @@ -130,14 +240,69 @@ func IsEmpty(store dbStore) (bool, error) { return state.IsEmpty(), nil } +func setDBKeyLayout(store *dbStore, dbKeyLayoutVersion string) string { + empty, _ := IsEmpty(*store) + if !empty { + version, err := store.db.Get([]byte("version")) + if err != nil { + // WARN: This is because currently CometBFT DB does not return an error if the key does not exist. + // If this behavior changes, we need to account for that. + panic(err) + } + if len(version) != 0 { + dbKeyLayoutVersion = string(version) + } + } + + switch dbKeyLayoutVersion { + case "v1", "": + store.DBKeyLayout = &v1LegacyLayout{} + dbKeyLayoutVersion = "v1" + case "v2": + store.DBKeyLayout = &v2Layout{} + dbKeyLayoutVersion = "v2" + default: + panic("Unknown version. Expected v1 or v2, given " + dbKeyLayoutVersion) + } + + if err := store.db.SetSync([]byte("version"), []byte(dbKeyLayoutVersion)); err != nil { + panic(err) + } + return dbKeyLayoutVersion +} + // NewStore creates the dbStore of the state pkg. func NewStore(db dbm.DB, options StoreOptions) Store { - return dbStore{db, options} + if options.Metrics == nil { + options.Metrics = NopMetrics() + } + + store := dbStore{ + db: db, + StoreOptions: options, + } + + if options.DBKeyLayout == "" { + options.DBKeyLayout = "v1" + } + + dbKeyLayoutVersion := setDBKeyLayout(&store, options.DBKeyLayout) + + if options.Logger != nil { + options.Logger.Info( + "State store key layout version", + "version", + dbKeyLayoutVersion, + ) + } + + return store } // LoadStateFromDBOrGenesisFile loads the most recent state from the database, // or creates a new one from the given genesisFilePath. func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, error) { + defer addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load_from_db_or_genesis_file"), time.Now())() state, err := store.Load() if err != nil { return State{}, err @@ -156,6 +321,7 @@ func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, err // LoadStateFromDBOrGenesisDoc loads the most recent state from the database, // or creates a new one from the given genesisDoc. func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State, error) { + defer addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load_from_db_or_genesis_doc"), time.Now())() state, err := store.Load() if err != nil { return State{}, err @@ -178,10 +344,14 @@ func (store dbStore) Load() (State, error) { } func (store dbStore) loadState(key []byte) (state State, err error) { + start := time.Now() buf, err := store.db.Get(key) if err != nil { return state, err } + + addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load"), start)() + if len(buf) == 0 { return state, nil } @@ -209,61 +379,94 @@ func (store dbStore) Save(state State) error { } func (store dbStore) save(state State, key []byte) error { + start := time.Now() + + batch := store.db.NewBatch() + defer func(batch dbm.Batch) { + err := batch.Close() + if err != nil { + panic(err) + } + }(batch) nextHeight := state.LastBlockHeight + 1 // If first block, save validators for the block.
if nextHeight == 1 { nextHeight = state.InitialHeight // This extra logic due to validator set changes being delayed 1 block. // It may get overwritten due to InitChain validator updates. - if err := store.saveValidatorsInfo(nextHeight, nextHeight, state.Validators); err != nil { + if err := store.saveValidatorsInfo(nextHeight, nextHeight, state.Validators, batch); err != nil { return err } } // Save next validators. - if err := store.saveValidatorsInfo(nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + if err := store.saveValidatorsInfo(nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators, batch); err != nil { return err } - // Save next consensus params. if err := store.saveConsensusParamsInfo(nextHeight, - state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + state.LastHeightConsensusParamsChanged, state.ConsensusParams, batch); err != nil { return err } - err := store.db.SetSync(key, state.Bytes()) - if err != nil { + + // Count the time taken to marshal the state. + // If the state is big, this can impact the metrics reporting. + stateMarshalTime := time.Now() + stateBytes := state.Bytes() + stateMarshalDiff := time.Since(stateMarshalTime).Seconds() + + if err := batch.Set(key, stateBytes); err != nil { return err } - + if err := batch.WriteSync(); err != nil { + panic(err) + } + store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "save").Observe(time.Since(start).Seconds() - stateMarshalDiff) return nil } // BootstrapState saves a new state, used e.g. by state sync when starting from non-zero height. func (store dbStore) Bootstrap(state State) error { + batch := store.db.NewBatch() + defer func(batch dbm.Batch) { + err := batch.Close() + if err != nil { + panic(err) + } + }(batch) height := state.LastBlockHeight + 1 + defer addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "bootstrap"), time.Now())() if height == 1 { height = state.InitialHeight } if height > 1 && !state.LastValidators.IsNilOrEmpty() { - if err := store.saveValidatorsInfo(height-1, height-1, state.LastValidators); err != nil { + if err := store.saveValidatorsInfo(height-1, height-1, state.LastValidators, batch); err != nil { return err } } - if err := store.saveValidatorsInfo(height, height, state.Validators); err != nil { + if err := store.saveValidatorsInfo(height, height, state.Validators, batch); err != nil { return err } - if err := store.saveValidatorsInfo(height+1, height+1, state.NextValidators); err != nil { + if err := store.saveValidatorsInfo(height+1, height+1, state.NextValidators, batch); err != nil { return err } if err := store.saveConsensusParamsInfo(height, - state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + state.LastHeightConsensusParamsChanged, state.ConsensusParams, batch); err != nil { + return err + } + + if err := batch.Set(stateKey, state.Bytes()); err != nil { return err } - return store.db.SetSync(stateKey, state.Bytes()) + if err := batch.WriteSync(); err != nil { + panic(err) + } + + return batch.Close() } // PruneStates deletes states between the given heights (including from, excluding to).
It is not @@ -274,21 +477,23 @@ func (store dbStore) Bootstrap(state State) error { // encoding not preserving ordering: https://github.com/tendermint/tendermint/issues/4567 // This will cause some old states to be left behind when doing incremental partial prunes, // specifically older checkpoints and LastHeightChanged targets. -func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight int64) error { +func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight int64, previouslyPrunedStates uint64) (uint64, error) { + defer addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "prune_states"), time.Now())() if from <= 0 || to <= 0 { - return fmt.Errorf("from height %v and to height %v must be greater than 0", from, to) + return 0, fmt.Errorf("from height %v and to height %v must be greater than 0", from, to) } if from >= to { - return fmt.Errorf("from height %v must be lower than to height %v", from, to) + return 0, fmt.Errorf("from height %v must be lower than to height %v", from, to) } - valInfo, err := loadValidatorsInfo(store.db, min(to, evidenceThresholdHeight)) + valInfo, elapsedTime, err := loadValidatorsInfo(store.db, store.DBKeyLayout.CalcValidatorsKey(min(to, evidenceThresholdHeight))) if err != nil { - return fmt.Errorf("validators at height %v not found: %w", to, err) + return 0, fmt.Errorf("validators at height %v not found: %w", to, err) } + paramsInfo, err := store.loadConsensusParamsInfo(to) if err != nil { - return fmt.Errorf("consensus params at height %v not found: %w", to, err) + return 0, fmt.Errorf("consensus params at height %v not found: %w", to, err) } keepVals := make(map[int64]bool) @@ -312,16 +517,17 @@ func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight i // params, otherwise they will panic if they're retrieved directly (instead of // indirectly via a LastHeightChanged pointer).
if keepVals[h] { - v, err := loadValidatorsInfo(store.db, h) + v, tmpTime, err := loadValidatorsInfo(store.db, store.DBKeyLayout.CalcValidatorsKey(h)) + elapsedTime += tmpTime if err != nil || v.ValidatorSet == nil { vip, err := store.LoadValidators(h) if err != nil { - return err + return pruned, err } pvi, err := vip.ToProto() if err != nil { - return err + return pruned, err } v.ValidatorSet = pvi @@ -329,17 +535,17 @@ func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight i bz, err := v.Marshal() if err != nil { - return err + return pruned, err } - err = batch.Set(calcValidatorsKey(h), bz) + err = batch.Set(store.DBKeyLayout.CalcValidatorsKey(h), bz) if err != nil { - return err + return pruned, err } } } else if h < evidenceThresholdHeight { - err = batch.Delete(calcValidatorsKey(h)) + err = batch.Delete(store.DBKeyLayout.CalcValidatorsKey(h)) if err != nil { - return err + return pruned, err } } // else we keep the validator set because we might need @@ -348,37 +554,37 @@ func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight i if keepParams[h] { p, err := store.loadConsensusParamsInfo(h) if err != nil { - return err + return pruned, err } if p.ConsensusParams.Equal(&cmtproto.ConsensusParams{}) { params, err := store.LoadConsensusParams(h) if err != nil { - return err + return pruned, err } p.ConsensusParams = params.ToProto() p.LastHeightChanged = h bz, err := p.Marshal() if err != nil { - return err + return pruned, err } - err = batch.Set(calcConsensusParamsKey(h), bz) + err = batch.Set(store.DBKeyLayout.CalcConsensusParamsKey(h), bz) if err != nil { - return err + return pruned, err } } } else { - err = batch.Delete(calcConsensusParamsKey(h)) + err = batch.Delete(store.DBKeyLayout.CalcConsensusParamsKey(h)) if err != nil { - return err + return pruned, err } } - err = batch.Delete(calcABCIResponsesKey(h)) + err = batch.Delete(store.DBKeyLayout.CalcABCIResponsesKey(h)) if err != nil { - return err + return pruned, err } pruned++ @@ -386,7 +592,7 @@ func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight i if pruned%1000 == 0 && pruned > 0 { err := batch.Write() if err != nil { - return err + return pruned, err } batch.Close() batch = store.db.NewBatch() @@ -396,19 +602,30 @@ func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight i err = batch.WriteSync() if err != nil { - return err + return pruned, err } - return nil + // We do not want to panic or interrupt consensus on compaction failure. + if store.StoreOptions.Compact && previouslyPrunedStates+pruned >= uint64(store.StoreOptions.CompactionInterval) { + // When the range is nil, nil, the database will try to compact + // ALL levels. Another option is to set a predefined range of + // specific keys. + err = store.db.Compact(nil, nil) + } + + store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "pruning_load_validator_info").Observe(elapsedTime) + return pruned, err } // PruneABCIResponses attempts to prune all ABCI responses up to, but not // including, the given height. On success, returns the number of heights // pruned and the new retain height.
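PruneStates now reports how many states it deleted, and the new previouslyPrunedStates argument lets the caller carry that count across invocations so the CompactionInterval check still fires on incremental prunes. A hypothetical caller, sketched under the assumption that something like the background pruner service drives it:

    package example

    import "github.com/cometbft/cometbft/state"

    // pruneOnce is a hypothetical wrapper around the Store interface above.
    func pruneOnce(st state.Store, from, to, evidenceHeight int64, prunedSoFar uint64) (uint64, error) {
    	pruned, err := st.PruneStates(from, to, evidenceHeight, prunedSoFar)
    	// Accumulate across calls: the store compacts only once the running
    	// total crosses StoreOptions.CompactionInterval.
    	return prunedSoFar + pruned, err
    }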
-func (store dbStore) PruneABCIResponses(targetRetainHeight int64) (int64, int64, error) { +func (store dbStore) PruneABCIResponses(targetRetainHeight int64, forceCompact bool) (pruned int64, newRetainHeight int64, err error) { if store.DiscardABCIResponses { return 0, 0, nil } + + defer addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "prune_abci_responses"), time.Now())() lastRetainHeight, err := store.getLastABCIResponsesRetainHeight() if err != nil { return 0, 0, fmt.Errorf("failed to look up last ABCI responses retain height: %w", err) @@ -420,11 +637,10 @@ func (store dbStore) PruneABCIResponses(targetRetainHeight int64) (int64, int64, batch := store.db.NewBatch() defer batch.Close() - pruned := int64(0) batchPruned := int64(0) for h := lastRetainHeight; h < targetRetainHeight; h++ { - if err := batch.Delete(calcABCIResponsesKey(h)); err != nil { + if err := batch.Delete(store.DBKeyLayout.CalcABCIResponsesKey(h)); err != nil { return pruned, lastRetainHeight + pruned, fmt.Errorf("failed to delete ABCI responses at height %d: %w", h, err) } batchPruned++ @@ -444,107 +660,136 @@ func (store dbStore) PruneABCIResponses(targetRetainHeight int64) (int64, int64, defer batch.Close() } } - return pruned + batchPruned, targetRetainHeight, batch.WriteSync() + + if err = batch.WriteSync(); err != nil { + return pruned + batchPruned, targetRetainHeight, err + } + + if forceCompact && store.Compact { + if pruned+batchPruned >= store.CompactionInterval || targetRetainHeight-lastRetainHeight >= store.CompactionInterval { + err = store.db.Compact(nil, nil) + } + } + return pruned + batchPruned, targetRetainHeight, err } -//------------------------------------------------------------------------ +// ------------------------------------------------------------------------ // TxResultsHash returns the root hash of a Merkle tree of // ExecTxResulst responses (see ABCIResults.Hash) // -// See merkle.SimpleHashFromByteSlices +// See merkle.SimpleHashFromByteSlices. func TxResultsHash(txResults []*abci.ExecTxResult) []byte { return types.NewResults(txResults).Hash() } -// LoadFinalizeBlockResponse loads the DiscardABCIResponses for the given height from the -// database. If the node has D set to true, ErrABCIResponsesNotPersisted -// is persisted. If not found, ErrNoABCIResponsesForHeight is returned. -func (store dbStore) LoadFinalizeBlockResponse(height int64) (*abci.ResponseFinalizeBlock, error) { +// LoadFinalizeBlockResponse loads FinalizeBlockResponse for the given height +// from the database. If the node has DiscardABCIResponses set to true, +// ErrFinalizeBlockResponsesNotPersisted is returned. If not found, +// ErrNoABCIResponsesForHeight is returned. 
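Both pruners flush their delete batch every 1000 keys so memory stays bounded during large prunes. The pattern in isolation, a sketch over the cometbft-db API rather than the patch's exact code:

    package example

    import dbm "github.com/cometbft/cometbft-db"

    // deleteRange deletes key(from) .. key(to-1), flushing every 1000 deletes.
    func deleteRange(db dbm.DB, key func(int64) []byte, from, to int64) (deleted int64, err error) {
    	batch := db.NewBatch()
    	defer func() { _ = batch.Close() }() // closes whichever batch is current
    	for h := from; h < to; h++ {
    		if err := batch.Delete(key(h)); err != nil {
    			return deleted, err
    		}
    		deleted++
    		if deleted%1000 == 0 {
    			// Flush and start a fresh batch so memory stays bounded.
    			if err := batch.Write(); err != nil {
    				return deleted, err
    			}
    			batch.Close()
    			batch = db.NewBatch()
    		}
    	}
    	return deleted, batch.WriteSync() // final synchronous flush
    }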
+func (store dbStore) LoadFinalizeBlockResponse(height int64) (*abci.FinalizeBlockResponse, error) { if store.DiscardABCIResponses { return nil, ErrFinalizeBlockResponsesNotPersisted } - buf, err := store.db.Get(calcABCIResponsesKey(height)) + start := time.Now() + buf, err := store.db.Get(store.DBKeyLayout.CalcABCIResponsesKey(height)) if err != nil { return nil, err } + + addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load_abci_responses"), start)() + if len(buf) == 0 { return nil, ErrNoABCIResponsesForHeight{height} } - resp := new(abci.ResponseFinalizeBlock) + resp := new(abci.FinalizeBlockResponse) err = resp.Unmarshal(buf) - if err != nil { + // Check for an error or a nil resp.AppHash: either one means the bytes + // must instead be unmarshaled as a LegacyABCIResponses. Depending on the + // content of the source message (serialized as ABCIResponses), the bytes + // can deserialize as a FinalizeBlockResponse without raising an error, + // but the fields are then not populated properly: they hold zero values, + // one of which is AppHash == nil. + // This is verified in /state/compatibility_test.go. + if err != nil || resp.AppHash == nil { // The data might be of the legacy ABCI response type, so // we try to unmarshal that legacyResp := new(cmtstate.LegacyABCIResponses) + if err := legacyResp.Unmarshal(buf); err != nil { + // Only return an error: this method is invoked only through `/block_results` + // and some tests, not for state logic, so there is no need to exit CometBFT; + // just return the error. + store.Logger.Error("failed in LoadFinalizeBlockResponse", "error", ErrABCIResponseCorruptedOrSpecChangeForHeight{Height: height, Err: err}) + return nil, ErrABCIResponseCorruptedOrSpecChangeForHeight{Height: height, Err: err} } - rerr := legacyResp.Unmarshal(buf) - if rerr != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmtos.Exit(fmt.Sprintf(`LoadFinalizeBlockResponse: Data has been corrupted or its spec has - changed: %v\n`, err)) - } // The state store contains the old format. Migrate to - // the new ResponseFinalizeBlock format. Note that the + // the new FinalizeBlockResponse format. Note that the // new struct expects the AppHash which we don't have. return responseFinalizeBlockFromLegacy(legacyResp), nil } // TODO: ensure that buf is completely read. + // Otherwise return the FinalizeBlockResponse return resp, nil } -// LoadLastFinalizeBlockResponses loads the FinalizeBlockResponses from the most recent height. +// LoadLastFinalizeBlockResponse loads the FinalizeBlockResponse from the most recent height. // The height parameter is used to ensure that the response corresponds to the latest height. // If not, an error is returned. // // This method is used for recovering in the case that we called the Commit ABCI // method on the application but crashed before persisting the results.
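The AppHash == nil check above exists because protobuf will happily decode old ABCIResponses bytes into the new message, just with zero values. The decode path in isolation, a condensed sketch assuming the state package's own imports (abci, cmtstate) and its responseFinalizeBlockFromLegacy helper:

    func decodeFinalizeBlockResponse(buf []byte) (*abci.FinalizeBlockResponse, error) {
    	resp := new(abci.FinalizeBlockResponse)
    	if err := resp.Unmarshal(buf); err == nil && resp.AppHash != nil {
    		return resp, nil // new format decoded cleanly
    	}
    	// Fall back to the legacy wire format and migrate it.
    	legacy := new(cmtstate.LegacyABCIResponses)
    	if err := legacy.Unmarshal(buf); err != nil {
    		return nil, err // neither format: corrupted data or changed spec
    	}
    	return responseFinalizeBlockFromLegacy(legacy), nil
    }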
-func (store dbStore) LoadLastFinalizeBlockResponse(height int64) (*abci.ResponseFinalizeBlock, error) { - bz, err := store.db.Get(lastABCIResponseKey) +func (store dbStore) LoadLastFinalizeBlockResponse(height int64) (*abci.FinalizeBlockResponse, error) { + start := time.Now() + buf, err := store.db.Get(store.DBKeyLayout.CalcABCIResponsesKey(height)) if err != nil { return nil, err } - - if len(bz) == 0 { - return nil, errors.New("no last ABCI response has been persisted") + addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load_last_abci_response"), start)() + if len(buf) == 0 { + // DEPRECATED lastABCIResponseKey + // It is possible if this is called directly after an upgrade that + // `lastABCIResponseKey` contains the last ABCI responses. + bz, err := store.db.Get(lastABCIResponseKey) + if err == nil && len(bz) > 0 { + info := new(cmtstate.ABCIResponsesInfo) + err = info.Unmarshal(bz) + if err != nil { + cmtos.Exit(fmt.Sprintf(`LoadLastFinalizeBlockResponse: Data has been corrupted or its spec has changed: %v\n`, err)) + } + // Here we validate the result by comparing its height to the expected height. + if height != info.GetHeight() { + return nil, fmt.Errorf("expected height %d but last stored abci responses was at height %d", height, info.GetHeight()) + } + if info.FinalizeBlock == nil { + // sanity check + if info.LegacyAbciResponses == nil { + panic("state store contains last abci response but it is empty") + } + return responseFinalizeBlockFromLegacy(info.LegacyAbciResponses), nil + } + return info.FinalizeBlock, nil + } + // END OF DEPRECATED lastABCIResponseKey + return nil, fmt.Errorf("expected last ABCI responses at height %d, but none are found", height) } - - info := new(cmtstate.ABCIResponsesInfo) - err = info.Unmarshal(bz) + resp := new(abci.FinalizeBlockResponse) + err = resp.Unmarshal(buf) if err != nil { - cmtos.Exit(fmt.Sprintf(`LoadLastFinalizeBlockResponse: Data has been corrupted or its spec has - changed: %v\n`, err)) + cmtos.Exit(fmt.Sprintf(`LoadLastFinalizeBlockResponse: Data has been corrupted or its spec has changed: %v\n`, err)) } - - // Here we validate the result by comparing its height to the expected height. - if height != info.GetHeight() { - return nil, fmt.Errorf("expected height %d but last stored abci responses was at height %d", height, info.GetHeight()) - } - - // It is possible if this is called directly after an upgrade that - // ResponseFinalizeBlock is nil. In which case we use the legacy - // ABCI responses - if info.ResponseFinalizeBlock == nil { - // sanity check - if info.LegacyAbciResponses == nil { - panic("state store contains last abci response but it is empty") - } - return responseFinalizeBlockFromLegacy(info.LegacyAbciResponses), nil - } - - return info.ResponseFinalizeBlock, nil + return resp, nil } -// SaveFinalizeBlockResponse persists the ResponseFinalizeBlock to the database. +// SaveFinalizeBlockResponse persists the FinalizeBlockResponse to the database. // This is useful in case we crash after app.Commit and before s.Save(). // Responses are indexed by height so they can also be loaded later to produce // Merkle proofs. // // CONTRACT: height must be monotonically increasing every time this is called. 
-func (store dbStore) SaveFinalizeBlockResponse(height int64, resp *abci.ResponseFinalizeBlock) error { +func (store dbStore) SaveFinalizeBlockResponse(height int64, resp *abci.FinalizeBlockResponse) error { var dtxs []*abci.ExecTxResult // strip nil values, for _, tx := range resp.TxResults { @@ -554,30 +799,36 @@ func (store dbStore) SaveFinalizeBlockResponse(height int64, resp *abci.Response } resp.TxResults = dtxs - // If the flag is false then we save the ABCIResponse. This can be used for the /BlockResults - // query or to reindex an event using the command line. - if !store.DiscardABCIResponses { - bz, err := resp.Marshal() - if err != nil { + bz, err := resp.Marshal() + if err != nil { + return err + } + + // Save the ABCI response. + // + // We always save the last ABCI response for crash recovery. + // If `store.DiscardABCIResponses` is true, then we delete the previous ABCI response. + start := time.Now() + if store.DiscardABCIResponses && height > 1 { + if err := store.db.Delete(store.DBKeyLayout.CalcABCIResponsesKey(height - 1)); err != nil { return err } - if err := store.db.Set(calcABCIResponsesKey(height), bz); err != nil { - return err + // Compact the database to clean up the responses deleted above. + // + // This is because PruneABCIResponses will not delete anything if + // DiscardABCIResponses is true, so we have to do it here. + if height%1000 == 0 { + if err := store.db.Compact(nil, nil); err != nil { + return err + } } } - // We always save the last ABCI response for crash recovery. - // This overwrites the previous saved ABCI Response. - response := &cmtstate.ABCIResponsesInfo{ - ResponseFinalizeBlock: resp, - Height: height, - } - bz, err := response.Marshal() - if err != nil { + if err := store.db.SetSync(store.DBKeyLayout.CalcABCIResponsesKey(height), bz); err != nil { return err } - - return store.db.SetSync(lastABCIResponseKey, bz) + addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "save_abci_responses"), start)() + return nil } func (store dbStore) getValue(key []byte) ([]byte, error) { @@ -592,7 +843,7 @@ func (store dbStore) getValue(key []byte) ([]byte, error) { return bz, nil } -// ApplicationRetainHeight +// ApplicationRetainHeight. func (store dbStore) SaveApplicationRetainHeight(height int64) error { return store.db.SetSync(AppRetainHeightKey, int64ToBytes(height)) } @@ -611,7 +862,7 @@ func (store dbStore) GetApplicationRetainHeight() (int64, error) { return height, nil } -// DataCompanionRetainHeight +// DataCompanionRetainHeight. func (store dbStore) SaveCompanionBlockRetainHeight(height int64) error { return store.db.SetSync(CompanionBlockRetainHeightKey, int64ToBytes(height)) } @@ -630,7 +881,7 @@ func (store dbStore) GetCompanionBlockRetainHeight() (int64, error) { return height, nil } -// DataCompanionRetainHeight +// ABCIResultsRetainHeight. func (store dbStore) SaveABCIResRetainHeight(height int64) error { return store.db.SetSync(ABCIResultsRetainHeightKey, int64ToBytes(height)) } @@ -665,18 +916,20 @@ func (store dbStore) setLastABCIResponsesRetainHeight(height int64) error { return store.db.SetSync(lastABCIResponsesRetainHeightKey, int64ToBytes(height)) } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // LoadValidators loads the ValidatorSet for a given height. // Returns ErrNoValSetForHeight if the validator set can't be found for this height.
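A condensed sketch of the discard-and-compact pattern above, assuming only the dbm.DB interface; discardPrevious and keyAt are hypothetical names, and the every-1000-heights cadence mirrors the patch.

func discardPrevious(db dbm.DB, keyAt func(int64) []byte, height int64) error {
	// Deleting the previous height's response only writes a tombstone in
	// LSM-backed stores, so space is not reclaimed immediately.
	if err := db.Delete(keyAt(height - 1)); err != nil {
		return err
	}
	// Compact periodically to actually reclaim the space, since
	// PruneABCIResponses skips deletion when responses are discarded.
	if height%1000 == 0 {
		return db.Compact(nil, nil)
	}
	return nil
}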
func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { - valInfo, err := loadValidatorsInfo(store.db, height) + valInfo, elapsedTime, err := loadValidatorsInfo(store.db, store.DBKeyLayout.CalcValidatorsKey(height)) if err != nil { return nil, ErrNoValSetForHeight{height} } + // (WARN) elapsedTime also includes the time spent unmarshalling the validator info if valInfo.ValidatorSet == nil { lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) - valInfo2, err := loadValidatorsInfo(store.db, lastStoredHeight) + valInfo2, tmpTime, err := loadValidatorsInfo(store.db, store.DBKeyLayout.CalcValidatorsKey(lastStoredHeight)) + elapsedTime += tmpTime if err != nil || valInfo2.ValidatorSet == nil { return nil, fmt.Errorf("couldn't find validators at height %d (height %d was originally requested): %w", @@ -705,7 +958,7 @@ func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { if err != nil { return nil, err } - + store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load_validators").Observe(elapsedTime) return vip, nil } @@ -715,14 +968,17 @@ func lastStoredHeightFor(height, lastHeightChanged int64) int64 { } // CONTRACT: Returned ValidatorsInfo can be mutated. -func loadValidatorsInfo(db dbm.DB, height int64) (*cmtstate.ValidatorsInfo, error) { - buf, err := db.Get(calcValidatorsKey(height)) +func loadValidatorsInfo(db dbm.DB, valInfoKey []byte) (*cmtstate.ValidatorsInfo, float64, error) { + start := time.Now() + buf, err := db.Get(valInfoKey) if err != nil { - return nil, err + return nil, 0, err } + elapsedTime := time.Since(start).Seconds() + if len(buf) == 0 { - return nil, errors.New("value retrieved from db is empty") + return nil, 0, errors.New("value retrieved from db is empty") } v := new(cmtstate.ValidatorsInfo) @@ -734,7 +990,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*cmtstate.ValidatorsInfo, erro } // TODO: ensure that buf is completely read. - return v, nil + return v, elapsedTime, nil } // saveValidatorsInfo persists the validator set. @@ -742,7 +998,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*cmtstate.ValidatorsInfo, erro // `height` is the effective height for which the validator is responsible for // signing. It should be called from s.Save(), right before the state itself is // persisted.
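The two-step lookup above relies on validator sets being stored sparsely: a height may carry only LastHeightChanged, with the full set stored at an earlier height. A minimal sketch of that control flow, with loadValSet as a hypothetical name and the metrics plumbing elided:

func loadValSet(get func(int64) (*cmtstate.ValidatorsInfo, error), height int64) (*cmtstate.ValidatorsInfo, error) {
	info, err := get(height)
	if err != nil {
		return nil, err
	}
	// An empty ValidatorSet means this record only points backwards; the
	// full set lives at the last change height or checkpoint.
	if info.ValidatorSet == nil {
		return get(lastStoredHeightFor(height, info.LastHeightChanged))
	}
	return info, nil
}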
-func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet *types.ValidatorSet) error { +func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet *types.ValidatorSet, batch dbm.Batch) error { if lastHeightChanged > height { return errors.New("lastHeightChanged cannot be greater than ValidatorsInfo height") } @@ -763,16 +1019,17 @@ func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet if err != nil { return err } - - err = store.db.Set(calcValidatorsKey(height), bz) + start := time.Now() + err = batch.Set(store.DBKeyLayout.CalcValidatorsKey(height), bz) if err != nil { return err } + defer addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "saveValidatorsInfo"), start)() return nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // ConsensusParamsInfo represents the latest consensus params, or the last height it changed @@ -805,10 +1062,14 @@ func (store dbStore) LoadConsensusParams(height int64) (types.ConsensusParams, e } func (store dbStore) loadConsensusParamsInfo(height int64) (*cmtstate.ConsensusParamsInfo, error) { - buf, err := store.db.Get(calcConsensusParamsKey(height)) + start := time.Now() + buf, err := store.db.Get(store.DBKeyLayout.CalcConsensusParamsKey(height)) if err != nil { return nil, err } + + addTimeSample(store.StoreOptions.Metrics.StoreAccessDurationSeconds.With("method", "load_consensus_params"), start)() + if len(buf) == 0 { return nil, errors.New("value retrieved from db is empty") } @@ -828,7 +1089,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*cmtstate.ConsensusP // It should be called from s.Save(), right before the state itself is persisted. // If the consensus params did not change after processing the latest block, // only the last height for which they changed is persisted. -func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, params types.ConsensusParams) error { +func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, params types.ConsensusParams, batch dbm.Batch) error { paramsInfo := &cmtstate.ConsensusParamsInfo{ LastHeightChanged: changeHeight, } @@ -841,7 +1102,7 @@ func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, par return err } - err = store.db.Set(calcConsensusParamsKey(nextHeight), bz) + err = batch.Set(store.DBKeyLayout.CalcConsensusParamsKey(nextHeight), bz) if err != nil { return err } @@ -857,7 +1118,7 @@ func (store dbStore) SetOfflineStateSyncHeight(height int64) error { return nil } -// Gets the height at which the store is bootstrapped after out of band statesync +// Gets the height at which the store is bootstrapped after out of band statesync. func (store dbStore) GetOfflineStateSyncHeight() (int64, error) { buf, err := store.db.Get(offlineStateSyncHeight) if err != nil { @@ -887,19 +1148,57 @@ func min(a int64, b int64) int64 { } // responseFinalizeBlockFromLegacy is a convenience function that takes the old abci responses and morphs -// it to the finalize block response. 
Note that the app hash is missing -func responseFinalizeBlockFromLegacy(legacyResp *cmtstate.LegacyABCIResponses) *abci.ResponseFinalizeBlock { - return &abci.ResponseFinalizeBlock{ - TxResults: legacyResp.DeliverTxs, - ValidatorUpdates: legacyResp.EndBlock.ValidatorUpdates, - ConsensusParamUpdates: legacyResp.EndBlock.ConsensusParamUpdates, - Events: append(legacyResp.BeginBlock.Events, legacyResp.EndBlock.Events...), - // NOTE: AppHash is missing in the response but will - // be caught and filled in consensus/replay.go +// it to the finalize block response. Note that the app hash is missing. +func responseFinalizeBlockFromLegacy(legacyResp *cmtstate.LegacyABCIResponses) *abci.FinalizeBlockResponse { + var response abci.FinalizeBlockResponse + events := make([]abci.Event, 0) + + if legacyResp.DeliverTxs != nil { + response.TxResults = legacyResp.DeliverTxs + } + + // Check BeginBlock and EndBlock, and only append events or assign values if they are not nil + if legacyResp.BeginBlock != nil { + if legacyResp.BeginBlock.Events != nil { + // Add a mode=BeginBlock attribute to BeginBlock events + for idx := range legacyResp.BeginBlock.Events { + legacyResp.BeginBlock.Events[idx].Attributes = append(legacyResp.BeginBlock.Events[idx].Attributes, abci.EventAttribute{ + Key: "mode", + Value: "BeginBlock", + Index: false, + }) + } + events = append(events, legacyResp.BeginBlock.Events...) + } } + if legacyResp.EndBlock != nil { + if legacyResp.EndBlock.ValidatorUpdates != nil { + response.ValidatorUpdates = legacyResp.EndBlock.ValidatorUpdates + } + if legacyResp.EndBlock.ConsensusParamUpdates != nil { + response.ConsensusParamUpdates = legacyResp.EndBlock.ConsensusParamUpdates + } + if legacyResp.EndBlock.Events != nil { + // Add a mode=EndBlock attribute to EndBlock events + for idx := range legacyResp.EndBlock.Events { + legacyResp.EndBlock.Events[idx].Attributes = append(legacyResp.EndBlock.Events[idx].Attributes, abci.EventAttribute{ + Key: "mode", + Value: "EndBlock", + Index: false, + }) + } + events = append(events, legacyResp.EndBlock.Events...) + } + } + + response.Events = events + + // NOTE: AppHash is missing in the response but will + // be caught and filled in consensus/replay.go + return &response } -// ----- Util +// ----- Util. func int64FromBytes(bz []byte) int64 { v, _ := binary.Varint(bz) return v @@ -910,3 +1209,11 @@ func int64ToBytes(i int64) []byte { n := binary.PutVarint(buf, i) return buf[:n] } + +// addTimeSample returns a function that, when called, adds an observation to m. +// The observation added to m is the number of seconds elapsed since addTimeSample +// was initially called. addTimeSample is meant to be called in a defer to calculate +// the amount of time a function takes to complete. +func addTimeSample(m metrics.Histogram, start time.Time) func() { + return func() { m.Observe(time.Since(start).Seconds()) } +} diff --git a/state/store_db_key_layout_test.go b/state/store_db_key_layout_test.go new file mode 100644 index 00000000000..cbc64c3a99a --- /dev/null +++ b/state/store_db_key_layout_test.go @@ -0,0 +1,45 @@ +package state + +import ( + "strconv" + "testing" +) + +// Fuzzing only the CalcABCIResponsesKey method, because the other methods of +// v1LegacyLayout do the same thing, just with a different prefix. +// Therefore, results will be the same. +func FuzzCalcABCIResponsesKey(f *testing.F) { + layout := v1LegacyLayout{} + + // Add seed inputs for fuzzing.
+ f.Add(int64(0)) + f.Add(int64(42)) + f.Add(int64(1245600)) + f.Add(int64(1234567890)) + f.Add(int64(9223372036854775807)) + + f.Fuzz(func(t *testing.T, height int64) { + if height < 0 { + // height won't be < 0, so skip + t.SkipNow() + } + + key := layout.CalcABCIResponsesKey(height) + + const prefix = "abciResponsesKey:" + gotPrefix := string(key[:len(prefix)]) + + if len(key) < len(prefix) || gotPrefix != prefix { + t.Fatalf("key does not start with prefix '%s': %s", prefix, key) + } + + heightStr := string(key[len(prefix):]) + gotHeight, err := strconv.ParseInt(heightStr, 10, 64) + if err != nil { + t.Fatalf("parsing height from key: %s, error: %s", key, err) + } + if gotHeight != height { + t.Errorf("want height %d, but got %d", height, gotHeight) + } + }) +} diff --git a/state/store_test.go b/state/store_test.go index 7affe5e178b..30051ebdd4b 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -6,23 +6,20 @@ import ( "testing" "time" - cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/state/indexer" - "github.com/cometbft/cometbft/state/indexer/block" - "github.com/cometbft/cometbft/state/txindex" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/crypto" + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" sm "github.com/cometbft/cometbft/state" + "github.com/cometbft/cometbft/state/indexer" + "github.com/cometbft/cometbft/state/indexer/block" + "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" ) @@ -31,22 +28,32 @@ func TestStoreLoadValidators(t *testing.T) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, + DBKeyLayout: "v2", }) val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) // 1) LoadValidators loads validators using a height where they were last changed - err := sm.SaveValidatorsInfo(stateDB, 1, 1, vals) + err := sm.SaveValidatorsInfo(stateDB, 1, 1, vals, "v2") + require.NoError(t, err) + + // The store was initialized with v2, so we cannot find a validator using the representation + // used by v1 + err = sm.SaveValidatorsInfo(stateDB, 2, 1, vals, "v1") require.NoError(t, err) - err = sm.SaveValidatorsInfo(stateDB, 2, 1, vals) + _, err = stateStore.LoadValidators(2) + require.Error(t, err) + + err = sm.SaveValidatorsInfo(stateDB, 2, 1, vals, "v2") require.NoError(t, err) loadedVals, err := stateStore.LoadValidators(2) require.NoError(t, err) + assert.NotZero(t, loadedVals.Size()) // 2) LoadValidators loads validators using a checkpoint height - err = sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + err = sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals, "v2") require.NoError(t, err) loadedVals, err = stateStore.LoadValidators(sm.ValSetCheckpointInterval) @@ -59,12 +66,17 @@ func BenchmarkLoadValidators(b *testing.B) { config := test.ResetTestRoot("state_") defer os.RemoveAll(config.RootDir) + dbType := dbm.BackendType(config.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
require.NoError(b, err) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, + DBKeyLayout: "v2", }) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { b.Fatal(err) @@ -72,13 +84,19 @@ func BenchmarkLoadValidators(b *testing.B) { state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) + err = stateStore.Save(state) require.NoError(b, err) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... - i := i - if err := sm.SaveValidatorsInfo(stateDB, - int64(i), state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + err := sm.SaveValidatorsInfo( + stateDB, + int64(i), + state.LastHeightValidatorsChanged, + state.NextValidators, + "v2", + ) + if err != nil { b.Fatal(err) } @@ -124,7 +142,6 @@ func TestPruneStates(t *testing.T) { "prune when evidence height < height": {20, 1, 18, 17, false, []int64{13, 17, 18, 19, 20}, []int64{15, 18, 19, 20}, []int64{18, 19, 20}}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { db := dbm.NewMemDB() stateStore := sm.NewStore(db, sm.StoreOptions{ @@ -134,7 +151,7 @@ func TestPruneStates(t *testing.T) { // Generate a bunch of state data. Validators change for heights ending with 3, and // parameters when ending with 5. - validator := &types.Validator{Address: cmtrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} + validator := &types.Validator{Address: pk.Address(), VotingPower: 100, PubKey: pk} validatorSet := &types.ValidatorSet{ Validators: []*types.Validator{validator}, Proposer: validator, @@ -169,18 +186,19 @@ func TestPruneStates(t *testing.T) { err := stateStore.Save(state) require.NoError(t, err) - err = stateStore.SaveFinalizeBlockResponse(h, &abci.ResponseFinalizeBlock{ + err = stateStore.SaveFinalizeBlockResponse(h, &abci.FinalizeBlockResponse{ TxResults: []*abci.ExecTxResult{ {Data: []byte{1}}, {Data: []byte{2}}, {Data: []byte{3}}, }, + AppHash: make([]byte, 1), }) require.NoError(t, err) } // Test assertions - err := stateStore.PruneStates(tc.pruneFrom, tc.pruneTo, tc.evidenceThresholdHeight) + _, err := stateStore.PruneStates(tc.pruneFrom, tc.pruneTo, tc.evidenceThresholdHeight, 0) if tc.expectErr { require.Error(t, err) return @@ -238,7 +256,7 @@ func TestTxResultsHash(t *testing.T) { proof := results.ProveResult(0) bz, err := results[0].Marshal() require.NoError(t, err) - assert.NoError(t, proof.Verify(root, bz)) + require.NoError(t, proof.Verify(root, bz)) } func sliceToMap(s []int64) map[int64]bool { @@ -258,10 +276,10 @@ func makeStateAndBlockStoreAndIndexers() (sm.State, *store.BlockStore, txindex.T }) state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) if err != nil { - panic(fmt.Sprintf("error constructing state from genesis file: %s", err.Error())) + panic("error constructing state from genesis file: " + err.Error()) } - txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, "test") + txIndexer, blockIndexer, _, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, "test") if err != nil { panic(err) } @@ -269,7 +287,10 @@ func makeStateAndBlockStoreAndIndexers() (sm.State, *store.BlockStore, txindex.T return state, store.NewBlockStore(blockDB), txIndexer, blockIndexer, func() { os.RemoveAll(config.RootDir) }, stateStore } -func initStateStoreRetainHeights(stateStore sm.Store, appBlockRH, dcBlockRH, dcBlockResultsRH int64) error { +func initStateStoreRetainHeights(stateStore sm.Store) error { + 
appBlockRH := int64(0) + dcBlockRH := int64(0) + dcBlockResultsRH := int64(0) if err := stateStore.SaveApplicationRetainHeight(appBlockRH); err != nil { return fmt.Errorf("failed to set initial application block retain height: %w", err) } @@ -282,7 +303,8 @@ func initStateStoreRetainHeights(stateStore sm.Store, appBlockRH, dcBlockRH, dcB return nil } -func fillStore(t *testing.T, height int64, stateStore sm.Store, bs *store.BlockStore, state sm.State, response1 *abci.ResponseFinalizeBlock) { +func fillStore(t *testing.T, height int64, stateStore sm.Store, bs *store.BlockStore, state sm.State, response1 *abci.FinalizeBlockResponse) { + t.Helper() if response1 != nil { for h := int64(1); h <= height; h++ { err := stateStore.SaveFinalizeBlockResponse(h, response1) @@ -293,13 +315,13 @@ func fillStore(t *testing.T, height int64, stateStore sm.Store, bs *store.BlockS require.NoError(t, err) // check to see if the saved response height is the same as the loaded height. assert.Equal(t, lastResponse, response1) - // check if the abci response didnt save in the abciresponses. + // check that the abci response was not saved in the abciresponses. responses, err := stateStore.LoadFinalizeBlockResponse(height) require.NoError(t, err, responses) require.Equal(t, response1, responses) } b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, nil) - partSet, err := b1.MakePartSet(2) + partSet, err := b1.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) bs.SaveBlock(b1, partSet, &types.Commit{Height: state.LastBlockHeight + 1}) } @@ -312,7 +334,7 @@ func TestSaveRetainHeight(t *testing.T) { fillStore(t, height, stateStore, bs, state, nil) pruner := sm.NewPruner(stateStore, bs, blockIndexer, txIndexer, log.TestingLogger()) - err := initStateStoreRetainHeights(stateStore, 0, 0, 0) + err := initStateStoreRetainHeights(stateStore) require.NoError(t, err) // We should not save a height that is 0 @@ -337,7 +359,7 @@ func TestMinRetainHeight(t *testing.T) { defer callbackF() pruner := sm.NewPruner(stateStore, bs, blockIndexer, txIndexer, log.TestingLogger(), sm.WithPrunerCompanionEnabled()) - require.NoError(t, initStateStoreRetainHeights(stateStore, 0, 0, 0)) + require.NoError(t, initStateStoreRetainHeights(stateStore)) minHeight := pruner.FindMinRetainHeight() require.Equal(t, int64(0), minHeight) @@ -362,10 +384,11 @@ func TestABCIResPruningStandalone(t *testing.T) { require.Error(t, err) require.Nil(t, responses) // stub the abciresponses. - response1 := &abci.ResponseFinalizeBlock{ + response1 := &abci.FinalizeBlockResponse{ TxResults: []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, + AppHash: make([]byte, 1), } _, bs, txIndexer, blockIndexer, callbackF, stateStore := makeStateAndBlockStoreAndIndexers() defer callbackF() @@ -455,10 +478,11 @@ func TestFinalizeBlockResponsePruning(t *testing.T) { require.Error(t, err) require.Nil(t, responses) // stub the abciresponses.
- response1 := &abci.ResponseFinalizeBlock{ + response1 := &abci.FinalizeBlockResponse{ TxResults: []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, + AppHash: make([]byte, 1), } state, bs, txIndexer, blockIndexer, callbackF, stateStore := makeStateAndBlockStoreAndIndexers() defer callbackF() @@ -466,7 +490,7 @@ func TestFinalizeBlockResponsePruning(t *testing.T) { state.LastBlockHeight = height - 1 fillStore(t, height, stateStore, bs, state, response1) - err = initStateStoreRetainHeights(stateStore, 0, 0, 0) + err = initStateStoreRetainHeights(stateStore) require.NoError(t, err) obs := newPrunerObserver(1) @@ -503,63 +527,70 @@ } func TestLastFinalizeBlockResponses(t *testing.T) { - // create an empty state store. - t.Run("Not persisting responses", func(t *testing.T) { + t.Run("persisting responses", func(t *testing.T) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) + responses, err := stateStore.LoadFinalizeBlockResponse(1) require.Error(t, err) require.Nil(t, responses) - // stub the abciresponses. - response1 := &abci.ResponseFinalizeBlock{ + + response1 := &abci.FinalizeBlockResponse{ TxResults: []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, + AppHash: make([]byte, 1), } - // create new db and state store and set discard abciresponses to false. + stateDB = dbm.NewMemDB() stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardABCIResponses: false}) height := int64(10) + // save the last abci response. err = stateStore.SaveFinalizeBlockResponse(height, response1) require.NoError(t, err) + // search for the last finalize block response and check that it was saved. lastResponse, err := stateStore.LoadLastFinalizeBlockResponse(height) require.NoError(t, err) - // check to see if the saved response height is the same as the loaded height. + assert.Equal(t, lastResponse, response1) - // use an incorret height to make sure the state store errors. + + // use an incorrect height to make sure the state store errors. _, err = stateStore.LoadLastFinalizeBlockResponse(height + 1) - assert.Error(t, err) - // check if the abci response didnt save in the abciresponses. + require.Error(t, err) + + // check that the abci response was not saved in the abciresponses. responses, err = stateStore.LoadFinalizeBlockResponse(height) require.NoError(t, err, responses) require.Equal(t, response1, responses) }) - t.Run("persisting responses", func(t *testing.T) { + t.Run("not persisting responses", func(t *testing.T) { stateDB := dbm.NewMemDB() height := int64(10) - // stub the second abciresponse. - response2 := &abci.ResponseFinalizeBlock{ + + response2 := &abci.FinalizeBlockResponse{ TxResults: []*abci.ExecTxResult{ {Code: 44, Data: []byte("Hello again"), Log: "????"}, }, } - // create a new statestore with the responses on. + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: true, }) - // save an additional response. + err := stateStore.SaveFinalizeBlockResponse(height+1, response2) require.NoError(t, err) + // check that the response was saved by loading the last response. lastResponse2, err := stateStore.LoadLastFinalizeBlockResponse(height + 1) require.NoError(t, err) - // check to see if the saved response height is the same as the loaded height. + assert.Equal(t, response2, lastResponse2) + // should error as we are no longer saving the response.
_, err = stateStore.LoadFinalizeBlockResponse(height + 1) assert.Equal(t, sm.ErrFinalizeBlockResponsesNotPersisted, err) @@ -607,7 +638,7 @@ func TestFinalizeBlockRecoveryUsingLegacyABCIResponses(t *testing.T) { resp, err := stateStore.LoadLastFinalizeBlockResponse(height) require.NoError(t, err) require.Equal(t, resp.ConsensusParamUpdates, &cp) - require.Equal(t, resp.Events, legacyResp.LegacyAbciResponses.BeginBlock.Events) + require.Equal(t, len(resp.Events), len(legacyResp.LegacyAbciResponses.BeginBlock.Events)) require.Equal(t, resp.TxResults[0], legacyResp.LegacyAbciResponses.DeliverTxs[0]) } diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 4c5384720a6..0b870f8266f 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -4,12 +4,10 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" ) @@ -25,8 +23,8 @@ func TestTxFilter(t *testing.T) { tx types.Tx isErr bool }{ - {types.Tx(cmtrand.Bytes(2155)), false}, - {types.Tx(cmtrand.Bytes(2156)), true}, + {types.Tx(cmtrand.Bytes(2122)), false}, + {types.Tx(cmtrand.Bytes(2123)), true}, {types.Tx(cmtrand.Bytes(3000)), true}, } @@ -41,9 +39,9 @@ func TestTxFilter(t *testing.T) { f := sm.TxPreCheck(state) if tc.isErr { - assert.NotNil(t, f(tc.tx), "#%v", i) + require.Error(t, f(tc.tx), "#%v", i) } else { - assert.Nil(t, f(tc.tx), "#%v", i) + require.NoError(t, f(tc.tx), "#%v", i) } } } diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index 806b02bb268..fa870c9ccb2 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -4,9 +4,8 @@ import ( "context" "errors" - "github.com/cometbft/cometbft/libs/log" - abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" ) @@ -27,9 +26,9 @@ type TxIndexer interface { Get(hash []byte) (*abci.TxResult, error) // Search allows you to query for transactions. - Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) + Search(ctx context.Context, q *query.Query, pagSettings Pagination) ([]*abci.TxResult, int, error) - //Set Logger + // Set Logger SetLogger(l log.Logger) Prune(retainHeight int64) (int64, int64, error) @@ -45,6 +44,16 @@ type Batch struct { Ops []*abci.TxResult } +// Pagination provides pagination information for queries. +// This allows us to use the same TxSearch API for pruning, which must see all relevant data, +// while still limiting public queries through pagination. +type Pagination struct { + OrderDesc bool + IsPaginated bool + Page int + PerPage int +} + // NewBatch creates a new Batch. func NewBatch(n int64) *Batch { return &Batch{ @@ -63,5 +72,5 @@ func (b *Batch) Size() int { return len(b.Ops) } -// ErrorEmptyHash indicates empty hash +// ErrorEmptyHash indicates empty hash.
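Illustrative only (the values are made up): how a caller distinguishes a public, paginated query from the exhaustive form that pruning relies on.

	// A public RPC query: second page, 30 results per page, newest first.
	page2 := txindex.Pagination{IsPaginated: true, Page: 2, PerPage: 30, OrderDesc: true}

	// Pruning passes the zero value: IsPaginated == false returns every match.
	all := txindex.Pagination{}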
var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index 27dd70d3313..f57189b6388 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -32,7 +32,6 @@ func NewIndexerService( eventBus *types.EventBus, terminateOnError bool, ) *IndexerService { - is := &IndexerService{ txIdxr: txIdxr, blockIdxr: blockIdxr, @@ -87,7 +86,7 @@ func (is *IndexerService) OnStart() error { ) if is.terminateOnError { - if err := is.Stop(); err != nil { + if err := is.Stop(); err != nil { //nolint:revive // suppress max-control-nesting linter is.Logger.Error("failed to stop", "err", err) } return diff --git a/state/txindex/indexer_service_test.go b/state/txindex/indexer_service_test.go index b7f41dbe52b..1d8e6344176 100644 --- a/state/txindex/indexer_service_test.go +++ b/state/txindex/indexer_service_test.go @@ -5,12 +5,12 @@ import ( "testing" "time" - db "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/state/indexer" "github.com/stretchr/testify/require" + db "github.com/cometbft/cometbft-db" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/state/indexer" blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/state/txindex/kv" @@ -49,6 +49,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { } func createTestSetup(t *testing.T) (*txindex.IndexerService, *kv.TxIndex, indexer.BlockIndexer, *types.EventBus) { + t.Helper() // event bus eventBus := types.NewEventBus() eventBus.SetLogger(log.TestingLogger()) diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 5fc4ad71e77..a29e3ed3f65 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -12,25 +12,25 @@ import ( "strconv" "strings" - "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/state" - "github.com/cosmos/gogoproto/proto" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" idxutil "github.com/cometbft/cometbft/internal/indexer" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/libs/pubsub/query/syntax" + "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/indexer" "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/types" ) const ( - tagKeySeparator = "/" - eventSeqSeparator = "$es$" + tagKeySeparator = "/" + tagKeySeparatorRune = '/' + eventSeqSeparator = "$es$" + eventSeqSeperatorRuneAt0 = '$' ) var ( @@ -45,9 +45,23 @@ type TxIndex struct { eventSeq int64 log log.Logger + + compact bool + compactionInterval int64 + lastPruned int64 } -func (txi *TxIndex) Prune(retainHeight int64) (int64, int64, error) { +type IndexerOption func(*TxIndex) + +// WithCompaction sets the compaction parameters. +func WithCompaction(compact bool, compactionInterval int64) IndexerOption { + return func(txi *TxIndex) { + txi.compact = compact + txi.compactionInterval = compactionInterval + } +} + +func (txi *TxIndex) Prune(retainHeight int64) (numPruned int64, newRetainHeight int64, err error) { // Returns numPruned, newRetainHeight, err // numPruned: the number of heights pruned. E.g.
if heights {1, 3, 7} were pruned, numPruned == 3 // newRetainHeight: new retain height after pruning @@ -62,8 +76,8 @@ func (txi *TxIndex) Prune(retainHeight int64) (int64, int64, error) { } ctx := context.Background() - results, err := txi.Search(ctx, query.MustCompile( - fmt.Sprintf("tx.height < %d AND tx.height >= %d", retainHeight, lastRetainHeight))) + results, _, err := txi.Search(ctx, query.MustCompile( + fmt.Sprintf("tx.height < %d AND tx.height >= %d", retainHeight, lastRetainHeight)), txindex.Pagination{}) if err != nil { return 0, lastRetainHeight, err } @@ -135,7 +149,14 @@ func (txi *TxIndex) Prune(retainHeight int64) (int64, int64, error) { } numHeightsPersistentlyPruned = numHeightsBatchPruned currentPersistentlyRetainedHeight = currentBatchRetainedHeight - return numHeightsPersistentlyPruned, currentPersistentlyRetainedHeight, nil + + txi.lastPruned += numHeightsBatchPruned + if txi.compact && txi.lastPruned >= txi.compactionInterval { + err = txi.store.Compact(nil, nil) + txi.lastPruned = 0 + } + + return numHeightsPersistentlyPruned, currentPersistentlyRetainedHeight, err } func (txi *TxIndex) SetRetainHeight(retainHeight int64) error { @@ -159,7 +180,7 @@ func (txi *TxIndex) GetRetainHeight() (int64, error) { return height, nil } -func (txi *TxIndex) setIndexerRetainHeight(height int64, batch dbm.Batch) error { +func (*TxIndex) setIndexerRetainHeight(height int64, batch dbm.Batch) error { return batch.Set(LastTxIndexerRetainHeightKey, int64ToBytes(height)) } @@ -176,10 +197,16 @@ func (txi *TxIndex) getIndexerRetainHeight() (int64, error) { } // NewTxIndex creates new KV indexer. -func NewTxIndex(store dbm.DB) *TxIndex { - return &TxIndex{ +func NewTxIndex(store dbm.DB, options ...IndexerOption) *TxIndex { + txIndex := &TxIndex{ store: store, } + + for _, option := range options { + option(txIndex) + } + + return txIndex } func (txi *TxIndex) SetLogger(l log.Logger) { @@ -329,7 +356,7 @@ func (txi *TxIndex) deleteEvents(result *abci.TxResult, batch dbm.Batch) error { continue } - compositeTag := fmt.Sprintf("%s.%s", event.Type, attr.Key) + compositeTag := event.Type + "." + attr.Key if attr.GetIndex() { zeroKey := keyForEvent(compositeTag, attr.Value, result, 0) endKey := keyForEvent(compositeTag, attr.Value, result, math.MaxInt64) @@ -351,7 +378,7 @@ func (txi *TxIndex) deleteEvents(result *abci.TxResult, batch dbm.Batch) error { func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Batch) error { for _, event := range result.Result.Events { - txi.eventSeq = txi.eventSeq + 1 + txi.eventSeq++ // only index events with a non-empty type if len(event.Type) == 0 { continue @@ -363,7 +390,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba } // index if `index: true` is set - compositeTag := fmt.Sprintf("%s.%s", event.Type, attr.Key) + compositeTag := event.Type + "." 
+ attr.Key // ensure event does not conflict with a reserved prefix key if compositeTag == types.TxHashKey || compositeTag == types.TxHeightKey { return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeTag) @@ -380,6 +407,38 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba return nil } +type hashKey struct { + hash string + height int64 +} + +type hashKeySorter struct { + keys []hashKey + by func(t1, t2 *hashKey) bool +} + +func (t *hashKeySorter) Less(i, j int) bool { return t.by(&t.keys[i], &t.keys[j]) } +func (t *hashKeySorter) Len() int { return len(t.keys) } +func (t *hashKeySorter) Swap(i, j int) { t.keys[i], t.keys[j] = t.keys[j], t.keys[i] } + +func byHeightDesc(i, j *hashKey) bool { + hi := i.height + hj := j.height + if hi == hj { + return i.hash > j.hash + } + return hi > hj +} + +func byHeightAsc(i, j *hashKey) bool { + hi := i.height + hj := j.height + if hi == hj { + return i.hash < j.hash + } + return hi < hj +} + // Search performs a search using the given query. // // It breaks the query into conditions (like "tx.height > 5"). For each @@ -391,16 +450,16 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba // // Search will exit early and return any result fetched so far, // when a message is received on the context chan. -func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (txi *TxIndex) Search(ctx context.Context, q *query.Query, pagSettings txindex.Pagination) ([]*abci.TxResult, int, error) { select { case <-ctx.Done(): - return make([]*abci.TxResult, 0), nil + return make([]*abci.TxResult, 0), 0, nil default: } var hashesInitialized bool - filteredHashes := make(map[string][]byte) + filteredHashes := make(map[string]TxInfo) // get a list of conditions (like "tx.height > 5") conditions := q.Syntax() @@ -408,16 +467,16 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul // if there is a hash condition, return the result immediately hash, ok, err := lookForHash(conditions) if err != nil { - return nil, fmt.Errorf("error during searching for a hash in the query: %w", err) + return nil, 0, fmt.Errorf("error during searching for a hash in the query: %w", err) } else if ok { res, err := txi.Get(hash) switch { case err != nil: - return []*abci.TxResult{}, fmt.Errorf("error while retrieving the result: %w", err) + return []*abci.TxResult{}, 0, fmt.Errorf("error while retrieving the result: %w", err) case res == nil: - return []*abci.TxResult{}, nil + return []*abci.TxResult{}, 0, nil default: - return []*abci.TxResult{res}, nil + return []*abci.TxResult{res}, 0, nil } } @@ -442,7 +501,6 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul skipIndexes = append(skipIndexes, rangeIndexes...) for _, qr := range ranges { - // If we have a query range over height and want to still look for // specific event values we do not want to simply return all // transactions in this height range.
We remember the height range info @@ -487,14 +545,60 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - results := make([]*abci.TxResult, 0, len(filteredHashes)) + numResults := len(filteredHashes) + + // Convert map keys to slice for deterministic ordering + hashKeys := make([]hashKey, 0, numResults) + for k, v := range filteredHashes { + hashKeys = append(hashKeys, hashKey{hash: k, height: v.Height}) + } + + var by func(i, j *hashKey) bool + + if pagSettings.OrderDesc { + by = byHeightDesc + } else { + by = byHeightAsc + } + + // Sort by height + sort.Sort(&hashKeySorter{ + keys: hashKeys, + by: by, + }) + + // If paginated, determine which hash keys to return + if pagSettings.IsPaginated { + // Now that we know the total number of results, validate that the page + // requested is within bounds + pagSettings.Page, err = validatePage(&pagSettings.Page, pagSettings.PerPage, numResults) + if err != nil { + return nil, 0, err + } + + // Calculate pagination start and end indices + startIndex := (pagSettings.Page - 1) * pagSettings.PerPage + endIndex := startIndex + pagSettings.PerPage + + // Apply pagination limits + if endIndex > len(hashKeys) { + endIndex = len(hashKeys) + } + if startIndex >= len(hashKeys) { + return []*abci.TxResult{}, 0, nil + } + + hashKeys = hashKeys[startIndex:endIndex] + } + + results := make([]*abci.TxResult, 0, len(hashKeys)) resultMap := make(map[string]struct{}) RESULTS_LOOP: - for _, h := range filteredHashes { - + for _, hKey := range hashKeys { + h := filteredHashes[hKey.hash].TxBytes res, err := txi.Get(h) if err != nil { - return nil, fmt.Errorf("failed to get Tx{%X}: %w", h, err) + return nil, 0, fmt.Errorf("failed to get Tx{%X}: %w", h, err) } hashString := string(h) if _, ok := resultMap[hashString]; !ok { @@ -509,7 +613,7 @@ RESULTS_LOOP: } } - return results, nil + return results, numResults, nil } func lookForHash(conditions []syntax.Condition) (hash []byte, ok bool, err error) { @@ -519,12 +623,26 @@ func lookForHash(conditions []syntax.Condition) (hash []byte, ok bool, err error return decoded, true, err } } - return + return nil, false, nil +} + +type TxInfo struct { + TxBytes []byte + Height int64 } -func (txi *TxIndex) setTmpHashes(tmpHeights map[string][]byte, it dbm.Iterator) { - eventSeq := extractEventSeqFromKey(it.Key()) - tmpHeights[string(it.Value())+eventSeq] = it.Value() +func (*TxIndex) setTmpHashes(tmpHeights map[string]TxInfo, key, value []byte, height int64) { + // value comes from cometbft-db Iterator interface Value() API. + // Therefore, we must make a copy before storing references to it. + valueCp := make([]byte, len(value)) + copy(valueCp, value) + + eventSeq := extractEventSeqFromKey(key) + txInfo := TxInfo{ + TxBytes: valueCp, + Height: height, + } + tmpHeights[string(valueCp)+eventSeq] = txInfo } // match returns all matching txs by hash that meet a given condition and start @@ -532,21 +650,26 @@ func (txi *TxIndex) setTmpHashes(tmpHeights map[string][]byte, it dbm.Iterator) // non-intersecting matches are removed. // // NOTE: filteredHashes may be empty if no previous condition has matched. +// +// Additionally, this method retrieves the height of the hash via the key, +// and adds it to the TxInfo struct, which is then added to the filteredHashes. +// This is done to paginate the results prior to retrieving all the TxResults, +// which is needed for performance reasons. 
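The slicing above is the usual page-window arithmetic. A self-contained sketch of it (a hypothetical generic helper, not the patch's code, and without the out-of-range error that validatePage produces): calling paginate(keys, 3, 20) on 45 keys returns the last 5.

func paginate[T any](keys []T, page, perPage int) []T {
	start := (page - 1) * perPage
	if start >= len(keys) {
		return nil // page beyond the last result
	}
	end := start + perPage
	if end > len(keys) {
		end = len(keys) // the final page may be short
	}
	return keys[start:end]
}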
func (txi *TxIndex) match( ctx context.Context, c syntax.Condition, startKeyBz []byte, - filteredHashes map[string][]byte, + filteredHashes map[string]TxInfo, firstRun bool, heightInfo HeightInfo, -) map[string][]byte { +) map[string]TxInfo { // A previous match was attempted but resulted in no matches, so we return // no matches (assuming AND operand). if !firstRun && len(filteredHashes) == 0 { return filteredHashes } - tmpHashes := make(map[string][]byte) + tmpHashes := make(map[string]TxInfo) switch { case c.Op == syntax.TEq: @@ -558,10 +681,10 @@ func (txi *TxIndex) match( EQ_LOOP: for ; it.Valid(); it.Next() { - // If we have a height range in a query, we need only transactions // for this height - keyHeight, err := extractHeightFromKey(it.Key()) + key := it.Key() + keyHeight, err := extractHeightFromKey(key) if err != nil { txi.log.Error("failure to parse height from key:", err) continue @@ -574,7 +697,7 @@ func (txi *TxIndex) match( if !withinBounds { continue } - txi.setTmpHashes(tmpHashes, it) + txi.setTmpHashes(tmpHashes, key, it.Value(), keyHeight) // Potentially exit early. select { case <-ctx.Done(): @@ -597,7 +720,8 @@ func (txi *TxIndex) match( EXISTS_LOOP: for ; it.Valid(); it.Next() { - keyHeight, err := extractHeightFromKey(it.Key()) + key := it.Key() + keyHeight, err := extractHeightFromKey(key) if err != nil { txi.log.Error("failure to parse height from key:", err) continue @@ -610,7 +734,7 @@ func (txi *TxIndex) match( if !withinBounds { continue } - txi.setTmpHashes(tmpHashes, it) + txi.setTmpHashes(tmpHashes, key, it.Value(), keyHeight) // Potentially exit early. select { @@ -640,7 +764,8 @@ func (txi *TxIndex) match( } if strings.Contains(extractValueFromKey(it.Key()), c.Arg.Value()) { - keyHeight, err := extractHeightFromKey(it.Key()) + key := it.Key() + keyHeight, err := extractHeightFromKey(key) if err != nil { txi.log.Error("failure to parse height from key:", err) continue @@ -653,7 +778,7 @@ func (txi *TxIndex) match( if !withinBounds { continue } - txi.setTmpHashes(tmpHashes, it) + txi.setTmpHashes(tmpHashes, key, it.Value(), keyHeight) } // Potentially exit early. @@ -686,15 +811,18 @@ func (txi *TxIndex) match( REMOVE_LOOP: for k, v := range filteredHashes { tmpHash := tmpHashes[k] - if tmpHash == nil || !bytes.Equal(tmpHash, v) { + if tmpHash.TxBytes == nil || !bytes.Equal(tmpHash.TxBytes, v.TxBytes) { delete(filteredHashes, k) - - // Potentially exit early. - select { - case <-ctx.Done(): - break REMOVE_LOOP - default: - } + } else { + // If there is a match, update the height in filteredHashes + v.Height = tmpHash.Height + filteredHashes[k] = v + } + // Potentially exit early. + select { + case <-ctx.Done(): + break REMOVE_LOOP + default: } } @@ -706,51 +834,58 @@ REMOVE_LOOP: // any non-intersecting matches are removed. // // NOTE: filteredHashes may be empty if no previous condition has matched. +// +// Additionally, this method retrieves the height of the hash via the key, +// and adds it to the TxInfo struct, which is then added to the filteredHashes. +// This is done to paginate the results prior to retrieving all the TxResults, +// which is needed for performance reasons. func (txi *TxIndex) matchRange( ctx context.Context, qr indexer.QueryRange, startKey []byte, - filteredHashes map[string][]byte, + filteredHashes map[string]TxInfo, firstRun bool, heightInfo HeightInfo, -) map[string][]byte { +) map[string]TxInfo { // A previous match was attempted but resulted in no matches, so we return // no matches (assuming AND operand). 
if !firstRun && len(filteredHashes) == 0 { return filteredHashes } - tmpHashes := make(map[string][]byte) + tmpHashes := make(map[string]TxInfo) it, err := dbm.IteratePrefix(txi.store, startKey) if err != nil { panic(err) } defer it.Close() + bigIntValue := new(big.Int) LOOP: for ; it.Valid(); it.Next() { - if !isTagKey(it.Key()) { + key := it.Key() + if !isTagKey(key) { continue } if _, ok := qr.AnyBound().(*big.Float); ok { - v := new(big.Int) - v, ok := v.SetString(extractValueFromKey(it.Key()), 10) + value := extractValueFromKey(key) + v, ok := bigIntValue.SetString(value, 10) var vF *big.Float if !ok { - vF, _, err = big.ParseFloat(extractValueFromKey(it.Key()), 10, 125, big.ToNearestEven) + vF, _, err = big.ParseFloat(value, 10, 125, big.ToNearestEven) if err != nil { continue LOOP } - + } + // Regardless of the query condition, we retrieve the height in order to sort later + keyHeight, err := extractHeightFromKey(it.Key()) + if err != nil { + txi.log.Error("failure to parse height from key:", err) + continue } if qr.Key != types.TxHeightKey { - keyHeight, err := extractHeightFromKey(it.Key()) - if err != nil { - txi.log.Error("failure to parse height from key:", err) - continue - } withinBounds, err := checkHeightConditions(heightInfo, keyHeight) if err != nil { txi.log.Error("failure checking for height bounds:", err) @@ -761,7 +896,6 @@ LOOP: } } var withinBounds bool - var err error if !ok { withinBounds, err = idxutil.CheckBounds(qr, vF) } else { @@ -769,10 +903,8 @@ LOOP: } if err != nil { txi.log.Error("failed to parse bounds:", err) - } else { - if withinBounds { - txi.setTmpHashes(tmpHashes, it) - } + } else if withinBounds { + txi.setTmpHashes(tmpHashes, key, it.Value(), keyHeight) } // XXX: passing time in a ABCI Events is not yet implemented @@ -810,15 +942,19 @@ LOOP: REMOVE_LOOP: for k, v := range filteredHashes { tmpHash := tmpHashes[k] - if tmpHash == nil || !bytes.Equal(tmpHashes[k], v) { + if tmpHash.TxBytes == nil || !bytes.Equal(tmpHash.TxBytes, v.TxBytes) { delete(filteredHashes, k) + } else { + // If there is a match, update the height in filteredHashes + v.Height = tmpHash.Height + filteredHashes[k] = v + } - // Potentially exit early. - select { - case <-ctx.Done(): - break REMOVE_LOOP - default: - } + // Potentially exit early. + select { + case <-ctx.Done(): + break REMOVE_LOOP + default: } } @@ -832,41 +968,83 @@ func isTagKey(key []byte) bool { // tags should 4. Alternatively it should be 3 if the event was not indexed // with the corresponding event sequence. However, some attribute values in // production can contain the tag separator. Therefore, the condition is >= 3. - numTags := strings.Count(string(key), tagKeySeparator) - return numTags >= 3 + numTags := 0 + for i := 0; i < len(key); i++ { + if key[i] == tagKeySeparatorRune { + numTags++ + if numTags >= 3 { + return true + } + } + } + return false } func extractHeightFromKey(key []byte) (int64, error) { - parts := strings.SplitN(string(key), tagKeySeparator, -1) + // the height is the second last element in the key. 
+ // Find the position of the last occurrence of tagKeySeparator + endPos := bytes.LastIndexByte(key, tagKeySeparatorRune) + if endPos == -1 { + return 0, errors.New("separator not found") + } - return strconv.ParseInt(parts[len(parts)-2], 10, 64) + // Find the position of the second last occurrence of tagKeySeparator + startPos := bytes.LastIndexByte(key[:endPos-1], tagKeySeparatorRune) + if startPos == -1 { + return 0, errors.New("second last separator not found") + } + + // Extract the height part of the key + height, err := strconv.ParseInt(string(key[startPos+1:endPos]), 10, 64) + if err != nil { + return 0, err + } + return height, nil } -func extractValueFromKey(key []byte) string { - keyString := string(key) - parts := strings.SplitN(keyString, tagKeySeparator, -1) - partsLen := len(parts) - value := strings.TrimPrefix(keyString, parts[0]+tagKeySeparator) - suffix := "" - suffixLen := 2 +func extractValueFromKey(key []byte) string { + // Find the positions of tagKeySeparator in the byte slice + var indices []int + for i, b := range key { + if b == tagKeySeparatorRune { + indices = append(indices, i) + } + } - for i := 1; i <= suffixLen; i++ { - suffix = tagKeySeparator + parts[partsLen-i] + suffix + // If there are less than 2 occurrences of tagKeySeparator, return an empty string + if len(indices) < 2 { + return "" } - return strings.TrimSuffix(value, suffix) + // Extract the value between the first and second last occurrence of tagKeySeparator + value := key[indices[0]+1 : indices[len(indices)-2]] + + // Trim any leading or trailing whitespace + value = bytes.TrimSpace(value) + + // TODO: Do an unsafe cast to avoid an extra allocation here + return string(value) } func extractEventSeqFromKey(key []byte) string { - parts := strings.SplitN(string(key), tagKeySeparator, -1) + endPos := bytes.LastIndexByte(key, tagKeySeparatorRune) - lastEl := parts[len(parts)-1] + if endPos == -1 { + return "0" + } - if strings.Contains(lastEl, eventSeqSeparator) { - return strings.SplitN(lastEl, eventSeqSeparator, 2)[1] + for ; endPos < len(key); endPos++ { + if key[endPos] == eventSeqSeperatorRuneAt0 { + eventSeq := string(key[endPos:]) + if eventSeq, ok := strings.CutPrefix(eventSeq, eventSeqSeparator); ok { + return eventSeq + } + } } + return "0" } + func keyForEvent(key string, value string, result *abci.TxResult, eventSeq int64) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d%s", key, @@ -896,10 +1074,31 @@ func startKeyForCondition(c syntax.Condition, height int64) []byte { return startKey(c.Tag, c.Arg.Value()) } -func startKey(fields ...interface{}) []byte { +func startKey(fields ...any) []byte { var b bytes.Buffer for _, f := range fields { - b.Write([]byte(fmt.Sprintf("%v", f) + tagKeySeparator)) + b.WriteString(fmt.Sprintf("%v", f) + tagKeySeparator) } return b.Bytes() } + +func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { + if perPage < 1 { + return 1, fmt.Errorf("zero or negative perPage: %d", perPage) + } + + if pagePtr == nil { // no page parameter + return 1, nil + } + + pages := ((totalCount - 1) / perPage) + 1 + if pages == 0 { + pages = 1 // one page (even if it's empty) + } + page := *pagePtr + if page <= 0 || page > pages { + return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) + } + + return page, nil +} diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go index 035948c2ce6..9dc3408c185 100644 --- a/state/txindex/kv/kv_bench_test.go +++ b/state/txindex/kv/kv_bench_test.go @@ -8,66 +8,94 @@ import 
( "testing" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/pubsub/query" + "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/types" ) -func BenchmarkTxSearch(b *testing.B) { +func generateDummyTxs(b *testing.B, indexer *TxIndex, numHeights int, numTxs int) { + b.Helper() + for h := 0; h < numHeights; h++ { + batch := txindex.NewBatch(int64(numTxs)) + + for i := 0; i < numTxs; i++ { + events := []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "address", Value: fmt.Sprintf("address_%d", (h*numTxs+i)%100), Index: true}, + {Key: "amount", Value: "50", Index: true}, + }, + }, + } + + txBz := make([]byte, 8) + if _, err := rand.Read(txBz); err != nil { + b.Errorf("failed produce random bytes: %s", err) + } + + if err := batch.Add(&abci.TxResult{ + Height: int64(h), + Index: uint32(i), + Tx: types.Tx(string(txBz)), + Result: abci.ExecTxResult{ + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Events: events, + }, + }); err != nil { + b.Errorf("failed to index tx: %s", err) + } + } + + if err := indexer.AddBatch(batch); err != nil { + b.Errorf("failed to add batch: %s", err) + } + } +} + +func BenchmarkTxSearchDisk(b *testing.B) { dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test") if err != nil { b.Errorf("failed to create temporary directory: %s", err) } - db, err := dbm.NewGoLevelDB("benchmark_tx_search_test", dbDir) + db, err := dbm.NewPebbleDB("benchmark_tx_search_test", dbDir) if err != nil { b.Errorf("failed to create database: %s", err) } indexer := NewTxIndex(db) + generateDummyTxs(b, indexer, 1000, 20) - for i := 0; i < 35000; i++ { - events := []abci.Event{ - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "address", Value: fmt.Sprintf("address_%d", i%100), Index: true}, - {Key: "amount", Value: "50", Index: true}, - }, - }, - } + txQuery := query.MustCompile(`transfer.address = 'address_43' AND transfer.amount = 50`) - txBz := make([]byte, 8) - if _, err := rand.Read(txBz); err != nil { - b.Errorf("failed produce random bytes: %s", err) - } + b.ResetTimer() - txResult := &abci.TxResult{ - Height: int64(i), - Index: 0, - Tx: types.Tx(string(txBz)), - Result: abci.ExecTxResult{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Events: events, - }, - } + ctx := context.Background() - if err := indexer.Index(txResult); err != nil { - b.Errorf("failed to index tx: %s", err) + for i := 0; i < b.N; i++ { + if _, _, err := indexer.Search(ctx, txQuery, DefaultPagination); err != nil { + b.Errorf("failed to query for txs: %s", err) } } +} - txQuery := query.MustCompile(`transfer.address = 'address_43' AND transfer.amount = 50`) +func BenchmarkTxSearchBigResult(b *testing.B) { + db := dbm.NewMemDB() + indexer := NewTxIndex(db) + generateDummyTxs(b, indexer, 20000, 50) + + txQuery := query.MustCompile(`transfer.amount = 50`) b.ResetTimer() ctx := context.Background() for i := 0; i < b.N; i++ { - if _, err := indexer.Search(ctx, txQuery); err != nil { + if _, _, err := indexer.Search(ctx, txQuery, DefaultPagination); err != nil { b.Errorf("failed to query for txs: %s", err) } } diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 27d340a2795..8fd0b3b3fff 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -7,21 +7,27 @@ import ( "os" "testing" - blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" "github.com/cosmos/gogoproto/proto" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/slices" db "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/pubsub/query" - cmtrand "github.com/cometbft/cometbft/libs/rand" + blockidxkv "github.com/cometbft/cometbft/state/indexer/block/kv" "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/types" ) +var DefaultPagination = txindex.Pagination{ + IsPaginated: true, + Page: 1, + PerPage: 100, + OrderDesc: false, +} + func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) @@ -130,7 +136,7 @@ func TestTxIndex_Prune(t *testing.T) { assert.True(t, isSubset(keys1, keys2)) numPruned, retainedHeight, err := indexer.Prune(2) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, int64(1), numPruned) assert.Equal(t, int64(2), retainedHeight) @@ -217,10 +223,9 @@ func TestTxSearch(t *testing.T) { ctx := context.Background() for _, tc := range testCases { - tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustCompile(tc.q)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, query.MustCompile(tc.q), DefaultPagination) + require.NoError(t, err) assert.Len(t, results, tc.resultsLength) if tc.resultsLength > 0 { @@ -310,10 +315,9 @@ func TestTxSearchEventMatch(t *testing.T) { ctx := context.Background() for _, tc := range testCases { - tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustCompile(tc.q)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, query.MustCompile(tc.q), DefaultPagination) + require.NoError(t, err) assert.Len(t, results, tc.resultsLength) if tc.resultsLength > 0 { @@ -326,7 +330,6 @@ func TestTxSearchEventMatch(t *testing.T) { } func TestTxSearchEventMatchByHeight(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ @@ -386,10 +389,9 @@ func TestTxSearchEventMatchByHeight(t *testing.T) { ctx := context.Background() for _, tc := range testCases { - tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustCompile(tc.q)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, query.MustCompile(tc.q), DefaultPagination) + require.NoError(t, err) assert.Len(t, results, tc.resultsLength) if tc.resultsLength > 0 { @@ -420,8 +422,8 @@ func TestTxSearchWithCancelation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - results, err := indexer.Search(ctx, query.MustCompile(`account.number = 1`)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, query.MustCompile(`account.number = 1`), DefaultPagination) + require.NoError(t, err) assert.Empty(t, results) } @@ -491,9 +493,8 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { ctx := context.Background() for _, tc := range testCases { - tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustCompile(tc.q)) + results, _, err := indexer.Search(ctx, query.MustCompile(tc.q), DefaultPagination) require.NoError(t, err) for _, txr := range results { for _, tr := range tc.results { @@ -576,15 +577,14 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { ctx := context.Background() for _, tc := range testCases { - results, err := indexer.Search(ctx, query.MustCompile(tc.q)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, 
query.MustCompile(tc.q), DefaultPagination) + require.NoError(t, err) n := 0 if tc.found { n = 1 } assert.Len(t, results, n) assert.True(t, !tc.found || proto.Equal(txResult, results[0])) - } } @@ -733,8 +733,8 @@ func TestTxSearchMultipleTxs(t *testing.T) { ctx := context.Background() - results, err := indexer.Search(ctx, query.MustCompile(`account.number >= 1`)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, query.MustCompile(`account.number >= 1`), DefaultPagination) + require.NoError(t, err) require.Len(t, results, 3) } @@ -754,7 +754,8 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } } -func benchmarkTxIndex(txsCount int64, b *testing.B) { +func benchmarkTxIndex(b *testing.B, txsCount int64) { + b.Helper() dir, err := os.MkdirTemp("", "tx_index_db") require.NoError(b, err) defer os.RemoveAll(dir) @@ -822,7 +823,8 @@ func TestBigInt(t *testing.T) { {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloat, Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloat, Index: true}, {Key: "amount", Value: "5", Index: true}}}, {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigIntSmaller, Index: true}}}, - {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigInt, Index: true}, {Key: "amount", Value: "3", Index: true}}}}) + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigInt, Index: true}, {Key: "amount", Value: "3", Index: true}}}, + }) txResult2.Tx = types.Tx("NEW TX") txResult2.Height = 2 @@ -860,10 +862,9 @@ func TestBigInt(t *testing.T) { ctx := context.Background() for _, tc := range testCases { - tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustCompile(tc.q)) - assert.NoError(t, err) + results, _, err := indexer.Search(ctx, query.MustCompile(tc.q), DefaultPagination) + require.NoError(t, err) assert.Len(t, results, tc.resultsLength) if tc.resultsLength > 0 && tc.txRes != nil { assert.True(t, proto.Equal(results[0], tc.txRes)) @@ -872,11 +873,24 @@ func TestBigInt(t *testing.T) { } } -func BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) } -func BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) } -func BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) } -func BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) } -func BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) } +func BenchmarkTxIndex(b *testing.B) { + testCases := []struct { + name string + txsCount int64 + }{ + {"1", 1}, + {"500", 500}, + {"1000", 1000}, + {"2000", 2000}, + {"10000", 10000}, + } + + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + benchmarkTxIndex(b, tc.txsCount) + }) + } +} func isSubset(smaller [][]byte, bigger [][]byte) bool { for _, elem := range smaller { @@ -915,3 +929,29 @@ func setDiff(bigger [][]byte, smaller [][]byte) [][]byte { } return diff } + +func TestExtractEventSeqFromKey(t *testing.T) { + testCases := []struct { + str string + expected string + }{ + { + "0/0/0/1234$es$0", + "0", + }, + { + "0/0/0/1234$es$1234", + "1234", + }, + { + "0/0/0/1234", + "0", + }, + } + + for _, tc := range testCases { + t.Run(tc.expected, func(t *testing.T) { + assert.Equal(t, extractEventSeqFromKey([]byte(tc.str)), tc.expected) + }) + } +} diff --git a/state/txindex/kv/utils.go b/state/txindex/kv/utils.go index 77bab41a8d0..1a0003f2227 100644 --- a/state/txindex/kv/utils.go +++ b/state/txindex/kv/utils.go @@ 
-5,12 +5,13 @@ import ( "fmt" "math/big" + "github.com/google/orderedcode" + abci "github.com/cometbft/cometbft/abci/types" idxutil "github.com/cometbft/cometbft/internal/indexer" cmtsyntax "github.com/cometbft/cometbft/libs/pubsub/query/syntax" "github.com/cometbft/cometbft/state/indexer" "github.com/cometbft/cometbft/types" - "github.com/google/orderedcode" ) type HeightInfo struct { @@ -101,11 +102,10 @@ func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) (bool, error) if err != nil || !withinBounds { return false, err } - } else { - if heightInfo.height != 0 && keyHeight != heightInfo.height { - return false, nil - } + } else if heightInfo.height != 0 && keyHeight != heightInfo.height { + return false, nil } + return true, nil } @@ -128,7 +128,10 @@ func getKeys(indexer *TxIndex) [][]byte { panic(err) } for ; itr.Valid(); itr.Next() { - keys = append(keys, itr.Key()) + key := make([]byte, len(itr.Key())) + copy(key, itr.Key()) + + keys = append(keys, key) } return keys } diff --git a/state/txindex/mocks/tx_indexer.go b/state/txindex/mocks/tx_indexer.go index 5ba235e59aa..a1178323ddf 100644 --- a/state/txindex/mocks/tx_indexer.go +++ b/state/txindex/mocks/tx_indexer.go @@ -12,7 +12,7 @@ import ( txindex "github.com/cometbft/cometbft/state/txindex" - types "github.com/cometbft/cometbft/abci/types" + v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" ) // TxIndexer is an autogenerated mock type for the TxIndexer type @@ -24,6 +24,10 @@ type TxIndexer struct { func (_m *TxIndexer) AddBatch(b *txindex.Batch) error { ret := _m.Called(b) + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + var r0 error if rf, ok := ret.Get(0).(func(*txindex.Batch) error); ok { r0 = rf(b) @@ -35,19 +39,23 @@ func (_m *TxIndexer) AddBatch(b *txindex.Batch) error { } // Get provides a mock function with given fields: hash -func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { +func (_m *TxIndexer) Get(hash []byte) (*v1.TxResult, error) { ret := _m.Called(hash) - var r0 *types.TxResult + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *v1.TxResult var r1 error - if rf, ok := ret.Get(0).(func([]byte) (*types.TxResult, error)); ok { + if rf, ok := ret.Get(0).(func([]byte) (*v1.TxResult, error)); ok { return rf(hash) } - if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok { + if rf, ok := ret.Get(0).(func([]byte) *v1.TxResult); ok { r0 = rf(hash) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.TxResult) + r0 = ret.Get(0).(*v1.TxResult) } } @@ -64,6 +72,10 @@ func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { func (_m *TxIndexer) GetRetainHeight() (int64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetRetainHeight") + } + var r0 int64 var r1 error if rf, ok := ret.Get(0).(func() (int64, error)); ok { @@ -85,11 +97,15 @@ func (_m *TxIndexer) GetRetainHeight() (int64, error) { } // Index provides a mock function with given fields: result -func (_m *TxIndexer) Index(result *types.TxResult) error { +func (_m *TxIndexer) Index(result *v1.TxResult) error { ret := _m.Called(result) + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 error - if rf, ok := ret.Get(0).(func(*types.TxResult) error); ok { + if rf, ok := ret.Get(0).(func(*v1.TxResult) error); ok { r0 = rf(result) } else { r0 = ret.Error(0) @@ -102,6 +118,10 @@ func (_m *TxIndexer) Index(result *types.TxResult) error { func (_m *TxIndexer) Prune(retainHeight int64) (int64, int64, 
error) { ret := _m.Called(retainHeight) + if len(ret) == 0 { + panic("no return value specified for Prune") + } + var r0 int64 var r1 int64 var r2 error @@ -129,30 +149,41 @@ func (_m *TxIndexer) Prune(retainHeight int64) (int64, int64, error) { return r0, r1, r2 } -// Search provides a mock function with given fields: ctx, q -func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxResult, error) { - ret := _m.Called(ctx, q) +// Search provides a mock function with given fields: ctx, q, pagSettings +func (_m *TxIndexer) Search(ctx context.Context, q *query.Query, pagSettings txindex.Pagination) ([]*v1.TxResult, int, error) { + ret := _m.Called(ctx, q, pagSettings) - var r0 []*types.TxResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]*types.TxResult, error)); ok { - return rf(ctx, q) + if len(ret) == 0 { + panic("no return value specified for Search") } - if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok { - r0 = rf(ctx, q) + + var r0 []*v1.TxResult + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *query.Query, txindex.Pagination) ([]*v1.TxResult, int, error)); ok { + return rf(ctx, q, pagSettings) + } + if rf, ok := ret.Get(0).(func(context.Context, *query.Query, txindex.Pagination) []*v1.TxResult); ok { + r0 = rf(ctx, q, pagSettings) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.TxResult) + r0 = ret.Get(0).([]*v1.TxResult) } } - if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { - r1 = rf(ctx, q) + if rf, ok := ret.Get(1).(func(context.Context, *query.Query, txindex.Pagination) int); ok { + r1 = rf(ctx, q, pagSettings) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(int) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, *query.Query, txindex.Pagination) error); ok { + r2 = rf(ctx, q, pagSettings) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // SetLogger provides a mock function with given fields: l @@ -164,6 +195,10 @@ func (_m *TxIndexer) SetLogger(l log.Logger) { func (_m *TxIndexer) SetRetainHeight(retainHeight int64) error { ret := _m.Called(retainHeight) + if len(ret) == 0 { + panic("no return value specified for SetRetainHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(retainHeight) diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go index 42869415529..bcad17ab24e 100644 --- a/state/txindex/null/null.go +++ b/state/txindex/null/null.go @@ -4,9 +4,8 @@ import ( "context" "errors" - "github.com/cometbft/cometbft/libs/log" - abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/state/txindex" ) @@ -16,37 +15,36 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) // TxIndex acts as a /dev/null. type TxIndex struct{} -func (txi *TxIndex) SetRetainHeight(_ int64) error { +func (*TxIndex) SetRetainHeight(_ int64) error { return nil } -func (txi *TxIndex) GetRetainHeight() (int64, error) { +func (*TxIndex) GetRetainHeight() (int64, error) { return 0, nil } -func (txi *TxIndex) Prune(_ int64) (int64, int64, error) { +func (*TxIndex) Prune(_ int64) (numPruned, newRetainHeight int64, err error) { return 0, 0, nil } // Get on a TxIndex is disabled and panics when invoked. 
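The `Search` call now takes a `txindex.Pagination` and returns a count alongside the page of results. A minimal sketch of the new call shape against an in-memory index, mirroring the `DefaultPagination` fixture above; treating the returned `int` as the total number of matches is an assumption here:

```go
package main

import (
	"context"
	"fmt"

	db "github.com/cometbft/cometbft-db"
	"github.com/cometbft/cometbft/libs/pubsub/query"
	"github.com/cometbft/cometbft/state/txindex"
	kv "github.com/cometbft/cometbft/state/txindex/kv"
)

func main() {
	// In-memory index, as used throughout the tests in this diff.
	indexer := kv.NewTxIndex(db.NewMemDB())

	// Page 1, 100 results per page, ascending order.
	pag := txindex.Pagination{
		IsPaginated: true,
		Page:        1,
		PerPage:     100,
		OrderDesc:   false,
	}

	// Search now returns the page of results plus a count.
	results, total, err := indexer.Search(context.Background(),
		query.MustCompile(`transfer.amount = 50`), pag)
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %d of %d results\n", len(results), total)
}
```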
-func (txi *TxIndex) Get(_ []byte) (*abci.TxResult, error) { +func (*TxIndex) Get(_ []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } // AddBatch is a noop and always returns nil. -func (txi *TxIndex) AddBatch(_ *txindex.Batch) error { +func (*TxIndex) AddBatch(_ *txindex.Batch) error { return nil } // Index is a noop and always returns nil. -func (txi *TxIndex) Index(_ *abci.TxResult) error { +func (*TxIndex) Index(_ *abci.TxResult) error { return nil } -func (txi *TxIndex) Search(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { - return []*abci.TxResult{}, nil +func (*TxIndex) Search(_ context.Context, _ *query.Query, _ txindex.Pagination) ([]*abci.TxResult, int, error) { + return []*abci.TxResult{}, 0, nil } -func (txi *TxIndex) SetLogger(log.Logger) { - +func (*TxIndex) SetLogger(log.Logger) { } diff --git a/state/validation.go b/state/validation.go index 19f89400661..4b1bb92232c 100644 --- a/state/validation.go +++ b/state/validation.go @@ -4,12 +4,14 @@ import ( "bytes" "errors" "fmt" + "time" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" ) -//----------------------------------------------------- +// ----------------------------------------------------- // Validate block func validateBlock(state State, block *types.Block) error { @@ -52,7 +54,7 @@ func validateBlock(state State, block *types.Block) error { // Validate app info if !bytes.Equal(block.AppHash, state.AppHash) { - return fmt.Errorf("wrong Block.Header.AppHash. Expected %X, got %v", + return fmt.Errorf("wrong Block.Header.AppHash. Expected %X, got %v. Check ABCI app for non-determinism", state.AppHash, block.AppHash, ) @@ -111,6 +113,10 @@ func validateBlock(state State, block *types.Block) error { } // Validate block Time + if block.Time != cmttime.Canonical(block.Time) { + return fmt.Errorf("block time %v is not canonical", block.Time) + } + switch { case block.Height > state.InitialHeight: if !block.Time.After(state.LastBlockTime) { @@ -119,18 +125,20 @@ func validateBlock(state State, block *types.Block) error { state.LastBlockTime, ) } - medianTime := MedianTime(block.LastCommit, state.LastValidators) - if !block.Time.Equal(medianTime) { - return fmt.Errorf("invalid block time. Expected %v, got %v", - medianTime, - block.Time, - ) + if !state.ConsensusParams.Feature.PbtsEnabled(block.Height) { + medianTime := block.LastCommit.MedianTime(state.LastValidators) + if !block.Time.Equal(medianTime) { + return fmt.Errorf("invalid block time. 
Expected %v, got %v", + medianTime.Format(time.RFC3339Nano), + block.Time.Format(time.RFC3339Nano), + ) + } } case block.Height == state.InitialHeight: genesisTime := state.LastBlockTime - if !block.Time.Equal(genesisTime) { - return fmt.Errorf("block time %v is not equal to genesis time %v", + if block.Time.Before(genesisTime) { + return fmt.Errorf("block time %v is before genesis time %v", block.Time, genesisTime, ) diff --git a/state/validation_test.go b/state/validation_test.go index 6535f48f876..71ad6c1ee93 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -4,25 +4,22 @@ import ( "testing" "time" - cmterrors "github.com/cometbft/cometbft/types/errors" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" mpmocks "github.com/cometbft/cometbft/mempool/mocks" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/mocks" "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -33,13 +30,18 @@ func TestValidateBlockHeader(t *testing.T) { require.NoError(t, proxyApp.Start()) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(3, 1) + cp := test.ConsensusParams() + pbtsEnableHeight := validationTestsStopHeight / 2 + cp.Feature.PbtsEnableHeight = pbtsEnableHeight + + state, stateDB, privVals := makeStateWithParams(3, 1, cp, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -78,7 +80,14 @@ func TestValidateBlockHeader(t *testing.T) { {"Version wrong2", func(block *types.Block) { block.Version = wrongVersion2 }}, {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, {"Height wrong", func(block *types.Block) { block.Height += 10 }}, - {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 1) }}, + {"Time non-monotonic", func(block *types.Block) { block.Time = block.Time.Add(-2 * time.Second) }}, + {"Time wrong", func(block *types.Block) { + if block.Height > 1 && block.Height < pbtsEnableHeight { + block.Time = block.Time.Add(time.Millisecond) // BFT Time + } else { + block.Time = time.Now() // not canonical + } + }}, {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartSetHeader.Total += 10 }}, {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, @@ -130,13 +139,14 @@ func TestValidateBlockCommit(t *testing.T) { require.NoError(t, proxyApp.Start()) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(1, 1) + state, stateDB, privVals := makeState(1, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", 
mock.Anything, @@ -178,7 +188,7 @@ func TestValidateBlockCommit(t *testing.T) { 0, 2, state.LastBlockID, - time.Now(), + cmttime.Now(), ) wrongHeightCommit := &types.Commit{ Height: wrongHeightVote.Height, @@ -232,9 +242,9 @@ func TestValidateBlockCommit(t *testing.T) { idx, height, 0, - cmtproto.PrecommitType, + types.PrecommitType, blockID, - time.Now(), + cmttime.Now(), ) bpvPubKey, err := badPrivVal.GetPubKey() @@ -246,16 +256,16 @@ func TestValidateBlockCommit(t *testing.T) { Height: height, Round: 0, Timestamp: cmttime.Now(), - Type: cmtproto.PrecommitType, + Type: types.PrecommitType, BlockID: blockID, } g := goodVote.ToProto() b := badVote.ToProto() - err = badPrivVal.SignVote(chainID, g) + err = badPrivVal.SignVote(chainID, g, false) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote(chainID, b) + err = badPrivVal.SignVote(chainID, b, false) require.NoError(t, err, "height %d", height) goodVote.Signature, badVote.Signature = g.Signature, b.Signature @@ -274,7 +284,7 @@ func TestValidateBlockEvidence(t *testing.T) { require.NoError(t, proxyApp.Start()) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(4, 1) + state, stateDB, privVals := makeState(4, 1, chainID) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -289,6 +299,7 @@ func TestValidateBlockEvidence(t *testing.T) { mp := &mpmocks.Mempool{} mp.On("Lock").Return() mp.On("Unlock").Return() + mp.On("PreUpdate").Return() mp.On("FlushAppConn", mock.Anything).Return(nil) mp.On("Update", mock.Anything, @@ -322,7 +333,7 @@ func TestValidateBlockEvidence(t *testing.T) { var currentBytes int64 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { - newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), + newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, cmttime.Now(), privVals[proposerAddr.String()], chainID) require.NoError(t, err) evidence = append(evidence, newEv) @@ -331,7 +342,7 @@ func TestValidateBlockEvidence(t *testing.T) { block := state.MakeBlock(height, test.MakeNTxs(height, 10), lastCommit, evidence, proposerAddr) err := blockExec.ValidateBlock(state, block) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here _, ok := err.(*types.ErrEvidenceOverflow) require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d but got %v", height, err) } @@ -366,6 +377,5 @@ func TestValidateBlockEvidence(t *testing.T) { ) require.NoError(t, err, "height %d", height) lastCommit = lastExtCommit.ToCommit() - } } diff --git a/statesync/chunks.go b/statesync/chunks.go index d392b708a1d..915816b0c1d 100644 --- a/statesync/chunks.go +++ b/statesync/chunks.go @@ -9,7 +9,7 @@ import ( "time" cmtsync "github.com/cometbft/cometbft/libs/sync" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" ) // errDone is returned by chunkQueue.Next() when all chunks have been returned. @@ -21,7 +21,7 @@ type chunk struct { Format uint32 Index uint32 Chunk []byte - Sender p2p.ID + Sender nodekey.ID } // chunkQueue manages chunks for a state sync process, ordering them if requested. 
It acts as an @@ -32,7 +32,7 @@ type chunkQueue struct { snapshot *snapshot // if this is nil, the queue has been closed dir string // temp dir for on-disk chunk storage chunkFiles map[uint32]string // path to temporary chunk file - chunkSenders map[uint32]p2p.ID // the peer who sent the given chunk + chunkSenders map[uint32]nodekey.ID // the peer who sent the given chunk chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate() chunkReturned map[uint32]bool // chunks returned via Next() waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival @@ -52,7 +52,7 @@ func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { snapshot: snapshot, dir: dir, chunkFiles: make(map[uint32]string, snapshot.Chunks), - chunkSenders: make(map[uint32]p2p.ID, snapshot.Chunks), + chunkSenders: make(map[uint32]nodekey.ID, snapshot.Chunks), chunkAllocated: make(map[uint32]bool, snapshot.Chunks), chunkReturned: make(map[uint32]bool, snapshot.Chunks), waiters: make(map[uint32][]chan<- uint32), @@ -83,7 +83,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { } path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := os.WriteFile(path, chunk.Chunk, 0600) + err := os.WriteFile(path, chunk.Chunk, 0o600) if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } @@ -171,7 +171,7 @@ func (q *chunkQueue) discard(index uint32) error { // DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to // discard already returned chunks, this can be done via Discard(). -func (q *chunkQueue) DiscardSender(peerID p2p.ID) error { +func (q *chunkQueue) DiscardSender(peerID nodekey.ID) error { q.Lock() defer q.Unlock() @@ -188,7 +188,7 @@ func (q *chunkQueue) DiscardSender(peerID p2p.ID) error { } // GetSender returns the sender of the chunk with the given index, or empty if not found. 
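`chunkQueue` is unexported, so its sender bookkeeping reads most naturally as an in-package sketch. Assuming it lives in package `statesync`, the snippet below mirrors the `Add`/`GetSender`/`DiscardSender` flow exercised by the tests further down: each chunk is stored with the `nodekey.ID` of the peer that delivered it, and all unreturned chunks from a misbehaving sender can be dropped in one call. The helper name is hypothetical:

```go
package statesync

import (
	"fmt"

	"github.com/cometbft/cometbft/p2p/nodekey"
)

// discardBadSender sketches the nodekey.ID-based sender bookkeeping
// of chunkQueue shown in this diff.
func discardBadSender(q *chunkQueue) error {
	// Record a chunk along with the peer that sent it.
	added, err := q.Add(&chunk{
		Height: 3,
		Format: 1,
		Index:  0,
		Chunk:  []byte{1},
		Sender: nodekey.ID("peer-a"),
	})
	if err != nil {
		return err
	}
	fmt.Println("chunk stored:", added)

	// The sender is retrievable by chunk index...
	fmt.Println("sender of chunk 0:", q.GetSender(0))

	// ...and every unreturned chunk from that peer can be discarded.
	return q.DiscardSender(nodekey.ID("peer-a"))
}
```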
-func (q *chunkQueue) GetSender(index uint32) p2p.ID { +func (q *chunkQueue) GetSender(index uint32) nodekey.ID { q.Lock() defer q.Unlock() return q.chunkSenders[index] diff --git a/statesync/chunks_test.go b/statesync/chunks_test.go index 5c893c5233f..c117d77df7c 100644 --- a/statesync/chunks_test.go +++ b/statesync/chunks_test.go @@ -7,10 +7,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" ) func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { + t.Helper() snapshot := &snapshot{ Height: 3, Format: 1, @@ -50,7 +51,7 @@ func TestNewChunkQueue_TempDir(t *testing.T) { files, err = os.ReadDir(dir) require.NoError(t, err) - assert.Len(t, files, 0) + assert.Empty(t, files) } func TestChunkQueue(t *testing.T) { @@ -127,7 +128,6 @@ func TestChunkQueue_Add_ChunkErrors(t *testing.T) { "invalid index": {&chunk{Height: 3, Format: 1, Index: 5, Chunk: []byte{3, 1, 0}}}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() @@ -273,7 +273,7 @@ func TestChunkQueue_DiscardSender(t *testing.T) { defer teardown() // Allocate and add all chunks to the queue - senders := []p2p.ID{"a", "b", "c"} + senders := []nodekey.ID{"a", "b", "c"} for i := uint32(0); i < queue.Size(); i++ { _, err := queue.Allocate() require.NoError(t, err) @@ -314,9 +314,9 @@ func TestChunkQueue_GetSender(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: p2p.ID("a")}) + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: nodekey.ID("a")}) require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: p2p.ID("b")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: nodekey.ID("b")}) require.NoError(t, err) assert.EqualValues(t, "a", queue.GetSender(0)) @@ -350,7 +350,7 @@ func TestChunkQueue_Next(t *testing.T) { }() assert.Empty(t, chNext) - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}) + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: nodekey.ID("b")}) require.NoError(t, err) select { case <-chNext: @@ -358,17 +358,17 @@ func TestChunkQueue_Next(t *testing.T) { default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: nodekey.ID("a")}) require.NoError(t, err) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}, + &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: nodekey.ID("a")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}, + &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: nodekey.ID("b")}, <-chNext) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: nodekey.ID("e")}) require.NoError(t, err) select { case <-chNext: @@ -376,19 +376,19 @@ func TestChunkQueue_Next(t *testing.T) { default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: 
[]byte{3, 1, 2}, Sender: p2p.ID("c")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: nodekey.ID("c")}) require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: nodekey.ID("d")}) require.NoError(t, err) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}, + &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: nodekey.ID("c")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}, + &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: nodekey.ID("d")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}, + &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: nodekey.ID("e")}, <-chNext) _, ok := <-chNext diff --git a/statesync/messages.go b/statesync/messages.go index eff6646106d..e9287f166bb 100644 --- a/statesync/messages.go +++ b/statesync/messages.go @@ -6,13 +6,13 @@ import ( "github.com/cosmos/gogoproto/proto" - ssproto "github.com/cometbft/cometbft/proto/tendermint/statesync" + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" ) const ( - // snapshotMsgSize is the maximum size of a snapshotResponseMessage + // snapshotMsgSize is the maximum size of a snapshotResponseMessage. snapshotMsgSize = int(4e6) - // chunkMsgSize is the maximum size of a chunkResponseMessage + // chunkMsgSize is the maximum size of a chunkResponseMessage. chunkMsgSize = int(16e6) ) diff --git a/statesync/messages_test.go b/statesync/messages_test.go index 97a888aba01..d5a680bcb82 100644 --- a/statesync/messages_test.go +++ b/statesync/messages_test.go @@ -7,9 +7,9 @@ import ( "github.com/cosmos/gogoproto/proto" "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/p2p" - ssproto "github.com/cometbft/cometbft/proto/tendermint/statesync" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/types" ) func TestValidateMsg(t *testing.T) { @@ -27,52 +27,65 @@ func TestValidateMsg(t *testing.T) { "ChunkResponse valid": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}}, - true}, + true, + }, "ChunkResponse 0 height": { &ssproto.ChunkResponse{Height: 0, Format: 1, Index: 1, Chunk: []byte{1}}, - false}, + false, + }, "ChunkResponse 0 format": { &ssproto.ChunkResponse{Height: 1, Format: 0, Index: 1, Chunk: []byte{1}}, - true}, + true, + }, "ChunkResponse 0 chunk": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}, - true}, + true, + }, "ChunkResponse empty body": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{}}, - true}, + true, + }, "ChunkResponse nil body": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}, - false}, + false, + }, "ChunkResponse missing": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, - true}, + true, + }, "ChunkResponse missing with empty": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: []byte{}}, - true}, + true, + }, "ChunkResponse missing with body": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true, 
Chunk: []byte{1}}, - false}, + false, + }, "SnapshotsRequest valid": {&ssproto.SnapshotsRequest{}, true}, "SnapshotsResponse valid": { &ssproto.SnapshotsResponse{Height: 1, Format: 1, Chunks: 2, Hash: []byte{1}}, - true}, + true, + }, "SnapshotsResponse 0 height": { &ssproto.SnapshotsResponse{Height: 0, Format: 1, Chunks: 2, Hash: []byte{1}}, - false}, + false, + }, "SnapshotsResponse 0 format": { &ssproto.SnapshotsResponse{Height: 1, Format: 0, Chunks: 2, Hash: []byte{1}}, - true}, + true, + }, "SnapshotsResponse 0 chunks": { &ssproto.SnapshotsResponse{Height: 1, Format: 1, Hash: []byte{1}}, - false}, + false, + }, "SnapshotsResponse no hash": { &ssproto.SnapshotsResponse{Height: 1, Format: 1, Chunks: 2, Hash: []byte{}}, - false}, + false, + }, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { err := validateMsg(tc.msg) if tc.valid { @@ -86,7 +99,6 @@ func TestValidateMsg(t *testing.T) { //nolint:lll // ignore line length func TestStateSyncVectors(t *testing.T) { - testCases := []struct { testName string msg proto.Message @@ -99,8 +111,7 @@ func TestStateSyncVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - w := tc.msg.(p2p.Wrapper).Wrap() + w := tc.msg.(types.Wrapper).Wrap() bz, err := proto.Marshal(w) require.NoError(t, err) diff --git a/statesync/metrics.gen.go b/statesync/metrics.gen.go index 1941c9270e3..a2c62b1625c 100644 --- a/statesync/metrics.gen.go +++ b/statesync/metrics.gen.go @@ -3,8 +3,8 @@ package statesync import ( - "github.com/go-kit/kit/metrics/discard" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) diff --git a/statesync/metrics.go b/statesync/metrics.go index 9a4d7fcefab..5b3177f19d1 100644 --- a/statesync/metrics.go +++ b/statesync/metrics.go @@ -1,7 +1,7 @@ package statesync import ( - "github.com/go-kit/kit/metrics" + "github.com/cometbft/cometbft/libs/metrics" ) const ( diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go index 9717452d605..d45d5ce01f1 100644 --- a/statesync/mocks/state_provider.go +++ b/statesync/mocks/state_provider.go @@ -20,6 +20,10 @@ type StateProvider struct { func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for AppHash") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]byte, error)); ok { @@ -46,6 +50,10 @@ func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, er func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *types.Commit var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*types.Commit, error)); ok { @@ -72,6 +80,10 @@ func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Comm func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for State") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (state.State, error)); ok { diff --git a/statesync/reactor.go b/statesync/reactor.go index a7374a29182..974f49852d0 100644 --- 
a/statesync/reactor.go +++ b/statesync/reactor.go @@ -1,3 +1,5 @@ +// Package statesync may be internalized (made private) in future releases. +// XXX Deprecated. package statesync import ( @@ -7,19 +9,20 @@ import ( "time" abci "github.com/cometbft/cometbft/abci/types" + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" "github.com/cometbft/cometbft/config" cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" - ssproto "github.com/cometbft/cometbft/proto/tendermint/statesync" + tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" ) const ( - // SnapshotChannel exchanges snapshot metadata + // SnapshotChannel exchanges snapshot metadata. SnapshotChannel = byte(0x60) - // ChunkChannel exchanges chunk contents + // ChunkChannel exchanges chunk contents. ChunkChannel = byte(0x61) // recentSnapshots is the number of recent snapshots to send and receive per peer. recentSnapshots = 10 @@ -60,28 +63,28 @@ func NewReactor( return r } -// GetChannels implements p2p.Reactor. -func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { +// StreamDescriptors implements p2p.Reactor. +func (*Reactor) StreamDescriptors() []p2p.StreamDescriptor { + return []p2p.StreamDescriptor{ + &tcpconn.ChannelDescriptor{ ID: SnapshotChannel, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: snapshotMsgSize, - MessageType: &ssproto.Message{}, + MessageTypeI: &ssproto.Message{}, }, - { + &tcpconn.ChannelDescriptor{ ID: ChunkChannel, Priority: 3, SendQueueCapacity: 10, RecvMessageCapacity: chunkMsgSize, - MessageType: &ssproto.Message{}, + MessageTypeI: &ssproto.Message{}, }, } } // OnStart implements p2p.Reactor. -func (r *Reactor) OnStart() error { +func (*Reactor) OnStart() error { return nil } @@ -95,7 +98,7 @@ func (r *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer implements p2p.Reactor. -func (r *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { +func (r *Reactor) RemovePeer(peer p2p.Peer, _ any) { r.mtx.RLock() defer r.mtx.RUnlock() if r.syncer != nil { @@ -171,7 +174,7 @@ func (r *Reactor) Receive(e p2p.Envelope) { case *ssproto.ChunkRequest: r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "peer", e.Src.ID()) - resp, err := r.conn.LoadSnapshotChunk(context.TODO(), &abci.RequestLoadSnapshotChunk{ + resp, err := r.conn.LoadSnapshotChunk(context.TODO(), &abci.LoadSnapshotChunkRequest{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, @@ -225,9 +228,9 @@ func (r *Reactor) Receive(e p2p.Envelope) { } } -// recentSnapshots fetches the n most recent snapshots from the app +// recentSnapshots fetches the n most recent snapshots from the app. func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { - resp, err := r.conn.ListSnapshots(context.TODO(), &abci.RequestListSnapshots{}) + resp, err := r.conn.ListSnapshots(context.TODO(), &abci.ListSnapshotsRequest{}) if err != nil { return nil, err } @@ -261,7 +264,7 @@ func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { // Sync runs a state sync, returning the new state and last commit at the snapshot height. // The caller must store the state and commit in the state database and block store. 
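Reactors ported to the new API follow the same shape as `StreamDescriptors` above: return `p2p.StreamDescriptor` values backed by `tcpconn.ChannelDescriptor`, with the protobuf message type carried in `MessageTypeI`. A sketch with a hypothetical channel ID and capacities:

```go
package statesync

import (
	ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1"
	"github.com/cometbft/cometbft/p2p"
	tcpconn "github.com/cometbft/cometbft/p2p/transport/tcp/conn"
)

// exampleStreamDescriptors sketches the new descriptor shape; the
// channel ID 0x62 and the capacities are hypothetical values.
func exampleStreamDescriptors() []p2p.StreamDescriptor {
	return []p2p.StreamDescriptor{
		&tcpconn.ChannelDescriptor{
			ID:                  byte(0x62),
			Priority:            1,
			SendQueueCapacity:   10,
			RecvMessageCapacity: 1 << 20,
			MessageTypeI:        &ssproto.Message{},
		},
	}
}
```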
-func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) (sm.State, *types.Commit, error) { +func (r *Reactor) Sync(stateProvider StateProvider, maxDiscoveryTime time.Duration) (sm.State, *types.Commit, error) { r.mtx.Lock() if r.syncer != nil { r.mtx.Unlock() @@ -283,7 +286,8 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) hook() - state, commit, err := r.syncer.SyncAny(discoveryTime, hook) + const discoveryTime = 5 * time.Second + state, commit, err := r.syncer.SyncAny(discoveryTime, maxDiscoveryTime, hook) r.mtx.Lock() r.syncer = nil diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go index a057cb69781..35995a9aee4 100644 --- a/statesync/reactor_test.go +++ b/statesync/reactor_test.go @@ -10,10 +10,11 @@ import ( "github.com/stretchr/testify/require" abci "github.com/cometbft/cometbft/abci/types" + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/p2p" p2pmocks "github.com/cometbft/cometbft/p2p/mocks" - ssproto "github.com/cometbft/cometbft/proto/tendermint/statesync" + "github.com/cometbft/cometbft/p2p/nodekey" proxymocks "github.com/cometbft/cometbft/proxy/mocks" ) @@ -41,22 +42,21 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { // Mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("LoadSnapshotChunk", mock.Anything, &abci.RequestLoadSnapshotChunk{ + conn.On("LoadSnapshotChunk", mock.Anything, &abci.LoadSnapshotChunkRequest{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, - }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) + }).Return(&abci.LoadSnapshotChunkResponse{Chunk: tc.chunk}, nil) // Mock peer to store response, if found peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + peer.On("ID").Return(nodekey.ID("id")) var response *ssproto.ChunkResponse if tc.expectResponse != nil { - peer.On("Send", mock.MatchedBy(func(i interface{}) bool { + peer.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) return ok && e.ChannelID == ChunkChannel })).Run(func(args mock.Arguments) { @@ -133,11 +133,10 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { // Mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("ListSnapshots", mock.Anything, &abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + conn.On("ListSnapshots", mock.Anything, &abci.ListSnapshotsRequest{}).Return(&abci.ListSnapshotsResponse{ Snapshots: tc.snapshots, }, nil) @@ -145,8 +144,8 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { responses := []*ssproto.SnapshotsResponse{} peer := &p2pmocks.Peer{} if len(tc.expectResponses) > 0 { - peer.On("ID").Return(p2p.ID("id")) - peer.On("Send", mock.MatchedBy(func(i interface{}) bool { + peer.On("ID").Return(nodekey.ID("id")) + peer.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) return ok && e.ChannelID == SnapshotChannel })).Run(func(args mock.Arguments) { diff --git a/statesync/snapshots.go b/statesync/snapshots.go index 5d4f9fe4d34..cd9e40c838a 100644 --- a/statesync/snapshots.go +++ b/statesync/snapshots.go @@ -8,6 +8,7 @@ import ( cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" + 
"github.com/cometbft/cometbft/p2p/nodekey" ) // snapshotKey is a snapshot key used for lookups. @@ -42,35 +43,35 @@ func (s *snapshot) Key() snapshotKey { type snapshotPool struct { cmtsync.Mutex snapshots map[snapshotKey]*snapshot - snapshotPeers map[snapshotKey]map[p2p.ID]p2p.Peer + snapshotPeers map[snapshotKey]map[nodekey.ID]p2p.Peer // indexes for fast searches formatIndex map[uint32]map[snapshotKey]bool heightIndex map[uint64]map[snapshotKey]bool - peerIndex map[p2p.ID]map[snapshotKey]bool + peerIndex map[nodekey.ID]map[snapshotKey]bool - // blacklists for rejected items - formatBlacklist map[uint32]bool - peerBlacklist map[p2p.ID]bool - snapshotBlacklist map[snapshotKey]bool + // denylists for rejected items + formatRejectlist map[uint32]bool + peerRejectlist map[nodekey.ID]bool + snapshotRejectlist map[snapshotKey]bool } -// newSnapshotPool creates a new snapshot pool. The state source is used for +// newSnapshotPool creates a new snapshot pool. The state source is used for. func newSnapshotPool() *snapshotPool { return &snapshotPool{ - snapshots: make(map[snapshotKey]*snapshot), - snapshotPeers: make(map[snapshotKey]map[p2p.ID]p2p.Peer), - formatIndex: make(map[uint32]map[snapshotKey]bool), - heightIndex: make(map[uint64]map[snapshotKey]bool), - peerIndex: make(map[p2p.ID]map[snapshotKey]bool), - formatBlacklist: make(map[uint32]bool), - peerBlacklist: make(map[p2p.ID]bool), - snapshotBlacklist: make(map[snapshotKey]bool), + snapshots: make(map[snapshotKey]*snapshot), + snapshotPeers: make(map[snapshotKey]map[nodekey.ID]p2p.Peer), + formatIndex: make(map[uint32]map[snapshotKey]bool), + heightIndex: make(map[uint64]map[snapshotKey]bool), + peerIndex: make(map[nodekey.ID]map[snapshotKey]bool), + formatRejectlist: make(map[uint32]bool), + peerRejectlist: make(map[nodekey.ID]bool), + snapshotRejectlist: make(map[snapshotKey]bool), } } // Add adds a snapshot to the pool, unless the peer has already sent recentSnapshots snapshots. It -// returns true if this was a new, non-blacklisted snapshot. The snapshot height is verified using +// returns true if this was a new, non-rejected snapshot. The snapshot height is verified using // the light client, and the expected app hash is set for the snapshot. func (p *snapshotPool) Add(peer p2p.Peer, snapshot *snapshot) (bool, error) { key := snapshot.Key() @@ -79,18 +80,18 @@ func (p *snapshotPool) Add(peer p2p.Peer, snapshot *snapshot) (bool, error) { defer p.Unlock() switch { - case p.formatBlacklist[snapshot.Format]: + case p.formatRejectlist[snapshot.Format]: return false, nil - case p.peerBlacklist[peer.ID()]: + case p.peerRejectlist[peer.ID()]: return false, nil - case p.snapshotBlacklist[key]: + case p.snapshotRejectlist[key]: return false, nil case len(p.peerIndex[peer.ID()]) >= recentSnapshots: return false, nil } if p.snapshotPeers[key] == nil { - p.snapshotPeers[key] = make(map[p2p.ID]p2p.Peer) + p.snapshotPeers[key] = make(map[nodekey.ID]p2p.Peer) } p.snapshotPeers[key][peer.ID()] = peer @@ -193,7 +194,7 @@ func (p *snapshotPool) Reject(snapshot *snapshot) { p.Lock() defer p.Unlock() - p.snapshotBlacklist[key] = true + p.snapshotRejectlist[key] = true p.removeSnapshot(key) } @@ -202,14 +203,14 @@ func (p *snapshotPool) RejectFormat(format uint32) { p.Lock() defer p.Unlock() - p.formatBlacklist[format] = true + p.formatRejectlist[format] = true for key := range p.formatIndex[format] { p.removeSnapshot(key) } } // RejectPeer rejects a peer. It will never be used again. 
-func (p *snapshotPool) RejectPeer(peerID p2p.ID) { +func (p *snapshotPool) RejectPeer(peerID nodekey.ID) { if peerID == "" { return } @@ -217,18 +218,18 @@ func (p *snapshotPool) RejectPeer(peerID p2p.ID) { defer p.Unlock() p.removePeer(peerID) - p.peerBlacklist[peerID] = true + p.peerRejectlist[peerID] = true } // RemovePeer removes a peer from the pool, and any snapshots that no longer have peers. -func (p *snapshotPool) RemovePeer(peerID p2p.ID) { +func (p *snapshotPool) RemovePeer(peerID nodekey.ID) { p.Lock() defer p.Unlock() p.removePeer(peerID) } // removePeer removes a peer. The caller must hold the mutex lock. -func (p *snapshotPool) removePeer(peerID p2p.ID) { +func (p *snapshotPool) removePeer(peerID nodekey.ID) { for key := range p.peerIndex[peerID] { delete(p.snapshotPeers[key], peerID) if len(p.snapshotPeers[key]) == 0 { diff --git a/statesync/snapshots_test.go b/statesync/snapshots_test.go index d2c52976662..8cdaeffd1f6 100644 --- a/statesync/snapshots_test.go +++ b/statesync/snapshots_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/cometbft/cometbft/p2p" p2pmocks "github.com/cometbft/cometbft/p2p/mocks" + "github.com/cometbft/cometbft/p2p/nodekey" ) func TestSnapshot_Key(t *testing.T) { @@ -21,7 +21,6 @@ func TestSnapshot_Key(t *testing.T) { "no metadata": {func(s *snapshot) { s.Metadata = nil }}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { s := snapshot{ Height: 3, @@ -40,7 +39,7 @@ func TestSnapshot_Key(t *testing.T) { func TestSnapshotPool_Add(t *testing.T) { peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + peer.On("ID").Return(nodekey.ID("id")) // Adding to the pool should work pool := newSnapshotPool() @@ -55,7 +54,7 @@ func TestSnapshotPool_Add(t *testing.T) { // Adding again from a different peer should return false otherPeer := &p2pmocks.Peer{} - otherPeer.On("ID").Return(p2p.ID("other")) + otherPeer.On("ID").Return(nodekey.ID("other")) added, err = pool.Add(peer, &snapshot{ Height: 1, Format: 1, @@ -75,9 +74,9 @@ func TestSnapshotPool_GetPeer(t *testing.T) { s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) + peerA.On("ID").Return(nodekey.ID("a")) peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) + peerB.On("ID").Return(nodekey.ID("b")) _, err := pool.Add(peerA, s) require.NoError(t, err) @@ -92,9 +91,9 @@ func TestSnapshotPool_GetPeer(t *testing.T) { for !seenA || !seenB { peer := pool.GetPeer(s) switch peer.ID() { - case p2p.ID("a"): + case nodekey.ID("a"): seenA = true - case p2p.ID("b"): + case nodekey.ID("b"): seenB = true } } @@ -109,9 +108,9 @@ func TestSnapshotPool_GetPeers(t *testing.T) { s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) + peerA.On("ID").Return(nodekey.ID("a")) peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) + peerB.On("ID").Return(nodekey.ID("b")) _, err := pool.Add(peerA, s) require.NoError(t, err) @@ -147,7 +146,7 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { for i := len(expectSnapshots) - 1; i >= 0; i-- { for _, peerID := range expectSnapshots[i].peers { peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID(peerID)) + peer.On("ID").Return(nodekey.ID(peerID)) _, err := pool.Add(peer, expectSnapshots[i].snapshot) require.NoError(t, err) } @@ -172,7 +171,7 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { func 
TestSnapshotPool_Reject(t *testing.T) { pool := newSnapshotPool() peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + peer.On("ID").Return(nodekey.ID("id")) snapshots := []*snapshot{ {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, @@ -200,7 +199,7 @@ func TestSnapshotPool_Reject(t *testing.T) { func TestSnapshotPool_RejectFormat(t *testing.T) { pool := newSnapshotPool() peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + peer.On("ID").Return(nodekey.ID("id")) snapshots := []*snapshot{ {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, @@ -230,9 +229,9 @@ func TestSnapshotPool_RejectPeer(t *testing.T) { pool := newSnapshotPool() peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) + peerA.On("ID").Return(nodekey.ID("a")) peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) + peerB.On("ID").Return(nodekey.ID("b")) s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} @@ -270,9 +269,9 @@ func TestSnapshotPool_RemovePeer(t *testing.T) { pool := newSnapshotPool() peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) + peerA.On("ID").Return(nodekey.ID("a")) peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) + peerB.On("ID").Return(nodekey.ID("b")) s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} diff --git a/statesync/stateprovider.go b/statesync/stateprovider.go index 3dc87dfabe1..771e5e51965 100644 --- a/statesync/stateprovider.go +++ b/statesync/stateprovider.go @@ -2,12 +2,12 @@ package statesync import ( "context" + "errors" "fmt" "strings" - "time" dbm "github.com/cometbft/cometbft-db" - + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/light" @@ -15,10 +15,10 @@ import ( lighthttp "github.com/cometbft/cometbft/light/provider/http" lightrpc "github.com/cometbft/cometbft/light/rpc" lightdb "github.com/cometbft/cometbft/light/store/db" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" rpchttp "github.com/cometbft/cometbft/rpc/client/http" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) @@ -44,15 +44,14 @@ type lightClientStateProvider struct { providers map[lightprovider.Provider]string } -// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients. 
-func NewLightClientStateProvider( - ctx context.Context, +func NewLightClientStateProviderWithDBKeyVersion(ctx context.Context, chainID string, version cmtstate.Version, initialHeight int64, servers []string, trustOptions light.TrustOptions, logger log.Logger, + dbKeyLayoutVersion string, ) (StateProvider, error) { if len(servers) < 2 { return nil, fmt.Errorf("at least 2 RPC servers are required, got %v", len(servers)) @@ -73,7 +72,7 @@ } lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:], - lightdb.New(dbm.NewMemDB(), ""), light.Logger(logger), light.MaxRetryAttempts(5)) + lightdb.NewWithDBVersion(dbm.NewMemDB(), "", dbKeyLayoutVersion), light.Logger(logger), light.MaxRetryAttempts(5)) if err != nil { return nil, err } @@ -85,13 +84,28 @@ }, nil } +// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients. +// DB Key layout will default to v1. +func NewLightClientStateProvider( + ctx context.Context, + chainID string, + version cmtstate.Version, + initialHeight int64, + servers []string, + trustOptions light.TrustOptions, + logger log.Logger, +) (StateProvider, error) { + return NewLightClientStateProviderWithDBKeyVersion(ctx, + chainID, version, initialHeight, servers, trustOptions, logger, "") +} + // AppHash implements StateProvider. func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { s.Lock() defer s.Unlock() // We have to fetch the next height, which contains the app hash for the previous height. - header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), cmttime.Now()) if err != nil { return nil, err } @@ -103,7 +117,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( // breaking it. We should instead have a Has(ctx, height) method which checks // that the state provider has access to the necessary data for the height. // We piggyback on AppHash() since it's called when adding snapshots to the pool. - _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), cmttime.Now()) if err != nil { return nil, err } @@ -114,7 +128,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { s.Lock() defer s.Unlock() - header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), cmttime.Now()) if err != nil { return nil, err } @@ -143,22 +157,22 @@ func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm // // We need to fetch the NextValidators from height+2 because if the application changed // the validator set at the snapshot height then this only takes effect at height+2.
- lastLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + lastLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), cmttime.Now()) if err != nil { return sm.State{}, err } - currentLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + currentLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), cmttime.Now()) if err != nil { return sm.State{}, err } - nextLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + nextLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), cmttime.Now()) if err != nil { return sm.State{}, err } state.Version = cmtstate.Version{ Consensus: currentLightBlock.Version, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, } state.LastBlockHeight = lastLightBlock.Height state.LastBlockTime = lastLightBlock.Time @@ -173,7 +187,7 @@ func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm // We'll also need to fetch consensus params via RPC, using light client verification. primaryURL, ok := s.providers[s.lc.Primary()] if !ok || primaryURL == "" { - return sm.State{}, fmt.Errorf("could not find address for primary light client provider") + return sm.State{}, errors.New("could not find address for primary light client provider") } primaryRPC, err := rpcClient(primaryURL) if err != nil { @@ -191,7 +205,7 @@ func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm return state, nil } -// rpcClient sets up a new RPC client +// rpcClient sets up a new RPC client. func rpcClient(server string) (*rpchttp.HTTP, error) { if !strings.Contains(server, "://") { server = "http://" + server diff --git a/statesync/syncer.go b/statesync/syncer.go index 38bdedfd1b6..9ce3830210b 100644 --- a/statesync/syncer.go +++ b/statesync/syncer.go @@ -8,12 +8,13 @@ import ( "time" abci "github.com/cometbft/cometbft/abci/types" + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/light" "github.com/cometbft/cometbft/p2p" - ssproto "github.com/cometbft/cometbft/proto/tendermint/statesync" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" @@ -22,10 +23,6 @@ import ( const ( // chunkTimeout is the timeout while waiting for the next chunk from the chunk queue. chunkTimeout = 2 * time.Minute - - // minimumDiscoveryTime is the lowest allowable time for a - // SyncAny discovery time. - minimumDiscoveryTime = 5 * time.Second ) var ( @@ -73,7 +70,6 @@ func newSyncer( stateProvider StateProvider, tempDir string, ) *syncer { - return &syncer{ logger: logger, stateProvider: stateProvider, @@ -139,18 +135,18 @@ func (s *syncer) RemovePeer(peer p2p.Peer) { s.snapshots.RemovePeer(peer.ID()) } -// SyncAny tries to sync any of the snapshots in the snapshot pool, waiting to discover further -// snapshots if none were found and discoveryTime > 0. It returns the latest state and block commit -// which the caller must use to bootstrap the node. 
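`SyncAny` now separates `discoveryTime`, the wait between discovery rounds, from `maxDiscoveryTime`, the overall budget after which `errNoSnapshots` is returned (zero meaning wait indefinitely). A sketch of driving the new signature from inside the package, with durations chosen purely for illustration:

```go
package statesync

import (
	"time"

	sm "github.com/cometbft/cometbft/state"
	"github.com/cometbft/cometbft/types"
)

// syncWithBudget sketches the new SyncAny call: wait 5s between
// discovery rounds, and give up entirely after one minute.
func syncWithBudget(s *syncer) (sm.State, *types.Commit, error) {
	retryHook := func() {
		// Re-request snapshots from peers here, as the reactor's hook does.
	}
	return s.SyncAny(5*time.Second, time.Minute, retryHook)
}
```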
-func (s *syncer) SyncAny(discoveryTime time.Duration, retryHook func()) (sm.State, *types.Commit, error) { - if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime { - discoveryTime = 5 * minimumDiscoveryTime - } +// SyncAny tries to sync any of the snapshots in the snapshot pool, waiting to +// discover further snapshots if none were found within discoveryTime. It +// returns the latest state and block commit which the caller must use to +// bootstrap the node. +// +// If no snapshots are found after maxDiscoveryTime, errNoSnapshots is +// returned. +func (s *syncer) SyncAny(discoveryTime, maxDiscoveryTime time.Duration, retryHook func()) (sm.State, *types.Commit, error) { + timeStart := time.Now() - if discoveryTime > 0 { - s.logger.Info("Discovering snapshots", "discoverTime", discoveryTime) - time.Sleep(discoveryTime) - } + s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) + time.Sleep(discoveryTime) // The app may ask us to retry a snapshot restoration, in which case we need to reuse // the snapshot and chunk queue from the previous loop iteration. @@ -166,11 +162,11 @@ func (s *syncer) SyncAny(discoveryTime time.Duration, retryHook func()) (sm.Stat chunks = nil } if snapshot == nil { - if discoveryTime == 0 { + if maxDiscoveryTime > 0 && time.Since(timeStart) >= maxDiscoveryTime { return sm.State{}, nil, errNoSnapshots } retryHook() - s.logger.Info("sync any", "msg", log.NewLazySprintf("Discovering snapshots for %v", discoveryTime)) + s.logger.Info("sync any", "msg", fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) time.Sleep(discoveryTime) continue } @@ -322,7 +318,7 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. func (s *syncer) offerSnapshot(snapshot *snapshot) error { s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, "format", snapshot.Format, "hash", log.NewLazySprintf("%X", snapshot.Hash)) - resp, err := s.conn.OfferSnapshot(context.TODO(), &abci.RequestOfferSnapshot{ + resp, err := s.conn.OfferSnapshot(context.TODO(), &abci.OfferSnapshotRequest{ Snapshot: &abci.Snapshot{ Height: snapshot.Height, Format: snapshot.Format, @@ -336,17 +332,17 @@ func (s *syncer) offerSnapshot(snapshot *snapshot) error { return fmt.Errorf("failed to offer snapshot: %w", err) } switch resp.Result { - case abci.ResponseOfferSnapshot_ACCEPT: + case abci.OFFER_SNAPSHOT_RESULT_ACCEPT: s.logger.Info("Snapshot accepted, restoring", "height", snapshot.Height, "format", snapshot.Format, "hash", log.NewLazySprintf("%X", snapshot.Hash)) return nil - case abci.ResponseOfferSnapshot_ABORT: + case abci.OFFER_SNAPSHOT_RESULT_ABORT: return errAbort - case abci.ResponseOfferSnapshot_REJECT: + case abci.OFFER_SNAPSHOT_RESULT_REJECT: return errRejectSnapshot - case abci.ResponseOfferSnapshot_REJECT_FORMAT: + case abci.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT: return errRejectFormat - case abci.ResponseOfferSnapshot_REJECT_SENDER: + case abci.OFFER_SNAPSHOT_RESULT_REJECT_SENDER: return errRejectSender default: return fmt.Errorf("unknown ResponseOfferSnapshot result %v", resp.Result) @@ -364,7 +360,7 @@ func (s *syncer) applyChunks(chunks *chunkQueue) error { return fmt.Errorf("failed to fetch chunk: %w", err) } - resp, err := s.conn.ApplySnapshotChunk(context.TODO(), &abci.RequestApplySnapshotChunk{ + resp, err := s.conn.ApplySnapshotChunk(context.TODO(), &abci.ApplySnapshotChunkRequest{ Index: chunk.Index, Chunk: chunk.Chunk, Sender: string(chunk.Sender), @@ -386,8 +382,8 @@ func (s *syncer) applyChunks(chunks
*chunkQueue) error { // Reject any senders as requested by the app for _, sender := range resp.RejectSenders { if sender != "" { - s.snapshots.RejectPeer(p2p.ID(sender)) - err := chunks.DiscardSender(p2p.ID(sender)) + s.snapshots.RejectPeer(nodekey.ID(sender)) + err := chunks.DiscardSender(nodekey.ID(sender)) if err != nil { return fmt.Errorf("failed to reject sender: %w", err) } @@ -395,14 +391,14 @@ func (s *syncer) applyChunks(chunks *chunkQueue) error { } switch resp.Result { - case abci.ResponseApplySnapshotChunk_ACCEPT: - case abci.ResponseApplySnapshotChunk_ABORT: + case abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT: + case abci.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT: return errAbort - case abci.ResponseApplySnapshotChunk_RETRY: + case abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY: chunks.Retry(chunk.Index) - case abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT: + case abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT: return errRetrySnapshot - case abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT: + case abci.APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT: return errRejectSnapshot default: return fmt.Errorf("unknown ResponseApplySnapshotChunk result %v", resp.Result) @@ -441,23 +437,18 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch s.logger.Info("Fetching snapshot chunk", "height", snapshot.Height, "format", snapshot.Format, "chunk", index, "total", chunks.Size()) - ticker := time.NewTicker(s.retryTimeout) - defer ticker.Stop() - s.requestChunk(snapshot, index) select { case <-chunks.WaitFor(index): next = true - case <-ticker.C: + case <-time.After(s.retryTimeout): next = false case <-ctx.Done(): return } - - ticker.Stop() } } @@ -481,9 +472,9 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { }) } -// verifyApp verifies the sync, checking the app hash, last block height and app version +// verifyApp verifies the sync, checking the app hash, last block height and app version. func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error { - resp, err := s.connQuery.Info(context.TODO(), proxy.RequestInfo) + resp, err := s.connQuery.Info(context.TODO(), proxy.InfoRequest) if err != nil { return fmt.Errorf("failed to query ABCI app for appHash: %w", err) } @@ -491,7 +482,7 @@ func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error { // sanity check that the app version in the block matches the application's own record // of its version if resp.AppVersion != appVersion { - // An error here most likely means that the app hasn't inplemented state sync + // An error here most likely means that the app hasn't implemented state sync // or the Info call correctly return fmt.Errorf("app version mismatch. 
Expected: %d, got: %d", appVersion, resp.AppVersion) diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go index 4fbb47a2e13..fe96466e6b0 100644 --- a/statesync/syncer_test.go +++ b/statesync/syncer_test.go @@ -10,25 +10,30 @@ import ( "github.com/stretchr/testify/require" abci "github.com/cometbft/cometbft/abci/types" + cmtstate "github.com/cometbft/cometbft/api/cometbft/state/v1" + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" p2pmocks "github.com/cometbft/cometbft/p2p/mocks" - cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" - ssproto "github.com/cometbft/cometbft/proto/tendermint/statesync" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/proxy" proxymocks "github.com/cometbft/cometbft/proxy/mocks" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/statesync/mocks" "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) -const testAppVersion = 9 +const ( + testAppVersion = 9 + maxDiscoveryTime = 1 * time.Millisecond // Not 0 because 0 means no timeout. +) -// Sets up a basic syncer that can be used to test OfferSnapshot requests +// Sets up a basic syncer that can be used to test OfferSnapshot requests. func setupOfferSyncer() (*syncer, *proxymocks.AppConnSnapshot) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} @@ -40,10 +45,10 @@ func setupOfferSyncer() (*syncer, *proxymocks.AppConnSnapshot) { return syncer, connSnapshot } -// Sets up a simple peer mock with an ID +// Sets up a simple peer mock with an ID. func simplePeer(id string) *p2pmocks.Peer { peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID(id)) + peer.On("ID").Return(nodekey.ID(id)) return peer } @@ -55,12 +60,12 @@ func TestSyncer_SyncAny(t *testing.T) { Block: version.BlockProtocol, App: testAppVersion, }, - Software: version.TMCoreSemVer, + Software: version.CMTSemVer, }, LastBlockHeight: 1, LastBlockID: types.BlockID{Hash: []byte("blockhash")}, - LastBlockTime: time.Now(), + LastBlockTime: cmttime.Now(), LastResultsHash: []byte("last_results_hash"), AppHash: []byte("app_hash"), @@ -97,8 +102,8 @@ func TestSyncer_SyncAny(t *testing.T) { // Adding a couple of peers should trigger snapshot discovery messages peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) - peerA.On("Send", mock.MatchedBy(func(i interface{}) bool { + peerA.On("ID").Return(nodekey.ID("a")) + peerA.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) if !ok { return false @@ -110,8 +115,8 @@ func TestSyncer_SyncAny(t *testing.T) { peerA.AssertExpectations(t) peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) - peerB.On("Send", mock.MatchedBy(func(i interface{}) bool { + peerB.On("ID").Return(nodekey.ID("b")) + peerB.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) if !ok { return false @@ -138,7 +143,7 @@ func TestSyncer_SyncAny(t *testing.T) { // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. 
- connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -146,8 +151,8 @@ func TestSyncer_SyncAny(t *testing.T) { Hash: []byte{1}, }, AppHash: []byte("app_hash_2"), - }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + }).Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT}, nil) + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -156,7 +161,7 @@ func TestSyncer_SyncAny(t *testing.T) { Metadata: s.Metadata, }, AppHash: []byte("app_hash"), - }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) + }).Times(2).Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_ACCEPT}, nil) chunkRequests := make(map[uint32]int) chunkRequestsMtx := cmtsync.Mutex{} @@ -176,11 +181,11 @@ func TestSyncer_SyncAny(t *testing.T) { chunkRequests[msg.Index]++ chunkRequestsMtx.Unlock() } - peerA.On("Send", mock.MatchedBy(func(i interface{}) bool { + peerA.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) return ok && e.ChannelID == ChunkChannel })).Maybe().Run(onChunkRequest).Return(true) - peerB.On("Send", mock.MatchedBy(func(i interface{}) bool { + peerB.On("Send", mock.MatchedBy(func(i any) bool { e, ok := i.(p2p.Envelope) return ok && e.ChannelID == ChunkChannel })).Maybe().Run(onChunkRequest).Return(true) @@ -188,30 +193,30 @@ func TestSyncer_SyncAny(t *testing.T) { // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). 
- connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 2, Chunk: []byte{1, 1, 2}, - }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( - &abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, + }).Once().Run(func(_ mock.Arguments) { time.Sleep(2 * time.Second) }).Return( + &abci.ApplySnapshotChunkResponse{ + Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT, RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 0, Chunk: []byte{1, 1, 0}, - }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Times(2).Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 1, Chunk: []byte{1, 1, 1}, - }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Times(2).Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 2, Chunk: []byte{1, 1, 2}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connQuery.On("Info", mock.Anything, proxy.InfoRequest).Return(&abci.InfoResponse{ AppVersion: testAppVersion, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), }, nil) - newState, lastCommit, err := syncer.SyncAny(0, func() {}) + newState, lastCommit, err := syncer.SyncAny(0, maxDiscoveryTime, func() {}) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // wait for peers to receive requests @@ -233,7 +238,7 @@ func TestSyncer_SyncAny(t *testing.T) { func TestSyncer_SyncAny_noSnapshots(t *testing.T) { syncer, _ := setupOfferSyncer() - _, _, err := syncer.SyncAny(0, func() {}) + _, _, err := syncer.SyncAny(0, maxDiscoveryTime, func() {}) assert.Equal(t, errNoSnapshots, err) } @@ -243,11 +248,11 @@ func TestSyncer_SyncAny_abort(t *testing.T) { s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} _, err := syncer.AddSnapshot(simplePeer("id"), s) require.NoError(t, err) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_ABORT}, nil) - _, _, err = syncer.SyncAny(0, func() {}) + _, _, err = syncer.SyncAny(0, maxDiscoveryTime, func() {}) assert.Equal(t, errAbort, err) connSnapshot.AssertExpectations(t) } @@ -266,19 +271,19 @@ func TestSyncer_SyncAny_reject(t *testing.T) { _, err = 
syncer.AddSnapshot(simplePeer("id"), s11) require.NoError(t, err) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT}, nil) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s12), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT}, nil) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT}, nil) - _, _, err = syncer.SyncAny(0, func() {}) + _, _, err = syncer.SyncAny(0, maxDiscoveryTime, func() {}) assert.Equal(t, errNoSnapshots, err) connSnapshot.AssertExpectations(t) } @@ -297,15 +302,15 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { _, err = syncer.AddSnapshot(simplePeer("id"), s11) require.NoError(t, err) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_ABORT}, nil) - _, _, err = syncer.SyncAny(0, func() {}) + _, _, err = syncer.SyncAny(0, maxDiscoveryTime, func() {}) assert.Equal(t, errAbort, err) connSnapshot.AssertExpectations(t) } @@ -335,15 +340,15 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { _, err = syncer.AddSnapshot(peerC, sbc) require.NoError(t, err) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT_SENDER}, nil) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(sa), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + }).Once().Return(&abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_REJECT}, nil) - _, _, err = 
syncer.SyncAny(0, func() {}) + _, _, err = syncer.SyncAny(0, maxDiscoveryTime, func() {}) assert.Equal(t, errNoSnapshots, err) connSnapshot.AssertExpectations(t) } @@ -355,12 +360,12 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} _, err := syncer.AddSnapshot(simplePeer("id"), s) require.NoError(t, err) - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) - _, _, err = syncer.SyncAny(0, func() {}) - assert.True(t, errors.Is(err, errBoom)) + _, _, err = syncer.SyncAny(0, maxDiscoveryTime, func() {}) + require.ErrorIs(t, err, errBoom) connSnapshot.AssertExpectations(t) } @@ -369,28 +374,27 @@ func TestSyncer_offerSnapshot(t *testing.T) { boom := errors.New("boom") testcases := map[string]struct { - result abci.ResponseOfferSnapshot_Result + result abci.OfferSnapshotResult err error expectErr error }{ - "accept": {abci.ResponseOfferSnapshot_ACCEPT, nil, nil}, - "abort": {abci.ResponseOfferSnapshot_ABORT, nil, errAbort}, - "reject": {abci.ResponseOfferSnapshot_REJECT, nil, errRejectSnapshot}, - "reject_format": {abci.ResponseOfferSnapshot_REJECT_FORMAT, nil, errRejectFormat}, - "reject_sender": {abci.ResponseOfferSnapshot_REJECT_SENDER, nil, errRejectSender}, - "unknown": {abci.ResponseOfferSnapshot_UNKNOWN, nil, unknownErr}, + "accept": {abci.OFFER_SNAPSHOT_RESULT_ACCEPT, nil, nil}, + "abort": {abci.OFFER_SNAPSHOT_RESULT_ABORT, nil, errAbort}, + "reject": {abci.OFFER_SNAPSHOT_RESULT_REJECT, nil, errRejectSnapshot}, + "reject_format": {abci.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT, nil, errRejectFormat}, + "reject_sender": {abci.OFFER_SNAPSHOT_RESULT_REJECT_SENDER, nil, errRejectSender}, + "unknown": {abci.OFFER_SNAPSHOT_RESULT_UNKNOWN, nil, unknownErr}, "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { syncer, connSnapshot := setupOfferSyncer() s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, &abci.OfferSnapshotRequest{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), - }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) + }).Return(&abci.OfferSnapshotResponse{Result: tc.result}, tc.err) err := syncer.offerSnapshot(s) if tc.expectErr == unknownErr { require.Error(t, err) @@ -410,21 +414,20 @@ func TestSyncer_applyChunks_Results(t *testing.T) { boom := errors.New("boom") testcases := map[string]struct { - result abci.ResponseApplySnapshotChunk_Result + result abci.ApplySnapshotChunkResult err error expectErr error }{ - "accept": {abci.ResponseApplySnapshotChunk_ACCEPT, nil, nil}, - "abort": {abci.ResponseApplySnapshotChunk_ABORT, nil, errAbort}, - "retry": {abci.ResponseApplySnapshotChunk_RETRY, nil, nil}, - "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, nil, errRetrySnapshot}, - "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT, nil, errRejectSnapshot}, - "unknown": {abci.ResponseApplySnapshotChunk_UNKNOWN, nil, unknownErr}, + "accept": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT, nil, nil}, + "abort": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT, nil, errAbort}, + "retry": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY, nil, 
nil}, + "retry_snapshot": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT, nil, errRetrySnapshot}, + "reject_snapshot": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT, nil, errRejectSnapshot}, + "unknown": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_UNKNOWN, nil, unknownErr}, "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} @@ -440,14 +443,14 @@ func TestSyncer_applyChunks_Results(t *testing.T) { _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 0, Chunk: body, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) - if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: tc.result}, tc.err) + if tc.result == abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY { + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 0, Chunk: body, - }).Once().Return(&abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT, + }).Once().Return(&abci.ApplySnapshotChunkResponse{ + Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT, }, nil) } @@ -469,16 +472,15 @@ func TestSyncer_applyChunks_Results(t *testing.T) { func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { // Discarding chunks via refetch_chunks should work the same for all results testcases := map[string]struct { - result abci.ResponseApplySnapshotChunk_Result + result abci.ApplySnapshotChunkResult }{ - "accept": {abci.ResponseApplySnapshotChunk_ACCEPT}, - "abort": {abci.ResponseApplySnapshotChunk_ABORT}, - "retry": {abci.ResponseApplySnapshotChunk_RETRY}, - "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, - "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, + "accept": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, + "abort": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT}, + "retry": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY}, + "retry_snapshot": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT}, + "reject_snapshot": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} @@ -501,15 +503,15 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for 1 to be refetched - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 0, Chunk: []byte{0}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 1, Chunk: []byte{1}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: 
abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 2, Chunk: []byte{2}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{ Result: tc.result, RefetchChunks: []uint32{1}, }, nil) @@ -534,16 +536,15 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // Banning chunks senders via ban_chunk_senders should work the same for all results testcases := map[string]struct { - result abci.ResponseApplySnapshotChunk_Result + result abci.ApplySnapshotChunkResult }{ - "accept": {abci.ResponseApplySnapshotChunk_ACCEPT}, - "abort": {abci.ResponseApplySnapshotChunk_ABORT}, - "retry": {abci.ResponseApplySnapshotChunk_RETRY}, - "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, - "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, + "accept": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, + "abort": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT}, + "retry": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY}, + "retry_snapshot": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY_SNAPSHOT}, + "reject_snapshot": {abci.APPLY_SNAPSHOT_CHUNK_RESULT_REJECT_SNAPSHOT}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} @@ -587,24 +588,24 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for b sender to be rejected - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 0, Chunk: []byte{0}, Sender: "a", - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 1, Chunk: []byte{1}, Sender: "b", - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 2, Chunk: []byte{2}, Sender: "c", - }).Once().Return(&abci.ResponseApplySnapshotChunk{ + }).Once().Return(&abci.ApplySnapshotChunkResponse{ Result: tc.result, RejectSenders: []string{string(peerB.ID())}, }, nil) // On retry, the last chunk will be tried again, so we just accept it then. 
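Stepping back from these mocks for a moment: every test in this file now drives the two-timeout SyncAny introduced at the top of this diff. A caller-side sketch, written as a hypothetical helper in this package; the durations are illustrative, and a zero maxDiscoveryTime disables the deadline, per the constant's comment above:

func bootstrapFromSnapshot(s *syncer) (sm.State, *types.Commit, error) {
	return s.SyncAny(
		10*time.Second, // discoveryTime: pause between snapshot discovery rounds
		2*time.Minute,  // maxDiscoveryTime: fail with errNoSnapshots after this; 0 disables the limit
		func() {
			// retryHook: e.g. re-announce snapshot requests to peers.
		},
	)
}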
- if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ + if tc.result == abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY { + connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.ApplySnapshotChunkRequest{ Index: 2, Chunk: []byte{2}, Sender: "c", - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + }).Once().Return(&abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil) } // We don't really care about the result of applyChunks, since it has separate test. @@ -639,26 +640,26 @@ func TestSyncer_verifyApp(t *testing.T) { s := &snapshot{Height: 3, Format: 1, Chunks: 5, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} testcases := map[string]struct { - response *abci.ResponseInfo + response *abci.InfoResponse err error expectErr error }{ - "verified": {&abci.ResponseInfo{ + "verified": {&abci.InfoResponse{ LastBlockHeight: 3, LastBlockAppHash: []byte("app_hash"), AppVersion: appVersion, }, nil, nil}, - "invalid app version": {&abci.ResponseInfo{ + "invalid app version": {&abci.InfoResponse{ LastBlockHeight: 3, LastBlockAppHash: []byte("app_hash"), AppVersion: 2, }, nil, appVersionMismatchErr}, - "invalid height": {&abci.ResponseInfo{ + "invalid height": {&abci.InfoResponse{ LastBlockHeight: 5, LastBlockAppHash: []byte("app_hash"), AppVersion: appVersion, }, nil, errVerifyFailed}, - "invalid hash": {&abci.ResponseInfo{ + "invalid hash": {&abci.InfoResponse{ LastBlockHeight: 3, LastBlockAppHash: []byte("xxx"), AppVersion: appVersion, @@ -666,7 +667,6 @@ func TestSyncer_verifyApp(t *testing.T) { "error": {nil, boom, boom}, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} @@ -675,7 +675,7 @@ func TestSyncer_verifyApp(t *testing.T) { cfg := config.DefaultStateSyncConfig() syncer := newSyncer(*cfg, log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") - connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.err) + connQuery.On("Info", mock.Anything, proxy.InfoRequest).Return(tc.response, tc.err) err := syncer.verifyApp(s, appVersion) unwrapped := errors.Unwrap(err) if unwrapped != nil { diff --git a/statesync/types.go b/statesync/types.go new file mode 100644 index 00000000000..0d8ea10df68 --- /dev/null +++ b/statesync/types.go @@ -0,0 +1,13 @@ +package statesync + +import ( + ssproto "github.com/cometbft/cometbft/api/cometbft/statesync/v1" + "github.com/cometbft/cometbft/types" +) + +var ( + _ types.Wrapper = &ssproto.ChunkRequest{} + _ types.Wrapper = &ssproto.ChunkResponse{} + _ types.Wrapper = &ssproto.SnapshotsRequest{} + _ types.Wrapper = &ssproto.SnapshotsResponse{} +) diff --git a/store/bench_test.go b/store/bench_test.go new file mode 100644 index 00000000000..cda82859a77 --- /dev/null +++ b/store/bench_test.go @@ -0,0 +1,35 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/internal/test" + "github.com/cometbft/cometbft/types" + cmttime "github.com/cometbft/cometbft/types/time" +) + +// BenchmarkRepeatedLoadSeenCommitSameBlock measures repeated LoadSeenCommit +// calls for the same height: after the first call reads from the database, +// subsequent loads should be served by the seen-commit cache.
+func BenchmarkRepeatedLoadSeenCommitSameBlock(b *testing.B) { + state, bs, _, _, cleanup, _ := makeStateAndBlockStoreAndIndexers() + defer cleanup() + h := bs.Height() + 1 + block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + seenCommit := makeTestExtCommitWithNumSigs(block.Header.Height, cmttime.Now(), 100).ToCommit() + ps, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(b, err) + bs.SaveBlock(block, ps, seenCommit) + + // sanity check + res := bs.LoadSeenCommit(block.Height) + require.Equal(b, seenCommit, res) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := bs.LoadSeenCommit(block.Height) + require.NotNil(b, res) + } +} diff --git a/store/db_key_layout.go b/store/db_key_layout.go new file mode 100644 index 00000000000..25ca3421229 --- /dev/null +++ b/store/db_key_layout.go @@ -0,0 +1,213 @@ +package store + +import ( + "encoding/hex" + "strconv" + + "github.com/google/orderedcode" +) + +type BlockKeyLayout interface { + CalcBlockMetaKey(height int64) []byte + + CalcBlockPartKey(height int64, partIndex int) []byte + + CalcBlockCommitKey(height int64) []byte + + CalcSeenCommitKey(height int64) []byte + + CalcExtCommitKey(height int64) []byte + + CalcBlockHashKey(hash []byte) []byte +} + +// v1LegacyLayout is a legacy implementation of BlockKeyLayout, kept for backwards +// compatibility. Newer code should use [v2Layout]. +type v1LegacyLayout struct{} + +// In the following [v1LegacyLayout] methods, we preallocate the key's slice to speed +// up append operations and avoid extra allocations. +// The size of the slice is the length of the prefix plus the length of the string +// representation of a 64-bit integer. Namely, the longest 64-bit int has 19 digits, +// therefore its string representation is 20 bytes long (19 digits + 1 byte for the +// sign). + +// CalcBlockCommitKey implements BlockKeyLayout. +// It returns a database key of the form "C:<height>" to store/retrieve the commit +// of the block at the given height to/from the database. +func (*v1LegacyLayout) CalcBlockCommitKey(height int64) []byte { + const ( + prefix = "C:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key +} + +// CalcBlockHashKey implements BlockKeyLayout. +// It returns a database key of the form "BH:hex(<hash>)" to store/retrieve a block +// to/from the database using its hash. +func (*v1LegacyLayout) CalcBlockHashKey(hash []byte) []byte { + const prefixLen = len("BH:") + + key := make([]byte, prefixLen+hex.EncodedLen(len(hash))) + + key[0], key[1], key[2] = 'B', 'H', ':' + hex.Encode(key[prefixLen:], hash) + + return key +} + +// CalcBlockMetaKey implements BlockKeyLayout. +// It returns a database key of the form "H:<height>" to store/retrieve the metadata +// of the block at the given height to/from the database. +func (*v1LegacyLayout) CalcBlockMetaKey(height int64) []byte { + const ( + prefix = "H:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key +} + +// CalcBlockPartKey implements BlockKeyLayout. +// It returns a database key of the form "P:<height>:<partIndex>" to store/retrieve a +// block part to/from the database. +func (*v1LegacyLayout) CalcBlockPartKey(height int64, partIndex int) []byte { + const ( + prefix = "P:" + prefixLen = len(prefix) + ) + + // Here we have 2 ints, therefore 20+1 bytes.
+ // 1 byte for the partIndex should be sufficient. We have observed that most + // chains have only a few parts per block. If things change, we can increment + // this number. The theoretical max is 4 bytes and comes from the following + // calculation: + // - the max configurable block size is 100MB (see types/params.go) + // - a block part is 65KB (see types/params.go) + // - the max number of parts that a block can be split into is therefore + // (max block size / block part size) + 1 = (100MB/65KB) + 1 = 1601 + // - the string representation of 1601 consists of 4 digits, therefore 4 bytes. + // + // The total size is: prefixLen + 20 + 1 (len(":")) + 1. + key := make([]byte, 0, prefixLen+20+1+1) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + key = append(key, ':') + key = strconv.AppendInt(key, int64(partIndex), 10) + + return key +} + +// CalcExtCommitKey implements BlockKeyLayout. +// It returns a database key of the form "EC:<height>" to store/retrieve the +// ExtendedCommit for the given height to/from the database. +func (*v1LegacyLayout) CalcExtCommitKey(height int64) []byte { + const ( + prefix = "EC:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key +} + +// CalcSeenCommitKey implements BlockKeyLayout. +// It returns a database key of the form "SC:<height>" to store/retrieve a locally +// seen commit for the given height to/from the database. +func (*v1LegacyLayout) CalcSeenCommitKey(height int64) []byte { + const ( + prefix = "SC:" + prefixLen = len(prefix) + ) + key := make([]byte, 0, prefixLen+20) + + key = append(key, prefix...) + key = strconv.AppendInt(key, height, 10) + + return key +} + +var _ BlockKeyLayout = (*v1LegacyLayout)(nil) + +type v2Layout struct{} + +// key prefixes. +const ( + // prefixes are unique across all tm db's. + prefixBlockMeta = int64(0) + prefixBlockPart = int64(1) + prefixBlockCommit = int64(2) + prefixSeenCommit = int64(3) + prefixExtCommit = int64(4) + prefixBlockHash = int64(5) +) + +// CalcBlockCommitKey implements BlockKeyLayout. +func (*v2Layout) CalcBlockCommitKey(height int64) []byte { + key, err := orderedcode.Append(nil, prefixBlockCommit, height) + if err != nil { + panic(err) + } + return key +} + +// CalcBlockHashKey implements BlockKeyLayout. +func (*v2Layout) CalcBlockHashKey(hash []byte) []byte { + key, err := orderedcode.Append(nil, prefixBlockHash, string(hash)) + if err != nil { + panic(err) + } + return key +} + +// CalcBlockMetaKey implements BlockKeyLayout. +func (*v2Layout) CalcBlockMetaKey(height int64) []byte { + key, err := orderedcode.Append(nil, prefixBlockMeta, height) + if err != nil { + panic(err) + } + return key +} + +// CalcBlockPartKey implements BlockKeyLayout. +func (*v2Layout) CalcBlockPartKey(height int64, partIndex int) []byte { + key, err := orderedcode.Append(nil, prefixBlockPart, height, int64(partIndex)) + if err != nil { + panic(err) + } + return key +} + +// CalcExtCommitKey implements BlockKeyLayout. +func (*v2Layout) CalcExtCommitKey(height int64) []byte { + key, err := orderedcode.Append(nil, prefixExtCommit, height) + if err != nil { + panic(err) + } + return key +} + +// CalcSeenCommitKey implements BlockKeyLayout.
+func (*v2Layout) CalcSeenCommitKey(height int64) []byte { + key, err := orderedcode.Append(nil, prefixSeenCommit, height) + if err != nil { + panic(err) + } + return key +} + +var _ BlockKeyLayout = (*v2Layout)(nil) diff --git a/store/db_key_layout_test.go b/store/db_key_layout_test.go new file mode 100644 index 00000000000..a80db7ca93d --- /dev/null +++ b/store/db_key_layout_test.go @@ -0,0 +1,89 @@ +package store + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "strconv" + "testing" +) + +// run: go test -fuzz=FuzzCalcBlockPartKey -fuzztime 30s +func FuzzCalcBlockPartKey(f *testing.F) { + layout := &v1LegacyLayout{} + + f.Add(int64(0), 0) + f.Add(int64(141241), 980) + f.Add(int64(1234567890), 12345678901) + f.Add(int64(9223372036854775807), 2147483647) + f.Add(int64(42), 2147483648) + + f.Fuzz(func(t *testing.T, height int64, partIndex int) { + key := layout.CalcBlockPartKey(height, partIndex) + + // Ensure the key starts with the "P:" prefix. + // 2 is the length of "P:". + if len(key) < 2 || key[0] != 'P' || key[1] != ':' { + t.Fatalf("key does not start with 'P:': %s", key) + } + + sepIndex := bytes.LastIndexByte(key, ':') + if sepIndex == -1 { + t.Fatalf("key does not have ':' between height and partIndex: %s", key) + } + + heightStr := string(key[2:sepIndex]) + gotHeight, err := strconv.ParseInt(heightStr, 10, 64) + if err != nil { + t.Fatalf("parsing height from key: %s, error: %s", key, err) + } + if gotHeight != height { + t.Fatalf("want height %d, but got %d", height, gotHeight) + } + + partIndexStr := string(key[sepIndex+1:]) + gotPartIndex, err := strconv.Atoi(partIndexStr) + if err != nil { + t.Fatalf("parsing partIndex from key: %s, error: %s", key, err) + } + if gotPartIndex != partIndex { + t.Errorf("want partIndex %d, but got %d", partIndex, gotPartIndex) + } + }) +} + +// run: go test -fuzz=FuzzCalcBlockHashKey -fuzztime 30s +func FuzzCalcBlockHashKey(f *testing.F) { + layout := &v1LegacyLayout{} + + f.Add([]byte{}) + f.Add([]byte{0x00}) + f.Add([]byte{0xFF}) + f.Add([]byte{0x01, 0x02, 0x03, 0x04}) + f.Add(make([]byte, 32)) + + // empty hash: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + f.Add(sha256.New().Sum(nil)) + + f.Fuzz(func(t *testing.T, hash []byte) { + key := layout.CalcBlockHashKey(hash) + + if len(key) < 3 || key[0] != 'B' || key[1] != 'H' || key[2] != ':' { + t.Fatalf("key does not start with 'BH:': %s", key) + } + + var ( + hashHex = key[3:] + gotHash = make([]byte, len(hash)) + ) + _, err := hex.Decode(gotHash, hashHex) + if err != nil { + t.Fatalf("decoding hash from key: %s, error: %s", key, err) + } + + // Ensure the decoded hash matches the input hash. + if !bytes.Equal(gotHash, hash) { + t.Fatalf("want hash %x\ngot: %x\n", hash, gotHash) + } + }) +} diff --git a/store/doc.go b/store/doc.go new file mode 100644 index 00000000000..219d34f596a --- /dev/null +++ b/store/doc.go @@ -0,0 +1,3 @@ +// XXX: This package may be internalized (made private) in the future +// releases. +package store diff --git a/store/metrics.gen.go b/store/metrics.gen.go new file mode 100644 index 00000000000..0c2ef6aedae --- /dev/null +++ b/store/metrics.gen.go @@ -0,0 +1,32 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package store + +import ( + "github.com/cometbft/cometbft/libs/metrics/discard" + prometheus "github.com/cometbft/cometbft/libs/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockStoreAccessDurationSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_store_access_duration_seconds", + Help: "The duration of accesses to the block store labeled by which method was called on the store.", + + Buckets: stdprometheus.ExponentialBuckets(0.0002, 10, 5), + }, append(labels, "method")).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockStoreAccessDurationSeconds: discard.NewHistogram(), + } +} diff --git a/store/metrics.go b/store/metrics.go new file mode 100644 index 00000000000..699318a1827 --- /dev/null +++ b/store/metrics.go @@ -0,0 +1,20 @@ +package store + +import ( + "github.com/cometbft/cometbft/libs/metrics" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. + MetricsSubsystem = "store" +) + +//go:generate go run ../scripts/metricsgen -struct=Metrics + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // The duration of accesses to the block store labeled by which method + // was called on the store. + BlockStoreAccessDurationSeconds metrics.Histogram `metrics_bucketsizes:"0.0002, 10, 5" metrics_buckettype:"exp" metrics_labels:"method"` +} diff --git a/store/store.go b/store/store.go index 2dd24c39a0d..9e3999cd247 100644 --- a/store/store.go +++ b/store/store.go @@ -4,20 +4,28 @@ import ( "errors" "fmt" "strconv" + "time" - cmterrors "github.com/cometbft/cometbft/types/errors" "github.com/cosmos/gogoproto/proto" + lru "github.com/hashicorp/golang-lru/v2" dbm "github.com/cometbft/cometbft-db" - - "github.com/cometbft/cometbft/evidence" + cmtstore "github.com/cometbft/cometbft/api/cometbft/store/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/internal/evidence" + "github.com/cometbft/cometbft/libs/metrics" cmtsync "github.com/cometbft/cometbft/libs/sync" - cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" + cmterrors "github.com/cometbft/cometbft/types/errors" +) + +// Assuming the length of a block part is 64kB (`types.BlockPartSizeBytes`), +// the maximum size of a block that will be batch-saved is 640kB. The +// benchmarks have shown that `goleveldb` still performs well with blocks of +// this size. However, if the block is larger than 1MB, the performance degrades. +const maxBlockPartsToBatch = 10 + /* BlockStore is a simple low level store for blocks. @@ -36,33 +44,143 @@ The store can be assumed to contain all contiguous blocks between base and heigh // deserializing loaded data, indicating probable corruption on disk. */ type BlockStore struct { - db dbm.DB - - // mtx guards access to the struct fields listed below it. We rely on the database to enforce - // fine-grained concurrency control for its data, and thus this mutex does not apply to - // database contents.
The only reason for keeping these fields in the struct is that the data + db dbm.DB + metrics *Metrics + + // mtx guards access to the struct fields listed below it. Although we rely on the database + // to enforce fine-grained concurrency control for its data, we need to make sure that + // no external observer can get data from the database that is not in sync with the fields below, + // and vice-versa. Hence, when updating the fields below, we use the mutex to make sure + // that the database is also up to date. This prevents any concurrent external access from + // obtaining inconsistent data. + // The only reason for keeping these fields in the struct is that the data // can't efficiently be queried from the database since the key encoding we use is not // lexicographically ordered (see https://github.com/tendermint/tendermint/issues/4567). mtx cmtsync.RWMutex base int64 height int64 + + dbKeyLayout BlockKeyLayout + + blocksDeleted int64 + compact bool + compactionInterval int64 + + seenCommitCache *lru.Cache[int64, *types.Commit] + blockCommitCache *lru.Cache[int64, *types.Commit] + blockExtendedCommitCache *lru.Cache[int64, *types.ExtendedCommit] + blockPartCache *lru.Cache[blockPartIndex, *types.Part] +} + +type BlockStoreOption func(*BlockStore) + +// WithCompaction sets the compaction parameters. +func WithCompaction(compact bool, compactionInterval int64) BlockStoreOption { + return func(bs *BlockStore) { + bs.compact = compact + bs.compactionInterval = compactionInterval + } +} + +// WithMetrics sets the metrics. +func WithMetrics(metrics *Metrics) BlockStoreOption { + return func(bs *BlockStore) { bs.metrics = metrics } +} + +// WithDBKeyLayout sets the DB key layout. +func WithDBKeyLayout(dbKeyLayout string) BlockStoreOption { + return func(bs *BlockStore) { setDBLayout(bs, dbKeyLayout) } +} + +func setDBLayout(bStore *BlockStore, dbKeyLayoutVersion string) { + if !bStore.IsEmpty() { + var version []byte + var err error + if version, err = bStore.db.Get([]byte("version")); err != nil { + // WARN: cometBFT DB currently does not return an error if the key does not exist, + // so any error here is fatal. If this behavior changes, we need to account for that. + panic(err) + } + if len(version) != 0 { + dbKeyLayoutVersion = string(version) + } + } + switch dbKeyLayoutVersion { + case "v1", "": + bStore.dbKeyLayout = &v1LegacyLayout{} + dbKeyLayoutVersion = "v1" + case "v2": + bStore.dbKeyLayout = &v2Layout{} + default: + panic("unknown key layout version") + } + if err := bStore.db.SetSync([]byte("version"), []byte(dbKeyLayoutVersion)); err != nil { + panic(err) + } } // NewBlockStore returns a new BlockStore with the given DB, // initialized to the last height that was committed to the DB. -func NewBlockStore(db dbm.DB) *BlockStore { +func NewBlockStore(db dbm.DB, options ...BlockStoreOption) *BlockStore { + start := time.Now() + bs := LoadBlockStoreState(db) - return &BlockStore{ - base: bs.Base, - height: bs.Height, - db: db, + + bStore := &BlockStore{ + base: bs.Base, + height: bs.Height, + db: db, + metrics: NopMetrics(), + } + bStore.addCaches() + + for _, option := range options { + option(bStore) + } + + if bStore.dbKeyLayout == nil { + setDBLayout(bStore, "v1") + } + + addTimeSample(bStore.metrics.BlockStoreAccessDurationSeconds.With("method", "new_block_store"), start)() + return bStore +} + +func (bs *BlockStore) addCaches() { + var err error + // err can only occur if the argument is non-positive, which is impossible in this context.
+ bs.blockCommitCache, err = lru.New[int64, *types.Commit](100) + if err != nil { + panic(err) + } + bs.blockExtendedCommitCache, err = lru.New[int64, *types.ExtendedCommit](100) + if err != nil { + panic(err) + } + bs.seenCommitCache, err = lru.New[int64, *types.Commit](100) + if err != nil { + panic(err) + } + bs.blockPartCache, err = lru.New[blockPartIndex, *types.Part](500) + if err != nil { + panic(err) + } } +func (bs *BlockStore) GetVersion() string { + switch bs.dbKeyLayout.(type) { + case *v1LegacyLayout: + return "v1" + case *v2Layout: + return "v2" + } + return "no version set" +} + func (bs *BlockStore) IsEmpty() bool { bs.mtx.RLock() defer bs.mtx.RUnlock() - return bs.base == bs.height && bs.base == 0 + return bs.base == 0 && bs.height == 0 } // Base returns the first known contiguous block height, or 0 for empty block stores. @@ -101,12 +219,12 @@ func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta { // LoadBlock returns the block with the given height. // If no block is found for that height, it returns nil. -func (bs *BlockStore) LoadBlock(height int64) *types.Block { +func (bs *BlockStore) LoadBlock(height int64) (*types.Block, *types.BlockMeta) { + start := time.Now() blockMeta := bs.LoadBlockMeta(height) if blockMeta == nil { - return nil + return nil, nil } - pbb := new(cmtproto.Block) buf := []byte{} for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { @@ -114,35 +232,39 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { // If the part is missing (e.g. since it has been deleted after we // loaded the block meta) we consider the whole block to be missing. if part == nil { - return nil + return nil, nil } buf = append(buf, part.Bytes...) } + addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block"), start)() + err := proto.Unmarshal(buf, pbb) if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. panic(fmt.Sprintf("Error reading block: %v", err)) } - block, err := types.BlockFromProto(pbb) if err != nil { panic(cmterrors.ErrMsgFromProto{MessageName: "Block", Err: err}) } - return block + return block, blockMeta } // LoadBlockByHash returns the block with the given hash. // If no block is found for that hash, it returns nil. // Panics if it fails to parse height associated with the given hash. -func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block { - bz, err := bs.db.Get(calcBlockHashKey(hash)) +func (bs *BlockStore) LoadBlockByHash(hash []byte) (*types.Block, *types.BlockMeta) { + // WARN: this includes the time for LoadBlock, i.e., it counts loading the entire block (all of its parts) + // AND unmarshaling it + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_by_hash"), time.Now())() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcBlockHashKey(hash)) if err != nil { panic(err) } if len(bz) == 0 { - return nil + return nil, nil } s := string(bz) @@ -153,29 +275,41 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.LoadBlock(height) } +type blockPartIndex struct { + height int64 + index int +} + // LoadBlockPart returns the Part at the given index // from the block at the given height. // If no part is found for the given height and index, it returns nil. +// The returned part should not be modified by the caller. Take a copy if you need to modify it.
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { + part, ok := bs.blockPartCache.Get(blockPartIndex{height, index}) + if ok { + return part + } pbpart := new(cmtproto.Part) - - bz, err := bs.db.Get(calcBlockPartKey(height, index)) + start := time.Now() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcBlockPartKey(height, index)) if err != nil { panic(err) } + + addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_part"), start)() + if len(bz) == 0 { return nil } - err = proto.Unmarshal(bz, pbpart) if err != nil { panic(fmt.Errorf("unmarshal to cmtproto.Part failed: %w", err)) } - part, err := types.PartFromProto(pbpart) + part, err = types.PartFromProto(pbpart) if err != nil { panic(fmt.Sprintf("Error reading block part: %v", err)) } - + bs.blockPartCache.Add(blockPartIndex{height, index}, part) return part } @@ -183,11 +317,14 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { // If no block is found for the given height, it returns nil. func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { pbbm := new(cmtproto.BlockMeta) - bz, err := bs.db.Get(calcBlockMetaKey(height)) + start := time.Now() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcBlockMetaKey(height)) if err != nil { panic(err) } + addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_meta"), start)() + if len(bz) == 0 { return nil } @@ -196,8 +333,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { if err != nil { panic(fmt.Errorf("unmarshal to cmtproto.BlockMeta: %w", err)) } - - blockMeta, err := types.BlockMetaFromProto(pbbm) + blockMeta, err := types.BlockMetaFromTrustedProto(pbbm) if err != nil { panic(cmterrors.ErrMsgFromProto{MessageName: "BlockMetadata", Err: err}) } @@ -208,7 +344,9 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { // LoadBlockMetaByHash returns the blockmeta who's header corresponds to the given // hash. If none is found, returns nil. func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { - bz, err := bs.db.Get(calcBlockHashKey(hash)) + // WARN: same as load-by-hash above, this includes the time to get the block metadata and unmarshal it + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_meta_by_hash"), time.Now())() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcBlockHashKey(hash)) if err != nil { panic(err) } @@ -228,15 +366,28 @@ func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { // This commit consists of the +2/3 and other Precommit-votes for block at `height`, // and it comes from the block.LastCommit for `height+1`. // If no commit is found for the given height, it returns nil. +// +// This return value should not be modified. If you need to modify it, +// do bs.LoadBlockCommit(height).Clone().
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { + comm, ok := bs.blockCommitCache.Get(height) + if ok { + return comm + } pbc := new(cmtproto.Commit) - bz, err := bs.db.Get(calcBlockCommitKey(height)) + + start := time.Now() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcBlockCommitKey(height)) if err != nil { panic(err) } + + addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_commit"), start)() + if len(bz) == 0 { return nil } + err = proto.Unmarshal(bz, pbc) if err != nil { panic(fmt.Errorf("error reading block commit: %w", err)) @@ -245,6 +396,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { if err != nil { panic(cmterrors.ErrMsgToProto{MessageName: "Commit", Err: err}) } + bs.blockCommitCache.Add(height, commit) return commit } @@ -252,14 +404,24 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { // The extended commit is not guaranteed to contain the same +2/3 precommits data // as the commit in the block. func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { + comm, ok := bs.blockExtendedCommitCache.Get(height) + if ok { + return comm.Clone() + } pbec := new(cmtproto.ExtendedCommit) - bz, err := bs.db.Get(calcExtCommitKey(height)) + + start := time.Now() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcExtCommitKey(height)) if err != nil { panic(fmt.Errorf("fetching extended commit: %w", err)) } + + addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_ext_commit"), start)() + if len(bz) == 0 { return nil } + err = proto.Unmarshal(bz, pbec) if err != nil { panic(fmt.Errorf("decoding extended commit: %w", err)) @@ -268,21 +430,31 @@ func (bs *BlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommi if err != nil { panic(fmt.Errorf("converting extended commit: %w", err)) } - return extCommit + bs.blockExtendedCommitCache.Add(height, extCommit) + return extCommit.Clone() } // LoadSeenCommit returns the locally seen Commit for the given height. // This is useful when we've seen a commit, but there has not yet been // a new block at `height + 1` that includes this commit in its block.LastCommit. func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { + comm, ok := bs.seenCommitCache.Get(height) + if ok { + return comm.Clone() + } pbc := new(cmtproto.Commit) - bz, err := bs.db.Get(calcSeenCommitKey(height)) + start := time.Now() + bz, err := bs.db.Get(bs.dbKeyLayout.CalcSeenCommitKey(height)) if err != nil { panic(err) } + + addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_seen_commit"), start)() + if len(bz) == 0 { return nil } + err = proto.Unmarshal(bz, pbc) if err != nil { panic(fmt.Sprintf("error reading block seen commit: %v", err)) @@ -292,7 +464,8 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { if err != nil { panic(fmt.Errorf("converting seen commit: %w", err)) } - return commit + bs.seenCommitCache.Add(height, commit) + return commit.Clone() } // PruneBlocks removes block up to (but not including) a height. It returns the @@ -300,7 +473,7 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { // data needed to prove evidence must not be removed. 
func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, error) { if height <= 0 { - return 0, -1, fmt.Errorf("height must be greater than 0") + return 0, -1, errors.New("height must be greater than 0") } bs.mtx.RLock() if height > bs.height { @@ -318,24 +491,19 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, batch := bs.db.NewBatch() defer batch.Close() flush := func(batch dbm.Batch, base int64) error { - // We can't trust batches to be atomic, so update base first to make sure noone + // We can't trust batches to be atomic, so update base first to make sure no one // tries to access missing blocks. bs.mtx.Lock() + defer batch.Close() + defer bs.mtx.Unlock() bs.base = base - bs.mtx.Unlock() - bs.saveState() - - err := batch.WriteSync() - if err != nil { - return fmt.Errorf("failed to prune up to height %v: %w", base, err) - } - batch.Close() - return nil + return bs.saveStateAndWriteDB(batch, "failed to prune") } + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "prune_blocks"), time.Now())() + evidencePoint := height for h := base; h < height; h++ { - meta := bs.LoadBlockMeta(h) if meta == nil { // assume already deleted continue @@ -350,26 +518,29 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, // if height is beyond the evidence point we dont delete the header if h < evidencePoint { - if err := batch.Delete(calcBlockMetaKey(h)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockMetaKey(h)); err != nil { return 0, -1, err } } - if err := batch.Delete(calcBlockHashKey(meta.BlockID.Hash)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockHashKey(meta.BlockID.Hash)); err != nil { return 0, -1, err } // if height is beyond the evidence point we dont delete the commit data if h < evidencePoint { - if err := batch.Delete(calcBlockCommitKey(h)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockCommitKey(h)); err != nil { return 0, -1, err } + bs.blockCommitCache.Remove(h) } - if err := batch.Delete(calcSeenCommitKey(h)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcSeenCommitKey(h)); err != nil { return 0, -1, err } + bs.seenCommitCache.Remove(h) for p := 0; p < int(meta.BlockID.PartSetHeader.Total); p++ { - if err := batch.Delete(calcBlockPartKey(h, p)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockPartKey(h, p)); err != nil { return 0, -1, err } + bs.blockPartCache.Remove(blockPartIndex{h, p}) } pruned++ @@ -388,7 +559,21 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, if err != nil { return 0, -1, err } - return pruned, evidencePoint, nil + bs.blocksDeleted += int64(pruned) + + if bs.compact && bs.blocksDeleted >= bs.compactionInterval { + // When the range is nil,nil, the database will try to compact + // ALL levels. Another option is to set a predefined range of + // specific keys. + err = bs.db.Compact(nil, nil) + if err == nil { + // If there was no error in compaction we reset the counter. + // Otherwise we preserve the number of blocks deleted so + // we can trigger compaction in the next pruning iteration + bs.blocksDeleted = 0 + } + } + return pruned, evidencePoint, err } // SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. @@ -399,15 +584,30 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, // we need this to reload the precommits to catch-up nodes to the // most recent height. 
Otherwise they'd stall at H-1. func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_block"), time.Now())() if block == nil { panic("BlockStore can only save a non-nil block") } - if err := bs.saveBlockToBatch(block, blockParts, seenCommit); err != nil { + + batch := bs.db.NewBatch() + defer batch.Close() + + if err := bs.saveBlockToBatch(block, blockParts, seenCommit, batch); err != nil { panic(err) } + bs.mtx.Lock() + defer bs.mtx.Unlock() + bs.height = block.Height + if bs.base == 0 { + bs.base = block.Height + } + // Save new BlockStoreState descriptor. This also flushes the database. - bs.saveState() + err := bs.saveStateAndWriteDB(batch, "failed to save block") + if err != nil { + panic(err) + } } // SaveBlockWithExtendedCommit persists the given block, blockParts, and @@ -416,28 +616,58 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s // height. This allows the vote extension data to be persisted for all blocks // that are saved. func (bs *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenExtendedCommit *types.ExtendedCommit) { + // WARN includes marshaling the blockstore state + start := time.Now() + if block == nil { panic("BlockStore can only save a non-nil block") } if err := seenExtendedCommit.EnsureExtensions(true); err != nil { panic(fmt.Errorf("problems saving block with extensions: %w", err)) } - if err := bs.saveBlockToBatch(block, blockParts, seenExtendedCommit.ToCommit()); err != nil { + + batch := bs.db.NewBatch() + defer batch.Close() + + if err := bs.saveBlockToBatch(block, blockParts, seenExtendedCommit.ToCommit(), batch); err != nil { panic(err) } height := block.Height + marshallingTime := time.Now() + pbec := seenExtendedCommit.ToProto() + extCommitBytes := mustEncode(pbec) - if err := bs.db.Set(calcExtCommitKey(height), extCommitBytes); err != nil { + + extCommitMarshallTDiff := time.Since(marshallingTime).Seconds() + + if err := batch.Set(bs.dbKeyLayout.CalcExtCommitKey(height), extCommitBytes); err != nil { panic(err) } + bs.mtx.Lock() + defer bs.mtx.Unlock() + bs.height = height + if bs.base == 0 { + bs.base = height + } + // Save new BlockStoreState descriptor. This also flushes the database. - bs.saveState() + err := bs.saveStateAndWriteDB(batch, "failed to save block with extended commit") + if err != nil { + panic(err) + } + + bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_block_ext_commit").Observe(time.Since(start).Seconds() - extCommitMarshallTDiff) } -func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) error { +func (bs *BlockStore) saveBlockToBatch( + block *types.Block, + blockParts *types.PartSet, + seenCommit *types.Commit, + batch dbm.Batch, +) error { if block == nil { panic("BlockStore can only save a non-nil block") } @@ -455,15 +685,23 @@ func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.Par return fmt.Errorf("BlockStore cannot save seen commit of a different height (block: %d, commit: %d)", height, seenCommit.Height) } + // If the block is small, batch save the block parts. Otherwise, save the + // parts individually. + saveBlockPartsToBatch := blockParts.Count() <= maxBlockPartsToBatch + + start := time.Now() + // Save block parts. 
This must be done before the block meta, since callers // typically load the block meta first as an indication that the block exists // and then go on to load block parts - we must make sure the block is // complete as soon as the block meta is written. for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - bs.saveBlockPart(height, i, part) + bs.saveBlockPart(height, i, part, batch, saveBlockPartsToBatch) + bs.blockPartCache.Add(blockPartIndex{height, i}, part) } + marshallTime := time.Now() // Save block meta blockMeta := types.NewBlockMeta(block, blockParts) pbm := blockMeta.ToProto() @@ -471,111 +709,109 @@ func (bs *BlockStore) saveBlockToBatch(block *types.Block, blockParts *types.Par return errors.New("nil blockmeta") } metaBytes := mustEncode(pbm) - if err := bs.db.Set(calcBlockMetaKey(height), metaBytes); err != nil { + blockMetaMarshallDiff := time.Since(marshallTime).Seconds() + + if err := batch.Set(bs.dbKeyLayout.CalcBlockMetaKey(height), metaBytes); err != nil { return err } - if err := bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { + if err := batch.Set(bs.dbKeyLayout.CalcBlockHashKey(hash), []byte(strconv.FormatInt(height, 10))); err != nil { return err } + marshallTime = time.Now() // Save block commit (duplicate and separate from the Block) pbc := block.LastCommit.ToProto() blockCommitBytes := mustEncode(pbc) - if err := bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes); err != nil { + + blockMetaMarshallDiff += time.Since(marshallTime).Seconds() + + if err := batch.Set(bs.dbKeyLayout.CalcBlockCommitKey(height-1), blockCommitBytes); err != nil { return err } + marshallTime = time.Now() + // Save seen commit (seen +2/3 precommits for block) // NOTE: we can delete this at a later height pbsc := seenCommit.ToProto() seenCommitBytes := mustEncode(pbsc) - if err := bs.db.Set(calcSeenCommitKey(height), seenCommitBytes); err != nil { - return err - } - // Done! - bs.mtx.Lock() - bs.height = height - if bs.base == 0 { - bs.base = height + blockMetaMarshallDiff += time.Since(marshallTime).Seconds() + if err := batch.Set(bs.dbKeyLayout.CalcSeenCommitKey(height), seenCommitBytes); err != nil { + return err } - bs.mtx.Unlock() + bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_block_to_batch").Observe(time.Since(start).Seconds() - blockMetaMarshallDiff) return nil } -func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { +func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, batch dbm.Batch, saveBlockPartsToBatch bool) { + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_block_part"), time.Now())() pbp, err := part.ToProto() if err != nil { panic(cmterrors.ErrMsgToProto{MessageName: "Part", Err: err}) } + partBytes := mustEncode(pbp) - if err := bs.db.Set(calcBlockPartKey(height, index), partBytes); err != nil { + + if saveBlockPartsToBatch { + err = batch.Set(bs.dbKeyLayout.CalcBlockPartKey(height, index), partBytes) + } else { + err = bs.db.Set(bs.dbKeyLayout.CalcBlockPartKey(height, index), partBytes) + } + if err != nil { panic(err) } } -func (bs *BlockStore) saveState() { - bs.mtx.RLock() +// Contract: the caller MUST have, at least, a read lock on `bs`. 
+func (bs *BlockStore) saveStateAndWriteDB(batch dbm.Batch, errMsg string) error { bss := cmtstore.BlockStoreState{ Base: bs.base, Height: bs.height, } - bs.mtx.RUnlock() - SaveBlockStoreState(&bss, bs.db) + start := time.Now() + + SaveBlockStoreState(&bss, batch) + + err := batch.WriteSync() + if err != nil { + return fmt.Errorf("error writing batch to DB %q: (base %d, height %d): %w", + errMsg, bs.base, bs.height, err) + } + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_bs_state"), start)() + + return nil } // SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node. func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error { pbc := seenCommit.ToProto() seenCommitBytes, err := proto.Marshal(pbc) + + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_seen_commit"), time.Now())() + if err != nil { return fmt.Errorf("unable to marshal commit: %w", err) } - return bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) + return bs.db.Set(bs.dbKeyLayout.CalcSeenCommitKey(height), seenCommitBytes) } func (bs *BlockStore) Close() error { return bs.db.Close() } -//----------------------------------------------------------------------------- - -func calcBlockMetaKey(height int64) []byte { - return []byte(fmt.Sprintf("H:%v", height)) -} - -func calcBlockPartKey(height int64, partIndex int) []byte { - return []byte(fmt.Sprintf("P:%v:%v", height, partIndex)) -} - -func calcBlockCommitKey(height int64) []byte { - return []byte(fmt.Sprintf("C:%v", height)) -} - -func calcSeenCommitKey(height int64) []byte { - return []byte(fmt.Sprintf("SC:%v", height)) -} - -func calcExtCommitKey(height int64) []byte { - return []byte(fmt.Sprintf("EC:%v", height)) -} - -func calcBlockHashKey(hash []byte) []byte { - return []byte(fmt.Sprintf("BH:%x", hash)) -} - -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- var blockStoreKey = []byte("blockStore") // SaveBlockStoreState persists the blockStore state to the database. -func SaveBlockStoreState(bsj *cmtstore.BlockStoreState, db dbm.DB) { +func SaveBlockStoreState(bsj *cmtstore.BlockStoreState, batch dbm.Batch) { bytes, err := proto.Marshal(bsj) if err != nil { panic(fmt.Sprintf("Could not marshal state bytes: %v", err)) } - if err := db.SetSync(blockStoreKey, bytes); err != nil { + if err := batch.Set(blockStoreKey, bytes); err != nil { panic(err) } } @@ -607,7 +843,7 @@ func LoadBlockStoreState(db dbm.DB) cmtstore.BlockStoreState { return bsj } -// mustEncode proto encodes a proto.message and panics if fails +// mustEncode proto encodes a proto.message and panics if fails. func mustEncode(pb proto.Message) []byte { bz, err := proto.Marshal(pb) if err != nil { @@ -616,11 +852,13 @@ func mustEncode(pb proto.Message) []byte { return bz } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // DeleteLatestBlock removes the block pointed to by height, // lowering height by one. 
func (bs *BlockStore) DeleteLatestBlock() error { + defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "delete_latest_block"), time.Now())() + bs.mtx.RLock() targetHeight := bs.height bs.mtx.RUnlock() @@ -631,34 +869,38 @@ func (bs *BlockStore) DeleteLatestBlock() error { // delete what we can, skipping what's already missing, to ensure partial // blocks get deleted fully. if meta := bs.LoadBlockMeta(targetHeight); meta != nil { - if err := batch.Delete(calcBlockHashKey(meta.BlockID.Hash)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockHashKey(meta.BlockID.Hash)); err != nil { return err } for p := 0; p < int(meta.BlockID.PartSetHeader.Total); p++ { - if err := batch.Delete(calcBlockPartKey(targetHeight, p)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockPartKey(targetHeight, p)); err != nil { return err } } } - if err := batch.Delete(calcBlockCommitKey(targetHeight)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockCommitKey(targetHeight)); err != nil { return err } - if err := batch.Delete(calcSeenCommitKey(targetHeight)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcSeenCommitKey(targetHeight)); err != nil { return err } // delete last, so as to not leave keys built on meta.BlockID dangling - if err := batch.Delete(calcBlockMetaKey(targetHeight)); err != nil { + if err := batch.Delete(bs.dbKeyLayout.CalcBlockMetaKey(targetHeight)); err != nil { return err } bs.mtx.Lock() + defer bs.mtx.Unlock() bs.height = targetHeight - 1 - bs.mtx.Unlock() - bs.saveState() + return bs.saveStateAndWriteDB(batch, "failed to delete the latest block") +} - err := batch.WriteSync() - if err != nil { - return fmt.Errorf("failed to delete height %v: %w", targetHeight, err) +// addTimeSample returns a function that, when called, adds an observation to m. +// The observation added to m is the number of seconds elapsed since addTimeSample +// was initially called. addTimeSample is meant to be called in a defer to calculate +// the amount of time a function takes to complete. 
+func addTimeSample(h metrics.Histogram, start time.Time) func() { + return func() { + h.Observe(time.Since(start).Seconds()) } - return nil } diff --git a/store/store_test.go b/store/store_test.go index f2c37755753..1f1dda6136e 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -1,6 +1,7 @@ package store import ( + "encoding/json" "fmt" "os" "runtime/debug" @@ -8,41 +9,47 @@ import ( "testing" "time" - cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/state/indexer" - "github.com/cometbft/cometbft/state/indexer/block" - "github.com/cometbft/cometbft/state/txindex" "github.com/cosmos/gogoproto/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/cometbft/cometbft-db" - + cmtstore "github.com/cometbft/cometbft/api/cometbft/store/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" + cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" sm "github.com/cometbft/cometbft/state" + "github.com/cometbft/cometbft/state/indexer" + "github.com/cometbft/cometbft/state/indexer/block" + "github.com/cometbft/cometbft/state/txindex" "github.com/cometbft/cometbft/types" cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) // make an extended commit with a single vote containing just the height and a -// timestamp +// timestamp. func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit { - extCommitSigs := []types.ExtendedCommitSig{{ - CommitSig: types.CommitSig{ - BlockIDFlag: types.BlockIDFlagCommit, - ValidatorAddress: cmtrand.Bytes(crypto.AddressSize), - Timestamp: timestamp, - Signature: []byte("Signature"), - }, - ExtensionSignature: []byte("ExtensionSignature"), - }} + return makeTestExtCommitWithNumSigs(height, timestamp, 1) +} + +func makeTestExtCommitWithNumSigs(height int64, timestamp time.Time, numSigs int) *types.ExtendedCommit { + extCommitSigs := []types.ExtendedCommitSig{} + for i := 0; i < numSigs; i++ { + extCommitSigs = append(extCommitSigs, types.ExtendedCommitSig{ + CommitSig: types.CommitSig{ + BlockIDFlag: types.BlockIDFlagCommit, + ValidatorAddress: cmtrand.Bytes(crypto.AddressSize), + Timestamp: timestamp, + Signature: cmtrand.Bytes(64), + }, + ExtensionSignature: []byte("ExtensionSignature"), + }) + } return &types.ExtendedCommit{ Height: height, BlockID: types.BlockID{ @@ -65,7 +72,7 @@ func makeStateAndBlockStoreAndIndexers() (sm.State, *BlockStore, txindex.TxIndex panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } - txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, "test") + txIndexer, blockIndexer, _, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, "test") if err != nil { panic(err) } @@ -91,9 +98,14 @@ func TestLoadBlockStoreState(t *testing.T) { for _, tc := range testCases { db := dbm.NewMemDB() - SaveBlockStoreState(tc.bss, db) + batch := db.NewBatch() + SaveBlockStoreState(tc.bss, batch) + err := batch.WriteSync() + require.NoError(t, err) retrBSJ := LoadBlockStoreState(db) assert.Equal(t, tc.want, retrBSJ, "expected the retrieved DBs to match: %s", 
tc.testName) + err = batch.Close() + require.NoError(t, err) } } @@ -116,22 +128,21 @@ func TestNewBlockStore(t *testing.T) { } for i, tt := range panicCausers { - tt := tt // Expecting a panic here on trying to parse an invalid blockStore - _, _, panicErr := doFn(func() (interface{}, error) { + _, _, panicErr := doFn(func() (any, error) { err := db.Set(blockStoreKey, tt.data) require.NoError(t, err) _ = NewBlockStore(db) return nil, nil }) - require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) + require.Error(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data) } err = db.Set(blockStoreKey, []byte{}) require.NoError(t, err) bs = NewBlockStore(db) - assert.Equal(t, bs.Height(), int64(0), "expecting empty bytes to be unmarshaled alright") + assert.Equal(t, int64(0), bs.Height(), "expecting empty bytes to be unmarshaled alright") } func newInMemoryBlockStore() (*BlockStore, dbm.DB) { @@ -144,21 +155,23 @@ func newInMemoryBlockStore() (*BlockStore, dbm.DB) { func TestBlockStoreSaveLoadBlock(t *testing.T) { state, bs, _, _, cleanup, _ := makeStateAndBlockStoreAndIndexers() defer cleanup() - require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") - require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") + require.Equal(t, int64(0), bs.Base(), "initially the base should be zero") + require.Equal(t, int64(0), bs.Height(), "initially the height should be zero") // check there are no blocks at various heights noBlockHeights := []int64{0, -1, 100, 1000, 2} for i, height := range noBlockHeights { - if g := bs.LoadBlock(height); g != nil { + if g, _ := bs.LoadBlock(height); g != nil { t.Errorf("#%d: height(%d) got a block; want nil", i, height) } } - // save a block - block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) - validPartSet, err := block.MakePartSet(2) + // save a block big enough to have two block parts + txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone + block := state.MakeBlock(bs.Height()+1, txs, new(types.Commit), nil, state.Validators.GetProposer().Address) + validPartSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) + require.GreaterOrEqual(t, validPartSet.Total(), uint32(2)) part2 := validPartSet.GetPart(1) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) @@ -281,39 +294,37 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } for i, tuple := range tuples { - tuple := tuple bs, db := newInMemoryBlockStore() // SaveBlock - res, err, panicErr := doFn(func() (interface{}, error) { + res, err, panicErr := doFn(func() (any, error) { bs.SaveBlockWithExtendedCommit(tuple.block, tuple.parts, tuple.seenCommit) if tuple.block == nil { return nil, nil } if tuple.corruptBlockInDB { - err := db.Set(calcBlockMetaKey(tuple.block.Height), []byte("block-bogus")) + err := db.Set(bs.dbKeyLayout.CalcBlockMetaKey(tuple.block.Height), []byte("block-bogus")) require.NoError(t, err) } - bBlock := bs.LoadBlock(tuple.block.Height) - bBlockMeta := bs.LoadBlockMeta(tuple.block.Height) + bBlock, bBlockMeta := bs.LoadBlock(tuple.block.Height) if tuple.eraseSeenCommitInDB { - err := db.Delete(calcSeenCommitKey(tuple.block.Height)) + err := db.Delete(bs.dbKeyLayout.CalcSeenCommitKey(tuple.block.Height)) require.NoError(t, err) } if tuple.corruptSeenCommitInDB { - err := 
db.Set(calcSeenCommitKey(tuple.block.Height), []byte("bogus-seen-commit")) + err := db.Set(bs.dbKeyLayout.CalcSeenCommitKey(tuple.block.Height), []byte("bogus-seen-commit")) require.NoError(t, err) } bSeenCommit := bs.LoadSeenCommit(tuple.block.Height) commitHeight := tuple.block.Height - 1 if tuple.eraseCommitInDB { - err := db.Delete(calcBlockCommitKey(commitHeight)) + err := db.Delete(bs.dbKeyLayout.CalcBlockCommitKey(commitHeight)) require.NoError(t, err) } if tuple.corruptCommitInDB { - err := db.Set(calcBlockCommitKey(commitHeight), []byte("foo-bogus")) + err := db.Set(bs.dbKeyLayout.CalcBlockCommitKey(commitHeight), []byte("foo-bogus")) require.NoError(t, err) } bCommit := bs.LoadBlockCommit(commitHeight) @@ -339,8 +350,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { continue } - assert.Nil(t, panicErr, "#%d: unexpected panic", i) - assert.Nil(t, err, "#%d: expecting a non-nil error", i) + require.NoError(t, panicErr, "#%d: unexpected panic", i) + require.NoError(t, err, "#%d: expecting a non-nil error", i) qua, ok := res.(*quad) if !ok || qua == nil { t.Errorf("#%d: got nil quad back; gotType=%T", i, res) @@ -400,7 +411,7 @@ func TestSaveBlockWithExtendedCommitPanicOnAbsentExtension(t *testing.T) { block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) - ps, err := block.MakePartSet(2) + ps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) testCase.malleateCommit(seenCommit) if testCase.shouldPanic { @@ -440,7 +451,7 @@ func TestLoadBlockExtendedCommit(t *testing.T) { h := bs.Height() + 1 block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) - ps, err := block.MakePartSet(2) + ps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) if testCase.saveExtended { bs.SaveBlockWithExtendedCommit(block, ps, seenCommit) @@ -469,7 +480,7 @@ func TestLoadBaseMeta(t *testing.T) { for h := int64(1); h <= 10; h++ { block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) @@ -491,7 +502,7 @@ func TestLoadBlockPart(t *testing.T) { bs, db := newInMemoryBlockStore() const height, index = 10, 1 - loadPart := func() (interface{}, error) { + loadPart := func() (any, error) { part := bs.LoadBlockPart(height, index) return part, nil } @@ -502,30 +513,36 @@ func TestLoadBlockPart(t *testing.T) { // Initially no contents. // 1. Requesting for a non-existent block shouldn't fail res, _, panicErr := doFn(loadPart) - require.Nil(t, panicErr, "a non-existent block part shouldn't cause a panic") + require.NoError(t, panicErr, "a non-existent block part shouldn't cause a panic") require.Nil(t, res, "a non-existent block part should return nil") // 2. 
Next save a corrupted block then try to load it - err = db.Set(calcBlockPartKey(height, index), []byte("CometBFT")) + err = db.Set(bs.dbKeyLayout.CalcBlockPartKey(height, index), []byte("CometBFT")) require.NoError(t, err) res, _, panicErr = doFn(loadPart) - require.NotNil(t, panicErr, "expecting a non-nil panic") + require.Error(t, panicErr, "expecting a non-nil panic") require.Contains(t, panicErr.Error(), "unmarshal to cmtproto.Part failed") // 3. A good block serialized and saved to the DB should be retrievable block := state.MakeBlock(height, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) part1 := partSet.GetPart(0) pb1, err := part1.ToProto() require.NoError(t, err) - err = db.Set(calcBlockPartKey(height, index), mustEncode(pb1)) + err = db.Set(bs.dbKeyLayout.CalcBlockPartKey(height, index), mustEncode(pb1)) require.NoError(t, err) gotPart, _, panicErr := doFn(loadPart) - require.Nil(t, panicErr, "an existent and proper block should not panic") + require.NoError(t, panicErr, "an existent and proper block should not panic") require.Nil(t, res, "a properly saved block should return a proper block") - require.Equal(t, gotPart.(*types.Part), part1, + + // Having to do this because of https://github.com/stretchr/testify/issues/1141 + gotPartJSON, err := json.Marshal(gotPart.(*types.Part)) + require.NoError(t, err) + part1JSON, err := json.Marshal(part1) + require.NoError(t, err) + require.JSONEq(t, string(gotPartJSON), string(part1JSON), "expecting successful retrieval of previously saved block") } @@ -553,7 +570,7 @@ func (o *prunerObserver) PrunerPrunedBlocks(info *sm.BlocksPrunedInfo) { // This test tests the pruning service and its pruning of the blockstore // The state store cannot be pruned here because we do not have proper // state stored. The test is expected to pass even though the log should -// inform about the inability to prune the state store +// inform about the inability to prune the state store. func TestPruningService(t *testing.T) { config := test.ResetTestRoot("blockchain_reactor_pruning_test") defer os.RemoveAll(config.RootDir) @@ -587,7 +604,7 @@ func TestPruningService(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) @@ -597,18 +614,18 @@ func TestPruningService(t *testing.T) { assert.EqualValues(t, 1500, bs.Height()) assert.EqualValues(t, 1500, bs.Size()) - state.LastBlockTime = time.Date(2020, 1, 1, 1, 0, 0, 0, time.UTC) + state.LastBlockTime = cmttime.Now().Add(24 * time.Hour) state.LastBlockHeight = 1500 state.ConsensusParams.Evidence.MaxAgeNumBlocks = 400 - state.ConsensusParams.Evidence.MaxAgeDuration = 1 * time.Second + state.ConsensusParams.Evidence.MaxAgeDuration = 1 * time.Minute pk := ed25519.GenPrivKey().PubKey() // Generate a bunch of state data. 
// This is needed because the pruning is expecting to load the state from the database thus // We have to have acceptable values for all fields of the state - validator := &types.Validator{Address: cmtrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} + validator := &types.Validator{Address: pk.Address(), VotingPower: 100, PubKey: pk} validatorSet := &types.ValidatorSet{ Validators: []*types.Validator{validator}, Proposer: validator, @@ -636,8 +653,12 @@ func TestPruningService(t *testing.T) { assert.EqualValues(t, 1200, bs.Base()) assert.EqualValues(t, 1500, bs.Height()) assert.EqualValues(t, 301, bs.Size()) - require.NotNil(t, bs.LoadBlock(1200)) - require.Nil(t, bs.LoadBlock(1199)) + block, meta := bs.LoadBlock(1200) + require.NotNil(t, block) + require.NotNil(t, meta) + block, meta = bs.LoadBlock(1199) + require.Nil(t, block) + require.Nil(t, meta) // The header and commit for heights 1100 onwards // need to remain to verify evidence require.NotNil(t, bs.LoadBlockMeta(1100)) @@ -645,10 +666,14 @@ func TestPruningService(t *testing.T) { require.NotNil(t, bs.LoadBlockCommit(1100)) require.Nil(t, bs.LoadBlockCommit(1099)) for i := int64(1); i < 1200; i++ { - require.Nil(t, bs.LoadBlock(i)) + block, meta = bs.LoadBlock(i) + require.Nil(t, block) + require.Nil(t, meta) } for i := int64(1200); i <= 1500; i++ { - require.NotNil(t, bs.LoadBlock(i)) + block, meta = bs.LoadBlock(i) + require.NotNil(t, block) + require.NotNil(t, meta) } t.Log("Done pruning blocks until height 1200") @@ -669,7 +694,7 @@ func TestPruningService(t *testing.T) { require.NoError(t, err) err = pruner.SetCompanionBlockRetainHeight(1350) - assert.NoError(t, err) + require.NoError(t, err) select { case <-obs.prunedBlocksResInfoCh: @@ -697,10 +722,18 @@ func TestPruningService(t *testing.T) { case <-obs.prunedBlocksResInfoCh: // But we will prune only until 1350 because that was the Companions height // and it is lower - assert.Nil(t, bs.LoadBlock(1349)) - assert.NotNil(t, bs.LoadBlock(1350), fmt.Sprintf("expected block at height 1350 to be there, but it was not; block store base height = %d", bs.Base())) - assert.NotNil(t, bs.LoadBlock(1500)) - assert.Nil(t, bs.LoadBlock(1501)) + block, meta := bs.LoadBlock(1349) + assert.Nil(t, block) + assert.Nil(t, meta) + block, meta = bs.LoadBlock(1350) + assert.NotNil(t, block, fmt.Sprintf("expected block at height 1350 to be there, but it was not; block store base height = %d", bs.Base())) + assert.NotNil(t, meta) + block, meta = bs.LoadBlock(1500) + assert.NotNil(t, block) + assert.NotNil(t, meta) + block, meta = bs.LoadBlock(1501) + assert.Nil(t, block) + assert.Nil(t, meta) t.Log("Done pruning blocks until 1500") case <-time.After(5 * time.Second): @@ -732,7 +765,7 @@ func TestPruneBlocks(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) @@ -742,11 +775,11 @@ func TestPruneBlocks(t *testing.T) { assert.EqualValues(t, 1500, bs.Height()) assert.EqualValues(t, 1500, bs.Size()) - state.LastBlockTime = time.Date(2020, 1, 1, 1, 0, 0, 0, time.UTC) + state.LastBlockTime = cmttime.Now().Add(24 * time.Hour) state.LastBlockHeight = 1500 
state.ConsensusParams.Evidence.MaxAgeNumBlocks = 400 - state.ConsensusParams.Evidence.MaxAgeDuration = 1 * time.Second + state.ConsensusParams.Evidence.MaxAgeDuration = 1 * time.Minute // Check that basic pruning works pruned, evidenceRetainHeight, err := bs.PruneBlocks(1200, state) @@ -757,8 +790,12 @@ func TestPruneBlocks(t *testing.T) { assert.EqualValues(t, 301, bs.Size()) assert.EqualValues(t, 1100, evidenceRetainHeight) - require.NotNil(t, bs.LoadBlock(1200)) - require.Nil(t, bs.LoadBlock(1199)) + block, meta := bs.LoadBlock(1200) + require.NotNil(t, block) + require.NotNil(t, meta) + block, meta = bs.LoadBlock(1199) + require.Nil(t, block) + require.Nil(t, meta) // The header and commit for heights 1100 onwards // need to remain to verify evidence @@ -768,10 +805,14 @@ func TestPruneBlocks(t *testing.T) { require.Nil(t, bs.LoadBlockCommit(1099)) for i := int64(1); i < 1200; i++ { - require.Nil(t, bs.LoadBlock(i)) + block, meta = bs.LoadBlock(i) + require.Nil(t, block) + require.Nil(t, meta) } for i := int64(1200); i <= 1500; i++ { - require.NotNil(t, bs.LoadBlock(i)) + block, meta = bs.LoadBlock(i) + require.NotNil(t, block) + require.NotNil(t, meta) } // Pruning below the current base should error @@ -804,15 +845,21 @@ func TestPruneBlocks(t *testing.T) { pruned, _, err = bs.PruneBlocks(1500, state) require.NoError(t, err) assert.EqualValues(t, 200, pruned) - assert.Nil(t, bs.LoadBlock(1499)) - assert.NotNil(t, bs.LoadBlock(1500)) - assert.Nil(t, bs.LoadBlock(1501)) + block, meta = bs.LoadBlock(1499) + assert.Nil(t, block) + assert.Nil(t, meta) + block, meta = bs.LoadBlock(1500) + assert.NotNil(t, block) + assert.NotNil(t, meta) + block, meta = bs.LoadBlock(1501) + assert.Nil(t, block) + assert.Nil(t, meta) } func TestLoadBlockMeta(t *testing.T) { bs, db := newInMemoryBlockStore() height := int64(10) - loadMeta := func() (interface{}, error) { + loadMeta := func() (any, error) { meta := bs.LoadBlockMeta(height) return meta, nil } @@ -820,14 +867,14 @@ func TestLoadBlockMeta(t *testing.T) { // Initially no contents. // 1. Requesting for a non-existent blockMeta shouldn't fail res, _, panicErr := doFn(loadMeta) - require.Nil(t, panicErr, "a non-existent blockMeta shouldn't cause a panic") + require.NoError(t, panicErr) require.Nil(t, res, "a non-existent blockMeta should return nil") // 2. Next save a corrupted blockMeta then try to load it - err := db.Set(calcBlockMetaKey(height), []byte("CometBFT-Meta")) + err := db.Set(bs.dbKeyLayout.CalcBlockMetaKey(height), []byte("CometBFT-Meta")) require.NoError(t, err) res, _, panicErr = doFn(loadMeta) - require.NotNil(t, panicErr, "expecting a non-nil panic") + require.Error(t, panicErr) require.Contains(t, panicErr.Error(), "unmarshal to cmtproto.BlockMeta") // 3. 
A good blockMeta serialized and saved to the DB should be retrievable @@ -837,10 +884,10 @@ func TestLoadBlockMeta(t *testing.T) { }, Height: 1, ProposerAddress: cmtrand.Bytes(crypto.AddressSize), }} pbm := meta.ToProto() - err = db.Set(calcBlockMetaKey(height), mustEncode(pbm)) + err = db.Set(bs.dbKeyLayout.CalcBlockMetaKey(height), mustEncode(pbm)) require.NoError(t, err) gotMeta, _, panicErr := doFn(loadMeta) - require.Nil(t, panicErr, "an existent and proper block should not panic") + require.NoError(t, panicErr, "an existent and proper block should not panic") require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ") pbmeta := meta.ToProto() if gmeta, ok := gotMeta.(*types.BlockMeta); ok { @@ -861,7 +908,7 @@ func TestLoadBlockMetaByHash(t *testing.T) { bs := NewBlockStore(dbm.NewMemDB()) b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := b1.MakePartSet(2) + partSet, err := b1.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(1, cmttime.Now()) bs.SaveBlock(b1, partSet, seenCommit.ToCommit()) @@ -875,16 +922,16 @@ func TestLoadBlockMetaByHash(t *testing.T) { func TestBlockFetchAtHeight(t *testing.T) { state, bs, _, _, cleanup, _ := makeStateAndBlockStoreAndIndexers() defer cleanup() - require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") + require.Equal(t, int64(0), bs.Height(), "initially the height should be zero") block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) - partSet, err := block.MakePartSet(2) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) bs.SaveBlockWithExtendedCommit(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") - blockAtHeight := bs.LoadBlock(bs.Height()) + blockAtHeight, _ := bs.LoadBlock(bs.Height()) b1, err := block.ToProto() require.NoError(t, err) b2, err := blockAtHeight.ToProto() @@ -895,13 +942,13 @@ func TestBlockFetchAtHeight(t *testing.T) { require.Equal(t, block.Hash(), blockAtHeight.Hash(), "expecting a successful load of the last saved block") - blockAtHeightPlus1 := bs.LoadBlock(bs.Height() + 1) + blockAtHeightPlus1, _ := bs.LoadBlock(bs.Height() + 1) require.Nil(t, blockAtHeightPlus1, "expecting an unsuccessful load of Height()+1") - blockAtHeightPlus2 := bs.LoadBlock(bs.Height() + 2) + blockAtHeightPlus2, _ := bs.LoadBlock(bs.Height() + 2) require.Nil(t, blockAtHeightPlus2, "expecting an unsuccessful load of Height()+2") } -func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr error) { +func doFn(fn func() (any, error)) (res any, err error, panicErr error) { defer func() { if r := recover(); r != nil { switch e := r.(type) { diff --git a/test/README.md b/test/README.md index 7437fd2d753..e823f84c357 100644 --- a/test/README.md +++ b/test/README.md @@ -1,7 +1,12 @@ # CometBFT Tests -The unit tests (ie. the `go test` s) can be run with `make test`. -The integration tests can be run with `make test_integrations`. +## Unit tests + +The unit tests (ie. `go test`) can be run with `make test` from the root directory of the repository. + +## Integration tests + +The integration tests can be run with `make test_integrations` from the root directory of the repository. 
Running the integrations test will build a docker container with local version of CometBFT and run the following tests in docker containers: @@ -13,6 +18,24 @@ and run the following tests in docker containers: - persistence tests - crash cometbft at each of many predefined points, restart, and ensure it syncs properly with the app +## End-to-end tests + +You can run e2e nightly tests locally by running: + +```sh +cd test/e2e +make && ./build/generator -g 5 -d networks/nightly/ -p && ./run-multiple.sh networks/nightly/*-group*-*.toml +``` + +If you just want a simple 4-node network, you can run: + +```sh +cd test/e2e +make && ./build/runner -f networks/simple.toml +``` + +Please refer to the [README.MD](e2e/README.md) in the `e2e` folder for more information. + ## Fuzzing [Fuzzing](https://en.wikipedia.org/wiki/Fuzzing) of various system inputs. diff --git a/test/app/clean.sh b/test/app/clean.sh index 31cb7517310..2f03b4ca31d 100755 --- a/test/app/clean.sh +++ b/test/app/clean.sh @@ -1,3 +1,8 @@ +#! /bin/bash + +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + killall cometbft killall abci-cli -rm -rf ~/.cometbft_app +rm -rf ~/.cometbft diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh index 005004304aa..ad70dd698f0 100755 --- a/test/app/counter_test.sh +++ b/test/app/counter_test.sh @@ -1,8 +1,8 @@ #! /bin/bash -export GO111MODULE=on - -set -u +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes ##################### # counter over socket @@ -19,8 +19,8 @@ function getCode() { echo -1 fi - if [[ $(echo $R | jq 'has("code")') == "true" ]]; then - # this wont actually work if theres an error ... + if [[ $(echo "$R" | jq 'has("code")') == "true" ]]; then + # this won't actually work if there's an error ... echo "$R" | jq ".code" else # protobuf auto adds `omitempty` to everything so code OK and empty data/log @@ -94,7 +94,7 @@ fi echo "... sending tx. expect error" # second time should get rejected by the mempool (return error and non-zero code) -sendTx $TX true +sendTx "$TX" true echo "... sending tx. expect no error" diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh index 034e28878d5..7efd4f47a8c 100755 --- a/test/app/kvstore_test.sh +++ b/test/app/kvstore_test.sh @@ -1,9 +1,11 @@ #! /bin/bash -set -ex -function toHex() { - echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}' +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +function toHex() { + echo -n "$1" | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}' } ##################### @@ -14,12 +16,11 @@ TESTNAME=$1 # store key value pair KEY="abcd" VALUE="dcba" -echo $(toHex $KEY=$VALUE) -curl -s 127.0.0.1:26657/broadcast_tx_commit?tx=$(toHex $KEY=$VALUE) +toHex $KEY=$VALUE +curl -s 127.0.0.1:26657/broadcast_tx_commit?tx="$(toHex $KEY=$VALUE)" echo $? echo "" - ########################### # test using the abci-cli ########################### @@ -27,27 +28,21 @@ echo "" echo "... testing query with abci-cli" # we should be able to look up the key -RESPONSE=`abci-cli query \"$KEY\"` +RESPONSE=$(abci-cli query \"$KEY\") -set +e -A=`echo $RESPONSE | grep "$VALUE"` -if [[ $? != 0 ]]; then +if ! grep -q "$VALUE" <<< "$RESPONSE"; then echo "Failed to find $VALUE for $KEY. 
Response:" echo "$RESPONSE" exit 1 fi -set -e # we should not be able to look up the value -RESPONSE=`abci-cli query \"$VALUE\"` -set +e -A=`echo $RESPONSE | grep \"value: $VALUE\"` -if [[ $? == 0 ]]; then - echo "Found '$VALUE' for $VALUE when we should not have. Response:" +RESPONSE=$(abci-cli query \"$VALUE\") +if grep -q "value: $VALUE" <<< "$RESPONSE"; then + echo "Found 'value: $VALUE' for $VALUE when we should not have. Response:" echo "$RESPONSE" exit 1 fi -set -e ############################# # test using the /abci_query @@ -56,29 +51,22 @@ set -e echo "... testing query with /abci_query 2" # we should be able to look up the key -RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` +RESPONSE=$(curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false") +RESPONSE=$(echo "$RESPONSE" | jq .result.response.log) -set +e -A=`echo $RESPONSE | grep 'exists'` -if [[ $? != 0 ]]; then +if ! grep -q "exists" <<< "$RESPONSE"; then echo "Failed to find 'exists' for $KEY. Response:" echo "$RESPONSE" exit 1 fi -set -e # we should not be able to look up the value -RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` -set +e -A=`echo $RESPONSE | grep 'exists'` -if [[ $? == 0 ]]; then - echo "Found 'exists' for $VALUE when we should not have. Response:" +RESPONSE=$(curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false") +RESPONSE=$(echo "$RESPONSE" | jq .result.response.log) +if ! grep -q "does not exist" <<< "$RESPONSE"; then + echo "Failed to find 'does not exist' for $VALUE. Response:" echo "$RESPONSE" exit 1 fi -set -e - echo "Passed Test: $TESTNAME" diff --git a/test/app/test.sh b/test/app/test.sh index 1c55a7061b4..d0782240b45 100755 --- a/test/app/test.sh +++ b/test/app/test.sh @@ -1,15 +1,15 @@ #! /bin/bash -set -ex -#- kvstore over socket, curl +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes -# TODO: install everything +#- kvstore over socket, curl -export PATH="$GOBIN:$PATH" -export CMTHOME=$HOME/.cometbft_app +export CMTHOME=$HOME/.cometbft function kvstore_over_socket(){ - rm -rf $CMTHOME + rm -rf "$CMTHOME" cometbft init echo "Starting kvstore_over_socket" abci-cli kvstore > /dev/null & @@ -26,7 +26,7 @@ function kvstore_over_socket(){ # start cometbft first function kvstore_over_socket_reorder(){ - rm -rf $CMTHOME + rm -rf "$CMTHOME" cometbft init echo "Starting kvstore_over_socket_reorder (ie. 
start cometbft first)" cometbft node > cometbft.log & @@ -42,17 +42,5 @@ function kvstore_over_socket_reorder(){ kill -9 $pid_kvstore $pid_cometbft } -case "$1" in - "kvstore_over_socket") - kvstore_over_socket - ;; -"kvstore_over_socket_reorder") - kvstore_over_socket_reorder - ;; -*) - echo "Running all" - kvstore_over_socket - echo "" - kvstore_over_socket_reorder - echo "" -esac +kvstore_over_socket +kvstore_over_socket_reorder diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile deleted file mode 100644 index 3dd11ce7ead..00000000000 --- a/test/docker/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM golang:1.15 - -# Grab deps (jq, hexdump, xxd, killall) -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - jq bsdmainutils vim-common psmisc netcat curl - -# Setup CometBFT repo -ENV REPO $GOPATH/src/github.com/cometbft/cometbft -ENV GOBIN $GOPATH/bin -WORKDIR $REPO - -# Copy in the code -# TODO: rewrite to only copy Makefile & other files? -COPY . $REPO - -# Install the vendored dependencies -# docker caching prevents reinstall on code change! -RUN make tools - -# install ABCI CLI -RUN make install_abci - -# install CometBFT -RUN make install - -RUN cometbft testnet \ - --config $REPO/test/docker/config-template.toml \ - --node-dir-prefix="mach" \ - --v=4 \ - --populate-persistent-peers=false \ - --o=$REPO/test/p2p/data - -# Now copy in the code -# NOTE: this will overwrite whatever is in vendor/ -COPY . $REPO - -# expose the volume for debugging -VOLUME $REPO - -EXPOSE 26656 -EXPOSE 26657 diff --git a/test/docker/build.sh b/test/docker/build.sh deleted file mode 100644 index 39df0872071..00000000000 --- a/test/docker/build.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash - -docker build -t tester -f ./test/docker/Dockerfile . diff --git a/test/docker/config-template.toml b/test/docker/config-template.toml deleted file mode 100644 index a90eb7bd5f0..00000000000 --- a/test/docker/config-template.toml +++ /dev/null @@ -1,2 +0,0 @@ -[rpc] -laddr = "tcp://0.0.0.0:26657" diff --git a/test/e2e/.gitignore b/test/e2e/.gitignore new file mode 100644 index 00000000000..d430b1edcba --- /dev/null +++ b/test/e2e/.gitignore @@ -0,0 +1,3 @@ +monitoring/data-grafana +monitoring/data-prometheus +monitoring/prometheus.yml diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 4cc085881e8..cecf3cc5290 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -1,26 +1,32 @@ -COMETBFT_BUILD_OPTIONS += badgerdb,boltdb,cleveldb,rocksdb +COMETBFT_BUILD_OPTIONS += badgerdb,rocksdb,clock_skew,bls12381 +IMAGE_TAG=cometbft/e2e-node:local-version include ../../common.mk all: docker generator runner -docker: +fast: docker-fast generator runner + +# This will set up a container with all required dependencies for compiling, copy all the source +# code to the container, and compile the binary inside it. +docker: docker-clean @echo "Building E2E Docker image" - @docker build \ - --tag cometbft/e2e-node:local-version \ - -f docker/Dockerfile ../.. + @docker build --tag $(IMAGE_TAG) -f docker/Dockerfile ../.. -docker-debug: +docker-debug: docker-clean @echo "Building E2E Docker image for debugging" - @docker build \ - --tag cometbft/e2e-node:local-version \ - -f docker/Dockerfile.debug ../.. + @docker build --tag $(IMAGE_TAG) -f docker/Dockerfile.debug ../.. + +# This will compile a binary to be executed in a container, set up a slim container, and copy only +# the binary to it. 
+docker-fast: docker-clean + @echo "Compiling binary for slim E2E Docker image" + @CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o build/node ./node + @echo "Building slim E2E Docker image" + @docker build --tag $(IMAGE_TAG) -f docker/Dockerfile.fast . -docker-fast: - @echo "Building fast-prototyping E2E Docker image" - @docker build \ - --tag cometbft/e2e-node:local-version \ - -f docker/Dockerfile.fast . +docker-clean: + @docker rmi $(IMAGE_TAG) 2>/dev/null; true # We need to build support for database backends into the app in # order to build a binary with a CometBFT node in it (for built-in @@ -28,18 +34,21 @@ docker-fast: node: go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/node ./node -# There is some redundancy between this and the ../../Makefile#build-linux -node-fast: - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o build/node ./node - generator: go build -o build/generator ./generator runner: - go build -o build/runner ./runner + go build -tags bls12381 -o build/runner ./runner + +lint: + @echo "--> Running linter for E2E" + @go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run grammar-gen: - go run github.com/goccmack/gogll/v3@latest -o pkg/grammar/clean-start/grammar-auto pkg/grammar/clean-start/abci_grammar_clean_start.md - go run github.com/goccmack/gogll/v3@latest -o pkg/grammar/recovery/grammar-auto pkg/grammar/recovery/abci_grammar_recovery.md + go run github.com/goccmack/gogll/v3@latest -o pkg/grammar/grammar-auto pkg/grammar/abci_grammar.md + +clean: docker-clean + rm -drf build/ + rm -drf data/ -.PHONY: all node docker generator runner grammar-gen +.PHONY: all fast node docker docker-debug docker-fast docker-clean generator runner lint grammar-gen clean diff --git a/test/e2e/README.md index 9427aa9934a..5f4b34af5bb 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -1,5 +1,18 @@ # End-to-End Tests +- [End-to-End Tests](#end-to-end-tests) + - [Fast compilation](#fast-compilation) + - [Conceptual Overview](#conceptual-overview) + - [Testnet Manifests](#testnet-manifests) + - [Random Testnet Generation](#random-testnet-generation) + - [Test Stages](#test-stages) + - [Tests](#tests) + - [Running Manual Tests](#running-manual-tests) + - [Debugging Failures](#debugging-failures) + - [Enabling IPv6](#enabling-ipv6) + - [Benchmarking Testnets](#benchmarking-testnets) + - [Running Individual Nodes](#running-individual-nodes) + Spins up and tests CometBFT networks in Docker Compose based on a testnet manifest. To run the CI testnet: ```sh @@ -9,9 +22,31 @@ make This creates and runs a testnet named `ci` under `networks/ci/`. +To generate the testnet files in a different directory, run: +```sh +./build/runner -f networks/ci.toml -d networks/foo/bar/ +``` + +### Fast compilation + +If you need to run experiments on a testnet, you will probably want to recompile the code multiple +times, and `make` can be slow. This is because `make` builds an image by first copying all the +source code into it and then compiling the binary from inside. This is needed if, for example, you +want to create a binary that uses a different database (as in `networks/ci.toml`). + +If you just need to (re-)compile and run the binary without any extra build options, you can use +`make fast`, which will first compile the code and then make a slim Docker image with the binary.
+For example: +```sh +make fast +./build/runner -f networks/simple.toml +``` + ## Conceptual Overview -End-to-end testnets are used to test Tendermint functionality as a user would use it, by spinning up a set of nodes with various configurations and making sure the nodes and network behave correctly. The background for the E2E test suite is outlined in [RFC-001](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-066-e2e-testing.md). +End-to-end testnets are used to test CometBFT functionality as a user would use it, by spinning up a +set of nodes with various configurations and making sure the nodes and network behave correctly. The +background for the E2E test suite is outlined in [RFC-001][rfc-001]. The end-to-end tests can be thought of in this manner: @@ -68,7 +103,7 @@ generator. For example: # the CometBFT version in the current local code (as specified in # ../../version/version.go). # -# In the example below, if the local version.TMCoreSemVer value is "v0.34.24", +# In the example below, if the local version value is "v0.34.24", # for example, and the latest official release is v0.34.23, then 1/3rd of the # network will run v0.34.23 and the remaining 2/3rds will run the E2E node built # from the local code. @@ -86,6 +121,19 @@ The generator generates this type of perturbation both on full nodes and on ligh Perturbations of type `upgrade` are a noop if the node's version matches the one in `upgrade_version`. +If you need to generate manifests with a specific `log_level` that will configure the log level parameter in the +CometBFT config file for each node, you can specify the level using the flags `-l` or `--log-level`. + +``` +./build/generator -g 2 -d networks/nightly/ -l "*:debug,p2p:info" +``` + +This will set the specified log level in each generated manifest (TOML file): + +```toml +log_level = "debug" +``` + ## Test Stages The test runner has the following stages, which can also be executed explicitly by running `./build/runner -f <file> <stage>`: @@ -108,10 +156,12 @@ The test runner has the following stages, which can also be executed explicitly Auxiliary commands: -* `logs`: outputs all node logs. +* `logs`: outputs all node logs (specify `--split` to output individual logs). * `tail`: tails (follows) node logs until canceled. +* `monitor`: manages monitoring tools such as Prometheus and Grafana. + ## Tests Test cases are written as normal Go tests in `tests/`. They use a `testNode()` helper which executes each test as a parallel subtest for each node in the network (see the sketch below). @@ -125,6 +175,8 @@ To run tests manually, set the `E2E_MANIFEST` environment variable to the path o E2E_MANIFEST=networks/ci.toml go test -v ./tests/... ``` +If the testnet files are located in a custom directory, you need to set it in the `E2E_TESTNET_DIR` environment variable. + Optionally, `E2E_NODE` specifies the name of a single testnet node to test. These environment variables can also be specified in `tests/e2e_test.go` to run tests from an editor or IDE: @@ -134,6 +186,7 @@ func init() { // This can be used to manually specify a testnet manifest and/or node to // run tests against. The testnet must have been started by the runner first. os.Setenv("E2E_MANIFEST", "networks/ci.toml") + os.Setenv("E2E_TESTNET_DIR", "networks/foo") os.Setenv("E2E_NODE", "validator01") } ``` @@ -217,3 +270,21 @@ cometbft start ``` Check `node/config.go` to see how the settings of the test application can be tweaked.
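To make the `testNode()` pattern referenced above concrete, here is a minimal sketch of the parallel-subtest-per-node shape it implements. This is an illustration under stated assumptions, not the real helper: `node` and `testPerNode` are invented stand-ins for the actual types in `tests/`, and the loop body relies on Go 1.22 per-iteration loop variables (consistent with the `tt := tt` copies removed elsewhere in this diff).

```go
package e2e_test

import "testing"

// node is a stand-in for the e2e testnet node descriptor; the real helper
// iterates over the nodes loaded from the E2E_MANIFEST testnet.
type node struct{ Name string }

// testPerNode mimics the shape of the testNode() helper described above:
// it runs the test body once per node, each as a parallel subtest, so
// failures are reported per node (e.g. TestExample/validator01).
func testPerNode(t *testing.T, nodes []node, fn func(t *testing.T, n node)) {
	t.Helper()
	for _, n := range nodes {
		t.Run(n.Name, func(t *testing.T) {
			t.Parallel() // subtests for all nodes run concurrently
			fn(t, n)
		})
	}
}

func TestExample(t *testing.T) {
	nodes := []node{{Name: "validator01"}, {Name: "validator02"}}
	testPerNode(t, nodes, func(t *testing.T, n node) {
		// A real test would query the node's RPC endpoint here and
		// assert on the response; this sketch only checks the name.
		if n.Name == "" {
			t.Fatal("node must have a name")
		}
	})
}
```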
+ +## Managing monitoring tools + +The `monitor` command manages monitoring tools such as Prometheus and Grafana, with the following +subcommands: +- `monitor start` will spin up a local Docker container with a Prometheus and a Grafana server. +Their web interfaces will be available at `http://localhost:9090` and `http://localhost:3000` +respectively. +- `monitor stop` will shut down the Docker container. + +Before starting any of these services, a Prometheus configuration file `prometheus.yml` must exist +in the `monitoring` directory. This file can be automatically generated when running `setup` on a +manifest that contains the line `prometheus = true`. + +These services run independently of the testnet, so that the data can be analysed even when the +testnet is down. + +[rfc-001]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-066-e2e-testing.md diff --git a/test/e2e/app/app.go index 27d2b18a6dd..178cbecb11e 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -12,30 +12,39 @@ import ( "math/big" "os" "path/filepath" + "slices" "strconv" "strings" + "sync" "time" + "github.com/cosmos/gogoproto/proto" + gogo "github.com/cosmos/gogoproto/types" + "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" + cryptoproto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/protoio" - cryptoproto "github.com/cometbft/cometbft/proto/tendermint/crypto" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/test/loadtime/payload" + cmttypes "github.com/cometbft/cometbft/types" "github.com/cometbft/cometbft/version" ) const ( appVersion = 1 voteExtensionKey string = "extensionSum" - voteExtensionMaxLen int64 = 1024 * 1024 * 128 //TODO: should be smaller. voteExtensionMaxVal int64 = 128 prefixReservedKey string = "reservedTxKey_" + prefixValidator string = "Validator_" suffixChainID string = "ChainID" suffixVoteExtHeight string = "VoteExtensionsHeight" + suffixPbtsHeight string = "PbtsHeight" suffixInitialHeight string = "InitialHeight" + txTTL uint64 = 15 // height difference at which transactions should be invalid ) // Application is an ABCI application for use by end-to-end tests. It is a @@ -49,6 +58,11 @@ type Application struct { cfg *Config restoreSnapshot *abci.Snapshot restoreChunks [][]byte + // It's OK not to persist this, as it is not part of the state machine + seenTxs sync.Map // cmttypes.TxKey -> uint64 + allKeyTypes []string // Cached slice of all supported key types in CometBFT + + lanePriorities map[string]uint32 } // Config allows for the setting of high level parameters for running the e2e Application @@ -102,8 +116,46 @@ type Config struct { // Vote extension padding size, to simulate different vote extension sizes. VoteExtensionSize uint `toml:"vote_extension_size"` + // VoteExtensionsEnableHeight configures the first height during which + // the chain will use and require vote extension data to be present + // in precommit messages. + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + + // VoteExtensionsUpdateHeight configures the height at which consensus + // param VoteExtensionsEnableHeight will be set. + // -1 denotes it is set at genesis. + // 0 denotes it is set at InitChain.
+ VoteExtensionsUpdateHeight int64 `toml:"vote_extensions_update_height"` + // Flag for enabling and disabling logging of ABCI requests. ABCIRequestsLoggingEnabled bool `toml:"abci_requests_logging_enabled"` + + // PbtsEnableHeight configures the first height during which + // the chain will start using Proposer-Based Timestamps (PBTS) + // to create and validate new blocks. + PbtsEnableHeight int64 `toml:"pbts_enable_height"` + + // PbtsUpdateHeight configures the height at which consensus + // param PbtsEnableHeight will be set. + // -1 denotes it is set at genesis. + // 0 denotes it is set at InitChain. + PbtsUpdateHeight int64 `toml:"pbts_update_height"` + + // If true, disables the use of lanes by the application. + // Used to simulate networks that do not want to use lanes, running + // on top of CometBFT with lane support. + NoLanes bool `toml:"no_lanes"` + + // Mapping from lane IDs to lane priorities. These lanes will be used by the + // application for setting up the mempool and for classifying transactions. + Lanes map[string]uint32 `toml:"lanes"` + + // If true, the application will return validator updates and + // `ConsensusParams` updates at every height. + // This is useful to create a more dynamic testnet. + // * An existing validator will be chosen, and its power will alternate 0 and 1 + // * `ConsensusParams` will be flipping on and off key types not set at genesis + ConstantFlip bool `toml:"constant_flip"` } func DefaultConfig(dir string) *Config { @@ -111,6 +163,17 @@ func DefaultConfig(dir string) *Config { PersistInterval: 1, SnapshotInterval: 100, Dir: dir, + Lanes: DefaultLanes(), + } +} + +func DefaultLanes() map[string]uint32 { + return map[string]uint32{ + "100": 100, + "50": 50, + "10": 10, + "5": 5, + "1": 1, } } @@ -124,39 +187,122 @@ func NewApplication(cfg *Config) (*Application, error) { if err != nil { return nil, err } - - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + allKeyTypes := make([]string, 0, len(cmttypes.ABCIPubKeyTypesToNames)) + for keyType := range cmttypes.ABCIPubKeyTypesToNames { + allKeyTypes = append(allKeyTypes, keyType) + } + logger := log.NewLogger(os.Stdout) logger.Info("Application started!") + if cfg.NoLanes { + return &Application{ + logger: logger, + state: state, + snapshots: snapshots, + cfg: cfg, + allKeyTypes: allKeyTypes, + }, nil + } return &Application{ - logger: logger, - state: state, - snapshots: snapshots, - cfg: cfg, + logger: logger, + state: state, + snapshots: snapshots, + cfg: cfg, + lanePriorities: cfg.Lanes, + allKeyTypes: allKeyTypes, }, nil } // Info implements ABCI. -func (app *Application) Info(context.Context, *abci.RequestInfo) (*abci.ResponseInfo, error) { - - r := &abci.Request{Value: &abci.Request_Info{Info: &abci.RequestInfo{}}} +func (app *Application) Info(context.Context, *abci.InfoRequest) (*abci.InfoResponse, error) { + r := &abci.Request{Value: &abci.Request_Info{Info: &abci.InfoRequest{}}} if err := app.logABCIRequest(r); err != nil { return nil, err } height, hash := app.state.Info() - return &abci.ResponseInfo{ + if app.cfg.NoLanes { + return &abci.InfoResponse{ + Version: version.ABCIVersion, + AppVersion: appVersion, + LastBlockHeight: int64(height), + LastBlockAppHash: hash, + }, nil + } + + // We set as default lane the (random) first lane id found in the list of + // lanes. On CheckTx requests, the application will always return a valid + // lane, so the mempool will never need to use the default lane value. 
+	var defaultLane string
+	for id := range app.lanePriorities {
+		defaultLane = id
+		break
+	}
+
+	return &abci.InfoResponse{
 		Version:          version.ABCIVersion,
 		AppVersion:       appVersion,
 		LastBlockHeight:  int64(height),
 		LastBlockAppHash: hash,
+		LanePriorities:   app.lanePriorities,
+		DefaultLane:      defaultLane,
 	}, nil
 }
 
-// Info implements ABCI.
-func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+func (app *Application) updateFeatureEnableHeights(currentHeight int64) *cmtproto.ConsensusParams {
+	params := &cmtproto.ConsensusParams{
+		Feature: &cmtproto.FeatureParams{},
+	}
+	retNil := true
+	if app.cfg.VoteExtensionsUpdateHeight == currentHeight {
+		app.logger.Info("enabling vote extensions on the fly",
+			"current_height", currentHeight,
+			"enable_height", app.cfg.VoteExtensionsEnableHeight)
+		params.Feature.VoteExtensionsEnableHeight = &gogo.Int64Value{Value: app.cfg.VoteExtensionsEnableHeight}
+		retNil = false
+		app.logger.Info("updating VoteExtensionsHeight in app_state", "height", app.cfg.VoteExtensionsEnableHeight)
+		app.state.Set(prefixReservedKey+suffixVoteExtHeight, strconv.FormatInt(app.cfg.VoteExtensionsEnableHeight, 10))
+	}
+	if app.cfg.PbtsUpdateHeight == currentHeight {
+		app.logger.Info("enabling PBTS on the fly",
+			"current_height", currentHeight,
+			"enable_height", app.cfg.PbtsEnableHeight)
+		params.Feature.PbtsEnableHeight = &gogo.Int64Value{Value: app.cfg.PbtsEnableHeight}
+		retNil = false
+		app.logger.Info("updating PBTS Height in app_state", "height", app.cfg.PbtsEnableHeight)
+		app.state.Set(prefixReservedKey+suffixPbtsHeight, strconv.FormatInt(app.cfg.PbtsEnableHeight, 10))
+	}
+	if retNil {
+		return nil
+	}
+	return params
+}
+
+func (app *Application) flipConsensusParams(params *cmtproto.ConsensusParams, height int64) (*cmtproto.ConsensusParams, error) {
+	if !app.cfg.ConstantFlip {
+		return params, nil
+	}
+	if height < 0 {
+		return nil, fmt.Errorf("cannot flip ConsensusParams on height < 0 (%d)", height)
+	}
 
-	r := &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.RequestInitChain{}}}
+	keyTypes := app.allKeyTypes
+	if height%2 == 0 {
+		keyTypes = []string{app.cfg.KeyType}
+	}
+	if params == nil {
+		params = &cmtproto.ConsensusParams{}
+	}
+	params.Validator = &cmtproto.ValidatorParams{
+		PubKeyTypes: keyTypes,
+	}
+	app.logger.Info("flipping key types", "PubKeyTypes", keyTypes)
+	return params, nil
+}
+
+// InitChain implements ABCI.
+func (app *Application) InitChain(_ context.Context, req *abci.InitChainRequest) (*abci.InitChainResponse, error) { + r := &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.InitChainRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -171,21 +317,30 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) } app.logger.Info("setting ChainID in app_state", "chainId", req.ChainId) app.state.Set(prefixReservedKey+suffixChainID, req.ChainId) - app.logger.Info("setting VoteExtensionsHeight in app_state", "height", req.ConsensusParams.Abci.VoteExtensionsEnableHeight) - app.state.Set(prefixReservedKey+suffixVoteExtHeight, strconv.FormatInt(req.ConsensusParams.Abci.VoteExtensionsEnableHeight, 10)) + app.logger.Info("setting VoteExtensionsHeight in app_state", "height", req.ConsensusParams.Feature.VoteExtensionsEnableHeight.GetValue()) + app.state.Set(prefixReservedKey+suffixVoteExtHeight, strconv.FormatInt(req.ConsensusParams.Feature.VoteExtensionsEnableHeight.GetValue(), 10)) + app.logger.Info("setting PBTS Height in app_state", "height", req.ConsensusParams.Feature.PbtsEnableHeight.GetValue()) + app.state.Set(prefixReservedKey+suffixPbtsHeight, strconv.FormatInt(req.ConsensusParams.Feature.PbtsEnableHeight.GetValue(), 10)) app.logger.Info("setting initial height in app_state", "initial_height", req.InitialHeight) app.state.Set(prefixReservedKey+suffixInitialHeight, strconv.FormatInt(req.InitialHeight, 10)) // Get validators from genesis if req.Validators != nil { for _, val := range req.Validators { - val := val - if err := app.storeValidator(&val); err != nil { + validator := val + if err := app.storeValidator(&validator); err != nil { return nil, err } } } - resp := &abci.ResponseInitChain{ - AppHash: app.state.GetHash(), + + params := app.updateFeatureEnableHeights(0) + if params, err = app.flipConsensusParams(params, 0); err != nil { + return nil, err + } + + resp := &abci.InitChainResponse{ + ConsensusParams: params, + AppHash: app.state.GetHash(), } if resp.Validators, err = app.validatorUpdates(0); err != nil { return nil, err @@ -194,33 +349,68 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) } // CheckTx implements ABCI. 
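+// It also enforces a soft TTL on transactions: a transaction first seen at
+// height h is rejected with CodeTypeExpired once the app height exceeds
+// h+txTTL, and its seenTxs entry is dropped.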
-func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
-
-	r := &abci.Request{Value: &abci.Request_CheckTx{CheckTx: &abci.RequestCheckTx{}}}
+func (app *Application) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*abci.CheckTxResponse, error) {
+	r := &abci.Request{Value: &abci.Request_CheckTx{CheckTx: &abci.CheckTxRequest{}}}
 	err := app.logABCIRequest(r)
 	if err != nil {
 		return nil, err
 	}
 
-	key, _, err := parseTx(req.Tx)
+	key, value, err := parseTx(req.Tx)
 	if err != nil || key == prefixReservedKey {
-		return &abci.ResponseCheckTx{
+		//nolint:nilerr
+		return &abci.CheckTxResponse{
 			Code: kvstore.CodeTypeEncodingError,
 			Log:  err.Error(),
 		}, nil
 	}
 
+	txKey := cmttypes.Tx(req.Tx).Key()
+	stHeight, _ := app.state.Info()
+	if txHeight, ok := app.seenTxs.Load(txKey); ok {
+		if stHeight < txHeight.(uint64) {
+			panic(fmt.Sprintf("current height is lower than the height at which the tx was first seen; seen height %v, current height %v", txHeight, stHeight))
+		}
+		if stHeight > txHeight.(uint64)+txTTL {
+			app.logger.Debug("Application CheckTx", "msg", "transaction expired", "tx_hash", cmttypes.Tx.Hash(req.Tx), "seen_height", txHeight, "current_height", stHeight)
+			app.seenTxs.Delete(txKey)
+			return &abci.CheckTxResponse{
+				Code: kvstore.CodeTypeExpired,
+				Log:  fmt.Sprintf("transaction expired; seen height %v, current height %v", txHeight, stHeight),
+			}, nil
+		}
+	} else {
+		app.seenTxs.Store(txKey, stHeight)
+	}
+
 	if app.cfg.CheckTxDelay != 0 {
 		time.Sleep(app.cfg.CheckTxDelay)
 	}
 
-	return &abci.ResponseCheckTx{Code: kvstore.CodeTypeOK, GasWanted: 1}, nil
+	if app.cfg.NoLanes {
+		return &abci.CheckTxResponse{Code: kvstore.CodeTypeOK, GasWanted: 1}, nil
+	}
+	lane := extractLane(value)
+	return &abci.CheckTxResponse{Code: kvstore.CodeTypeOK, GasWanted: 1, LaneId: lane}, nil
 }
 
-// FinalizeBlock implements ABCI.
-func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
+// extractLane returns the lane ID as a string if value is a Payload;
+// otherwise it returns an empty string.
+func extractLane(value string) string {
+	valueBytes, err := hex.DecodeString(value)
+	if err != nil {
+		panic("could not hex-decode tx value for extracting lane")
+	}
+	p := &payload.Payload{}
+	err = proto.Unmarshal(valueBytes, p)
+	if err != nil {
+		return ""
+	}
+	return p.GetLane()
+}
 
-	r := &abci.Request{Value: &abci.Request_FinalizeBlock{FinalizeBlock: &abci.RequestFinalizeBlock{}}}
+// FinalizeBlock implements ABCI.
+func (app *Application) FinalizeBlock(_ context.Context, req *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) {
+	r := &abci.Request{Value: &abci.Request_FinalizeBlock{FinalizeBlock: &abci.FinalizeBlockRequest{}}}
 	err := app.logABCIRequest(r)
 	if err != nil {
 		return nil, err
@@ -238,22 +428,40 @@
 		}
 		app.state.Set(key, value)
 
+		app.seenTxs.Delete(cmttypes.Tx(tx).Key())
+
 		txs[i] = &abci.ExecTxResult{Code: kvstore.CodeTypeOK}
 	}
 
+	for _, ev := range req.Misbehavior {
+		app.logger.Info("Misbehavior. 
Slashing validator", + "validator_address", ev.GetValidator().Address, + "type", ev.GetType(), + "height", ev.GetHeight(), + "time", ev.GetTime(), + "total_voting_power", ev.GetTotalVotingPower(), + ) + } + valUpdates, err := app.validatorUpdates(uint64(req.Height)) if err != nil { - panic(err) + return nil, err + } + + params := app.updateFeatureEnableHeights(req.Height) + if params, err = app.flipConsensusParams(params, req.Height); err != nil { + return nil, err } if app.cfg.FinalizeBlockDelay != 0 { time.Sleep(app.cfg.FinalizeBlockDelay) } - return &abci.ResponseFinalizeBlock{ - TxResults: txs, - ValidatorUpdates: valUpdates, - AppHash: app.state.Finalize(), + return &abci.FinalizeBlockResponse{ + TxResults: txs, + ValidatorUpdates: valUpdates, + AppHash: app.state.Finalize(), + ConsensusParamUpdates: params, Events: []abci.Event{ { Type: "val_updates", @@ -264,18 +472,18 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali }, { Key: "height", - Value: strconv.Itoa(int(req.Height)), + Value: strconv.FormatInt(req.Height, 10), }, }, }, }, + NextBlockDelay: 1 * time.Second, }, nil } // Commit implements ABCI. -func (app *Application) Commit(_ context.Context, _ *abci.RequestCommit) (*abci.ResponseCommit, error) { - - r := &abci.Request{Value: &abci.Request_Commit{Commit: &abci.RequestCommit{}}} +func (app *Application) Commit(_ context.Context, _ *abci.CommitRequest) (*abci.CommitResponse, error) { + r := &abci.Request{Value: &abci.Request_Commit{Commit: &abci.CommitRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -300,22 +508,21 @@ func (app *Application) Commit(_ context.Context, _ *abci.RequestCommit) (*abci. if app.cfg.RetainBlocks > 0 { retainHeight = int64(height - app.cfg.RetainBlocks + 1) } - return &abci.ResponseCommit{ + return &abci.CommitResponse{ RetainHeight: retainHeight, }, nil } // Query implements ABCI. -func (app *Application) Query(_ context.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { - - r := &abci.Request{Value: &abci.Request_Query{Query: &abci.RequestQuery{}}} +func (app *Application) Query(_ context.Context, req *abci.QueryRequest) (*abci.QueryResponse, error) { + r := &abci.Request{Value: &abci.Request_Query{Query: &abci.QueryRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err } value, height := app.state.Query(string(req.Data)) - return &abci.ResponseQuery{ + return &abci.QueryResponse{ Height: int64(height), Key: req.Data, Value: []byte(value), @@ -323,9 +530,8 @@ func (app *Application) Query(_ context.Context, req *abci.RequestQuery) (*abci. } // ListSnapshots implements ABCI. -func (app *Application) ListSnapshots(context.Context, *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { - - r := &abci.Request{Value: &abci.Request_ListSnapshots{ListSnapshots: &abci.RequestListSnapshots{}}} +func (app *Application) ListSnapshots(context.Context, *abci.ListSnapshotsRequest) (*abci.ListSnapshotsResponse, error) { + r := &abci.Request{Value: &abci.Request_ListSnapshots{ListSnapshots: &abci.ListSnapshotsRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -335,13 +541,12 @@ func (app *Application) ListSnapshots(context.Context, *abci.RequestListSnapshot if err != nil { panic(err) } - return &abci.ResponseListSnapshots{Snapshots: snapshots}, nil + return &abci.ListSnapshotsResponse{Snapshots: snapshots}, nil } // LoadSnapshotChunk implements ABCI. 
-func (app *Application) LoadSnapshotChunk(_ context.Context, req *abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { - - r := &abci.Request{Value: &abci.Request_LoadSnapshotChunk{LoadSnapshotChunk: &abci.RequestLoadSnapshotChunk{}}} +func (app *Application) LoadSnapshotChunk(_ context.Context, req *abci.LoadSnapshotChunkRequest) (*abci.LoadSnapshotChunkResponse, error) { + r := &abci.Request{Value: &abci.Request_LoadSnapshotChunk{LoadSnapshotChunk: &abci.LoadSnapshotChunkRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -351,13 +556,12 @@ func (app *Application) LoadSnapshotChunk(_ context.Context, req *abci.RequestLo if err != nil { panic(err) } - return &abci.ResponseLoadSnapshotChunk{Chunk: chunk}, nil + return &abci.LoadSnapshotChunkResponse{Chunk: chunk}, nil } // OfferSnapshot implements ABCI. -func (app *Application) OfferSnapshot(_ context.Context, req *abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) { - - r := &abci.Request{Value: &abci.Request_OfferSnapshot{OfferSnapshot: &abci.RequestOfferSnapshot{}}} +func (app *Application) OfferSnapshot(_ context.Context, req *abci.OfferSnapshotRequest) (*abci.OfferSnapshotResponse, error) { + r := &abci.Request{Value: &abci.Request_OfferSnapshot{OfferSnapshot: &abci.OfferSnapshotRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -368,13 +572,12 @@ func (app *Application) OfferSnapshot(_ context.Context, req *abci.RequestOfferS } app.restoreSnapshot = req.Snapshot app.restoreChunks = [][]byte{} - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil + return &abci.OfferSnapshotResponse{Result: abci.OFFER_SNAPSHOT_RESULT_ACCEPT}, nil } // ApplySnapshotChunk implements ABCI. -func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { - - r := &abci.Request{Value: &abci.Request_ApplySnapshotChunk{ApplySnapshotChunk: &abci.RequestApplySnapshotChunk{}}} +func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.ApplySnapshotChunkRequest) (*abci.ApplySnapshotChunkResponse, error) { + r := &abci.Request{Value: &abci.Request_ApplySnapshotChunk{ApplySnapshotChunk: &abci.ApplySnapshotChunkRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -396,7 +599,7 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA app.restoreSnapshot = nil app.restoreChunks = nil } - return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil + return &abci.ApplySnapshotChunkResponse{Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT}, nil } // PrepareProposal will take the given transactions and attempt to prepare a @@ -420,10 +623,9 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA // The special vote extension-generated transaction must fit within an empty block // and takes precedence over all other transactions coming from the mempool. 
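+// The special transaction has the form
+// "extensionSum=<sum>|<hex-encoded ExtendedCommitInfo>"; ProcessProposal
+// later re-verifies it via verifyExtensionTx.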
func (app *Application) PrepareProposal( - _ context.Context, req *abci.RequestPrepareProposal, -) (*abci.ResponsePrepareProposal, error) { - - r := &abci.Request{Value: &abci.Request_PrepareProposal{PrepareProposal: &abci.RequestPrepareProposal{}}} + _ context.Context, req *abci.PrepareProposalRequest, +) (*abci.PrepareProposalResponse, error) { + r := &abci.Request{Value: &abci.Request_PrepareProposal{PrepareProposal: &abci.PrepareProposalRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -433,7 +635,7 @@ func (app *Application) PrepareProposal( txs := make([][]byte, 0, len(req.Txs)+1) var totalBytes int64 - extTxPrefix := fmt.Sprintf("%s=", voteExtensionKey) + extTxPrefix := voteExtensionKey + "=" sum, err := app.verifyAndSum(areExtensionsEnabled, req.Height, &req.LocalLastCommit, "prepare_proposal") if err != nil { panic(fmt.Errorf("failed to sum and verify in PrepareProposal; err %w", err)) @@ -445,7 +647,7 @@ func (app *Application) PrepareProposal( } extCommitHex := hex.EncodeToString(extCommitBytes) extTx := []byte(fmt.Sprintf("%s%d|%s", extTxPrefix, sum, extCommitHex)) - extTxLen := int64(len(extTx)) + extTxLen := cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{extTx}) app.logger.Info("preparing proposal with special transaction from vote extensions", "extTxLen", extTxLen) if extTxLen > req.MaxTxBytes { panic(fmt.Errorf("serious problem in the e2e app configuration; "+ @@ -467,10 +669,11 @@ func (app *Application) PrepareProposal( app.logger.Error("detected tx that should not come from the mempool", "tx", tx) continue } - if totalBytes+int64(len(tx)) > req.MaxTxBytes { + txLen := cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{tx}) + if totalBytes+txLen > req.MaxTxBytes { break } - totalBytes += int64(len(tx)) + totalBytes += txLen // Coherence: No need to call parseTx, as the check is stateless and has been performed by CheckTx txs = append(txs, tx) } @@ -479,16 +682,15 @@ func (app *Application) PrepareProposal( time.Sleep(app.cfg.PrepareProposalDelay) } - return &abci.ResponsePrepareProposal{Txs: txs}, nil + return &abci.PrepareProposalResponse{Txs: txs}, nil } // ProcessProposal implements part of the Application interface. // It accepts any proposal that does not contain a malformed transaction. // NOTE It is up to real Applications to effect punitive behavior in the cases ProcessProposal -// returns ResponseProcessProposal_REJECT, as it is evidence of misbehavior. -func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - - r := &abci.Request{Value: &abci.Request_ProcessProposal{ProcessProposal: &abci.RequestProcessProposal{}}} +// returns PROCESS_PROPOSAL_STATUS_REJECT, as it is evidence of misbehavior. 
+func (app *Application) ProcessProposal(_ context.Context, req *abci.ProcessProposalRequest) (*abci.ProcessProposalResponse, error) { + r := &abci.Request{Value: &abci.Request_ProcessProposal{ProcessProposal: &abci.ProcessProposalRequest{}}} err := app.logABCIRequest(r) if err != nil { return nil, err @@ -500,18 +702,18 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc k, v, err := parseTx(tx) if err != nil { app.logger.Error("malformed transaction in ProcessProposal", "tx", tx, "err", err) - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil } switch { case areExtensionsEnabled && k == voteExtensionKey: // Additional check for vote extension-related txs if err := app.verifyExtensionTx(req.Height, v); err != nil { app.logger.Error("vote extension transaction failed verification, rejecting proposal", k, v, "err", err) - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil } case strings.HasPrefix(k, prefixReservedKey): app.logger.Error("key prefix %q is reserved and cannot be used in transactions, rejecting proposal", k) - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_REJECT}, nil } } @@ -519,7 +721,7 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc time.Sleep(app.cfg.ProcessProposalDelay) } - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil + return &abci.ProcessProposalResponse{Status: abci.PROCESS_PROPOSAL_STATUS_ACCEPT}, nil } // ExtendVote will produce vote extensions in the form of random numbers to @@ -529,7 +731,13 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc // a new transaction will be proposed that updates a special value in the // key/value store ("extensionSum") with the sum of all of the numbers collected // from the vote extensions. -func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { +func (app *Application) ExtendVote(_ context.Context, req *abci.ExtendVoteRequest) (*abci.ExtendVoteResponse, error) { + r := &abci.Request{Value: &abci.Request_ExtendVote{ExtendVote: &abci.ExtendVoteRequest{}}} + err := app.logABCIRequest(r) + if err != nil { + return nil, err + } + appHeight, areExtensionsEnabled := app.checkHeightAndExtensions(false, req.Height, "ExtendVote") if !areExtensionsEnabled { panic(fmt.Errorf("received call to ExtendVote at height %d, when vote extensions are disabled", appHeight)) @@ -556,8 +764,8 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot extLen = binary.PutVarint(ext, num.Int64()) } - app.logger.Info("generated vote extension", "height", appHeight, "vote_extension", fmt.Sprintf("%x", ext[:4]), "len", extLen) - return &abci.ResponseExtendVote{ + app.logger.Info("generated vote extension", "height", appHeight, "vote_extension", hex.EncodeToString(ext[:4]), "len", extLen) + return &abci.ExtendVoteResponse{ VoteExtension: ext[:extLen], }, nil } @@ -565,7 +773,13 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot // VerifyVoteExtension simply validates vote extensions from other validators // without doing anything about them. 
In this case, it just makes sure that the // vote extension is a well-formed integer value. -func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { +func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.VerifyVoteExtensionRequest) (*abci.VerifyVoteExtensionResponse, error) { + r := &abci.Request{Value: &abci.Request_VerifyVoteExtension{VerifyVoteExtension: &abci.VerifyVoteExtensionRequest{}}} + err := app.logABCIRequest(r) + if err != nil { + return nil, err + } + appHeight, areExtensionsEnabled := app.checkHeightAndExtensions(false, req.Height, "VerifyVoteExtension") if !areExtensionsEnabled { panic(fmt.Errorf("received call to VerifyVoteExtension at height %d, when vote extensions are disabled", appHeight)) @@ -573,16 +787,16 @@ func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.Request // We don't allow vote extensions to be optional if len(req.VoteExtension) == 0 { app.logger.Error("received empty vote extension") - return &abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_REJECT, + return &abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT, }, nil } num, err := parseVoteExtension(app.cfg, req.VoteExtension) if err != nil { - app.logger.Error("failed to parse vote extension", "vote_extension", fmt.Sprintf("%x", req.VoteExtension[:4]), "err", err) - return &abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_REJECT, + app.logger.Error("failed to parse vote extension", "vote_extension", hex.EncodeToString(req.VoteExtension[:4]), "err", err) + return &abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT, }, nil } @@ -590,9 +804,9 @@ func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.Request time.Sleep(app.cfg.VoteExtensionDelay) } - app.logger.Info("verified vote extension value", "height", req.Height, "vote_extension", fmt.Sprintf("%x", req.VoteExtension[:4]), "num", num) - return &abci.ResponseVerifyVoteExtension{ - Status: abci.ResponseVerifyVoteExtension_ACCEPT, + app.logger.Info("verified vote extension value", "height", req.Height, "vote_extension", hex.EncodeToString(req.VoteExtension[:4]), "num", num) + return &abci.VerifyVoteExtensionResponse{ + Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil } @@ -644,43 +858,65 @@ func (app *Application) checkHeightAndExtensions(isPrepareProcessProposal bool, func (app *Application) storeValidator(valUpdate *abci.ValidatorUpdate) error { // Store validator data to verify extensions - pubKey, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey) + pubKey, err := cryptoenc.PubKeyFromTypeAndBytes(valUpdate.PubKeyType, valUpdate.PubKeyBytes) if err != nil { return err } - addr := pubKey.Address().String() if valUpdate.Power > 0 { - pubKeyBytes, err := valUpdate.PubKey.Marshal() + addr := pubKey.Address().String() + app.logger.Info("setting validator in app_state", "addr", addr) + pk, err := cryptoenc.PubKeyToProto(pubKey) if err != nil { - return err + return fmt.Errorf("failed to convert pubkey to proto: %w", err) } - app.logger.Info("setting validator in app_state", "addr", addr) - app.state.Set(prefixReservedKey+addr, hex.EncodeToString(pubKeyBytes)) + pubKeyBytes, err := pk.Marshal() + if err != nil { + return fmt.Errorf("failed to marshal pubkey: %w", err) + } + app.state.Set(prefixReservedKey+prefixValidator+addr, hex.EncodeToString(pubKeyBytes)) } return nil } // 
validatorUpdates generates a validator set update. func (app *Application) validatorUpdates(height uint64) (abci.ValidatorUpdates, error) { - updates := app.cfg.ValidatorUpdates[fmt.Sprintf("%v", height)] + // updates is map[string]uint8 of the form "validator_name" => voting_power + updates := app.cfg.ValidatorUpdates[strconv.FormatUint(height, 10)] if len(updates) == 0 { return nil, nil } - valUpdates := abci.ValidatorUpdates{} - for keyString, power := range updates { + // Collect the validator names into a slice and sort it to ensure deterministic + // iteration when creating the ValidatorUpdates below, since map traversal is + // non-deterministic. + validatorsNames := make([]string, 0, len(updates)) + for validatorName := range updates { + validatorsNames = append(validatorsNames, validatorName) + } + slices.Sort(validatorsNames) + + validatorsUpdates := make(abci.ValidatorUpdates, len(updates)) + for i, validatorName := range validatorsNames { + power := updates[validatorName] - keyBytes, err := base64.StdEncoding.DecodeString(keyString) + keyBytes, err := base64.StdEncoding.DecodeString(validatorName) if err != nil { - return nil, fmt.Errorf("invalid base64 pubkey value %q: %w", keyString, err) + formatStr := "invalid base64 pubkey value %q: %w" + return nil, fmt.Errorf(formatStr, validatorName, err) } - valUpdate := abci.UpdateValidator(keyBytes, int64(power), app.cfg.KeyType) - valUpdates = append(valUpdates, valUpdate) - if err := app.storeValidator(&valUpdate); err != nil { + + validatorUpdate := abci.ValidatorUpdate{ + Power: int64(power), + PubKeyType: app.cfg.KeyType, + PubKeyBytes: keyBytes, + } + + validatorsUpdates[i] = validatorUpdate + if err := app.storeValidator(&validatorUpdate); err != nil { return nil, err } } - return valUpdates, nil + return validatorsUpdates, nil } // logAbciRequest log the request using the app's logger. @@ -696,8 +932,30 @@ func (app *Application) logABCIRequest(req *abci.Request) error { return nil } +func (app *Application) loadPubKey(addr string) (crypto.PubKey, error) { + pubKeyHex := app.state.Get(prefixReservedKey + prefixValidator + addr) + if len(pubKeyHex) == 0 { + return nil, fmt.Errorf("unknown validator with address %q", addr) + } + pubKeyBytes, err := hex.DecodeString(pubKeyHex) + if err != nil { + return nil, fmt.Errorf("could not hex-decode public key for validator address %s, err %w", addr, err) + } + var pubKeyProto cryptoproto.PublicKey + err = pubKeyProto.Unmarshal(pubKeyBytes) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal public key for validator address %s, err %w", addr, err) + } + pubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto) + if err != nil { + return nil, fmt.Errorf("could not obtain a public key from its proto for validator address %s, err %w", addr, err) + } + + return pubKey, nil +} + // parseTx parses a tx in 'key=value' format into a key and value. -func parseTx(tx []byte) (string, string, error) { +func parseTx(tx []byte) (key, value string, err error) { parts := bytes.Split(tx, []byte("=")) if len(parts) != 2 { return "", "", fmt.Errorf("invalid tx format: %q", string(tx)) @@ -768,24 +1026,11 @@ func (app *Application) verifyAndSum( return 0, fmt.Errorf("error when marshaling signed bytes: %w", err) } - //... and verify + // ... 
and verify valAddr := crypto.Address(vote.Validator.Address).String() - pubKeyHex := app.state.Get(prefixReservedKey + valAddr) - if len(pubKeyHex) == 0 { - return 0, fmt.Errorf("received vote from unknown validator with address %q", valAddr) - } - pubKeyBytes, err := hex.DecodeString(pubKeyHex) - if err != nil { - return 0, fmt.Errorf("could not hex-decode public key for validator address %s, err %w", valAddr, err) - } - var pubKeyProto cryptoproto.PublicKey - err = pubKeyProto.Unmarshal(pubKeyBytes) - if err != nil { - return 0, fmt.Errorf("unable to unmarshal public key for validator address %s, err %w", valAddr, err) - } - pubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto) + pubKey, err := app.loadPubKey(valAddr) if err != nil { - return 0, fmt.Errorf("could not obtain a public key from its proto for validator address %s, err %w", valAddr, err) + return 0, err } if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { return 0, errors.New("received vote with invalid signature") @@ -813,15 +1058,15 @@ func (app *Application) verifyAndSum( return sum, nil } -// verifyExtensionTx parses and verifies the payload of a vote extension-generated tx +// verifyExtensionTx parses and verifies the payload of a vote extension-generated tx. func (app *Application) verifyExtensionTx(height int64, payload string) error { parts := strings.Split(payload, "|") if len(parts) != 2 { - return fmt.Errorf("invalid payload format") + return errors.New("invalid payload format") } expSumStr := parts[0] if len(expSumStr) == 0 { - return fmt.Errorf("sum cannot be empty in vote extension payload") + return errors.New("sum cannot be empty in vote extension payload") } expSum, err := strconv.Atoi(expSumStr) @@ -831,17 +1076,17 @@ func (app *Application) verifyExtensionTx(height int64, payload string) error { extCommitHex := parts[1] if len(extCommitHex) == 0 { - return fmt.Errorf("extended commit data cannot be empty in vote extension payload") + return errors.New("extended commit data cannot be empty in vote extension payload") } extCommitBytes, err := hex.DecodeString(extCommitHex) if err != nil { - return fmt.Errorf("could not hex-decode vote extension payload") + return errors.New("could not hex-decode vote extension payload") } var extCommit abci.ExtendedCommitInfo if extCommit.Unmarshal(extCommitBytes) != nil { - return fmt.Errorf("unable to unmarshal extended commit") + return errors.New("unable to unmarshal extended commit") } sum, err := app.verifyAndSum(true, height, &extCommit, "process_proposal") diff --git a/test/e2e/app/log_abci.go b/test/e2e/app/log_abci.go index 343ccf1d91a..26b12e4bf3c 100644 --- a/test/e2e/app/log_abci.go +++ b/test/e2e/app/log_abci.go @@ -5,8 +5,9 @@ import ( "fmt" "strings" - abci "github.com/cometbft/cometbft/abci/types" "github.com/cosmos/gogoproto/proto" + + abci "github.com/cometbft/cometbft/abci/types" ) const AbciReq = "abci-req" diff --git a/test/e2e/app/log_abci_test.go b/test/e2e/app/log_abci_test.go index 393b939e3b1..fbeea6a2503 100644 --- a/test/e2e/app/log_abci_test.go +++ b/test/e2e/app/log_abci_test.go @@ -3,27 +3,30 @@ package app import ( "testing" - abci "github.com/cometbft/cometbft/abci/types" "github.com/stretchr/testify/require" + + abci "github.com/cometbft/cometbft/abci/types" ) // Tests for logging each type of requests. 
func TestLogging(t *testing.T) { - var reqs = []*abci.Request{ - {Value: &abci.Request_Echo{Echo: &abci.RequestEcho{}}}, - {Value: &abci.Request_Flush{Flush: &abci.RequestFlush{}}}, - {Value: &abci.Request_Info{Info: &abci.RequestInfo{}}}, - {Value: &abci.Request_InitChain{InitChain: &abci.RequestInitChain{}}}, - {Value: &abci.Request_Query{Query: &abci.RequestQuery{}}}, - {Value: &abci.Request_FinalizeBlock{FinalizeBlock: &abci.RequestFinalizeBlock{}}}, - {Value: &abci.Request_CheckTx{CheckTx: &abci.RequestCheckTx{}}}, - {Value: &abci.Request_Commit{Commit: &abci.RequestCommit{}}}, - {Value: &abci.Request_ListSnapshots{ListSnapshots: &abci.RequestListSnapshots{}}}, - {Value: &abci.Request_OfferSnapshot{OfferSnapshot: &abci.RequestOfferSnapshot{}}}, - {Value: &abci.Request_LoadSnapshotChunk{LoadSnapshotChunk: &abci.RequestLoadSnapshotChunk{}}}, - {Value: &abci.Request_ApplySnapshotChunk{ApplySnapshotChunk: &abci.RequestApplySnapshotChunk{}}}, - {Value: &abci.Request_PrepareProposal{PrepareProposal: &abci.RequestPrepareProposal{}}}, - {Value: &abci.Request_ProcessProposal{ProcessProposal: &abci.RequestProcessProposal{}}}, + reqs := []*abci.Request{ + {Value: &abci.Request_Echo{Echo: &abci.EchoRequest{}}}, + {Value: &abci.Request_Flush{Flush: &abci.FlushRequest{}}}, + {Value: &abci.Request_Info{Info: &abci.InfoRequest{}}}, + {Value: &abci.Request_InitChain{InitChain: &abci.InitChainRequest{}}}, + {Value: &abci.Request_Query{Query: &abci.QueryRequest{}}}, + {Value: &abci.Request_FinalizeBlock{FinalizeBlock: &abci.FinalizeBlockRequest{}}}, + {Value: &abci.Request_CheckTx{CheckTx: &abci.CheckTxRequest{}}}, + {Value: &abci.Request_Commit{Commit: &abci.CommitRequest{}}}, + {Value: &abci.Request_ListSnapshots{ListSnapshots: &abci.ListSnapshotsRequest{}}}, + {Value: &abci.Request_OfferSnapshot{OfferSnapshot: &abci.OfferSnapshotRequest{}}}, + {Value: &abci.Request_LoadSnapshotChunk{LoadSnapshotChunk: &abci.LoadSnapshotChunkRequest{}}}, + {Value: &abci.Request_ApplySnapshotChunk{ApplySnapshotChunk: &abci.ApplySnapshotChunkRequest{}}}, + {Value: &abci.Request_PrepareProposal{PrepareProposal: &abci.PrepareProposalRequest{}}}, + {Value: &abci.Request_ProcessProposal{ProcessProposal: &abci.ProcessProposalRequest{}}}, + {Value: &abci.Request_ExtendVote{ExtendVote: &abci.ExtendVoteRequest{}}}, + {Value: &abci.Request_VerifyVoteExtension{VerifyVoteExtension: &abci.VerifyVoteExtensionRequest{}}}, } for _, r := range reqs { s, err := GetABCIRequestString(r) diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 79a223a4694..a5886cf4f53 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -15,7 +15,7 @@ import ( const ( snapshotChunkSize = 1e6 - // Keep only the most recent 10 snapshots. Older snapshots are pruned + // Keep only the most recent 10 snapshots. Older snapshots are pruned. maxSnapshotCount = 10 ) @@ -106,7 +106,7 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { return snapshot, nil } -// Prune removes old snapshots ensuring only the most recent n snapshots remain +// Prune removes old snapshots ensuring only the most recent n snapshots remain. 
func (s *SnapshotStore) Prune(n int) error { s.Lock() defer s.Unlock() diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 732bfbff482..0b142d390e1 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -20,9 +20,9 @@ const ( // Intermediate type used exclusively in serialization/deserialization of // State, such that State need not expose any of its internal values publicly. type serializedState struct { - Height uint64 - Values map[string]string - Hash []byte + Height uint64 `json:"height"` + Values map[string]string `json:"values"` + Hash []byte `json:"hash"` } // State is the application state. @@ -63,15 +63,14 @@ func (s *State) load() error { bz, err := os.ReadFile(s.currentFile) if err != nil { // if the current state doesn't exist then we try recover from the previous state - if errors.Is(err, os.ErrNotExist) { - bz, err = os.ReadFile(s.previousFile) - if err != nil { - return fmt.Errorf("failed to read both current and previous state (%q): %w", - s.previousFile, err) - } - } else { + if !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("failed to read state from %q: %w", s.currentFile, err) } + bz, err = os.ReadFile(s.previousFile) + if err != nil { + return fmt.Errorf("failed to read both current and previous state (%q): %w", + s.previousFile, err) + } } if err := json.Unmarshal(bz, s); err != nil { return fmt.Errorf("invalid state data in %q: %w", s.currentFile, err) @@ -182,7 +181,7 @@ func (s *State) Query(key string) (string, uint64) { return value, height } -// Finalize is called after applying a block, updating the height and returning the new app_hash +// Finalize is called after applying a block, updating the height and returning the new app_hash. func (s *State) Finalize() []byte { s.Lock() defer s.Unlock() diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index a6473e3513e..623dcfa9a2a 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,17 +1,19 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.21-bullseye +FROM cometbft/cometbft-db-testing:v1.0.1 RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null -RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null + +# For latency emulation, install iproute2, which includes tc. +RUN apt-get -qq install -y iputils-ping iproute2 >/dev/null # Set up build directory /src/cometbft -ENV COMETBFT_BUILD_OPTIONS badgerdb,boltdb,cleveldb,rocksdb +ENV COMETBFT_BUILD_OPTIONS badgerdb,rocksdb,clock_skew,bls12381 WORKDIR /src/cometbft # Fetch dependencies separately (for layer caching) -COPY go.mod go.sum ./ +COPY go.mod go.sum api/go.mod api/go.sum ./ RUN go mod download # Build CometBFT and install into /usr/bin/cometbft @@ -25,7 +27,7 @@ RUN cd test/e2e && make node && cp build/node /usr/bin/app WORKDIR /cometbft VOLUME /cometbft ENV CMTHOME=/cometbft -ENV GORACE "halt_on_error=1" +ENV GORACE="halt_on_error=1" EXPOSE 26656 26657 26660 6060 ENTRYPOINT ["/usr/bin/entrypoint"] diff --git a/test/e2e/docker/Dockerfile.debug b/test/e2e/docker/Dockerfile.debug index 595854354c2..5a39794d508 100644 --- a/test/e2e/docker/Dockerfile.debug +++ b/test/e2e/docker/Dockerfile.debug @@ -1,15 +1,14 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. 
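+# The cometbft/cometbft-db-testing base image ships these database libraries
+# prebuilt, replacing the apt installs previously done here.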
# We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.21 +FROM cometbft/cometbft-db-testing:v1.0.1 RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null -RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null RUN apt-get -qq install -y zsh vim >/dev/null RUN go install github.com/go-delve/delve/cmd/dlv@latest # Set up build directory /src/cometbft -ENV COMETBFT_BUILD_OPTIONS badgerdb,boltdb,cleveldb,rocksdb,nostrip +ENV COMETBFT_BUILD_OPTIONS badgerdb,rocksdb,nostrip,clock_skew,bls12381 WORKDIR /src/cometbft # Fetch dependencies separately (for layer caching) @@ -19,7 +18,7 @@ RUN go mod download # Build CometBFT and install into /usr/bin/cometbft COPY . . RUN echo $COMETBFT_BUILD_OPTION && make build && cp build/cometbft /usr/bin/cometbft -COPY test/e2e/docker/entrypoint* /usr/bin/ +COPY test/e2e/docker/entrypoint-delve /usr/bin/entrypoint-builtin RUN cd test/e2e && make node && cp build/node /usr/bin/app # Set up runtime directory. We don't use a separate runtime image since we need @@ -30,6 +29,4 @@ ENV CMTHOME=/cometbft ENV GORACE "halt_on_error=1" EXPOSE 26656 26657 26660 6060 2345 2346 -ENTRYPOINT ["/usr/bin/entrypoint-delve"] -CMD ["node"] STOPSIGNAL SIGTERM diff --git a/test/e2e/docker/Dockerfile.fast b/test/e2e/docker/Dockerfile.fast index 5c29052ce60..4027b769488 100644 --- a/test/e2e/docker/Dockerfile.fast +++ b/test/e2e/docker/Dockerfile.fast @@ -1,4 +1,5 @@ -FROM gcr.io/distroless/static-debian11:debug +FROM alpine:latest +RUN apk add --no-cache iproute2 COPY build/node /app COPY docker/entrypoint-fast /usr/bin/entrypoint-builtin @@ -6,7 +7,7 @@ COPY docker/entrypoint-fast /usr/bin/entrypoint-builtin WORKDIR /cometbft VOLUME /cometbft ENV CMTHOME=/cometbft -ENV GORACE "halt_on_error=1" +ENV GORACE="halt_on_error=1" EXPOSE 26656 26657 26660 6060 ENTRYPOINT ["/usr/bin/entrypoint-builtin"] diff --git a/test/e2e/docker/entrypoint b/test/e2e/docker/entrypoint index 866e8ef870d..743df14bae2 100755 --- a/test/e2e/docker/entrypoint +++ b/test/e2e/docker/entrypoint @@ -1,5 +1,11 @@ #!/usr/bin/env bash +set -e + +if [ -f /cometbft/emulate-latency.sh ]; then + /cometbft/emulate-latency.sh +fi + # Forcibly remove any stray UNIX sockets left behind from previous runs rm -rf /var/run/privval.sock /var/run/app.sock diff --git a/test/e2e/docker/entrypoint-builtin b/test/e2e/docker/entrypoint-builtin index b6039915656..c35e6be7899 100755 --- a/test/e2e/docker/entrypoint-builtin +++ b/test/e2e/docker/entrypoint-builtin @@ -1,5 +1,11 @@ #!/usr/bin/env bash +set -e + +if [ -f /cometbft/emulate-latency.sh ]; then + /cometbft/emulate-latency.sh +fi + # Forcibly remove any stray UNIX sockets left behind from previous runs rm -rf /var/run/privval.sock /var/run/app.sock diff --git a/test/e2e/docker/entrypoint-builtin-delve b/test/e2e/docker/entrypoint-builtin-delve index 8667c722e29..646d1192aa2 100755 --- a/test/e2e/docker/entrypoint-builtin-delve +++ b/test/e2e/docker/entrypoint-builtin-delve @@ -1,5 +1,11 @@ #!/usr/bin/env bash +set -e + +if [ -f /cometbft/emulate-latency.sh ]; then + /cometbft/emulate-latency.sh +fi + # Forcibly remove any stray UNIX sockets left behind from previous runs rm -rf /var/run/privval.sock /var/run/app.sock diff --git a/test/e2e/docker/entrypoint-delve b/test/e2e/docker/entrypoint-delve index b822587f2bc..b2fd37e16eb 100755 --- a/test/e2e/docker/entrypoint-delve +++ b/test/e2e/docker/entrypoint-delve @@ -1,10 +1,16 @@ #!/usr/bin/env bash +set -e + +if [ -f 
/cometbft/emulate-latency.sh ]; then + /cometbft/emulate-latency.sh +fi + # Forcibly remove any stray UNIX sockets left behind from previous runs rm -rf /var/run/privval.sock /var/run/app.sock # dlv won't run the app until you connect to it with a client. -# Once the app is run, the signer will try only a few times before stopping, so don't take long to let commet run as well. +# Once the app is run, the signer will try only a few times before stopping, so don't take long to let comet run as well. dlv --headless --listen=:2345 --log --log-output=debugger,debuglineerr,gdbwire,lldbout,rpc --accept-multiclient --api-version=2 exec /usr/bin/app -- /cometbft/config/app.toml & sleep 30 diff --git a/test/e2e/docker/entrypoint-fast b/test/e2e/docker/entrypoint-fast index b2e4f5edb36..f69a94657d0 100755 --- a/test/e2e/docker/entrypoint-fast +++ b/test/e2e/docker/entrypoint-fast @@ -1,5 +1,10 @@ -#!/busybox/sh +#!/bin/sh + +set -e + +if [ -f /cometbft/emulate-latency.sh ]; then + /cometbft/emulate-latency.sh +fi rm -rf /var/run/privval.sock /var/run/app.sock /app /cometbft/config/app.toml # > /dev/null - diff --git a/test/e2e/fast-prototyping/README.md b/test/e2e/fast-prototyping/README.md deleted file mode 100644 index b73b69102f8..00000000000 --- a/test/e2e/fast-prototyping/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Fast Prototyping - -This directory contains a set of scripts to run experiments for fast-prototyping cometbft reactors. -Please note that (to date) these scripts are mostly oriented toward prototyping mempool reactors. - -## Fast docker image - -To use the fast-prototyping feature and be able to run dozens of nodes in containers, you need a very lean docker image. -To build it: -- Have an updated docker installed -- From the CometBFT clone, build - ```bash - E2E_DIR=$(pwd)/test/e2e - cd ${E2E_DIR} - make node-fast generator runner docker-fast - ``` - -- At this point you can run some tests using the fast docker image, but be aware that it is a very lean image. - ``` - ./run-multiple.sh networks/long.toml - ``` diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index b36268e13d3..2605d91502b 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -13,6 +13,9 @@ import ( "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing/object" + "github.com/cometbft/cometbft/crypto/bls12381" + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/crypto/secp256k1" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/version" ) @@ -20,7 +23,7 @@ import ( var ( // testnetCombinations defines global testnet options, where we generate a // separate testnet for each combination (Cartesian product) of options. - testnetCombinations = map[string][]interface{}{ + testnetCombinations = map[string][]any{ "topology": {"single", "quad", "large"}, "initialHeight": {0, 1000}, "initialState": { @@ -28,13 +31,14 @@ var ( map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, }, "validators": {"genesis", "initchain"}, + "no_lanes": {true, false}, } nodeVersions = weightedChoice{ "": 2, } // The following specify randomly chosen values for testnet nodes. 
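 	// uniformChoice picks one entry uniformly at random, while weightedChoice
 	// picks one entry with probability proportional to its weight (see random.go).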
- nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} + nodeDatabases = uniformChoice{"goleveldb", "rocksdb", "badgerdb", "pebbledb"} ipv6 = uniformChoice{false, true} // FIXME: grpc disabled due to https://github.com/tendermint/tendermint/issues/5439 nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "builtin_connsync"} // "grpc" @@ -49,7 +53,7 @@ var ( 4 * int(e2e.EvidenceAgeHeight), } nodeEnableCompanionPruning = uniformChoice{true, false} - evidence = uniformChoice{0, 1, 10} + evidence = uniformChoice{0, 1, 10, 20, 200} abciDelays = uniformChoice{"none", "small", "large"} nodePerturbations = probSetChoice{ "disconnect": 0.1, @@ -61,9 +65,16 @@ var ( lightNodePerturbations = probSetChoice{ "upgrade": 0.3, } - voteExtensionEnableHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} - voteExtensionEnabled = uniformChoice{true, false} - voteExtensionSize = uniformChoice{uint(128), uint(512), uint(2048), uint(8192)} // TODO: define the right values depending on experiment results. + voteExtensionsUpdateHeight = uniformChoice{int64(-1), int64(0), int64(1)} // -1: genesis, 0: InitChain, 1: (use offset) + voteExtensionEnabled = weightedChoice{true: 3, false: 1} + voteExtensionsHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} + voteExtensionSize = uniformChoice{uint(128), uint(512), uint(2048), uint(8192)} // TODO: define the right values depending on experiment results. + pbtsUpdateHeight = uniformChoice{int64(-1), int64(0), int64(1)} // -1: genesis, 0: InitChain, 1: (use offset) + pbtsEnabled = weightedChoice{true: 3, false: 1} + pbtsHeightOffset = uniformChoice{int64(0), int64(10), int64(100)} + keyType = uniformChoice{ed25519.KeyType, secp256k1.KeyType, bls12381.KeyType} + // TODO: reinstate this once the oscillation logic is fixed. + // constantFlip = uniformChoice{true, false}. ) type generateConfig struct { @@ -71,6 +82,7 @@ type generateConfig struct { outputDir string multiVersion string prometheus bool + logLevel string } // Generate generates random testnets using the given RNG. @@ -112,7 +124,7 @@ func Generate(cfg *generateConfig) ([]e2e.Manifest, error) { } manifests := []e2e.Manifest{} for _, opt := range combinations(testnetCombinations) { - manifest, err := generateTestnet(cfg.randSource, opt, upgradeVersion, cfg.prometheus) + manifest, err := generateTestnet(cfg.randSource, opt, upgradeVersion, cfg.prometheus, cfg.logLevel) if err != nil { return nil, err } @@ -122,18 +134,20 @@ func Generate(cfg *generateConfig) ([]e2e.Manifest, error) { } // generateTestnet generates a single testnet with the given options. 
-func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion string, prometheus bool) (e2e.Manifest, error) { +func generateTestnet(r *rand.Rand, opt map[string]any, upgradeVersion string, prometheus bool, logLevel string) (e2e.Manifest, error) { manifest := e2e.Manifest{ - IPv6: ipv6.Choose(r).(bool), - ABCIProtocol: nodeABCIProtocols.Choose(r).(string), - InitialHeight: int64(opt["initialHeight"].(int)), - InitialState: opt["initialState"].(map[string]string), - Validators: &map[string]int64{}, - ValidatorUpdates: map[string]map[string]int64{}, - Evidence: evidence.Choose(r).(int), - Nodes: map[string]*e2e.ManifestNode{}, - UpgradeVersion: upgradeVersion, - Prometheus: prometheus, + IPv6: ipv6.Choose(r).(bool), + ABCIProtocol: nodeABCIProtocols.Choose(r).(string), + InitialHeight: int64(opt["initialHeight"].(int)), + InitialState: opt["initialState"].(map[string]string), + Validators: map[string]int64{}, + ValidatorUpdatesMap: map[string]map[string]int64{}, + KeyType: keyType.Choose(r).(string), + Evidence: evidence.Choose(r).(int), + NodesMap: map[string]*e2e.ManifestNode{}, + UpgradeVersion: upgradeVersion, + Prometheus: prometheus, + LogLevel: logLevel, } switch abciDelays.Choose(r).(string) { @@ -150,13 +164,30 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st manifest.VoteExtensionDelay = 100 * time.Millisecond manifest.FinalizeBlockDelay = 500 * time.Millisecond } - + manifest.VoteExtensionsUpdateHeight = voteExtensionsUpdateHeight.Choose(r).(int64) + if manifest.VoteExtensionsUpdateHeight == 1 { + manifest.VoteExtensionsUpdateHeight = manifest.InitialHeight + voteExtensionsHeightOffset.Choose(r).(int64) + } if voteExtensionEnabled.Choose(r).(bool) { - manifest.VoteExtensionsEnableHeight = manifest.InitialHeight + voteExtensionEnableHeightOffset.Choose(r).(int64) + baseHeight := max(manifest.VoteExtensionsUpdateHeight+1, manifest.InitialHeight) + manifest.VoteExtensionsEnableHeight = baseHeight + voteExtensionsHeightOffset.Choose(r).(int64) } manifest.VoteExtensionSize = voteExtensionSize.Choose(r).(uint) + // TODO: reinstate this once the oscillation logic is fixed. + // manifest.ConstantFlip = constantFlip.Choose(r).(bool) + manifest.ConstantFlip = false + + manifest.PbtsUpdateHeight = pbtsUpdateHeight.Choose(r).(int64) + if manifest.PbtsUpdateHeight == 1 { + manifest.PbtsUpdateHeight = manifest.InitialHeight + pbtsHeightOffset.Choose(r).(int64) + } + if pbtsEnabled.Choose(r).(bool) { + baseHeight := max(manifest.PbtsUpdateHeight+1, manifest.InitialHeight) + manifest.PbtsEnableHeight = baseHeight + pbtsHeightOffset.Choose(r).(int64) + } + // TODO: Add skew config var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": @@ -175,7 +206,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { - manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( + manifest.NodesMap[fmt.Sprintf("seed%02d", i)] = generateNode( r, e2e.ModeSeed, 0, false) } @@ -184,6 +215,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // the initial validator set, and validator set updates for delayed nodes. 
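+	// Example: with initialHeight=1000 and 4 validators, quorum is 3, so
+	// validator04 gets startAt=1005 and joins the validator set through an
+	// update scheduled at height 1010 (startAt+5).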
nextStartAt := manifest.InitialHeight + 5 quorum := numValidators*2/3 + 1 + var totalWeight int64 for i := 1; i <= numValidators; i++ { startAt := int64(0) if i > quorum { @@ -191,24 +223,42 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) - manifest.Nodes[name] = generateNode( - r, e2e.ModeValidator, startAt, i <= 2) + manifest.NodesMap[name] = generateNode(r, e2e.ModeValidator, startAt, i <= 2) + weight := int64(30 + r.Intn(71)) if startAt == 0 { - (*manifest.Validators)[name] = int64(30 + r.Intn(71)) + manifest.Validators[name] = weight } else { - manifest.ValidatorUpdates[fmt.Sprint(startAt+5)] = map[string]int64{ - name: int64(30 + r.Intn(71)), - } + manifest.ValidatorUpdatesMap[strconv.FormatInt(startAt+5, 10)] = map[string]int64{name: weight} + } + totalWeight += weight + } + + // Add clock skew only to processes that accumulate less than 1/3 of voting power. + var accWeight int64 + for i := 1; i <= numValidators; i++ { + name := fmt.Sprintf("validator%02d", i) + startAt := manifest.NodesMap[name].StartAt + var weight int64 + if startAt == 0 { + weight = manifest.Validators[name] + } else { + weight = manifest.ValidatorUpdatesMap[strconv.FormatInt(startAt+5, 10)][name] } + + if accWeight > totalWeight*2/3 { + // Interval: [-500ms, 59s500ms) + manifest.NodesMap[name].ClockSkew = time.Duration(int64(r.Float64()*float64(time.Minute))) - 500*time.Millisecond + } + accWeight += weight } // Move validators to InitChain if specified. switch opt["validators"].(string) { case "genesis": case "initchain": - manifest.ValidatorUpdates["0"] = *manifest.Validators - manifest.Validators = &map[string]int64{} + manifest.ValidatorUpdatesMap["0"] = manifest.Validators + manifest.Validators = map[string]int64{} default: return manifest, fmt.Errorf("invalid validators option %q", opt["validators"]) } @@ -220,7 +270,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st startAt = nextStartAt nextStartAt += 5 } - manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( + manifest.NodesMap[fmt.Sprintf("full%02d", i)] = generateNode( r, e2e.ModeFull, startAt, false) } @@ -228,8 +278,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // each other, while non-seed nodes either use a set of random seeds or a // set of random peers that start before themselves. var seedNames, peerNames, lightProviders []string - for name, node := range manifest.Nodes { - if node.Mode == string(e2e.ModeSeed) { + for name, node := range manifest.NodesMap { + if node.ModeStr == string(e2e.ModeSeed) { seedNames = append(seedNames, name) } else { // if the full node or validator is an ideal candidate, it is added as a light provider. 
@@ -244,7 +294,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st for _, name := range seedNames { for _, otherName := range seedNames { if name != otherName { - manifest.Nodes[name].Seeds = append(manifest.Nodes[name].Seeds, otherName) + manifest.NodesMap[name].SeedsList = append(manifest.NodesMap[name].SeedsList, otherName) } } } @@ -252,9 +302,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st sort.Slice(peerNames, func(i, j int) bool { iName, jName := peerNames[i], peerNames[j] switch { - case manifest.Nodes[iName].StartAt < manifest.Nodes[jName].StartAt: + case manifest.NodesMap[iName].StartAt < manifest.NodesMap[jName].StartAt: return true - case manifest.Nodes[iName].StartAt > manifest.Nodes[jName].StartAt: + case manifest.NodesMap[iName].StartAt > manifest.NodesMap[jName].StartAt: return false default: return strings.Compare(iName, jName) == -1 @@ -262,20 +312,22 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st }) for i, name := range peerNames { if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) { - manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) + manifest.NodesMap[name].SeedsList = uniformSetChoice(seedNames).Choose(r) } else if i > 0 { - manifest.Nodes[name].PersistentPeers = uniformSetChoice(peerNames[:i]).Choose(r) + manifest.NodesMap[name].PersistentPeersList = uniformSetChoice(peerNames[:i]).Choose(r) } } // lastly, set up the light clients for i := 1; i <= numLightClients; i++ { startAt := manifest.InitialHeight + 5 - manifest.Nodes[fmt.Sprintf("light%02d", i)] = generateLightNode( + manifest.NodesMap[fmt.Sprintf("light%02d", i)] = generateLightNode( r, startAt+(5*int64(i)), lightProviders, ) } + manifest.NoLanes = opt["no_lanes"].(bool) + return manifest, nil } @@ -288,13 +340,13 @@ func generateNode( ) *e2e.ManifestNode { node := e2e.ManifestNode{ Version: nodeVersions.Choose(r).(string), - Mode: string(mode), + ModeStr: string(mode), StartAt: startAt, Database: nodeDatabases.Choose(r).(string), - PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), + PrivvalProtocolStr: nodePrivvalProtocols.Choose(r).(string), BlockSyncVersion: nodeBlockSyncs.Choose(r).(string), StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, - PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), + PersistIntervalPtr: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), RetainBlocks: uint64(nodeRetainBlocks.Choose(r).(int)), EnableCompanionPruning: false, @@ -310,19 +362,19 @@ func generateNode( // If a node which does not persist state also does not retain blocks, randomly // choose to either persist state or retain all blocks. - if node.PersistInterval != nil && *node.PersistInterval == 0 && node.RetainBlocks > 0 { + if node.PersistIntervalPtr != nil && *node.PersistIntervalPtr == 0 && node.RetainBlocks > 0 { if r.Float64() > 0.5 { node.RetainBlocks = 0 } else { - node.PersistInterval = ptrUint64(node.RetainBlocks) + node.PersistIntervalPtr = ptrUint64(node.RetainBlocks) } } // If either PersistInterval or SnapshotInterval are greater than RetainBlocks, // expand the block retention time. 
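 	// e.g., RetainBlocks=5 with SnapshotInterval=100 is bumped to
 	// RetainBlocks=100, presumably so the blocks behind the latest snapshot
 	// remain available.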
if node.RetainBlocks > 0 { - if node.PersistInterval != nil && node.RetainBlocks < *node.PersistInterval { - node.RetainBlocks = *node.PersistInterval + if node.PersistIntervalPtr != nil && node.RetainBlocks < *node.PersistIntervalPtr { + node.RetainBlocks = *node.PersistIntervalPtr } if node.RetainBlocks < node.SnapshotInterval { node.RetainBlocks = node.SnapshotInterval @@ -340,13 +392,13 @@ func generateNode( func generateLightNode(r *rand.Rand, startAt int64, providers []string) *e2e.ManifestNode { return &e2e.ManifestNode{ - Mode: string(e2e.ModeLight), - Version: nodeVersions.Choose(r).(string), - StartAt: startAt, - Database: nodeDatabases.Choose(r).(string), - PersistInterval: ptrUint64(0), - PersistentPeers: providers, - Perturb: lightNodePerturbations.Choose(r), + ModeStr: string(e2e.ModeLight), + Version: nodeVersions.Choose(r).(string), + StartAt: startAt, + Database: nodeDatabases.Choose(r).(string), + PersistIntervalPtr: ptrUint64(0), + PersistentPeersList: providers, + Perturb: lightNodePerturbations.Choose(r), } } @@ -367,11 +419,12 @@ func parseWeightedVersions(s string) (weightedChoice, string, error) { for _, wv := range wvs { parts := strings.Split(strings.TrimSpace(wv), ":") var ver string - if len(parts) == 2 { + switch len(parts) { + case 2: ver = strings.TrimSpace(strings.Join([]string{"cometbft/e2e-node", parts[0]}, ":")) - } else if len(parts) == 3 { + case 3: ver = strings.TrimSpace(strings.Join([]string{parts[0], parts[1]}, ":")) - } else { + default: return nil, "", fmt.Errorf("unexpected weight:version combination: %s", wv) } @@ -412,7 +465,7 @@ func gitRepoLatestReleaseVersion(gitRepoDir string) (string, error) { if err != nil { return "", err } - return findLatestReleaseTag(version.TMCoreSemVer, tags) + return findLatestReleaseTag(version.CMTSemVer, tags) } func findLatestReleaseTag(baseVer string, tags []string) (string, error) { diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index 670f701e4b3..9c2fec2df0a 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -1,3 +1,5 @@ +//go:build bls12381 + package main import ( @@ -6,13 +8,12 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" e2e "github.com/cometbft/cometbft/test/e2e/pkg" ) -// TestGenerator tests that only valid manifests are generated +// TestGenerator tests that only valid manifests are generated. 
func TestGenerator(t *testing.T) { cfg := &generateConfig{ randSource: rand.New(rand.NewSource(randomSeed)), @@ -24,47 +25,8 @@ func TestGenerator(t *testing.T) { t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { infra, err := e2e.NewDockerInfrastructureData(m) require.NoError(t, err) - _, err = e2e.NewTestnetFromManifest(m, filepath.Join(t.TempDir(), fmt.Sprintf("Case%04d", idx)), infra) + _, err = e2e.NewTestnetFromManifest(m, filepath.Join(t.TempDir(), fmt.Sprintf("Case%04d", idx)), infra, "") require.NoError(t, err) }) } } - -func TestVersionFinder(t *testing.T) { - testCases := []struct { - baseVer string - tags []string - expectedLatest string - }{ - { - baseVer: "v0.34.0", - tags: []string{"v0.34.0", "v0.34.1", "v0.34.2", "v0.34.3-rc1", "v0.34.3", "v0.35.0", "v0.35.1", "v0.36.0-rc1"}, - expectedLatest: "v0.34.3", - }, - { - baseVer: "v0.38.0-dev", - tags: []string{"v0.34.0", "v0.34.1", "v0.34.2", "v0.37.0-rc2", "dev-v0.38.0"}, - expectedLatest: "", - }, - { - baseVer: "v0.37.1-rc1", - tags: []string{"v0.36.0", "v0.37.0-rc1", "v0.37.0"}, - expectedLatest: "v0.37.0", - }, - { - baseVer: "v1.0.0", - tags: []string{"v0.34.0", "v0.35.0", "v1.0.0", "v1.0.1"}, - expectedLatest: "v1.0.1", - }, - { - baseVer: "v1.1.5", - tags: []string{"v0.35.0", "v1.0.0", "v1.0.1", "v1.1.1", "v1.1.2", "v1.1.3", "v1.1.4"}, - expectedLatest: "v1.1.4", - }, - } - for _, tc := range testCases { - actualLatest, err := findLatestReleaseTag(tc.baseVer, tc.tags) - require.NoError(t, err) - assert.Equal(t, tc.expectedLatest, actualLatest) - } -} diff --git a/test/e2e/generator/generate_version_test.go b/test/e2e/generator/generate_version_test.go new file mode 100644 index 00000000000..a6af7453422 --- /dev/null +++ b/test/e2e/generator/generate_version_test.go @@ -0,0 +1,47 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestVersionFinder(t *testing.T) { + testCases := []struct { + baseVer string + tags []string + expectedLatest string + }{ + { + baseVer: "v0.34.0", + tags: []string{"v0.34.0", "v0.34.1", "v0.34.2", "v0.34.3-rc1", "v0.34.3", "v0.35.0", "v0.35.1", "v0.36.0-rc1"}, + expectedLatest: "v0.34.3", + }, + { + baseVer: "v0.38.0-dev", + tags: []string{"v0.34.0", "v0.34.1", "v0.34.2", "v0.37.0-rc2", "dev-v0.38.0"}, + expectedLatest: "", + }, + { + baseVer: "v0.37.1-rc1", + tags: []string{"v0.36.0", "v0.37.0-rc1", "v0.37.0"}, + expectedLatest: "v0.37.0", + }, + { + baseVer: "v1.0.0", + tags: []string{"v0.34.0", "v0.35.0", "v1.0.0", "v1.0.1"}, + expectedLatest: "v1.0.1", + }, + { + baseVer: "v1.1.5", + tags: []string{"v0.35.0", "v1.0.0", "v1.0.1", "v1.1.1", "v1.1.2", "v1.1.3", "v1.1.4"}, + expectedLatest: "v1.1.4", + }, + } + for _, tc := range testCases { + actualLatest, err := findLatestReleaseTag(tc.baseVer, tc.tags) + require.NoError(t, err) + assert.Equal(t, tc.expectedLatest, actualLatest) + } +} diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index f4f8140c0c1..6652bea2b94 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -16,7 +16,7 @@ const ( randomSeed int64 = 4827085738 ) -var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +var logger = log.NewLogger(os.Stdout) func main() { NewCLI().Run() @@ -31,11 +31,11 @@ type CLI struct { func NewCLI() *CLI { cli := &CLI{} cli.root = &cobra.Command{ - Use: "generator -d dir [-g int] [-m version_weight_csv] [-p]", + Use: "generator -d dir [-g int] [-m version_weight_csv] [-p] [-l log_level]", Short: "End-to-end testnet 
generator", SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { dir, err := cmd.Flags().GetString("dir") if err != nil { return err @@ -52,7 +52,11 @@ func NewCLI() *CLI { if err != nil { return err } - return cli.generate(dir, groups, multiVersion, prometheus) + logLevel, err := cmd.Flags().GetString("log-level") + if err != nil { + return err + } + return cli.generate(dir, groups, multiVersion, prometheus, logLevel) }, } @@ -62,12 +66,13 @@ func NewCLI() *CLI { "or empty to only use this branch's version") cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") cli.root.PersistentFlags().BoolP("prometheus", "p", false, "Enable generation of Prometheus metrics on all manifests") + cli.root.PersistentFlags().StringP("log-level", "l", "", "Log level to use in the CometBFT config file, e.g. 'debug'") return cli } // generate generates manifests in a directory. -func (cli *CLI) generate(dir string, groups int, multiVersion string, prometheus bool) error { +func (*CLI) generate(dir string, groups int, multiVersion string, prometheus bool, logLevel string) error { err := os.MkdirAll(dir, 0o755) if err != nil { return err @@ -77,6 +82,7 @@ func (cli *CLI) generate(dir string, groups int, multiVersion string, prometheus randSource: rand.New(rand.NewSource(randomSeed)), //nolint:gosec multiVersion: multiVersion, prometheus: prometheus, + logLevel: logLevel, } manifests, err := Generate(cfg) if err != nil { diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go index 4312eb30d70..3614355576e 100644 --- a/test/e2e/generator/random.go +++ b/test/e2e/generator/random.go @@ -20,26 +20,26 @@ import ( // {"foo": 2, "bar": 6} // {"foo": 3, "bar": 4} // {"foo": 3, "bar": 5} -// {"foo": 3, "bar": 6} -func combinations(items map[string][]interface{}) []map[string]interface{} { +// {"foo": 3, "bar": 6}. +func combinations(items map[string][]any) []map[string]any { keys := []string{} for key := range items { keys = append(keys, key) } sort.Strings(keys) - return combiner(map[string]interface{}{}, keys, items) + return combiner(map[string]any{}, keys, items) } // combiner is a utility function for combinations. -func combiner(head map[string]interface{}, pending []string, items map[string][]interface{}) []map[string]interface{} { +func combiner(head map[string]any, pending []string, items map[string][]any) []map[string]any { if len(pending) == 0 { - return []map[string]interface{}{head} + return []map[string]any{head} } key, pending := pending[0], pending[1:] - result := []map[string]interface{}{} + result := []map[string]any{} for _, value := range items[key] { - path := map[string]interface{}{} + path := map[string]any{} for k, v := range head { path[k] = v } @@ -50,9 +50,9 @@ func combiner(head map[string]interface{}, pending []string, items map[string][] } // uniformChoice chooses a single random item from the argument list, uniformly weighted. -type uniformChoice []interface{} +type uniformChoice []any -func (uc uniformChoice) Choose(r *rand.Rand) interface{} { +func (uc uniformChoice) Choose(r *rand.Rand) any { return uc[r.Intn(len(uc))] } @@ -85,11 +85,11 @@ func (usc uniformSetChoice) Choose(r *rand.Rand) []string { } // weightedChoice chooses a single random key from a map of keys and weights. 
-type weightedChoice map[interface{}]uint +type weightedChoice map[any]uint -func (wc weightedChoice) Choose(r *rand.Rand) interface{} { +func (wc weightedChoice) Choose(r *rand.Rand) any { total := 0 - choices := make([]interface{}, 0, len(wc)) + choices := make([]any, 0, len(wc)) for choice, weight := range wc { total += int(weight) choices = append(choices, choice) diff --git a/test/e2e/generator/random_test.go b/test/e2e/generator/random_test.go index 3fbb19ab5a7..0d7d909acc2 100644 --- a/test/e2e/generator/random_test.go +++ b/test/e2e/generator/random_test.go @@ -7,14 +7,14 @@ import ( ) func TestCombinations(t *testing.T) { - input := map[string][]interface{}{ + input := map[string][]any{ "bool": {false, true}, "int": {1, 2, 3}, "string": {"foo", "bar"}, } c := combinations(input) - assert.Equal(t, []map[string]interface{}{ + assert.Equal(t, []map[string]any{ {"bool": false, "int": 1, "string": "foo"}, {"bool": false, "int": 1, "string": "bar"}, {"bool": false, "int": 2, "string": "foo"}, diff --git a/test/e2e/monitoring/README.md b/test/e2e/monitoring/README.md new file mode 100644 index 00000000000..b9f67ba9b42 --- /dev/null +++ b/test/e2e/monitoring/README.md @@ -0,0 +1,39 @@ +# Monitoring + +Prometheus and Grafana servers for E2E testnets. + +## How to run + +First, `prometheus.yml` must exist in this directory. For example, generate one by running from +`test/e2e`: +```bash +make fast +./build/runner -f networks/simple.toml setup +``` + +To start all monitoring services: +```bash +docker compose up -d +``` + +To stop all monitoring services: +```bash +docker compose down +``` + +## Details + +This Docker Compose file (`compose.yaml`) creates local Grafana and Prometheus servers. It is useful for +local debugging and monitoring. + +Prometheus will connect to the host machine's ports for data, as defined in `prometheus.yml`. + +You can access the Grafana web interface at `http://localhost:3000` and the Prometheus web interface +at `http://localhost:9090`. + +The default Grafana username and password are `admin`/`admin`. You will only need them if you want to +change something. The pre-loaded dashboards can be viewed without a password. + +Data from Grafana and Prometheus ends up in the `data-grafana` and `data-prometheus` folders on your +host machine. This allows you to stop and restart the servers without data loss. The folders are +excluded from Git.
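The README above relies on a `prometheus.yml` generated by the e2e runner. As a rough sketch of what such a file contains, the fragment below shows the general shape; the job name reuses the `validator002` label that appears in the dashboard JSON, while the host port is a placeholder, since the runner assigns per-node ports and Prometheus reaches them through the `host.docker.internal` mapping configured in `compose.yaml`:

```yaml
# Hypothetical minimal prometheus.yml for this setup.
# Job names and host ports are illustrative; the real file is
# generated by the e2e runner and depends on the testnet manifest.
global:
  scrape_interval: 1s

scrape_configs:
  - job_name: validator002
    static_configs:
      - targets: ["host.docker.internal:26670"]
```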
diff --git a/test/e2e/monitoring/compose.yaml b/test/e2e/monitoring/compose.yaml new file mode 100644 index 00000000000..a8141d9dd5a --- /dev/null +++ b/test/e2e/monitoring/compose.yaml @@ -0,0 +1,54 @@ +services: + prometheus: + image: prom/prometheus + #image: prom/prometheus:v2.42.0 # Debian 12 seems to use a fairly old version of prometheus + container_name: prometheus + ports: + - 127.0.0.1:9090:9090 + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml + - ./data-prometheus:/prometheus + extra_hosts: + - "host.docker.internal:host-gateway" +# command: +# - "--config.file=/etc/prometheus/prometheus.yml" +# - "--storage.tsdb.path=/prometheus" +# - "--web.console.libraries=/usr/share/prometheus/console_libraries" +# - "--web.console.templates=/usr/share/prometheus/consoles" +# - "--no-storage.tsdb.wal-compression" +# - "--storage.tsdb.retention.time=1y" +# - "--storage.tsdb.no-lockfile" + grafana: + image: grafana/grafana-oss + container_name: grafana + # if you are running as root then set it to 0 + # else find the right id with the id -u command + user: '501' + ports: + - 127.0.0.1:3000:3000 + volumes: + - ./data-grafana:/var/lib/grafana + - ./config-grafana/provisioning:/etc/grafana/provisioning + - ./config-grafana/grafana.ini:/etc/grafana/grafana.ini + environment: + GF_LOG_LEVEL: info + GF_ANALYTICS_ENABLED: false + GF_ANALYTICS_REPORTING_ENABLED: false + GF_ANALYTICS_CHECK_FOR_PLUGIN_UPDATES: false + GF_ANALYTICS_CHECK_FOR_UPDATES: false + GF_ANALYTICS_FEEDBACK_LINKS_ENABLED: false + GF_SECURITY_DISABLE_GRAVATAR: true + GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /etc/grafana/provisioning/dashboards-data/main.json + GF_USERS_DEFAULT_THEME: system + GF_USERS_EDITORS_CAN_ADMIN: true + GF_AUTH_ANONYMOUS_ENABLED: true + GF_AUTH_ANONYMOUS_ORG_ROLE: Editor + GF_AUTH_BASIC_ENABLED: false + GF_NEWS_NEWS_FEED_ENABLED: false + GF_RENDERING_RENDERER_TOKEN: "-" + GF_RENDERING_SERVER_URL: http://grafana-image-renderer:8081/render + GF_RENDERING_CALLBACK_URL: http://grafana:3000/ + GF_LOG_FILTERS: rendering:debug + grafana-image-renderer: + image: grafana/grafana-image-renderer + container_name: grafana-image-renderer diff --git a/test/e2e/monitoring/config-grafana/grafana.ini b/test/e2e/monitoring/config-grafana/grafana.ini new file mode 100644 index 00000000000..135a7a3db02 --- /dev/null +++ b/test/e2e/monitoring/config-grafana/grafana.ini @@ -0,0 +1,2 @@ +[dashboards] +min_refresh_interval = 1s diff --git a/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json new file mode 100644 index 00000000000..e43d8a67b10 --- /dev/null +++ b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json @@ -0,0 +1,4158 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 2, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 31, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 1 + }, + "id": 30, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "process_resident_memory_bytes/1024/1024", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "memory (Mb)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 7 + }, + "id": 23, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(process_cpu_seconds_total[$__rate_interval])", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "cpu", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 13 + }, + "id": 41, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "process_open_fds", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#open files", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 19 + }, + "id": 42, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "go_goroutines", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#goroutines", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 25 + }, + "id": 43, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "go_threads", + "legendFormat": "{{job}}", + "range": true, + 
"refId": "A" + } + ], + "title": "#threads", + "type": "timeseries" + } + ], + "title": "Process/runtime", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 1, + "panels": [], + "title": "Consensus", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "max(cometbft_consensus_height)" + }, + "properties": [ + { + "id": "custom.lineWidth", + "value": 4 + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [ + 10, + 10 + ], + "fill": "dash" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "min(cometbft_consensus_height)" + }, + "properties": [ + { + "id": "custom.lineWidth", + "value": 4 + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [ + 10, + 10 + ], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 2 + }, + "id": 2, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_height", + "format": "time_series", + "hide": false, + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "max(cometbft_consensus_height)", + "hide": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "min(cometbft_consensus_height)", + "hide": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "C" + } + ], + "title": "Chain height", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 20, + "y": 2 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + 
"showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.1.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_height", + "format": "time_series", + "hide": false, + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Chain height", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 9 + }, + "id": 45, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_chain_size_bytes/1024", + "format": "time_series", + "hide": false, + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Chain size (kb)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "mean" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [ + 10, + 10 + ], + "fill": "dash" + } + }, + { + "id": "custom.lineWidth", + "value": 3 + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 15 + }, + "id": 20, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ 
+ { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(cometbft_consensus_height[60s])*60", + "format": "time_series", + "hide": false, + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(cometbft_consensus_height[60s]))*60 / count(count by (job) (cometbft_consensus_height))", + "hide": false, + "interval": "500ms", + "legendFormat": "mean", + "range": true, + "refId": "C" + } + ], + "title": "blocks/min", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 21 + }, + "id": 3, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_total_txs", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#txs in chain", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 27 + }, + "id": 24, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": 
"code", + "expr": "rate(cometbft_consensus_total_txs[60s])", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "tx/s", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 33 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "builder", + "expr": "cometbft_consensus_block_size_bytes/1024", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Block size (kb)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 39 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "builder", + "expr": "cometbft_consensus_rounds", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Round number", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 45 + }, + "id": 51, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_proposal_create_count", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#Proposal created", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 51 + }, + "id": 52, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_proposal_receive_count", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#Proposal received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 20, + "x": 0, + "y": 57 + }, + "id": 28, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_missing_validators", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#missing validators", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 20, + "x": 0, + "y": 62 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_consensus_validator_missed_blocks", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "#blocks missed", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 20, + "x": 0, + "y": 67 + }, + "id": 69, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"$datasource" + }, + "editorMode": "code", + "expr": "avg(rate(cometbft_consensus_step_duration_seconds_bucket[$__rate_interval])) by (step)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Avg. step duration", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 72 + }, + "id": 6, + "panels": [], + "title": "P2P", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 20, + "x": 0, + "y": 73 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "builder", + "expr": "cometbft_p2p_peers", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 20, + "y": 73 + }, + "id": 46, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.1.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "builder", + "expr": "cometbft_p2p_peers", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of peers", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "mempool_Txs" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "consensus_BlockPart" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 77 + }, + "id": 7, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(sum(rate(cometbft_p2p_message_send_bytes_total[$__rate_interval])) by (message_type))/1024", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Sent kb per message type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 20, + "y": 77 + }, + "id": 27, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.1.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_size", + "format": "time_series", + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Mempool size (#txs)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "mempool_Txs" + }, + "properties": [ + { + "id": 
"color", + "value": { + "fixedColor": "super-light-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "consensus_BlockPart" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 83 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(sum(rate(cometbft_p2p_message_receive_bytes_total[$__rate_interval])) by (message_type))/1024", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Received kb per message type", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 89 + }, + "id": 10, + "panels": [], + "title": "Mempool", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 90 + }, + "id": 11, + "options": { + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_size", + "format": "time_series", + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Mempool size (#txs)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 
null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 97 + }, + "id": 66, + "options": { + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_size_bytes/1024", + "format": "time_series", + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Mempool size (kb)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 104 + }, + "id": 61, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(cometbft_mempool_lane_size) by (lane)", + "hide": false, + "instant": false, + "legendFormat": "{{lane}}", + "range": true, + "refId": "C" + } + ], + "title": "Lane size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 111 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": 
"prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_lane_size", + "legendFormat": "{{job}}, {{lane}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_lane_size{job=\"validator002\"}", + "hide": false, + "instant": false, + "legendFormat": "{{job}}, {{lane}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_lane_size{job=\"full00\"}", + "hide": false, + "instant": false, + "legendFormat": "{{job}}, {{lane}}", + "range": true, + "refId": "C" + } + ], + "title": "Lane size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 118 + }, + "id": 67, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_lane_bytes/1024", + "legendFormat": "{{job}}, {{lane}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_mempool_lane_bytes{job=\"validator002\"}/1024", + "hide": false, + "instant": false, + "legendFormat": "{{job}}, {{lane}}", + "range": true, + "refId": "B" + } + ], + "title": "Lane size (kb)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + 
"gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 125 + }, + "id": 59, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(rate(cometbft_mempool_tx_life_span_bucket{job!=\"full00\"}[$__rate_interval])) by (lane)", + "legendFormat": "{{lane}}", + "range": true, + "refId": "A" + } + ], + "title": "Tx life span (validators)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 132 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(cometbft_mempool_tx_size_bytes_sum[$__rate_interval]) / 1024", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "{{job}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "kb added to the mempool", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 138 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"mempool_Txs|v1_Txs|mempool_Tx|v1_Tx|v1_Message|v1_WantTx|v1_SeenTx|v1_HaveTx|v1_Reset\"}[$__rate_interval])/1024) by (job)", + "hide": false, + "legendFormat": "{{job}}", + "range": true, + "refId": "B" + } + ], + "title": "Received kb (only mempool messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 144 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"mempool_Txs|v1_Txs|mempool_Tx|v1_Tx|v1_Message|v1_WantTx|v1_SeenTx|v1_HaveTx|v1_Reset\"}[$__rate_interval])/1024) by (job)", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Sent kb (only mempool messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 20, + "x": 0, + "y": 150 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": 
"sum(rate(cometbft_mempool_recheck_times[$__rate_interval])) by (job)", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Recheck rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 156 + }, + "id": 49, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "builder", + "expr": "rate(cometbft_mempool_already_received_txs[$__rate_interval])", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Rate of already received txs (cache hits)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 163 + }, + "id": 63, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(cometbft_mempool_rejected_txs[$__rate_interval])", + "legendFormat": "{{job}},{{lane}}", + "range": true, + "refId": "A" + } + ], + "title": "Rejected txs (rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": 
false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 170 + }, + "id": 65, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(cometbft_mempool_evicted_txs[$__rate_interval])", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Evicted txs (rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 177 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(cometbft_mempool_invalid_txs[$__rate_interval])", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Invalid txs (rate)", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 184 + }, + "id": 35, + "panels": [], + "title": "ABCI", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 20, + "x": 0, + "y": 185 + }, + "id": 34, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "avg(rate(cometbft_abci_connection_method_timing_seconds_bucket[$__rate_interval])) by (method)", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Avg. duration of ABCI methods", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 192 + }, + "id": 18, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 20, + "x": 0, + "y": 194 + }, + "id": 56, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "cometbft_statesync_syncing", + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "State syncing", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 80, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 20, + "x": 0, + "y": 198 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "builder", + "expr": "cometbft_blocksync_syncing", + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Block syncing", + "type": "timeseries" + } + ], + "title": "Sync", + "type": "row" + } + ], + "refresh": "1s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1s", + "3s", + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "Main", + "uid": "e718b47d-3d2c-4042-bc6f-52ede52487f0", + "version": 78, + "weekStart": "" +} \ No newline at end of file diff --git a/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json new file mode 100644 index 00000000000..818662f09cf --- /dev/null +++ b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json @@ -0,0 +1,1710 @@ +{ + "title": "Compact", + "id": null, + "uid": null, + "version": 1, + "editable": true, + "liveNow": false, + "timezone": "browser", + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "panels": [], + "title": "Consensus", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + 
"properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 0, + "y": 1 + }, + "id": 2, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "right", + "showLegend": false, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "min(cometbft_consensus_height)", + "format": "time_series", + "interval": "500ms", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Chain height", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 11, + "y": 1 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": false, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "min(cometbft_consensus_total_txs)", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Chain size (#txs)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 11, + "x": 0, + "y": 8 + }, + "id": 4, + "options": { + "legend": { + "calcs": 
[], + "displayMode": "list", + "placement": "right", + "showLegend": false, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "avg(cometbft_consensus_block_size_bytes)", + "legendFormat": "{{job}}", + "range": true, + "refId": "A" + } + ], + "title": "Block size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 11, + "x": 11, + "y": 8 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": false, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(cometbft_consensus_num_txs)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}", + "range": true, + "refId": "B" + } + ], + "title": "Block size (#txs)", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 6, + "panels": [], + "title": "P2P", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 19, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 3, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "consensus_BlockPart" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + }, + { + "id": "custom.lineWidth", + 
"value": 1 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "mempool_*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-yellow", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 30 + }, + { + "id": "custom.lineWidth", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 11, + "x": 0, + "y": 15 + }, + "id": 21, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"consensus_BlockPart\"}[$__rate_interval])) by (message_type)/1024", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"v1_Txs|v1_SeenTx|v1_WantTx\"}[$__rate_interval]))/1024", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "mempool_*", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Sent kb per message type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "consensus_BlockPart" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "mempool_*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-yellow", + "mode": "fixed" + } + }, + { + "id": "custom.lineWidth", + "value": 2 + }, + { + "id": "custom.fillOpacity", + "value": 30 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 11, + "x": 11, + "y": 15 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": 
"sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"consensus_BlockPart\"}[$__rate_interval])) by (message_type)/1024", + "legendFormat": "__auto", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(sum(sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"v1_Txs|v1_SeenTx|v1_WantTx\"}[$__rate_interval])) by (message_type)))/1024", + "hide": false, + "legendFormat": "mempool_*", + "range": true, + "refId": "B" + } + ], + "title": "Received kb per message type", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 10, + "panels": [], + "title": "Mempool", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 0, + "y": 24 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "avg(rate(cometbft_mempool_tx_size_bytes_sum[$__interval])/1024)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Avg. 
size of txs added to mempools (kb)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 11, + "y": 24 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": false, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(cometbft_mempool_size)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "10s", + "legendFormat": "__auto", + "range": true, + "refId": "B" + } + ], + "title": "Avg. mempool size (#txs)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 25, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "validator receiving all load" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "avg. 
of other validators" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#808080", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 0, + "y": 31 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_SeenTx|v1_WantTx\", job=\"validator001\"}[$__rate_interval])))/1024", + "legendFormat": "validator receiving all load", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(avg(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_SeenTx|v1_WantTx\", job!=\"validator001\"}[$__rate_interval])))/1024", + "hide": false, + "legendFormat": "avg. of other validators", + "range": true, + "refId": "B" + } + ], + "title": "Sent kb (only mempool messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 25, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "validator receiving all load" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "avg. 
of other validators" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#7f7f7f", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 11, + "y": 31 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_WantTx|v1_SeenTx\", job=\"validator001\"}[$__rate_interval])))/1024", + "hide": false, + "legendFormat": "validator receiving all load", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "(avg(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_WantTx|v1_SeenTx\", job!=\"validator001\"}[$__rate_interval])))/1024", + "hide": false, + "legendFormat": "avg. of other validators", + "range": true, + "refId": "A" + } + ], + "title": "Received kb (only mempool messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "validator receiving all load" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "avg. of other validators" + }, + "properties": [ + { + "id": "color", + "value": { + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 11, + "x": 0, + "y": 38 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(rate(cometbft_mempool_recheck_times{job=\"validator001\"}[$__rate_interval]))", + "legendFormat": "validator receiving all load", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "avg(rate(cometbft_mempool_recheck_times{job!=\"validator001\"}[$__rate_interval]))", + "hide": false, + "legendFormat": "avg. 
of other validators", + "range": true, + "refId": "B" + } + ], + "title": "Recheck rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "description": "Number of received txs that hit the cache.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 70, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 25, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "hidden" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "validator receiving all load" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "avg. of other validators" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#808080", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 11, + "x": 11, + "y": 38 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "width": 200 + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(rate(cometbft_mempool_already_received_txs{job=\"validator001\"}[$__rate_interval]))", + "legendFormat": "validator receiving all load", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "avg(rate(cometbft_mempool_already_received_txs{job!=\"validator001\"}[$__rate_interval]))", + "hide": false, + "legendFormat": "avg. 
of other validators", + "range": true, + "refId": "B" + } + ], + "title": "Already received txs", + "type": "timeseries" + } + ], + "refresh": "1s", + "schemaVersion": 38, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1s", + "3s", + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + } +} \ No newline at end of file diff --git a/test/e2e/monitoring/config-grafana/provisioning/dashboards/default.yml b/test/e2e/monitoring/config-grafana/provisioning/dashboards/default.yml new file mode 100644 index 00000000000..8a1e028a1f6 --- /dev/null +++ b/test/e2e/monitoring/config-grafana/provisioning/dashboards/default.yml @@ -0,0 +1,13 @@ +apiVersion: 1 + +providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + updateIntervalSeconds: 10 + allowUiUpdates: true + options: + path: '/etc/grafana/provisioning/dashboards-data' + foldersFromFilesStructure: true diff --git a/test/e2e/monitoring/config-grafana/provisioning/datasources/prometheus.yml b/test/e2e/monitoring/config-grafana/provisioning/datasources/prometheus.yml new file mode 100644 index 00000000000..3ad5d908d67 --- /dev/null +++ b/test/e2e/monitoring/config-grafana/provisioning/datasources/prometheus.yml @@ -0,0 +1,9 @@ +# Connect Grafana to Prometheus through a Grafana datasource. Hostname is derived from the docker name. 
+apiVersion: 1 +datasources: + - name: prometheus + uid: prometheus + type: prometheus + url: http://prometheus:9090 + is_default: true + editable: true diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index ac3b131a737..f986f7b92b5 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -3,7 +3,10 @@ ipv6 = true initial_height = 1000 +vote_extensions_update_height = 1004 vote_extensions_enable_height = 1007 +pbts_update_height = 1006 +pbts_enable_height = 1009 evidence = 5 initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" } prepare_proposal_delay = "100ms" @@ -13,7 +16,8 @@ check_tx_delay = "0ms" abci_protocol = "builtin" prometheus = true peer_gossip_intraloop_sleep_duration = "50ms" -abci_tests_enabled = false +abci_tests_enabled = true +constant_flip = true [validators] validator01 = 100 @@ -43,10 +47,11 @@ perturb = ["restart"] seeds = ["seed01"] snapshot_interval = 5 perturb = ["disconnect"] +clock_skew = "20s" [node.validator02] seeds = ["seed01"] -database = "boltdb" +database = "goleveldb" privval_protocol = "tcp" persist_interval = 0 perturb = ["restart"] @@ -56,9 +61,10 @@ seeds = ["seed01"] database = "badgerdb" privval_protocol = "unix" persist_interval = 3 -retain_blocks = 10 +retain_blocks = 20 enable_companion_pruning = true perturb = ["kill"] +key_type = "secp256k1" [node.validator04] persistent_peers = ["validator01"] @@ -68,7 +74,7 @@ perturb = ["pause"] [node.validator05] start_at = 1005 # Becomes part of the validator set at 1010 persistent_peers = ["validator01", "full01"] -database = "cleveldb" +database = "rocksdb" privval_protocol = "tcp" perturb = ["kill", "pause", "disconnect", "restart"] @@ -76,7 +82,7 @@ perturb = ["kill", "pause", "disconnect", "restart"] start_at = 1010 mode = "full" persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"] -retain_blocks = 10 +retain_blocks = 20 enable_companion_pruning = true perturb = ["restart"] diff --git a/test/e2e/networks/long.toml b/test/e2e/networks/long.toml index 86c01b2b5f1..953a3ec9dbc 100644 --- a/test/e2e/networks/long.toml +++ b/test/e2e/networks/long.toml @@ -52,7 +52,7 @@ load_tx_connections = 0 mode = "full" version = "cometbft/e2e-node:latest" persistent_peers = ["validator03", "validator01", "validator06"] - database = "boltdb" + database = "pebbledb" privval_protocol = "tcp" start_at = 750 fast_sync = "v0" @@ -69,7 +69,7 @@ load_tx_connections = 0 seeds = ["seed01"] database = "badgerdb" privval_protocol = "unix" - start_at = 0 + start_at = 0 fast_sync = "v0" mempool_version = "v0" state_sync = false @@ -89,7 +89,7 @@ load_tx_connections = 0 state_sync = false persist_interval = 5 snapshot_interval = 0 - retain_blocks = 7 + retain_blocks = 7 perturb = ["upgrade"] send_no_load = false [node.validator01] @@ -128,7 +128,7 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" seeds = ["seed01"] - database = "boltdb" + database = "pebbledb" privval_protocol = "file" start_at = 0 fast_sync = "v0" @@ -144,7 +144,7 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" seeds = ["seed01"] - database = "cleveldb" + database = "rocksdb" privval_protocol = "tcp" start_at = 0 fast_sync = "v0" @@ -160,7 +160,7 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" persistent_peers = ["validator03"] - database = "cleveldb" + database = "rocksdb" privval_protocol = "unix" start_at = 0 fast_sync = "v0" @@ -176,7 +176,7 @@ load_tx_connections = 0 
mode = "validator" version = "cometbft/e2e-node:latest" persistent_peers = ["full03", "validator02", "validator03"] - database = "boltdb" + database = "pebbledb" privval_protocol = "tcp" start_at = 5 fast_sync = "v0" @@ -192,7 +192,7 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" seeds = ["seed01"] - database = "cleveldb" + database = "rocksdb" privval_protocol = "file" start_at = 10 fast_sync = "v0" @@ -208,12 +208,12 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" seeds = ["seed01"] - database = "cleveldb" + database = "rocksdb" privval_protocol = "file" start_at = 1000 fast_sync = "v0" mempool_version = "v0" - state_sync = false + state_sync = false persist_interval = 5 snapshot_interval = 3 retain_blocks = 0 @@ -224,7 +224,7 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" seeds = ["seed01"] - database = "cleveldb" + database = "rocksdb" privval_protocol = "file" start_at = 1250 fast_sync = "v0" @@ -240,12 +240,12 @@ load_tx_connections = 0 mode = "validator" version = "cometbft/e2e-node:latest" seeds = ["seed01"] - database = "cleveldb" + database = "rocksdb" privval_protocol = "file" start_at = 1500 fast_sync = "v0" mempool_version = "v0" - state_sync = false + state_sync = false persist_interval = 5 snapshot_interval = 3 retain_blocks = 0 diff --git a/test/e2e/networks/simple.toml b/test/e2e/networks/simple.toml index 96b81f79fe1..f84a9d0beb1 100644 --- a/test/e2e/networks/simple.toml +++ b/test/e2e/networks/simple.toml @@ -1,4 +1,5 @@ +prometheus = true +[node.validator00] [node.validator01] [node.validator02] [node.validator03] -[node.validator04] diff --git a/test/e2e/networks/upgrade.toml b/test/e2e/networks/upgrade.toml new file mode 100644 index 00000000000..70d6f2f0bcb --- /dev/null +++ b/test/e2e/networks/upgrade.toml @@ -0,0 +1,37 @@ +evidence = 10 +vote_extensions_enable_height = 100 +abci_protocol = "tcp" +upgrade_version = "cometbft/e2e-node:local-version" + +# Image cometbft/v0.38.x built with v0.38.x as of feb 27/2024 + +[validators] + validator01 = 50 + validator02 = 50 + validator03 = 50 + validator04 = 50 + +[node] + [node.validator01] + mode = "validator" + version = "cometbft/v0.38.x" + perturb = ["upgrade"] + send_no_load = true + [node.validator02] + mode = "validator" + version = "cometbft/v0.38.x" + persistent_peers = ["validator01"] + perturb = [] + send_no_load = true + [node.validator03] + mode = "validator" + version = "cometbft/v0.38.x" + persistent_peers = ["validator01"] + perturb = [] + send_no_load = true + [node.validator04] + mode = "validator" + version = "cometbft/v0.38.x" + persistent_peers = ["validator01"] + perturb = [] + send_no_load = true diff --git a/test/e2e/networks/varyVESize.toml b/test/e2e/networks/varyVESize.toml index 5c97e9e3e75..5c731ea9913 100644 --- a/test/e2e/networks/varyVESize.toml +++ b/test/e2e/networks/varyVESize.toml @@ -29,7 +29,7 @@ vote_extension_size = 8192 [node.validator01] mode = "validator" version = "" - database = "cleveldb" + database = "rocksdb" privval_protocol = "unix" start_at = 0 block_sync_version = "v0" diff --git a/test/e2e/networks_regressions/blocksync_blocked.toml b/test/e2e/networks_regressions/blocksync_blocked.toml new file mode 100644 index 00000000000..c84e610b5a8 --- /dev/null +++ b/test/e2e/networks_regressions/blocksync_blocked.toml @@ -0,0 +1,11 @@ +vote_extensions_enable_height = 1 +pbts_enable_height = 1 + +[validators] +validator01 = 67 +validator02 = 33 + +[node.validator01] + 
+[node.validator02] +start_at = 5 diff --git a/test/e2e/networks_regressions/evidence_fail.toml b/test/e2e/networks_regressions/evidence_fail.toml new file mode 100644 index 00000000000..7b64cf9c9e7 --- /dev/null +++ b/test/e2e/networks_regressions/evidence_fail.toml @@ -0,0 +1,16 @@ +evidence = 120 +prometheus = true +pbts_enable_height = 1 + +[validators] + validator01 = 33 + validator02 = 67 + +[node] + [node.validator01] + mode = "validator" + persistent_peers = ["validator02"] + clock_skew = "40s" + [node.validator02] + mode = "validator" + diff --git a/test/e2e/networks_regressions/nodes_pruning.toml b/test/e2e/networks_regressions/nodes_pruning.toml new file mode 100644 index 00000000000..f95c2612324 --- /dev/null +++ b/test/e2e/networks_regressions/nodes_pruning.toml @@ -0,0 +1,37 @@ +# This test intends to verify the reliability of the data retention settings and pruning behavior. +# Retention of blocks for full nodes is set at 14 (the minimum given the evidence age settings); +# this means that full nodes will retain at least the last 14 blocks of data. +# The 'enable_companion_pruning' flags for the first two full nodes are set to 'true'; since there is +# no actual data companion running in this setup, this effectively prevents pruning from kicking in. + +prometheus = true + +[node] + [node.validator01] + [node.full01] + mode = "full" + retain_blocks = 14 + enable_companion_pruning = true + [node.full02] + mode = "full" + retain_blocks = 14 + enable_companion_pruning = true + [node.full03] + mode = "full" + retain_blocks = 14 + [node.full04] + mode = "full" + retain_blocks = 14 + [node.full05] + mode = "full" + retain_blocks = 14 + [node.full06] + mode = "full" + retain_blocks = 14 + [node.full07] + mode = "full" + retain_blocks = 14 + [node.full08] + mode = "full" + retain_blocks = 14 + start_at = 2 diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index bdd2d2232e8..2f5e0e262b8 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/BurntSushi/toml" + "github.com/cometbft/cometbft/test/e2e/app" cmterrors "github.com/cometbft/cometbft/types/errors" ) @@ -31,12 +32,32 @@ type Config struct { FinalizeBlockDelay time.Duration `toml:"finalize_block_delay"` VoteExtensionDelay time.Duration `toml:"vote_extension_delay"` - VoteExtensionSize uint `toml:"vote_extension_size"` + VoteExtensionSize uint `toml:"vote_extension_size"` + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + VoteExtensionsUpdateHeight int64 `toml:"vote_extensions_update_height"` ABCIRequestsLoggingEnabled bool `toml:"abci_requests_logging_enabled"` + + ExperimentalKeyLayout string `toml:"experimental_db_key_layout"` + + Compact bool `toml:"compact"` + + CompactionInterval bool `toml:"compaction_interval"` + + DiscardABCIResponses bool `toml:"discard_abci_responses"` + + Indexer string `toml:"indexer"` + + PbtsEnableHeight int64 `toml:"pbts_enable_height"` + PbtsUpdateHeight int64 `toml:"pbts_update_height"` + + NoLanes bool `toml:"no_lanes"` + Lanes map[string]uint32 `toml:"lanes"` + + ConstantFlip bool `toml:"constant_flip"` } -// App extracts out the application specific configuration parameters +// App extracts out the application specific configuration parameters.
func (cfg *Config) App() *app.Config { return &app.Config{ Dir: cfg.Dir, @@ -51,7 +72,14 @@ func (cfg *Config) App() *app.Config { FinalizeBlockDelay: cfg.FinalizeBlockDelay, VoteExtensionDelay: cfg.VoteExtensionDelay, VoteExtensionSize: cfg.VoteExtensionSize, + VoteExtensionsEnableHeight: cfg.VoteExtensionsEnableHeight, + VoteExtensionsUpdateHeight: cfg.VoteExtensionsUpdateHeight, ABCIRequestsLoggingEnabled: cfg.ABCIRequestsLoggingEnabled, + PbtsEnableHeight: cfg.PbtsEnableHeight, + PbtsUpdateHeight: cfg.PbtsUpdateHeight, + NoLanes: cfg.NoLanes, + Lanes: cfg.Lanes, + ConstantFlip: cfg.ConstantFlip, } } diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index e7446a79425..b74fdce1754 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -7,6 +7,7 @@ import ( "net/http" "os" "path/filepath" + "strconv" "strings" "time" @@ -15,15 +16,15 @@ import ( "github.com/cometbft/cometbft/abci/server" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/ed25519" + cmtnet "github.com/cometbft/cometbft/internal/net" cmtflags "github.com/cometbft/cometbft/libs/cli/flags" "github.com/cometbft/cometbft/libs/log" - cmtnet "github.com/cometbft/cometbft/libs/net" "github.com/cometbft/cometbft/light" lproxy "github.com/cometbft/cometbft/light/proxy" lrpc "github.com/cometbft/cometbft/light/rpc" dbs "github.com/cometbft/cometbft/light/store/db" "github.com/cometbft/cometbft/node" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/privval" "github.com/cometbft/cometbft/proxy" rpcserver "github.com/cometbft/cometbft/rpc/jsonrpc/server" @@ -31,12 +32,12 @@ import ( e2e "github.com/cometbft/cometbft/test/e2e/pkg" ) -var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +var logger = log.NewLogger(os.Stdout) // main is the binary entrypoint. 
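The `log.NewLogger(os.Stdout)` call above is part of the `libs/log` migration to slog recorded in this changeset's breaking changes. A short sketch of the three constructors this diff touches, assuming the keyvals-style `log.Logger` interface (`Info`, `Error`, `Debug`, `With`) is otherwise unchanged:

```go
package main

import (
	"os"

	"github.com/cometbft/cometbft/libs/log"
)

func main() {
	// Default text logger, replacing the old NewTMLogger/NewSyncWriter pair.
	logger := log.NewLogger(os.Stdout)
	logger.Info("node started", "height", int64(1))

	// Structured JSON output, replacing NewTMJSONLogger.
	jsonLogger := log.NewJSONLogger(os.Stdout)
	jsonLogger.Error("dial failed", "peer", "validator01")

	// Same text logger with ANSI colors disabled.
	plain := log.NewLoggerWithColor(os.Stdout, false)
	plain.With("module", "main").Debug("config loaded")
}
```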
func main() { if len(os.Args) != 2 { - fmt.Printf("Usage: %v <configfile>", os.Args[0]) + fmt.Printf("Usage: %v <configfile>\n", os.Args[0]) return } configFile := "" @@ -132,8 +133,17 @@ func startNode(cfg *Config) error { nodeLogger.Info("Using default (synchronized) local client creator") } + if cfg.ExperimentalKeyLayout != "" { + cmtcfg.Storage.ExperimentalKeyLayout = cfg.ExperimentalKeyLayout + } + + // We hardcode ed25519 here because the priv validator files have already been set up in the setup step. + pv, err := privval.LoadOrGenFilePV(cmtcfg.PrivValidatorKeyFile(), cmtcfg.PrivValidatorStateFile(), nil) + if err != nil { + return err + } n, err := node.NewNode(context.Background(), cmtcfg, - privval.LoadOrGenFilePV(cmtcfg.PrivValidatorKeyFile(), cmtcfg.PrivValidatorStateFile()), + pv, nodeKey, clientCreator, node.DefaultGenesisDocProviderFunc(cmtcfg), @@ -171,7 +181,7 @@ func startLightClient(cfg *Config) error { }, providers[0], providers[1:], - dbs.New(lightDB, "light"), + dbs.NewWithDBVersion(lightDB, "light", cfg.ExperimentalKeyLayout), light.Logger(nodeLogger), ) if err != nil { @@ -230,7 +240,7 @@ func startSigner(cfg *Config) error { return nil } -func setupNode() (*config.Config, log.Logger, *p2p.NodeKey, error) { +func setupNode() (*config.Config, log.Logger, *nodekey.NodeKey, error) { var cmtcfg *config.Config home := os.Getenv("CMTHOME") @@ -258,7 +268,9 @@ func setupNode() (*config.Config, log.Logger, *p2p.NodeKey, error) { } if cmtcfg.LogFormat == config.LogFormatJSON { - logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewJSONLogger(os.Stdout) + } else if !cmtcfg.LogColors { + logger = log.NewLoggerWithColor(os.Stdout, false) } nodeLogger, err := cmtflags.ParseLogLevel(cmtcfg.LogLevel, logger, config.DefaultLogLevel) @@ -266,9 +278,7 @@ func setupNode() (*config.Config, log.Logger, *p2p.NodeKey, error) { return nil, nil, nil, err } - nodeLogger = nodeLogger.With("module", "main") - - nodeKey, err := p2p.LoadOrGenNodeKey(cmtcfg.NodeKeyFile()) + nodeKey, err := nodekey.LoadOrGen(cmtcfg.NodeKeyFile()) if err != nil { return nil, nil, nil, fmt.Errorf("failed to load or gen node key %s: %w", cmtcfg.NodeKeyFile(), err) } @@ -277,7 +287,7 @@ } // rpcEndpoints takes a list of persistent peers and splits them into a list of rpc endpoints -// using 26657 as the port number +// using 26657 as the port number.
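The function that follows implements the comment above with plain string surgery rather than URL parsing. An isolated sketch of the same rewrite, with a hypothetical `toRPCEndpoints` helper standing in for the package-private function, assuming peers arrive as `host:26656` entries:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// toRPCEndpoints is a hypothetical stand-alone version of the rewrite
// documented above: strip the P2P port 26656 and substitute RPC port 26657.
func toRPCEndpoints(peers string) []string {
	endpoints := make([]string, 0)
	for _, peer := range strings.Split(peers, ",") {
		host := strings.Split(peer, ":26656")[0]
		endpoints = append(endpoints, "http://"+host+":"+strconv.Itoa(26657))
	}
	return endpoints
}

func main() {
	fmt.Println(toRPCEndpoints("validator01:26656,validator02:26656"))
	// [http://validator01:26657 http://validator02:26657]
}
```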
func rpcEndpoints(peers string) []string { arr := strings.Split(peers, ",") endpoints := make([]string, len(arr)) @@ -286,7 +296,7 @@ func rpcEndpoints(peers string) []string { hostName := strings.Split(urlString, ":26656")[0] // use RPC port instead port := 26657 - rpcEndpoint := "http://" + hostName + ":" + fmt.Sprint(port) + rpcEndpoint := "http://" + hostName + ":" + strconv.Itoa(port) endpoints[i] = rpcEndpoint } return endpoints diff --git a/test/e2e/pkg/files/aws-latencies.csv b/test/e2e/pkg/files/aws-latencies.csv new file mode 100644 index 00000000000..9e77dac2145 --- /dev/null +++ b/test/e2e/pkg/files/aws-latencies.csv @@ -0,0 +1,14 @@ +From/to,N_Virginia,Canada,N_California,London,Oregon,Ireland,Frankfurt,S_Paulo,Tokyo,Mumbai,Sydney,Seoul,Singapore +N_Virginia,0,7,30,38,39,33,44,58,73,93,98,87,105 +Canada,7,0,38,39,29,35,46,63,70,94,97,85,103 +N_California,30,39,0,68,10,68,75,88,54,116,69,67,86 +London,38,39,68,0,63,5,8,94,104,56,131,118,82 +Oregon,39,29,10,63,0,59,68,88,49,109,69,63,84 +Ireland,33,35,68,5,59,0,13,88,100,61,127,114,90 +Frankfurt,44,46,75,8,68,13,0,101,111,60,143,109,77 +S_Paulo,58,63,88,94,88,89,101,0,128,151,155,142,161 +Tokyo,73,70,54,104,49,100,111,128,0,60,57,16,39 +Mumbai,93,94,116,56,109,61,60,151,60,0,76,57,27 +Sydney,98,97,69,131,69,127,143,155,57,76,0,69,45 +Seoul,87,84,67,118,63,114,109,142,17,57,69,0,36 +Singapore,105,103,86,82,81,90,77,161,39,27,45,36,0 diff --git a/test/e2e/pkg/grammar/abci_grammar.md b/test/e2e/pkg/grammar/abci_grammar.md new file mode 100644 index 00000000000..748d1687f2d --- /dev/null +++ b/test/e2e/pkg/grammar/abci_grammar.md @@ -0,0 +1,75 @@ +``` +package "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto" + +Start : CleanStart | Recovery; + +CleanStart : InitChain ConsensusExec | StateSync ConsensusExec ; +StateSync : StateSyncAttempts SuccessSync | SuccessSync ; +StateSyncAttempts : StateSyncAttempt | StateSyncAttempt StateSyncAttempts ; +StateSyncAttempt : OfferSnapshot ApplyChunks | OfferSnapshot ; +SuccessSync : OfferSnapshot ApplyChunks ; +ApplyChunks : ApplyChunk | ApplyChunk ApplyChunks ; + +Recovery : InitChain ConsensusExec | ConsensusExec ; + +ConsensusExec : ConsensusHeights ; +ConsensusHeights : ConsensusHeight | ConsensusHeight ConsensusHeights ; +ConsensusHeight : ConsensusRounds FinalizeBlock Commit | FinalizeBlock Commit ; +ConsensusRounds : ConsensusRound | ConsensusRound ConsensusRounds ; +ConsensusRound : Proposer | NonProposer ; + +Proposer : GotVotes | ProposerSimple | Extend | GotVotes ProposerSimple | GotVotes Extend | ProposerSimple Extend | GotVotes ProposerSimple Extend ; +ProposerSimple : PrepareProposal | PrepareProposal ProcessProposal ; +NonProposer: GotVotes | ProcessProposal | Extend | GotVotes ProcessProposal | GotVotes Extend | ProcessProposal Extend | GotVotes ProcessProposal Extend ; +Extend : ExtendVote | GotVotes ExtendVote | ExtendVote GotVotes | GotVotes ExtendVote GotVotes ; +GotVotes : GotVote | GotVote GotVotes ; + +InitChain : "init_chain" ; +FinalizeBlock : "finalize_block" ; +Commit : "commit" ; +OfferSnapshot : "offer_snapshot" ; +ApplyChunk : "apply_snapshot_chunk" ; +PrepareProposal : "prepare_proposal" ; +ProcessProposal : "process_proposal" ; +ExtendVote : "extend_vote" ; +GotVote : "verify_vote_extension" ; + +``` + +The original grammar (https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_comet_expected_behavior.md) the grammar above +refers to is below: + +start = clean-start / recovery + +clean-start = ( app-handshake / state-sync ) 
consensus-exec +app-handshake = info init-chain +state-sync = *state-sync-attempt success-sync info +state-sync-attempt = offer-snapshot *apply-chunk +success-sync = offer-snapshot 1*apply-chunk + +recovery = info [init-chain] consensus-exec + +consensus-exec = (inf)consensus-height +consensus-height = *consensus-round finalize-block commit +consensus-round = proposer / non-proposer + +proposer = *got-vote [prepare-proposal [process-proposal]] [extend] +extend = *got-vote extend-vote *got-vote +non-proposer = *got-vote [process-proposal] [extend] + +init-chain = %s"<InitChain>" +offer-snapshot = %s"<OfferSnapshot>" +apply-chunk = %s"<ApplySnapshotChunk>" +info = %s"<Info>" +prepare-proposal = %s"<PrepareProposal>" +process-proposal = %s"<ProcessProposal>" +extend-vote = %s"<ExtendVote>" +got-vote = %s"<VerifyVoteExtension>" +finalize-block = %s"<FinalizeBlock>" +commit = %s"<Commit>" + +*Note* We ignore `Info` since it can be triggered by the e2e tests at unpredictable places because of its role in RPC handling from external clients. + + + + diff --git a/test/e2e/pkg/grammar/checker.go b/test/e2e/pkg/grammar/checker.go index 996c4b15b6d..26e50ad5768 100644 --- a/test/e2e/pkg/grammar/checker.go +++ b/test/e2e/pkg/grammar/checker.go @@ -1,17 +1,16 @@ package grammar import ( + "errors" "fmt" "os" "strings" - "github.com/cometbft/cometbft/libs/log" - abci "github.com/cometbft/cometbft/abci/types" - clean_start_lexer "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/lexer" - clean_start_parser "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser" - recovery_lexer "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/lexer" - recovery_parser "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser" + "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/lexer" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/symbols" ) const Commit = "commit" @@ -54,17 +53,17 @@ func (e *Error) String() string { func NewGrammarChecker(cfg *Config) *Checker { return &Checker{ cfg: cfg, - logger: log.NewTMLogger(log.NewSyncWriter(os.Stdout)), + logger: log.NewLogger(os.Stdout), } } // isSupportedByGrammar returns true for all requests supported by the current grammar ("/pkg/grammar/clean-start/abci_grammar_clean_start.md" and "/pkg/grammar/recovery/abci_grammar_recovery.md"). // This method needs to be modified if we add another ABCI call. -func (g *Checker) isSupportedByGrammar(req *abci.Request) bool { +func (*Checker) isSupportedByGrammar(req *abci.Request) bool { switch req.Value.(type) { case *abci.Request_InitChain, *abci.Request_FinalizeBlock, *abci.Request_Commit, *abci.Request_OfferSnapshot, *abci.Request_ApplySnapshotChunk, *abci.Request_PrepareProposal, - *abci.Request_ProcessProposal: + *abci.Request_ProcessProposal, *abci.Request_ExtendVote, *abci.Request_VerifyVoteExtension: return true default: return false @@ -92,7 +91,7 @@ func (g *Checker) filterLastHeight(reqs []*abci.Request) ([]*abci.Request, int) pos := len(reqs) - 1 cnt := 0 // Find the last commit. - for pos > 0 && g.getRequestTerminal(reqs[pos]) != Commit { + for pos >= 0 && g.getRequestTerminal(reqs[pos]) != Commit { pos-- cnt++ } @@ -100,7 +99,7 @@ } // getRequestTerminal returns a value of a corresponding terminal in the ABCI grammar for a specific request.
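The method that follows leans on the protobuf text form of a request: `String()` starts with the name of the field that is set, so everything before the first `:` is exactly the grammar terminal. A minimal sketch of that mapping, using the renamed request types introduced elsewhere in this diff:

```go
package main

import (
	"fmt"
	"strings"

	abci "github.com/cometbft/cometbft/abci/types"
)

func main() {
	// The compact text form of a Request begins with the set field's name,
	// e.g. "init_chain:<...>", so the part before the first ":" is the
	// grammar terminal for that request.
	req := &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.InitChainRequest{}}}
	terminal := strings.Split(req.String(), ":")[0]
	fmt.Println(terminal) // init_chain
}
```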
-func (g *Checker) getRequestTerminal(req *abci.Request) string { +func (*Checker) getRequestTerminal(req *abci.Request) string { // req.String() produces an output like this "init_chain:<...>" // we take just the part before the ":" (init_chain, in previous example) for each request parts := strings.Split(req.String(), ":") @@ -128,31 +127,27 @@ func (g *Checker) getExecutionString(reqs []*abci.Request) string { // Verify verifies whether a list of requests satisfies the ABCI grammar. func (g *Checker) Verify(reqs []*abci.Request, isCleanStart bool) (bool, error) { if len(reqs) == 0 { - return false, fmt.Errorf("execution with no ABCI calls") + return false, errors.New("execution with no ABCI calls") } + fullExecution := g.getExecutionString(reqs) r := g.filterRequests(reqs) // Check if the execution is incomplete. if len(r) == 0 { return true, nil } - var errors []*Error execution := g.getExecutionString(r) - if isCleanStart { - errors = g.verifyCleanStart(execution) - } else { - errors = g.verifyRecovery(execution) - } - if len(errors) == 0 { + errors := g.verify(execution, isCleanStart) + if errors == nil { return true, nil } - return false, fmt.Errorf("%v\nFull execution:\n%v", g.combineErrors(errors, g.cfg.NumberOfErrorsToShow), g.addHeightNumbersToTheExecution(execution)) + return false, fmt.Errorf("%v\nFull execution:\n%v", g.combineErrors(errors, g.cfg.NumberOfErrorsToShow), g.addHeightNumbersToTheExecution(fullExecution)) } -// verifyCleanStart verifies if a specific execution is a valid clean-start execution. -func (g *Checker) verifyCleanStart(execution string) []*Error { +// verify checks whether a specific execution is a valid clean-start or recovery execution. +func (*Checker) verify(execution string, isCleanStart bool) []*Error { errors := make([]*Error, 0) - lexer := clean_start_lexer.New([]rune(execution)) - _, errs := clean_start_parser.Parse(lexer) + lexer := lexer.New([]rune(execution)) + bsrForest, errs := parser.Parse(lexer) for _, err := range errs { exp := []string{} for _, ex := range err.Expected { @@ -161,37 +156,36 @@ func (g *Checker) verifyCleanStart(execution string) []*Error { exp = append(exp, ex) } expectedTokens := strings.Join(exp, ",") unexpectedToken := err.Token.TypeID() e := &Error{ - description: fmt.Sprintf("Invalid clean-start execution: parser was expecting one of [%v], got [%v] instead.", expectedTokens, unexpectedToken), + description: fmt.Sprintf("Invalid execution: parser was expecting one of [%v], got [%v] instead.", expectedTokens, unexpectedToken), height: err.Line - 1, } errors = append(errors, e) } - return errors -} - -// verifyRecovery verifies if a specific execution is a valid recovery execution.
-func (g *Checker) verifyRecovery(execution string) []*Error { - errors := make([]*Error, 0) - lexer := recovery_lexer.New([]rune(execution)) - _, errs := recovery_parser.Parse(lexer) - for _, err := range errs { - exp := []string{} - for _, ex := range err.Expected { - exp = append(exp, ex) - } - expectedTokens := strings.Join(exp, ",") - unexpectedToken := err.Token.TypeID() - e := &Error{ - description: fmt.Sprintf("Invalid recovery execution: parser was expecting one of [%v], got [%v] instead.", expectedTokens, unexpectedToken), - height: err.Line - 1, + if len(errors) != 0 { + return errors + } + eType := symbols.NT_Recovery + if isCleanStart { + eType = symbols.NT_CleanStart + } + roots := bsrForest.GetRoots() + for _, r := range roots { + for _, s := range r.Label.Slot().Symbols { + if s == eType { + return nil + } } - errors = append(errors, e) } + e := &Error{ + description: "The execution is not of a valid type.", + height: 0, + } + errors = append(errors, e) return errors } // addHeightNumbersToTheExecution adds height numbers to the execution. This is used just when printing the execution so we can find the height with error more easily. -func (g *Checker) addHeightNumbersToTheExecution(execution string) string { +func (*Checker) addHeightNumbersToTheExecution(execution string) string { heights := strings.Split(execution, "\n") s := "" for i, l := range heights { @@ -204,7 +198,7 @@ } // combineErrors combines at most n errors in one. -func (g *Checker) combineErrors(errors []*Error, n int) error { +func (*Checker) combineErrors(errors []*Error, n int) error { s := "" for i, e := range errors { if i == n { diff --git a/test/e2e/pkg/grammar/checker_test.go b/test/e2e/pkg/grammar/checker_test.go index 2e4ad1be8fb..22b3725f206 100644 --- a/test/e2e/pkg/grammar/checker_test.go +++ b/test/e2e/pkg/grammar/checker_test.go @@ -4,97 +4,185 @@ import ( "fmt" "testing" - abci "github.com/cometbft/cometbft/abci/types" "github.com/stretchr/testify/require" + + abci "github.com/cometbft/cometbft/abci/types" ) var ( - initChain = &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.RequestInitChain{}}} - finalizeBlock = &abci.Request{Value: &abci.Request_FinalizeBlock{FinalizeBlock: &abci.RequestFinalizeBlock{}}} - commit = &abci.Request{Value: &abci.Request_Commit{Commit: &abci.RequestCommit{}}} - offerSnapshot = &abci.Request{Value: &abci.Request_OfferSnapshot{OfferSnapshot: &abci.RequestOfferSnapshot{}}} - applyChunk = &abci.Request{Value: &abci.Request_ApplySnapshotChunk{ApplySnapshotChunk: &abci.RequestApplySnapshotChunk{}}} - prepareProposal = &abci.Request{Value: &abci.Request_PrepareProposal{PrepareProposal: &abci.RequestPrepareProposal{}}} - processProposal = &abci.Request{Value: &abci.Request_ProcessProposal{ProcessProposal: &abci.RequestProcessProposal{}}} + initChain = &abci.Request{Value: &abci.Request_InitChain{InitChain: &abci.InitChainRequest{}}} + finalizeBlock = &abci.Request{Value: &abci.Request_FinalizeBlock{FinalizeBlock: &abci.FinalizeBlockRequest{}}} + commit = &abci.Request{Value: &abci.Request_Commit{Commit: &abci.CommitRequest{}}} + offerSnapshot = &abci.Request{Value: &abci.Request_OfferSnapshot{OfferSnapshot: &abci.OfferSnapshotRequest{}}} + applyChunk = &abci.Request{Value: &abci.Request_ApplySnapshotChunk{ApplySnapshotChunk: &abci.ApplySnapshotChunkRequest{}}} + prepareProposal = &abci.Request{Value: &abci.Request_PrepareProposal{PrepareProposal: &abci.PrepareProposalRequest{}}} +
processProposal = &abci.Request{Value: &abci.Request_ProcessProposal{ProcessProposal: &abci.ProcessProposalRequest{}}} + extendVote = &abci.Request{Value: &abci.Request_ExtendVote{ExtendVote: &abci.ExtendVoteRequest{}}} + gotVote = &abci.Request{Value: &abci.Request_VerifyVoteExtension{VerifyVoteExtension: &abci.VerifyVoteExtensionRequest{}}} ) -const ( - CleanStart = true - Pass = true - Fail = false -) +const CleanStart = true + +type ABCIExecution struct { + abciCalls []*abci.Request + isValid bool +} + +// consensus-exec part of executions +// consensus-exec = (inf)consensus-height +// It is part of every execution. +var consExecPart = []ABCIExecution{ + // consensus-height = finalizeBlock commit + {[]*abci.Request{finalizeBlock, commit}, true}, + {[]*abci.Request{commit}, false}, + // consensus-height = *consensus-round finalizeBlock commit + // consensus-round = proposer + // proposer = *gotVote + {[]*abci.Request{gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, finalizeBlock, commit}, true}, + // proposer = [prepare-proposal [process-proposal]] + {[]*abci.Request{prepareProposal, processProposal, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, finalizeBlock, commit}, true}, + // proposer = [extend] + {[]*abci.Request{extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{extendVote, gotVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, extendVote, gotVote, gotVote, finalizeBlock, commit}, true}, + // proposer = *gotVote [prepare-proposal [process-proposal]] + {[]*abci.Request{gotVote, prepareProposal, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, prepareProposal, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, prepareProposal, processProposal, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, prepareProposal, processProposal, finalizeBlock, commit}, true}, + // proposer = *gotVote [extend] + // same as just [extend] + // proposer = [prepare-proposal [process-proposal]] [extend] + {[]*abci.Request{prepareProposal, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, gotVote, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, processProposal, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, processProposal, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, processProposal, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{prepareProposal, processProposal, gotVote, extendVote, gotVote, finalizeBlock, commit}, true}, + // proposer = *gotVote [prepare-proposal [process-proposal]] [extend] + {[]*abci.Request{gotVote, prepareProposal, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, prepareProposal, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, prepareProposal, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, prepareProposal, gotVote, extendVote, 
gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, prepareProposal, processProposal, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, prepareProposal, processProposal, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, prepareProposal, processProposal, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, prepareProposal, processProposal, gotVote, extendVote, gotVote, finalizeBlock, commit}, true}, + + // consensus-round = non-proposer + // non-proposer = *gotVote + // same as for proposer + + // non-proposer = [process-proposal] + {[]*abci.Request{processProposal, finalizeBlock, commit}, true}, + // non-proposer = [extend] + // same as for proposer + + // non-proposer = *gotVote [process-proposal] + {[]*abci.Request{gotVote, processProposal, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, processProposal, finalizeBlock, commit}, true}, + // non-proposer = *gotVote [extend] + // same as just [extend] + + // non-proposer = [process-proposal] [extend] + {[]*abci.Request{processProposal, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{processProposal, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{processProposal, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{processProposal, gotVote, extendVote, gotVote, finalizeBlock, commit}, true}, -func TestVerify(t *testing.T) { - tests := []struct { - name string - abciCalls []*abci.Request - isCleanStart bool - result bool - }{ + // non-proposer = *gotVote [process-proposal] [extend] + {[]*abci.Request{gotVote, processProposal, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, processProposal, gotVote, extendVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, processProposal, extendVote, gotVote, finalizeBlock, commit}, true}, + {[]*abci.Request{gotVote, gotVote, processProposal, gotVote, extendVote, gotVote, finalizeBlock, commit}, true}, + + {[]*abci.Request{prepareProposal, processProposal, processProposal, prepareProposal, processProposal, processProposal, processProposal, finalizeBlock, commit}, true}, +} + +func TestVerifyCleanStart(t *testing.T) { + // Parts of executions specific to clean-start executions + specificCleanStartPart := []ABCIExecution{ // start = clean-start // clean-start = init-chain consensus-exec - {"empty-block-1", []*abci.Request{initChain, finalizeBlock, commit}, CleanStart, Pass}, - {"consensus-exec-missing", []*abci.Request{initChain}, CleanStart, Fail}, - {"finalize-block-missing-1", []*abci.Request{initChain, commit}, CleanStart, Fail}, - {"commit-missing-1", []*abci.Request{initChain, finalizeBlock}, CleanStart, Fail}, - // consensus-height = *consensus-round finalizeBlock commit - {"proposer-round-1", []*abci.Request{initChain, prepareProposal, processProposal, finalizeBlock, commit}, CleanStart, Pass}, - {"proposer-round-2", []*abci.Request{initChain, prepareProposal, finalizeBlock, commit}, CleanStart, Pass}, - {"non-proposer-round-1", []*abci.Request{initChain, processProposal, finalizeBlock, commit}, CleanStart, Pass}, - {"multiple-rounds-1", []*abci.Request{initChain, prepareProposal, processProposal, processProposal, prepareProposal, processProposal, processProposal, processProposal, finalizeBlock, commit}, CleanStart, Pass}, - - // clean-start = init-chain state-sync consensus-exec + {[]*abci.Request{initChain}, 
true}, + // clean-start = state-sync consensus-exec // state-sync = success-sync - {"one-apply-chunk-1", []*abci.Request{initChain, offerSnapshot, applyChunk, finalizeBlock, commit}, CleanStart, Pass}, - {"multiple-apply-chunks-1", []*abci.Request{initChain, offerSnapshot, applyChunk, applyChunk, finalizeBlock, commit}, CleanStart, Pass}, - {"offer-snapshot-missing-1", []*abci.Request{initChain, applyChunk, finalizeBlock, commit}, CleanStart, Fail}, - {"apply-chunk-missing", []*abci.Request{initChain, offerSnapshot, finalizeBlock, commit}, CleanStart, Fail}, + {[]*abci.Request{offerSnapshot, applyChunk}, true}, + {[]*abci.Request{offerSnapshot, applyChunk, applyChunk}, true}, + {[]*abci.Request{applyChunk}, false}, + {[]*abci.Request{offerSnapshot}, false}, // state-sync = *state-sync-attempt success-sync - {"one-apply-chunk-2", []*abci.Request{initChain, offerSnapshot, applyChunk, offerSnapshot, applyChunk, finalizeBlock, commit}, CleanStart, Pass}, - {"multiple-apply-chunks-2", []*abci.Request{initChain, offerSnapshot, applyChunk, applyChunk, applyChunk, offerSnapshot, applyChunk, finalizeBlock, commit}, CleanStart, Pass}, - {"offer-snapshot-missing-2", []*abci.Request{initChain, applyChunk, offerSnapshot, applyChunk, finalizeBlock, commit}, CleanStart, Fail}, - {"no-apply-chunk", []*abci.Request{initChain, offerSnapshot, offerSnapshot, applyChunk, finalizeBlock, commit}, CleanStart, Pass}, + {[]*abci.Request{offerSnapshot, applyChunk, offerSnapshot, applyChunk}, true}, + {[]*abci.Request{offerSnapshot, applyChunk, applyChunk, applyChunk, offerSnapshot, applyChunk}, true}, + {[]*abci.Request{applyChunk, offerSnapshot, applyChunk}, false}, + {[]*abci.Request{offerSnapshot, offerSnapshot, applyChunk}, true}, + // extra invalid executions + {[]*abci.Request{initChain, offerSnapshot, applyChunk}, false}, + {[]*abci.Request{}, false}, + } + for i, part1 := range specificCleanStartPart { + for j, part2 := range consExecPart { + checker := NewGrammarChecker(DefaultConfig()) + execution := append(part1.abciCalls, part2.abciCalls...) 
+ valid := part1.isValid && part2.isValid + result, err := checker.Verify(execution, CleanStart) + if result == valid { + continue + } + if err == nil { + err = fmt.Errorf("grammar parsed an incorrect execution: %v", checker.getExecutionString(execution)) + } + t.Errorf("Test %v:%v returned %v, expected %v\n%v\n", i, j, result, valid, err) + } + } +} +func TestVerifyRecovery(t *testing.T) { + // Parts of executions specific for recovery execution + specificRecoveryPart := []ABCIExecution{ // start = recovery + // recovery = init-chain consensus-exec + {[]*abci.Request{initChain}, true}, // recovery = consensus-exec - // consensus-height = finalizeBlock commit - {"empty-block-2", []*abci.Request{finalizeBlock, commit}, !CleanStart, Pass}, - {"finalize-block-missing-2", []*abci.Request{commit}, !CleanStart, Fail}, - {"commit-missing-2", []*abci.Request{finalizeBlock}, !CleanStart, Fail}, - // consensus-height = *consensus-round finalizeBlock commit - {"proposer-round-3", []*abci.Request{prepareProposal, processProposal, finalizeBlock, commit}, !CleanStart, Pass}, - {"proposer-round-4", []*abci.Request{prepareProposal, finalizeBlock, commit}, !CleanStart, Pass}, - {"non-proposer-round-2", []*abci.Request{processProposal, finalizeBlock, commit}, !CleanStart, Pass}, - {"multiple-rounds-2", []*abci.Request{prepareProposal, processProposal, processProposal, prepareProposal, processProposal, processProposal, processProposal, finalizeBlock, commit}, !CleanStart, Pass}, - - // corner cases - {"empty execution", nil, CleanStart, Fail}, - {"empty execution", nil, !CleanStart, Fail}, + {[]*abci.Request{}, true}, } - - for _, test := range tests { - checker := NewGrammarChecker(DefaultConfig()) - result, err := checker.Verify(test.abciCalls, test.isCleanStart) - if result == test.result { - continue + for i, part1 := range specificRecoveryPart { + for j, part2 := range consExecPart { + checker := NewGrammarChecker(DefaultConfig()) + execution := append(part1.abciCalls, part2.abciCalls...) + valid := part1.isValid && part2.isValid + result, err := checker.Verify(execution, !CleanStart) + if result == valid { + continue + } + if err == nil { + err = fmt.Errorf("grammar parsed an incorrect execution: %v", checker.getExecutionString(execution)) + } + t.Errorf("Test %v:%v returned %v, expected %v\n%v\n", i, j, result, valid, err) } - if err == nil { - err = fmt.Errorf("grammar parsed an incorrect execution: %v", checker.getExecutionString(test.abciCalls)) - } - t.Errorf("Test %v returned %v, expected %v\n%v\n", test.name, result, test.result, err) } } func TestFilterLastHeight(t *testing.T) { - reqs := []*abci.Request{initChain, finalizeBlock, commit} + reqs := []*abci.Request{initChain, finalizeBlock} checker := NewGrammarChecker(DefaultConfig()) - rr, n := checker.filterLastHeight(reqs) - require.Equal(t, len(reqs), len(rr)) + r, n := checker.filterLastHeight(reqs) + require.Equal(t, len(r), 0) + require.Equal(t, n, 2) + reqs = append(reqs, commit) + r, n = checker.filterLastHeight(reqs) + require.Equal(t, len(r), len(reqs)) require.Zero(t, n) - - reqs = append(reqs, finalizeBlock) - rrr, n := checker.filterLastHeight(reqs) - require.Equal(t, len(rr), len(rrr)) - require.Equal(t, n, 1) + reqs = append(reqs, []*abci.Request{prepareProposal, processProposal}...) 
+ r, n = checker.filterLastHeight(reqs) + require.Equal(t, len(r), 3) + require.Equal(t, n, 2) } diff --git a/test/e2e/pkg/grammar/clean-start/abci_grammar_clean_start.md b/test/e2e/pkg/grammar/clean-start/abci_grammar_clean_start.md deleted file mode 100644 index a66bb64f2ce..00000000000 --- a/test/e2e/pkg/grammar/clean-start/abci_grammar_clean_start.md +++ /dev/null @@ -1,61 +0,0 @@ -``` -package "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto" - -Start : CleanStart ; - -CleanStart : InitChain StateSync ConsensusExec | InitChain ConsensusExec ; -StateSync : StateSyncAttempts SuccessSync | SuccessSync ; -StateSyncAttempts : StateSyncAttempt | StateSyncAttempt StateSyncAttempts ; -StateSyncAttempt : OfferSnapshot ApplyChunks | OfferSnapshot ; -SuccessSync : OfferSnapshot ApplyChunks ; -ApplyChunks : ApplyChunk | ApplyChunk ApplyChunks ; - -ConsensusExec : ConsensusHeights ; -ConsensusHeights : ConsensusHeight | ConsensusHeight ConsensusHeights ; -ConsensusHeight : ConsensusRounds FinalizeBlock Commit | FinalizeBlock Commit ; -ConsensusRounds : ConsensusRound | ConsensusRound ConsensusRounds ; -ConsensusRound : Proposer | NonProposer ; - -Proposer : PrepareProposal | PrepareProposal ProcessProposal ; -NonProposer: ProcessProposal ; - -InitChain : "init_chain" ; -FinalizeBlock : "finalize_block" ; -Commit : "commit" ; -OfferSnapshot : "offer_snapshot" ; -ApplyChunk : "apply_snapshot_chunk" ; -PrepareProposal : "prepare_proposal" ; -ProcessProposal : "process_proposal" ; - -``` - -The part of the original grammar (https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_comet_expected_behavior.md) the grammar above -refers to is below: - -start = clean-start - -clean-start = init-chain [state-sync] consensus-exec -state-sync = *state-sync-attempt success-sync info -state-sync-attempt = offer-snapshot *apply-chunk -success-sync = offer-snapshot 1*apply-chunk - -consensus-exec = (inf)consensus-height -consensus-height = *consensus-round decide commit -consensus-round = proposer / non-proposer - -proposer = [prepare-proposal [process-proposal]] -non-proposer = [process-proposal] - -init-chain = %s"" -decide = %s"" -commit = %s"" -offer-snapshot = %s"" -apply-chunk = %s"" -info = %s"" -prepare-proposal = %s"" -process-proposal = %s"" - - - - - diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/bsr/bsr.go b/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/bsr/bsr.go deleted file mode 100644 index 3a98779aa4b..00000000000 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/bsr/bsr.go +++ /dev/null @@ -1,685 +0,0 @@ -// Package bsr is generated by gogll. Do not edit. - -/* -Package bsr implements a Binary Subtree Representation set as defined in - - Scott et al - Derivation representation using binary subtree sets, - Science of Computer Programming 175 (2019) -*/ -package bsr - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/lexer" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/slot" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/sppf" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/token" -) - -type bsr interface { - LeftExtent() int - RightExtent() int - Pivot() int -} - -/* -Set contains the set of Binary Subtree Representations (BSR). 
-*/ -type Set struct { - slotEntries map[BSR]bool - ntSlotEntries map[ntSlot][]BSR - stringEntries map[stringKey]*stringBSR - rightExtent int - lex *lexer.Lexer - - startSym symbols.NT -} - -type ntSlot struct { - nt symbols.NT - leftExtent int - rightExtent int -} - -// BSR is the binary subtree representation of a parsed nonterminal -type BSR struct { - Label slot.Label - leftExtent int - pivot int - rightExtent int - set *Set -} - -type BSRs []BSR - -type stringBSR struct { - Symbols symbols.Symbols - leftExtent int - pivot int - rightExtent int - set *Set -} - -type stringBSRs []*stringBSR - -type stringKey string - -// New returns a new initialised BSR Set -func New(startSymbol symbols.NT, l *lexer.Lexer) *Set { - return &Set{ - slotEntries: make(map[BSR]bool), - ntSlotEntries: make(map[ntSlot][]BSR), - stringEntries: make(map[stringKey]*stringBSR), - rightExtent: 0, - lex: l, - startSym: startSymbol, - } -} - -/* -Add a bsr to the set. (i,j) is the extent. k is the pivot. -*/ -func (s *Set) Add(l slot.Label, i, k, j int) { - // fmt.Printf("bsr.Add(%s,%d,%d,%d l.Pos %d)\n", l, i, k, j, l.Pos()) - if l.EoR() { - s.insert(BSR{l, i, k, j, s}) - } else { - if l.Pos() > 1 { - s.insert(&stringBSR{l.Symbols()[:l.Pos()], i, k, j, s}) - } - } -} - -// AddEmpty adds a grammar slot: X : ϵ• -func (s *Set) AddEmpty(l slot.Label, i int) { - s.insert(BSR{l, i, i, i, s}) -} - -/* -Contain returns true iff the BSR Set contains the NT symbol with left and -right extent. -*/ -func (s *Set) Contain(nt symbols.NT, left, right int) bool { - // fmt.Printf("bsr.Contain(%s,%d,%d)\n",nt,left,right) - for e := range s.slotEntries { - // fmt.Printf(" (%s,%d,%d)\n",e.Label.Head(),e.leftExtent,e.rightExtent) - if e.Label.Head() == nt && e.leftExtent == left && e.rightExtent == right { - // fmt.Println(" true") - return true - } - } - // fmt.Println(" false") - return false -} - -// Dump prints all the NT and string elements of the BSR set -func (s *Set) Dump() { - fmt.Println("Roots:") - for _, rt := range s.GetRoots() { - fmt.Println(rt) - } - fmt.Println() - - fmt.Println("NT BSRs:") - for _, bsr := range s.getNTBSRs() { - fmt.Println(bsr) - } - fmt.Println() - - fmt.Println("string BSRs:") - for _, bsr := range s.getStringBSRs() { - fmt.Println(bsr) - } - fmt.Println() -} - -// GetAll returns all BSR grammar slot entries -func (s *Set) GetAll() (bsrs []BSR) { - for b := range s.slotEntries { - bsrs = append(bsrs, b) - } - return -} - -// GetRightExtent returns the right extent of the BSR set -func (s *Set) GetRightExtent() int { - return s.rightExtent -} - -// GetRoot returns the root of the parse tree of an unambiguous parse. -// GetRoot fails if the parse was ambiguous. Use GetRoots() for ambiguous parses. -func (s *Set) GetRoot() BSR { - rts := s.GetRoots() - if len(rts) != 1 { - failf("%d parse trees exist for start symbol %s", len(rts), s.startSym) - } - return rts[0] -} - -// GetRoots returns all the roots of parse trees of the start symbol of the grammar. 
-func (s *Set) GetRoots() (roots []BSR) { - for b := range s.slotEntries { - if b.Label.Head() == s.startSym && b.leftExtent == 0 && s.rightExtent == b.rightExtent { - roots = append(roots, b) - } - } - return -} - -// GetAllStrings returns all string elements with symbols = str, -// left extent = lext and right extent = rext -func (s *Set) GetAllStrings(str symbols.Symbols, lext, rext int) (strs []*stringBSR) { - for _, s := range s.stringEntries { - if s.Symbols.Equal(str) && s.leftExtent == lext && s.rightExtent == rext { - strs = append(strs, s) - } - } - return -} - -func (s *Set) getNTBSRs() BSRs { - bsrs := make(BSRs, 0, len(s.ntSlotEntries)) - for _, bsrl := range s.ntSlotEntries { - for _, bsr := range bsrl { - bsrs = append(bsrs, bsr) - } - } - sort.Sort(bsrs) - return bsrs -} - -func (s *Set) getStringBSRs() stringBSRs { - bsrs := make(stringBSRs, 0, len(s.stringEntries)) - for _, bsr := range s.stringEntries { - bsrs = append(bsrs, bsr) - } - sort.Sort(bsrs) - return bsrs -} - -func (s *Set) getString(symbols symbols.Symbols, leftExtent, rightExtent int) *stringBSR { - // fmt.Printf("Set.getString(%s,%d,%d)\n", symbols, leftExtent, rightExtent) - - strBsr, exist := s.stringEntries[getStringKey(symbols, leftExtent, rightExtent)] - if exist { - return strBsr - } - - panic(fmt.Sprintf("Error: no string %s left extent=%d right extent=%d\n", - symbols, leftExtent, rightExtent)) -} - -func (s *Set) insert(bsr bsr) { - if bsr.RightExtent() > s.rightExtent { - s.rightExtent = bsr.RightExtent() - } - switch b := bsr.(type) { - case BSR: - s.slotEntries[b] = true - nt := ntSlot{b.Label.Head(), b.leftExtent, b.rightExtent} - s.ntSlotEntries[nt] = append(s.ntSlotEntries[nt], b) - case *stringBSR: - s.stringEntries[b.key()] = b - default: - panic(fmt.Sprintf("Invalid type %T", bsr)) - } -} - -func (s *stringBSR) key() stringKey { - return getStringKey(s.Symbols, s.leftExtent, s.rightExtent) -} - -func getStringKey(symbols symbols.Symbols, lext, rext int) stringKey { - return stringKey(fmt.Sprintf("%s,%d,%d", symbols, lext, rext)) -} - -// Alternate returns the index of the grammar rule alternate. -func (b BSR) Alternate() int { - return b.Label.Alternate() -} - -// GetAllNTChildren returns all the NT Children of b. If an NT child of b has -// ambiguous parses then all parses of that child are returned. -func (b BSR) GetAllNTChildren() [][]BSR { - children := [][]BSR{} - for i, s := range b.Label.Symbols() { - if s.IsNonTerminal() { - sChildren := b.GetNTChildrenI(i) - children = append(children, sChildren) - } - } - return children -} - -// GetNTChild returns the BSR of occurrence i of nt in s. -// GetNTChild fails if s has ambiguous subtrees of occurrence i of nt. -func (b BSR) GetNTChild(nt symbols.NT, i int) BSR { - bsrs := b.GetNTChildren(nt, i) - if len(bsrs) != 1 { - ambiguousSlots := []string{} - for _, c := range bsrs { - ambiguousSlots = append(ambiguousSlots, c.String()) - } - b.set.fail(b, "%s is ambiguous in %s\n %s", nt, b, strings.Join(ambiguousSlots, "\n ")) - } - return bsrs[0] -} - -// GetNTChildI returns the BSR of NT symbol[i] in the BSR set. -// GetNTChildI fails if the BSR set has ambiguous subtrees of NT i. 
-func (b BSR) GetNTChildI(i int) BSR { - bsrs := b.GetNTChildrenI(i) - if len(bsrs) != 1 { - b.set.fail(b, "NT %d is ambiguous in %s", i, b) - } - return bsrs[0] -} - -// GetNTChildren returns all the BSRs of occurrence i of nt in s -func (b BSR) GetNTChildren(nt symbols.NT, i int) []BSR { - // fmt.Printf("GetNTChild(%s,%d) %s\n", nt, i, b) - positions := []int{} - for j, s := range b.Label.Symbols() { - if s == nt { - positions = append(positions, j) - } - } - if len(positions) == 0 { - b.set.fail(b, "Error: %s has no NT %s", b, nt) - } - return b.GetNTChildrenI(positions[i]) -} - -// GetNTChildrenI returns all the BSRs of NT symbol[i] in s -func (b BSR) GetNTChildrenI(i int) []BSR { - // fmt.Printf("bsr.GetNTChildI(%d) %s Pos %d\n", i, b, b.Label.Pos()) - - if i >= len(b.Label.Symbols()) { - b.set.fail(b, "Error: cannot get NT child %d of %s", i, b) - } - if len(b.Label.Symbols()) == 1 { - return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) - } - if len(b.Label.Symbols()) == 2 { - if i == 0 { - return b.set.getNTSlot(b.Label.Symbols()[i], b.leftExtent, b.pivot) - } - return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) - } - if b.Label.Pos() == i+1 { - return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) - } - - // Walk to pos i from the right - symbols := b.Label.Symbols()[:b.Label.Pos()-1] - str := b.set.getString(symbols, b.leftExtent, b.pivot) - for len(symbols) > i+1 && len(symbols) > 2 { - symbols = symbols[:len(symbols)-1] - str = b.set.getString(symbols, str.leftExtent, str.pivot) - } - - bsrs := []BSR{} - if i == 0 { - bsrs = b.set.getNTSlot(b.Label.Symbols()[i], str.leftExtent, str.pivot) - } else { - bsrs = b.set.getNTSlot(b.Label.Symbols()[i], str.pivot, str.rightExtent) - } - - // fmt.Println(bsrs) - - return bsrs -} - -// GetTChildI returns the terminal symbol at position i in b. 
-// GetTChildI panics if symbol i is not a valid terminal -func (b BSR) GetTChildI(i int) *token.Token { - symbols := b.Label.Symbols() - - if i >= len(symbols) { - panic(fmt.Sprintf("%s has no T child %d", b, i)) - } - if symbols[i].IsNonTerminal() { - panic(fmt.Sprintf("symbol %d in %s is an NT", i, b)) - } - - lext := b.leftExtent - for j := 0; j < i; j++ { - if symbols[j].IsNonTerminal() { - nt := b.GetNTChildI(j) - lext += nt.rightExtent - nt.leftExtent - } else { - lext++ - } - } - return b.set.lex.Tokens[lext] -} - -// LeftExtent returns the left extent of the BSR in the stream of tokens -func (b BSR) LeftExtent() int { - return b.leftExtent -} - -// RightExtent returns the right extent of the BSR in the stream of tokens -func (b BSR) RightExtent() int { - return b.rightExtent -} - -// Pivot returns the pivot of the BSR -func (b BSR) Pivot() int { - return b.pivot -} - -func (b BSR) String() string { - srcStr := "ℇ" - if b.leftExtent < b.rightExtent { - srcStr = b.set.lex.GetString(b.LeftExtent(), b.RightExtent()-1) - } - return fmt.Sprintf("%s,%d,%d,%d - %s", - b.Label, b.leftExtent, b.pivot, b.rightExtent, srcStr) -} - -// BSRs Sort interface -func (bs BSRs) Len() int { - return len(bs) -} - -func (bs BSRs) Less(i, j int) bool { - if bs[i].Label < bs[j].Label { - return true - } - if bs[i].Label > bs[j].Label { - return false - } - if bs[i].leftExtent < bs[j].leftExtent { - return true - } - if bs[i].leftExtent > bs[j].leftExtent { - return false - } - return bs[i].rightExtent < bs[j].rightExtent -} - -func (bs BSRs) Swap(i, j int) { - bs[i], bs[j] = bs[j], bs[i] -} - -// stringBSRs Sort interface -func (sbs stringBSRs) Len() int { - return len(sbs) -} - -func (sbs stringBSRs) Less(i, j int) bool { - if sbs[i].Symbols.String() < sbs[j].Symbols.String() { - return true - } - if sbs[i].Symbols.String() > sbs[j].Symbols.String() { - return false - } - if sbs[i].leftExtent < sbs[j].leftExtent { - return true - } - if sbs[i].leftExtent > sbs[j].leftExtent { - return false - } - return sbs[i].rightExtent < sbs[j].rightExtent -} - -func (sbs stringBSRs) Swap(i, j int) { - sbs[i], sbs[j] = sbs[j], sbs[i] -} - -func (s stringBSR) LeftExtent() int { - return s.leftExtent -} - -func (s stringBSR) RightExtent() int { - return s.rightExtent -} - -func (s stringBSR) Pivot() int { - return s.pivot -} - -func (s stringBSR) Empty() bool { - return s.leftExtent == s.pivot && s.pivot == s.rightExtent -} - -// String returns a string representation of s -func (s stringBSR) String() string { - return fmt.Sprintf("%s,%d,%d,%d - %s", &s.Symbols, s.leftExtent, s.pivot, - s.rightExtent, s.set.lex.GetString(s.LeftExtent(), s.RightExtent())) -} - -func (s *Set) getNTSlot(sym symbols.Symbol, leftExtent, rightExtent int) (bsrs []BSR) { - nt, ok := sym.(symbols.NT) - if !ok { - line, col := s.getLineColumn(leftExtent) - failf("%s is not an NT at line %d col %d", sym, line, col) - } - return s.ntSlotEntries[ntSlot{nt, leftExtent, rightExtent}] -} - -func (s *Set) fail(b BSR, format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) 
- line, col := s.getLineColumn(b.LeftExtent()) - panic(fmt.Sprintf("Error in BSR: %s at line %d col %d\n", msg, line, col)) -} - -func failf(format string, args ...interface{}) { - panic(fmt.Sprintf("Error in BSR: %s\n", fmt.Sprintf(format, args...))) -} - -func (s *Set) getLineColumn(cI int) (line, col int) { - return s.lex.GetLineColumnOfToken(cI) -} - -// ReportAmbiguous lists the ambiguous subtrees of the parse forest -func (s *Set) ReportAmbiguous() { - fmt.Println("Ambiguous BSR Subtrees:") - rts := s.GetRoots() - if len(rts) != 1 { - fmt.Printf("BSR has %d ambigous roots\n", len(rts)) - } - for i, b := range s.GetRoots() { - fmt.Println("In root", i) - if !s.report(b) { - fmt.Println("No ambiguous BSRs") - } - } -} - -// report return true iff at least one ambigous BSR was found -func (s *Set) report(b BSR) bool { - ambiguous := false - for i, sym := range b.Label.Symbols() { - ln, col := s.getLineColumn(b.LeftExtent()) - if sym.IsNonTerminal() { - if len(b.GetNTChildrenI(i)) != 1 { - ambiguous = true - fmt.Printf(" Ambigous: in %s: NT %s (%d) at line %d col %d \n", - b, sym, i, ln, col) - fmt.Println(" Children:") - for _, c := range b.GetNTChildrenI(i) { - fmt.Printf(" %s\n", c) - } - } - for _, b1 := range b.GetNTChildrenI(i) { - s.report(b1) - } - } - } - return ambiguous -} - -// IsAmbiguous returns true if the BSR set does not have exactly one root, or -// if any BSR in the set has an NT symbol, which does not have exactly one -// sub-tree. -func (s *Set) IsAmbiguous() bool { - if len(s.GetRoots()) != 1 { - return true - } - return isAmbiguous(s.GetRoot()) -} - -// isAmbiguous returns true if b or any of its NT children is ambiguous. -// A BSR is ambiguous if any of its NT symbols does not have exactly one -// subtrees (children). -func isAmbiguous(b BSR) bool { - for i, s := range b.Label.Symbols() { - if s.IsNonTerminal() { - if len(b.GetNTChildrenI(i)) != 1 { - return true - } - for _, b1 := range b.GetNTChildrenI(i) { - if isAmbiguous(b1) { - return true - } - } - } - } - return false -} - -//---- SPPF ------------ - -type bldSPPF struct { - root *sppf.SymbolNode - extLeafNodes []sppf.Node - pNodes map[string]*sppf.PackedNode - sNodes map[string]*sppf.SymbolNode // Index is Node.Label() -} - -func (pf *Set) ToSPPF() *sppf.SymbolNode { - bld := &bldSPPF{ - pNodes: map[string]*sppf.PackedNode{}, - sNodes: map[string]*sppf.SymbolNode{}, - } - rt := pf.GetRoots()[0] - bld.root = bld.mkSN(rt.Label.Head().String(), rt.leftExtent, rt.rightExtent) - - for len(bld.extLeafNodes) > 0 { - // let w = (μ, i, j) be an extendable leaf node of G - w := bld.extLeafNodes[len(bld.extLeafNodes)-1] - bld.extLeafNodes = bld.extLeafNodes[:len(bld.extLeafNodes)-1] - - // μ is a nonterminal X in Γ - if nt, ok := w.(*sppf.SymbolNode); ok && symbols.IsNT(nt.Symbol) { - bsts := pf.getNTSlot(symbols.ToNT(nt.Symbol), nt.Lext, nt.Rext) - // for each (X ::=γ,i,k, j)∈Υ { mkPN(X ::=γ·,i,k, j,G) } } - for _, bst := range bsts { - slt := bst.Label.Slot() - nt.Children = append(nt.Children, - bld.mkPN(slt.NT, slt.Symbols, slt.Pos, - bst.leftExtent, bst.pivot, bst.rightExtent)) - } - } else { // w is an intermediate node - // suppose μ is X ::=α·δ - in := w.(*sppf.IntermediateNode) - if in.Pos == 1 { - in.Children = append(in.Children, bld.mkPN(in.NT, in.Body, in.Pos, - in.Lext, in.Lext, in.Rext)) - } else { - // for each (α,i,k, j)∈Υ { mkPN(X ::=α·δ,i,k, j,G) } } } } - alpha, delta := in.Body[:in.Pos], in.Body[in.Pos:] - for _, str := range pf.GetAllStrings(alpha, in.Lext, in.Rext) { - body := 
append(str.Symbols, delta...) - in.Children = append(in.Children, - bld.mkPN(in.NT, body, in.Pos, str.leftExtent, str.pivot, str.rightExtent)) - } - } - } - } - return bld.root -} - -func (bld *bldSPPF) mkIN(nt symbols.NT, body symbols.Symbols, pos int, - lext, rext int) *sppf.IntermediateNode { - - in := &sppf.IntermediateNode{ - NT: nt, - Body: body, - Pos: pos, - Lext: lext, - Rext: rext, - } - bld.extLeafNodes = append(bld.extLeafNodes, in) - return in -} - -func (bld *bldSPPF) mkPN(nt symbols.NT, body symbols.Symbols, pos int, - lext, pivot, rext int) *sppf.PackedNode { - // fmt.Printf("mkPN %s,%d,%d,%d\n", slotString(nt, body, pos), lext, pivot, rext) - - // X ::= ⍺ • β, k - pn := &sppf.PackedNode{ - NT: nt, - Body: body, - Pos: pos, - Lext: lext, - Rext: rext, - Pivot: pivot, - LeftChild: nil, - RightChild: nil, - } - if pn1, exist := bld.pNodes[pn.Label()]; exist { - return pn1 - } - bld.pNodes[pn.Label()] = pn - - if len(body) == 0 { // ⍺ = ϵ - pn.RightChild = bld.mkSN("ϵ", lext, lext) - } else { // if ( α=βx, where |x|=1) { - // mkN(x,k, j, y,G) - pn.RightChild = bld.mkSN(pn.Body[pn.Pos-1].String(), pivot, rext) - - // if (|β|=1) mkN(β,i,k,y,G) - if pos == 2 { - pn.LeftChild = bld.mkSN(pn.Body[pn.Pos-2].String(), lext, pivot) - } - // if (|β|>1) mkN(X ::=β·xδ,i,k,y,G) - if pos > 2 { - pn.LeftChild = bld.mkIN(pn.NT, pn.Body, pn.Pos-1, lext, pivot) - } - } - - return pn -} - -func (bld *bldSPPF) mkSN(symbol string, lext, rext int) *sppf.SymbolNode { - sn := &sppf.SymbolNode{ - Symbol: symbol, - Lext: lext, - Rext: rext, - } - if sn1, exist := bld.sNodes[sn.Label()]; exist { - return sn1 - } - bld.sNodes[sn.Label()] = sn - if symbols.IsNT(symbol) { - bld.extLeafNodes = append(bld.extLeafNodes, sn) - } - return sn -} - -func slotString(nt symbols.NT, body symbols.Symbols, pos int) string { - w := new(bytes.Buffer) - fmt.Fprintf(w, "%s:", nt) - for i, sym := range body { - fmt.Fprint(w, " ") - if i == pos { - fmt.Fprint(w, "•") - } - fmt.Fprint(w, sym) - } - if len(body) == pos { - fmt.Fprint(w, "•") - } - return w.String() -} - diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/parser.go b/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/parser.go deleted file mode 100644 index 8fb3c9832a6..00000000000 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/parser.go +++ /dev/null @@ -1,1267 +0,0 @@ -// Package parser is generated by gogll. Do not edit. 
-package parser - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/lexer" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/bsr" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/slot" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/token" -) - -type parser struct { - cI int - - R *descriptors - U *descriptors - - popped map[poppedNode]bool - crf map[clusterNode][]*crfNode - crfNodes map[crfNode]*crfNode - - lex *lexer.Lexer - parseErrors []*Error - - bsrSet *bsr.Set -} - -func newParser(l *lexer.Lexer) *parser { - return &parser{ - cI: 0, - lex: l, - R: &descriptors{}, - U: &descriptors{}, - popped: make(map[poppedNode]bool), - crf: map[clusterNode][]*crfNode{ - {symbols.NT_Start, 0}: {}, - }, - crfNodes: map[crfNode]*crfNode{}, - bsrSet: bsr.New(symbols.NT_Start, l), - parseErrors: nil, - } -} - -// Parse returns the BSR set containing the parse forest. -// If the parse was successfull []*Error is nil -func Parse(l *lexer.Lexer) (*bsr.Set, []*Error) { - return newParser(l).parse() -} - -func (p *parser) parse() (*bsr.Set, []*Error) { - var L slot.Label - m, cU := len(p.lex.Tokens)-1, 0 - p.ntAdd(symbols.NT_Start, 0) - // p.DumpDescriptors() - for !p.R.empty() { - L, cU, p.cI = p.R.remove() - - // fmt.Println() - // fmt.Printf("L:%s, cI:%d, I[p.cI]:%s, cU:%d\n", L, p.cI, p.lex.Tokens[p.cI], cU) - // p.DumpDescriptors() - - switch L { - case slot.ApplyChunk0R0: // ApplyChunk : ∙apply_snapshot_chunk - - p.bsrSet.Add(slot.ApplyChunk0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_ApplyChunk) { - p.rtn(symbols.NT_ApplyChunk, cU, p.cI) - } else { - p.parseError(slot.ApplyChunk0R0, p.cI, followSets[symbols.NT_ApplyChunk]) - } - case slot.ApplyChunks0R0: // ApplyChunks : ∙ApplyChunk - - p.call(slot.ApplyChunks0R1, cU, p.cI) - case slot.ApplyChunks0R1: // ApplyChunks : ApplyChunk ∙ - - if p.follow(symbols.NT_ApplyChunks) { - p.rtn(symbols.NT_ApplyChunks, cU, p.cI) - } else { - p.parseError(slot.ApplyChunks0R0, p.cI, followSets[symbols.NT_ApplyChunks]) - } - case slot.ApplyChunks1R0: // ApplyChunks : ∙ApplyChunk ApplyChunks - - p.call(slot.ApplyChunks1R1, cU, p.cI) - case slot.ApplyChunks1R1: // ApplyChunks : ApplyChunk ∙ApplyChunks - - if !p.testSelect(slot.ApplyChunks1R1) { - p.parseError(slot.ApplyChunks1R1, p.cI, first[slot.ApplyChunks1R1]) - break - } - - p.call(slot.ApplyChunks1R2, cU, p.cI) - case slot.ApplyChunks1R2: // ApplyChunks : ApplyChunk ApplyChunks ∙ - - if p.follow(symbols.NT_ApplyChunks) { - p.rtn(symbols.NT_ApplyChunks, cU, p.cI) - } else { - p.parseError(slot.ApplyChunks1R0, p.cI, followSets[symbols.NT_ApplyChunks]) - } - case slot.CleanStart0R0: // CleanStart : ∙InitChain StateSync ConsensusExec - - p.call(slot.CleanStart0R1, cU, p.cI) - case slot.CleanStart0R1: // CleanStart : InitChain ∙StateSync ConsensusExec - - if !p.testSelect(slot.CleanStart0R1) { - p.parseError(slot.CleanStart0R1, p.cI, first[slot.CleanStart0R1]) - break - } - - p.call(slot.CleanStart0R2, cU, p.cI) - case slot.CleanStart0R2: // CleanStart : InitChain StateSync ∙ConsensusExec - - if !p.testSelect(slot.CleanStart0R2) { - p.parseError(slot.CleanStart0R2, p.cI, first[slot.CleanStart0R2]) - break - } - - p.call(slot.CleanStart0R3, cU, p.cI) - case slot.CleanStart0R3: // CleanStart : InitChain StateSync ConsensusExec ∙ - - if 
p.follow(symbols.NT_CleanStart) { - p.rtn(symbols.NT_CleanStart, cU, p.cI) - } else { - p.parseError(slot.CleanStart0R0, p.cI, followSets[symbols.NT_CleanStart]) - } - case slot.CleanStart1R0: // CleanStart : ∙InitChain ConsensusExec - - p.call(slot.CleanStart1R1, cU, p.cI) - case slot.CleanStart1R1: // CleanStart : InitChain ∙ConsensusExec - - if !p.testSelect(slot.CleanStart1R1) { - p.parseError(slot.CleanStart1R1, p.cI, first[slot.CleanStart1R1]) - break - } - - p.call(slot.CleanStart1R2, cU, p.cI) - case slot.CleanStart1R2: // CleanStart : InitChain ConsensusExec ∙ - - if p.follow(symbols.NT_CleanStart) { - p.rtn(symbols.NT_CleanStart, cU, p.cI) - } else { - p.parseError(slot.CleanStart1R0, p.cI, followSets[symbols.NT_CleanStart]) - } - case slot.Commit0R0: // Commit : ∙commit - - p.bsrSet.Add(slot.Commit0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_Commit) { - p.rtn(symbols.NT_Commit, cU, p.cI) - } else { - p.parseError(slot.Commit0R0, p.cI, followSets[symbols.NT_Commit]) - } - case slot.ConsensusExec0R0: // ConsensusExec : ∙ConsensusHeights - - p.call(slot.ConsensusExec0R1, cU, p.cI) - case slot.ConsensusExec0R1: // ConsensusExec : ConsensusHeights ∙ - - if p.follow(symbols.NT_ConsensusExec) { - p.rtn(symbols.NT_ConsensusExec, cU, p.cI) - } else { - p.parseError(slot.ConsensusExec0R0, p.cI, followSets[symbols.NT_ConsensusExec]) - } - case slot.ConsensusHeight0R0: // ConsensusHeight : ∙ConsensusRounds FinalizeBlock Commit - - p.call(slot.ConsensusHeight0R1, cU, p.cI) - case slot.ConsensusHeight0R1: // ConsensusHeight : ConsensusRounds ∙FinalizeBlock Commit - - if !p.testSelect(slot.ConsensusHeight0R1) { - p.parseError(slot.ConsensusHeight0R1, p.cI, first[slot.ConsensusHeight0R1]) - break - } - - p.call(slot.ConsensusHeight0R2, cU, p.cI) - case slot.ConsensusHeight0R2: // ConsensusHeight : ConsensusRounds FinalizeBlock ∙Commit - - if !p.testSelect(slot.ConsensusHeight0R2) { - p.parseError(slot.ConsensusHeight0R2, p.cI, first[slot.ConsensusHeight0R2]) - break - } - - p.call(slot.ConsensusHeight0R3, cU, p.cI) - case slot.ConsensusHeight0R3: // ConsensusHeight : ConsensusRounds FinalizeBlock Commit ∙ - - if p.follow(symbols.NT_ConsensusHeight) { - p.rtn(symbols.NT_ConsensusHeight, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeight0R0, p.cI, followSets[symbols.NT_ConsensusHeight]) - } - case slot.ConsensusHeight1R0: // ConsensusHeight : ∙FinalizeBlock Commit - - p.call(slot.ConsensusHeight1R1, cU, p.cI) - case slot.ConsensusHeight1R1: // ConsensusHeight : FinalizeBlock ∙Commit - - if !p.testSelect(slot.ConsensusHeight1R1) { - p.parseError(slot.ConsensusHeight1R1, p.cI, first[slot.ConsensusHeight1R1]) - break - } - - p.call(slot.ConsensusHeight1R2, cU, p.cI) - case slot.ConsensusHeight1R2: // ConsensusHeight : FinalizeBlock Commit ∙ - - if p.follow(symbols.NT_ConsensusHeight) { - p.rtn(symbols.NT_ConsensusHeight, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeight1R0, p.cI, followSets[symbols.NT_ConsensusHeight]) - } - case slot.ConsensusHeights0R0: // ConsensusHeights : ∙ConsensusHeight - - p.call(slot.ConsensusHeights0R1, cU, p.cI) - case slot.ConsensusHeights0R1: // ConsensusHeights : ConsensusHeight ∙ - - if p.follow(symbols.NT_ConsensusHeights) { - p.rtn(symbols.NT_ConsensusHeights, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeights0R0, p.cI, followSets[symbols.NT_ConsensusHeights]) - } - case slot.ConsensusHeights1R0: // ConsensusHeights : ∙ConsensusHeight ConsensusHeights - - p.call(slot.ConsensusHeights1R1, cU, p.cI) - case slot.ConsensusHeights1R1: // 
ConsensusHeights : ConsensusHeight ∙ConsensusHeights - - if !p.testSelect(slot.ConsensusHeights1R1) { - p.parseError(slot.ConsensusHeights1R1, p.cI, first[slot.ConsensusHeights1R1]) - break - } - - p.call(slot.ConsensusHeights1R2, cU, p.cI) - case slot.ConsensusHeights1R2: // ConsensusHeights : ConsensusHeight ConsensusHeights ∙ - - if p.follow(symbols.NT_ConsensusHeights) { - p.rtn(symbols.NT_ConsensusHeights, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeights1R0, p.cI, followSets[symbols.NT_ConsensusHeights]) - } - case slot.ConsensusRound0R0: // ConsensusRound : ∙Proposer - - p.call(slot.ConsensusRound0R1, cU, p.cI) - case slot.ConsensusRound0R1: // ConsensusRound : Proposer ∙ - - if p.follow(symbols.NT_ConsensusRound) { - p.rtn(symbols.NT_ConsensusRound, cU, p.cI) - } else { - p.parseError(slot.ConsensusRound0R0, p.cI, followSets[symbols.NT_ConsensusRound]) - } - case slot.ConsensusRound1R0: // ConsensusRound : ∙NonProposer - - p.call(slot.ConsensusRound1R1, cU, p.cI) - case slot.ConsensusRound1R1: // ConsensusRound : NonProposer ∙ - - if p.follow(symbols.NT_ConsensusRound) { - p.rtn(symbols.NT_ConsensusRound, cU, p.cI) - } else { - p.parseError(slot.ConsensusRound1R0, p.cI, followSets[symbols.NT_ConsensusRound]) - } - case slot.ConsensusRounds0R0: // ConsensusRounds : ∙ConsensusRound - - p.call(slot.ConsensusRounds0R1, cU, p.cI) - case slot.ConsensusRounds0R1: // ConsensusRounds : ConsensusRound ∙ - - if p.follow(symbols.NT_ConsensusRounds) { - p.rtn(symbols.NT_ConsensusRounds, cU, p.cI) - } else { - p.parseError(slot.ConsensusRounds0R0, p.cI, followSets[symbols.NT_ConsensusRounds]) - } - case slot.ConsensusRounds1R0: // ConsensusRounds : ∙ConsensusRound ConsensusRounds - - p.call(slot.ConsensusRounds1R1, cU, p.cI) - case slot.ConsensusRounds1R1: // ConsensusRounds : ConsensusRound ∙ConsensusRounds - - if !p.testSelect(slot.ConsensusRounds1R1) { - p.parseError(slot.ConsensusRounds1R1, p.cI, first[slot.ConsensusRounds1R1]) - break - } - - p.call(slot.ConsensusRounds1R2, cU, p.cI) - case slot.ConsensusRounds1R2: // ConsensusRounds : ConsensusRound ConsensusRounds ∙ - - if p.follow(symbols.NT_ConsensusRounds) { - p.rtn(symbols.NT_ConsensusRounds, cU, p.cI) - } else { - p.parseError(slot.ConsensusRounds1R0, p.cI, followSets[symbols.NT_ConsensusRounds]) - } - case slot.FinalizeBlock0R0: // FinalizeBlock : ∙finalize_block - - p.bsrSet.Add(slot.FinalizeBlock0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_FinalizeBlock) { - p.rtn(symbols.NT_FinalizeBlock, cU, p.cI) - } else { - p.parseError(slot.FinalizeBlock0R0, p.cI, followSets[symbols.NT_FinalizeBlock]) - } - case slot.InitChain0R0: // InitChain : ∙init_chain - - p.bsrSet.Add(slot.InitChain0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_InitChain) { - p.rtn(symbols.NT_InitChain, cU, p.cI) - } else { - p.parseError(slot.InitChain0R0, p.cI, followSets[symbols.NT_InitChain]) - } - case slot.NonProposer0R0: // NonProposer : ∙ProcessProposal - - p.call(slot.NonProposer0R1, cU, p.cI) - case slot.NonProposer0R1: // NonProposer : ProcessProposal ∙ - - if p.follow(symbols.NT_NonProposer) { - p.rtn(symbols.NT_NonProposer, cU, p.cI) - } else { - p.parseError(slot.NonProposer0R0, p.cI, followSets[symbols.NT_NonProposer]) - } - case slot.OfferSnapshot0R0: // OfferSnapshot : ∙offer_snapshot - - p.bsrSet.Add(slot.OfferSnapshot0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_OfferSnapshot) { - p.rtn(symbols.NT_OfferSnapshot, cU, p.cI) - } else { - p.parseError(slot.OfferSnapshot0R0, p.cI, 
followSets[symbols.NT_OfferSnapshot]) - } - case slot.PrepareProposal0R0: // PrepareProposal : ∙prepare_proposal - - p.bsrSet.Add(slot.PrepareProposal0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_PrepareProposal) { - p.rtn(symbols.NT_PrepareProposal, cU, p.cI) - } else { - p.parseError(slot.PrepareProposal0R0, p.cI, followSets[symbols.NT_PrepareProposal]) - } - case slot.ProcessProposal0R0: // ProcessProposal : ∙process_proposal - - p.bsrSet.Add(slot.ProcessProposal0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_ProcessProposal) { - p.rtn(symbols.NT_ProcessProposal, cU, p.cI) - } else { - p.parseError(slot.ProcessProposal0R0, p.cI, followSets[symbols.NT_ProcessProposal]) - } - case slot.Proposer0R0: // Proposer : ∙PrepareProposal - - p.call(slot.Proposer0R1, cU, p.cI) - case slot.Proposer0R1: // Proposer : PrepareProposal ∙ - - if p.follow(symbols.NT_Proposer) { - p.rtn(symbols.NT_Proposer, cU, p.cI) - } else { - p.parseError(slot.Proposer0R0, p.cI, followSets[symbols.NT_Proposer]) - } - case slot.Proposer1R0: // Proposer : ∙PrepareProposal ProcessProposal - - p.call(slot.Proposer1R1, cU, p.cI) - case slot.Proposer1R1: // Proposer : PrepareProposal ∙ProcessProposal - - if !p.testSelect(slot.Proposer1R1) { - p.parseError(slot.Proposer1R1, p.cI, first[slot.Proposer1R1]) - break - } - - p.call(slot.Proposer1R2, cU, p.cI) - case slot.Proposer1R2: // Proposer : PrepareProposal ProcessProposal ∙ - - if p.follow(symbols.NT_Proposer) { - p.rtn(symbols.NT_Proposer, cU, p.cI) - } else { - p.parseError(slot.Proposer1R0, p.cI, followSets[symbols.NT_Proposer]) - } - case slot.Start0R0: // Start : ∙CleanStart - - p.call(slot.Start0R1, cU, p.cI) - case slot.Start0R1: // Start : CleanStart ∙ - - if p.follow(symbols.NT_Start) { - p.rtn(symbols.NT_Start, cU, p.cI) - } else { - p.parseError(slot.Start0R0, p.cI, followSets[symbols.NT_Start]) - } - case slot.StateSync0R0: // StateSync : ∙StateSyncAttempts SuccessSync - - p.call(slot.StateSync0R1, cU, p.cI) - case slot.StateSync0R1: // StateSync : StateSyncAttempts ∙SuccessSync - - if !p.testSelect(slot.StateSync0R1) { - p.parseError(slot.StateSync0R1, p.cI, first[slot.StateSync0R1]) - break - } - - p.call(slot.StateSync0R2, cU, p.cI) - case slot.StateSync0R2: // StateSync : StateSyncAttempts SuccessSync ∙ - - if p.follow(symbols.NT_StateSync) { - p.rtn(symbols.NT_StateSync, cU, p.cI) - } else { - p.parseError(slot.StateSync0R0, p.cI, followSets[symbols.NT_StateSync]) - } - case slot.StateSync1R0: // StateSync : ∙SuccessSync - - p.call(slot.StateSync1R1, cU, p.cI) - case slot.StateSync1R1: // StateSync : SuccessSync ∙ - - if p.follow(symbols.NT_StateSync) { - p.rtn(symbols.NT_StateSync, cU, p.cI) - } else { - p.parseError(slot.StateSync1R0, p.cI, followSets[symbols.NT_StateSync]) - } - case slot.StateSyncAttempt0R0: // StateSyncAttempt : ∙OfferSnapshot ApplyChunks - - p.call(slot.StateSyncAttempt0R1, cU, p.cI) - case slot.StateSyncAttempt0R1: // StateSyncAttempt : OfferSnapshot ∙ApplyChunks - - if !p.testSelect(slot.StateSyncAttempt0R1) { - p.parseError(slot.StateSyncAttempt0R1, p.cI, first[slot.StateSyncAttempt0R1]) - break - } - - p.call(slot.StateSyncAttempt0R2, cU, p.cI) - case slot.StateSyncAttempt0R2: // StateSyncAttempt : OfferSnapshot ApplyChunks ∙ - - if p.follow(symbols.NT_StateSyncAttempt) { - p.rtn(symbols.NT_StateSyncAttempt, cU, p.cI) - } else { - p.parseError(slot.StateSyncAttempt0R0, p.cI, followSets[symbols.NT_StateSyncAttempt]) - } - case slot.StateSyncAttempt1R0: // StateSyncAttempt : ∙OfferSnapshot - - 
p.call(slot.StateSyncAttempt1R1, cU, p.cI) - case slot.StateSyncAttempt1R1: // StateSyncAttempt : OfferSnapshot ∙ - - if p.follow(symbols.NT_StateSyncAttempt) { - p.rtn(symbols.NT_StateSyncAttempt, cU, p.cI) - } else { - p.parseError(slot.StateSyncAttempt1R0, p.cI, followSets[symbols.NT_StateSyncAttempt]) - } - case slot.StateSyncAttempts0R0: // StateSyncAttempts : ∙StateSyncAttempt - - p.call(slot.StateSyncAttempts0R1, cU, p.cI) - case slot.StateSyncAttempts0R1: // StateSyncAttempts : StateSyncAttempt ∙ - - if p.follow(symbols.NT_StateSyncAttempts) { - p.rtn(symbols.NT_StateSyncAttempts, cU, p.cI) - } else { - p.parseError(slot.StateSyncAttempts0R0, p.cI, followSets[symbols.NT_StateSyncAttempts]) - } - case slot.StateSyncAttempts1R0: // StateSyncAttempts : ∙StateSyncAttempt StateSyncAttempts - - p.call(slot.StateSyncAttempts1R1, cU, p.cI) - case slot.StateSyncAttempts1R1: // StateSyncAttempts : StateSyncAttempt ∙StateSyncAttempts - - if !p.testSelect(slot.StateSyncAttempts1R1) { - p.parseError(slot.StateSyncAttempts1R1, p.cI, first[slot.StateSyncAttempts1R1]) - break - } - - p.call(slot.StateSyncAttempts1R2, cU, p.cI) - case slot.StateSyncAttempts1R2: // StateSyncAttempts : StateSyncAttempt StateSyncAttempts ∙ - - if p.follow(symbols.NT_StateSyncAttempts) { - p.rtn(symbols.NT_StateSyncAttempts, cU, p.cI) - } else { - p.parseError(slot.StateSyncAttempts1R0, p.cI, followSets[symbols.NT_StateSyncAttempts]) - } - case slot.SuccessSync0R0: // SuccessSync : ∙OfferSnapshot ApplyChunks - - p.call(slot.SuccessSync0R1, cU, p.cI) - case slot.SuccessSync0R1: // SuccessSync : OfferSnapshot ∙ApplyChunks - - if !p.testSelect(slot.SuccessSync0R1) { - p.parseError(slot.SuccessSync0R1, p.cI, first[slot.SuccessSync0R1]) - break - } - - p.call(slot.SuccessSync0R2, cU, p.cI) - case slot.SuccessSync0R2: // SuccessSync : OfferSnapshot ApplyChunks ∙ - - if p.follow(symbols.NT_SuccessSync) { - p.rtn(symbols.NT_SuccessSync, cU, p.cI) - } else { - p.parseError(slot.SuccessSync0R0, p.cI, followSets[symbols.NT_SuccessSync]) - } - - default: - panic("This must not happen") - } - } - if !p.bsrSet.Contain(symbols.NT_Start, 0, m) { - p.sortParseErrors() - return nil, p.parseErrors - } - return p.bsrSet, nil -} - -func (p *parser) ntAdd(nt symbols.NT, j int) { - // fmt.Printf("p.ntAdd(%s, %d)\n", nt, j) - failed := true - expected := map[token.Type]string{} - for _, l := range slot.GetAlternates(nt) { - if p.testSelect(l) { - p.dscAdd(l, j, j) - failed = false - } else { - for k, v := range first[l] { - expected[k] = v - } - } - } - if failed { - for _, l := range slot.GetAlternates(nt) { - p.parseError(l, j, expected) - } - } -} - -/*** Call Return Forest ***/ - -type poppedNode struct { - X symbols.NT - k, j int -} - -type clusterNode struct { - X symbols.NT - k int -} - -type crfNode struct { - L slot.Label - i int -} - -/* -suppose that L is Y ::=αX ·β -if there is no CRF node labelled (L,i) - - create one let u be the CRF node labelled (L,i) - -if there is no CRF node labelled (X, j) { - - create a CRF node v labelled (X, j) - create an edge from v to u - ntAdd(X, j) - } else { - - let v be the CRF node labelled (X, j) - if there is not an edge from v to u { - create an edge from v to u - for all ((X, j,h)∈P) { - dscAdd(L, i, h); - bsrAdd(L, i, j, h) - } - } - } -*/ -func (p *parser) call(L slot.Label, i, j int) { - // fmt.Printf("p.call(%s,%d,%d)\n", L,i,j) - u, exist := p.crfNodes[crfNode{L, i}] - // fmt.Printf(" u exist=%t\n", exist) - if !exist { - u = &crfNode{L, i} - p.crfNodes[*u] = u - } - X := 
L.Symbols()[L.Pos()-1].(symbols.NT) - ndV := clusterNode{X, j} - v, exist := p.crf[ndV] - if !exist { - // fmt.Println(" v !exist") - p.crf[ndV] = []*crfNode{u} - p.ntAdd(X, j) - } else { - // fmt.Println(" v exist") - if !existEdge(v, u) { - // fmt.Printf(" !existEdge(%v)\n", u) - p.crf[ndV] = append(v, u) - // fmt.Printf("|popped|=%d\n", len(popped)) - for pnd := range p.popped { - if pnd.X == X && pnd.k == j { - p.dscAdd(L, i, pnd.j) - p.bsrSet.Add(L, i, j, pnd.j) - } - } - } - } -} - -func existEdge(nds []*crfNode, nd *crfNode) bool { - for _, nd1 := range nds { - if nd1 == nd { - return true - } - } - return false -} - -func (p *parser) rtn(X symbols.NT, k, j int) { - // fmt.Printf("p.rtn(%s,%d,%d)\n", X,k,j) - pn := poppedNode{X, k, j} - if _, exist := p.popped[pn]; !exist { - p.popped[pn] = true - for _, nd := range p.crf[clusterNode{X, k}] { - p.dscAdd(nd.L, nd.i, j) - p.bsrSet.Add(nd.L, nd.i, k, j) - } - } -} - -// func CRFString() string { -// buf := new(bytes.Buffer) -// buf.WriteString("CRF: {") -// for cn, nds := range crf{ -// for _, nd := range nds { -// fmt.Fprintf(buf, "%s->%s, ", cn, nd) -// } -// } -// buf.WriteString("}") -// return buf.String() -// } - -func (cn clusterNode) String() string { - return fmt.Sprintf("(%s,%d)", cn.X, cn.k) -} - -func (n crfNode) String() string { - return fmt.Sprintf("(%s,%d)", n.L.String(), n.i) -} - -// func PoppedString() string { -// buf := new(bytes.Buffer) -// buf.WriteString("Popped: {") -// for p, _ := range popped { -// fmt.Fprintf(buf, "(%s,%d,%d) ", p.X, p.k, p.j) -// } -// buf.WriteString("}") -// return buf.String() -// } - -/*** descriptors ***/ - -type descriptors struct { - set []*descriptor -} - -func (ds *descriptors) contain(d *descriptor) bool { - for _, d1 := range ds.set { - if d1 == d { - return true - } - } - return false -} - -func (ds *descriptors) empty() bool { - return len(ds.set) == 0 -} - -func (ds *descriptors) String() string { - buf := new(bytes.Buffer) - buf.WriteString("{") - for i, d := range ds.set { - if i > 0 { - buf.WriteString("; ") - } - fmt.Fprintf(buf, "%s", d) - } - buf.WriteString("}") - return buf.String() -} - -type descriptor struct { - L slot.Label - k int - i int -} - -func (d *descriptor) String() string { - return fmt.Sprintf("%s,%d,%d", d.L, d.k, d.i) -} - -func (p *parser) dscAdd(L slot.Label, k, i int) { - // fmt.Printf("p.dscAdd(%s,%d,%d)\n", L, k, i) - d := &descriptor{L, k, i} - if !p.U.contain(d) { - p.R.set = append(p.R.set, d) - p.U.set = append(p.U.set, d) - } -} - -func (ds *descriptors) remove() (L slot.Label, k, i int) { - d := ds.set[len(ds.set)-1] - ds.set = ds.set[:len(ds.set)-1] - // fmt.Printf("remove: %s,%d,%d\n", d.L, d.k, d.i) - return d.L, d.k, d.i -} - -func (p *parser) DumpDescriptors() { - p.DumpR() - p.DumpU() -} - -func (p *parser) DumpR() { - fmt.Println("R:") - for _, d := range p.R.set { - fmt.Printf(" %s\n", d) - } -} - -func (p *parser) DumpU() { - fmt.Println("U:") - for _, d := range p.U.set { - fmt.Printf(" %s\n", d) - } -} - -/*** TestSelect ***/ - -func (p *parser) follow(nt symbols.NT) bool { - _, exist := followSets[nt][p.lex.Tokens[p.cI].Type()] - return exist -} - -func (p *parser) testSelect(l slot.Label) bool { - _, exist := first[l][p.lex.Tokens[p.cI].Type()] - // fmt.Printf("testSelect(%s) = %t\n", l, exist) - return exist -} - -var first = []map[token.Type]string{ - // ApplyChunk : ∙apply_snapshot_chunk - { - token.T_0: "apply_snapshot_chunk", - }, - // ApplyChunk : apply_snapshot_chunk ∙ - { - token.T_0: "apply_snapshot_chunk", - token.T_2: 
"finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ApplyChunks : ∙ApplyChunk - { - token.T_0: "apply_snapshot_chunk", - }, - // ApplyChunks : ApplyChunk ∙ - { - token.T_2: "finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ApplyChunks : ∙ApplyChunk ApplyChunks - { - token.T_0: "apply_snapshot_chunk", - }, - // ApplyChunks : ApplyChunk ∙ApplyChunks - { - token.T_0: "apply_snapshot_chunk", - }, - // ApplyChunks : ApplyChunk ApplyChunks ∙ - { - token.T_2: "finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // CleanStart : ∙InitChain StateSync ConsensusExec - { - token.T_3: "init_chain", - }, - // CleanStart : InitChain ∙StateSync ConsensusExec - { - token.T_4: "offer_snapshot", - }, - // CleanStart : InitChain StateSync ∙ConsensusExec - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // CleanStart : InitChain StateSync ConsensusExec ∙ - { - token.EOF: "$", - }, - // CleanStart : ∙InitChain ConsensusExec - { - token.T_3: "init_chain", - }, - // CleanStart : InitChain ∙ConsensusExec - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // CleanStart : InitChain ConsensusExec ∙ - { - token.EOF: "$", - }, - // Commit : ∙commit - { - token.T_1: "commit", - }, - // Commit : commit ∙ - { - token.EOF: "$", - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusExec : ∙ConsensusHeights - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusExec : ConsensusHeights ∙ - { - token.EOF: "$", - }, - // ConsensusHeight : ∙ConsensusRounds FinalizeBlock Commit - { - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeight : ConsensusRounds ∙FinalizeBlock Commit - { - token.T_2: "finalize_block", - }, - // ConsensusHeight : ConsensusRounds FinalizeBlock ∙Commit - { - token.T_1: "commit", - }, - // ConsensusHeight : ConsensusRounds FinalizeBlock Commit ∙ - { - token.EOF: "$", - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeight : ∙FinalizeBlock Commit - { - token.T_2: "finalize_block", - }, - // ConsensusHeight : FinalizeBlock ∙Commit - { - token.T_1: "commit", - }, - // ConsensusHeight : FinalizeBlock Commit ∙ - { - token.EOF: "$", - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeights : ∙ConsensusHeight - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeights : ConsensusHeight ∙ - { - token.EOF: "$", - }, - // ConsensusHeights : ∙ConsensusHeight ConsensusHeights - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeights : ConsensusHeight ∙ConsensusHeights - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeights : ConsensusHeight ConsensusHeights ∙ - { - token.EOF: "$", - }, - // ConsensusRound : ∙Proposer - { - token.T_5: "prepare_proposal", - }, - // ConsensusRound : Proposer ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // 
ConsensusRound : ∙NonProposer - { - token.T_6: "process_proposal", - }, - // ConsensusRound : NonProposer ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusRounds : ∙ConsensusRound - { - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusRounds : ConsensusRound ∙ - { - token.T_2: "finalize_block", - }, - // ConsensusRounds : ∙ConsensusRound ConsensusRounds - { - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusRounds : ConsensusRound ∙ConsensusRounds - { - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusRounds : ConsensusRound ConsensusRounds ∙ - { - token.T_2: "finalize_block", - }, - // FinalizeBlock : ∙finalize_block - { - token.T_2: "finalize_block", - }, - // FinalizeBlock : finalize_block ∙ - { - token.T_1: "commit", - }, - // InitChain : ∙init_chain - { - token.T_3: "init_chain", - }, - // InitChain : init_chain ∙ - { - token.T_2: "finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // NonProposer : ∙ProcessProposal - { - token.T_6: "process_proposal", - }, - // NonProposer : ProcessProposal ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // OfferSnapshot : ∙offer_snapshot - { - token.T_4: "offer_snapshot", - }, - // OfferSnapshot : offer_snapshot ∙ - { - token.T_0: "apply_snapshot_chunk", - token.T_4: "offer_snapshot", - }, - // PrepareProposal : ∙prepare_proposal - { - token.T_5: "prepare_proposal", - }, - // PrepareProposal : prepare_proposal ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ProcessProposal : ∙process_proposal - { - token.T_6: "process_proposal", - }, - // ProcessProposal : process_proposal ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // Proposer : ∙PrepareProposal - { - token.T_5: "prepare_proposal", - }, - // Proposer : PrepareProposal ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // Proposer : ∙PrepareProposal ProcessProposal - { - token.T_5: "prepare_proposal", - }, - // Proposer : PrepareProposal ∙ProcessProposal - { - token.T_6: "process_proposal", - }, - // Proposer : PrepareProposal ProcessProposal ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // Start : ∙CleanStart - { - token.T_3: "init_chain", - }, - // Start : CleanStart ∙ - { - token.EOF: "$", - }, - // StateSync : ∙StateSyncAttempts SuccessSync - { - token.T_4: "offer_snapshot", - }, - // StateSync : StateSyncAttempts ∙SuccessSync - { - token.T_4: "offer_snapshot", - }, - // StateSync : StateSyncAttempts SuccessSync ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // StateSync : ∙SuccessSync - { - token.T_4: "offer_snapshot", - }, - // StateSync : SuccessSync ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // StateSyncAttempt : ∙OfferSnapshot ApplyChunks - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempt : OfferSnapshot ∙ApplyChunks - { - token.T_0: "apply_snapshot_chunk", - }, - // StateSyncAttempt : OfferSnapshot ApplyChunks ∙ - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempt : ∙OfferSnapshot - { - 
token.T_4: "offer_snapshot", - }, - // StateSyncAttempt : OfferSnapshot ∙ - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempts : ∙StateSyncAttempt - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempts : StateSyncAttempt ∙ - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempts : ∙StateSyncAttempt StateSyncAttempts - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempts : StateSyncAttempt ∙StateSyncAttempts - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempts : StateSyncAttempt StateSyncAttempts ∙ - { - token.T_4: "offer_snapshot", - }, - // SuccessSync : ∙OfferSnapshot ApplyChunks - { - token.T_4: "offer_snapshot", - }, - // SuccessSync : OfferSnapshot ∙ApplyChunks - { - token.T_0: "apply_snapshot_chunk", - }, - // SuccessSync : OfferSnapshot ApplyChunks ∙ - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, -} - -var followSets = []map[token.Type]string{ - // ApplyChunk - { - token.T_0: "apply_snapshot_chunk", - token.T_2: "finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ApplyChunks - { - token.T_2: "finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // CleanStart - { - token.EOF: "$", - }, - // Commit - { - token.EOF: "$", - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusExec - { - token.EOF: "$", - }, - // ConsensusHeight - { - token.EOF: "$", - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusHeights - { - token.EOF: "$", - }, - // ConsensusRound - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ConsensusRounds - { - token.T_2: "finalize_block", - }, - // FinalizeBlock - { - token.T_1: "commit", - }, - // InitChain - { - token.T_2: "finalize_block", - token.T_4: "offer_snapshot", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // NonProposer - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // OfferSnapshot - { - token.T_0: "apply_snapshot_chunk", - token.T_4: "offer_snapshot", - }, - // PrepareProposal - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // ProcessProposal - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // Proposer - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // Start - { - token.EOF: "$", - }, - // StateSync - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, - // StateSyncAttempt - { - token.T_4: "offer_snapshot", - }, - // StateSyncAttempts - { - token.T_4: "offer_snapshot", - }, - // SuccessSync - { - token.T_2: "finalize_block", - token.T_5: "prepare_proposal", - token.T_6: "process_proposal", - }, -} - -/*** Errors ***/ - -/* -Error is returned by Parse at every point at which the parser fails to parse -a grammar production. For non-LL-1 grammars there will be an error for each -alternate attempted by the parser. - -The errors are sorted in descending order of input position (index of token in -the stream of tokens). - -Normally the error of interest is the one that has parsed the largest number of -tokens. 
-*/ -type Error struct { - // Index of token that caused the error. - cI int - - // Grammar slot at which the error occured. - Slot slot.Label - - // The token at which the error occurred. - Token *token.Token - - // The line and column in the input text at which the error occurred - Line, Column int - - // The tokens expected at the point where the error occurred - Expected map[token.Type]string -} - -func (pe *Error) String() string { - w := new(bytes.Buffer) - fmt.Fprintf(w, "Parse Error: %s I[%d]=%s at line %d col %d\n", - pe.Slot, pe.cI, pe.Token, pe.Line, pe.Column) - exp := []string{} - for _, e := range pe.Expected { - exp = append(exp, e) - } - fmt.Fprintf(w, "Expected one of: [%s]", strings.Join(exp, ",")) - return w.String() -} - -func (p *parser) parseError(slot slot.Label, i int, expected map[token.Type]string) { - pe := &Error{cI: i, Slot: slot, Token: p.lex.Tokens[i], Expected: expected} - p.parseErrors = append(p.parseErrors, pe) -} - -func (p *parser) sortParseErrors() { - sort.Slice(p.parseErrors, - func(i, j int) bool { - return p.parseErrors[j].Token.Lext() < p.parseErrors[i].Token.Lext() - }) - for _, pe := range p.parseErrors { - pe.Line, pe.Column = p.lex.GetLineColumn(pe.Token.Lext()) - } -} diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/slot/slot.go b/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/slot/slot.go deleted file mode 100644 index f13c8e7128a..00000000000 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/slot/slot.go +++ /dev/null @@ -1,862 +0,0 @@ - -// Package slot is generated by gogll. Do not edit. -package slot - -import( - "bytes" - "fmt" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols" -) - -type Label int - -const( - ApplyChunk0R0 Label = iota - ApplyChunk0R1 - ApplyChunks0R0 - ApplyChunks0R1 - ApplyChunks1R0 - ApplyChunks1R1 - ApplyChunks1R2 - CleanStart0R0 - CleanStart0R1 - CleanStart0R2 - CleanStart0R3 - CleanStart1R0 - CleanStart1R1 - CleanStart1R2 - Commit0R0 - Commit0R1 - ConsensusExec0R0 - ConsensusExec0R1 - ConsensusHeight0R0 - ConsensusHeight0R1 - ConsensusHeight0R2 - ConsensusHeight0R3 - ConsensusHeight1R0 - ConsensusHeight1R1 - ConsensusHeight1R2 - ConsensusHeights0R0 - ConsensusHeights0R1 - ConsensusHeights1R0 - ConsensusHeights1R1 - ConsensusHeights1R2 - ConsensusRound0R0 - ConsensusRound0R1 - ConsensusRound1R0 - ConsensusRound1R1 - ConsensusRounds0R0 - ConsensusRounds0R1 - ConsensusRounds1R0 - ConsensusRounds1R1 - ConsensusRounds1R2 - FinalizeBlock0R0 - FinalizeBlock0R1 - InitChain0R0 - InitChain0R1 - NonProposer0R0 - NonProposer0R1 - OfferSnapshot0R0 - OfferSnapshot0R1 - PrepareProposal0R0 - PrepareProposal0R1 - ProcessProposal0R0 - ProcessProposal0R1 - Proposer0R0 - Proposer0R1 - Proposer1R0 - Proposer1R1 - Proposer1R2 - Start0R0 - Start0R1 - StateSync0R0 - StateSync0R1 - StateSync0R2 - StateSync1R0 - StateSync1R1 - StateSyncAttempt0R0 - StateSyncAttempt0R1 - StateSyncAttempt0R2 - StateSyncAttempt1R0 - StateSyncAttempt1R1 - StateSyncAttempts0R0 - StateSyncAttempts0R1 - StateSyncAttempts1R0 - StateSyncAttempts1R1 - StateSyncAttempts1R2 - SuccessSync0R0 - SuccessSync0R1 - SuccessSync0R2 -) - -type Slot struct { - NT symbols.NT - Alt int - Pos int - Symbols symbols.Symbols - Label Label -} - -type Index struct { - NT symbols.NT - Alt int - Pos int -} - -func GetAlternates(nt symbols.NT) []Label { - alts, exist := alternates[nt] - if !exist { - panic(fmt.Sprintf("Invalid NT %s", nt)) - } - return alts -} - -func GetLabel(nt symbols.NT, alt, pos int) Label 
{ - l, exist := slotIndex[Index{nt,alt,pos}] - if exist { - return l - } - panic(fmt.Sprintf("Error: no slot label for NT=%s, alt=%d, pos=%d", nt, alt, pos)) -} - -func (l Label) EoR() bool { - return l.Slot().EoR() -} - -func (l Label) Head() symbols.NT { - return l.Slot().NT -} - -func (l Label) Index() Index { - s := l.Slot() - return Index{s.NT, s.Alt, s.Pos} -} - -func (l Label) Alternate() int { - return l.Slot().Alt -} - -func (l Label) Pos() int { - return l.Slot().Pos -} - -func (l Label) Slot() *Slot { - s, exist := slots[l] - if !exist { - panic(fmt.Sprintf("Invalid slot label %d", l)) - } - return s -} - -func (l Label) String() string { - return l.Slot().String() -} - -func (l Label) Symbols() symbols.Symbols { - return l.Slot().Symbols -} - -func (s *Slot) EoR() bool { - return s.Pos >= len(s.Symbols) -} - -func (s *Slot) String() string { - buf := new(bytes.Buffer) - fmt.Fprintf(buf, "%s : ", s.NT) - for i, sym := range s.Symbols { - if i == s.Pos { - fmt.Fprintf(buf, "∙") - } - fmt.Fprintf(buf, "%s ", sym) - } - if s.Pos >= len(s.Symbols) { - fmt.Fprintf(buf, "∙") - } - return buf.String() -} - -var slots = map[Label]*Slot{ - ApplyChunk0R0: { - symbols.NT_ApplyChunk, 0, 0, - symbols.Symbols{ - symbols.T_0, - }, - ApplyChunk0R0, - }, - ApplyChunk0R1: { - symbols.NT_ApplyChunk, 0, 1, - symbols.Symbols{ - symbols.T_0, - }, - ApplyChunk0R1, - }, - ApplyChunks0R0: { - symbols.NT_ApplyChunks, 0, 0, - symbols.Symbols{ - symbols.NT_ApplyChunk, - }, - ApplyChunks0R0, - }, - ApplyChunks0R1: { - symbols.NT_ApplyChunks, 0, 1, - symbols.Symbols{ - symbols.NT_ApplyChunk, - }, - ApplyChunks0R1, - }, - ApplyChunks1R0: { - symbols.NT_ApplyChunks, 1, 0, - symbols.Symbols{ - symbols.NT_ApplyChunk, - symbols.NT_ApplyChunks, - }, - ApplyChunks1R0, - }, - ApplyChunks1R1: { - symbols.NT_ApplyChunks, 1, 1, - symbols.Symbols{ - symbols.NT_ApplyChunk, - symbols.NT_ApplyChunks, - }, - ApplyChunks1R1, - }, - ApplyChunks1R2: { - symbols.NT_ApplyChunks, 1, 2, - symbols.Symbols{ - symbols.NT_ApplyChunk, - symbols.NT_ApplyChunks, - }, - ApplyChunks1R2, - }, - CleanStart0R0: { - symbols.NT_CleanStart, 0, 0, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_StateSync, - symbols.NT_ConsensusExec, - }, - CleanStart0R0, - }, - CleanStart0R1: { - symbols.NT_CleanStart, 0, 1, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_StateSync, - symbols.NT_ConsensusExec, - }, - CleanStart0R1, - }, - CleanStart0R2: { - symbols.NT_CleanStart, 0, 2, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_StateSync, - symbols.NT_ConsensusExec, - }, - CleanStart0R2, - }, - CleanStart0R3: { - symbols.NT_CleanStart, 0, 3, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_StateSync, - symbols.NT_ConsensusExec, - }, - CleanStart0R3, - }, - CleanStart1R0: { - symbols.NT_CleanStart, 1, 0, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_ConsensusExec, - }, - CleanStart1R0, - }, - CleanStart1R1: { - symbols.NT_CleanStart, 1, 1, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_ConsensusExec, - }, - CleanStart1R1, - }, - CleanStart1R2: { - symbols.NT_CleanStart, 1, 2, - symbols.Symbols{ - symbols.NT_InitChain, - symbols.NT_ConsensusExec, - }, - CleanStart1R2, - }, - Commit0R0: { - symbols.NT_Commit, 0, 0, - symbols.Symbols{ - symbols.T_1, - }, - Commit0R0, - }, - Commit0R1: { - symbols.NT_Commit, 0, 1, - symbols.Symbols{ - symbols.T_1, - }, - Commit0R1, - }, - ConsensusExec0R0: { - symbols.NT_ConsensusExec, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusHeights, - }, - ConsensusExec0R0, - }, - 
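Each `Label` above names one grammar slot: a single dot position inside one alternate of a rule. A hedged sketch of querying these tables, assuming the package paths being deleted here:

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/slot"
	"github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols"
)

func main() {
	// The slot "CleanStart : InitChain ∙StateSync ConsensusExec":
	// alternate 0 of CleanStart, dot at position 1.
	l := slot.GetLabel(symbols.NT_CleanStart, 0, 1)
	fmt.Println(l)        // the dotted rule, as rendered by Slot.String
	fmt.Println(l.Head()) // CleanStart
	fmt.Println(l.EoR())  // false: the dot is not at the right end
}
```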
ConsensusExec0R1: { - symbols.NT_ConsensusExec, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusHeights, - }, - ConsensusExec0R1, - }, - ConsensusHeight0R0: { - symbols.NT_ConsensusHeight, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R0, - }, - ConsensusHeight0R1: { - symbols.NT_ConsensusHeight, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R1, - }, - ConsensusHeight0R2: { - symbols.NT_ConsensusHeight, 0, 2, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R2, - }, - ConsensusHeight0R3: { - symbols.NT_ConsensusHeight, 0, 3, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R3, - }, - ConsensusHeight1R0: { - symbols.NT_ConsensusHeight, 1, 0, - symbols.Symbols{ - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight1R0, - }, - ConsensusHeight1R1: { - symbols.NT_ConsensusHeight, 1, 1, - symbols.Symbols{ - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight1R1, - }, - ConsensusHeight1R2: { - symbols.NT_ConsensusHeight, 1, 2, - symbols.Symbols{ - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight1R2, - }, - ConsensusHeights0R0: { - symbols.NT_ConsensusHeights, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - }, - ConsensusHeights0R0, - }, - ConsensusHeights0R1: { - symbols.NT_ConsensusHeights, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - }, - ConsensusHeights0R1, - }, - ConsensusHeights1R0: { - symbols.NT_ConsensusHeights, 1, 0, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - symbols.NT_ConsensusHeights, - }, - ConsensusHeights1R0, - }, - ConsensusHeights1R1: { - symbols.NT_ConsensusHeights, 1, 1, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - symbols.NT_ConsensusHeights, - }, - ConsensusHeights1R1, - }, - ConsensusHeights1R2: { - symbols.NT_ConsensusHeights, 1, 2, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - symbols.NT_ConsensusHeights, - }, - ConsensusHeights1R2, - }, - ConsensusRound0R0: { - symbols.NT_ConsensusRound, 0, 0, - symbols.Symbols{ - symbols.NT_Proposer, - }, - ConsensusRound0R0, - }, - ConsensusRound0R1: { - symbols.NT_ConsensusRound, 0, 1, - symbols.Symbols{ - symbols.NT_Proposer, - }, - ConsensusRound0R1, - }, - ConsensusRound1R0: { - symbols.NT_ConsensusRound, 1, 0, - symbols.Symbols{ - symbols.NT_NonProposer, - }, - ConsensusRound1R0, - }, - ConsensusRound1R1: { - symbols.NT_ConsensusRound, 1, 1, - symbols.Symbols{ - symbols.NT_NonProposer, - }, - ConsensusRound1R1, - }, - ConsensusRounds0R0: { - symbols.NT_ConsensusRounds, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusRound, - }, - ConsensusRounds0R0, - }, - ConsensusRounds0R1: { - symbols.NT_ConsensusRounds, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusRound, - }, - ConsensusRounds0R1, - }, - ConsensusRounds1R0: { - symbols.NT_ConsensusRounds, 1, 0, - symbols.Symbols{ - symbols.NT_ConsensusRound, - symbols.NT_ConsensusRounds, - }, - ConsensusRounds1R0, - }, - ConsensusRounds1R1: { - symbols.NT_ConsensusRounds, 1, 1, - symbols.Symbols{ - symbols.NT_ConsensusRound, - symbols.NT_ConsensusRounds, - }, - ConsensusRounds1R1, - }, - ConsensusRounds1R2: { - symbols.NT_ConsensusRounds, 1, 2, - symbols.Symbols{ - symbols.NT_ConsensusRound, - symbols.NT_ConsensusRounds, - }, - ConsensusRounds1R2, - }, - FinalizeBlock0R0: { - 
symbols.NT_FinalizeBlock, 0, 0, - symbols.Symbols{ - symbols.T_2, - }, - FinalizeBlock0R0, - }, - FinalizeBlock0R1: { - symbols.NT_FinalizeBlock, 0, 1, - symbols.Symbols{ - symbols.T_2, - }, - FinalizeBlock0R1, - }, - InitChain0R0: { - symbols.NT_InitChain, 0, 0, - symbols.Symbols{ - symbols.T_3, - }, - InitChain0R0, - }, - InitChain0R1: { - symbols.NT_InitChain, 0, 1, - symbols.Symbols{ - symbols.T_3, - }, - InitChain0R1, - }, - NonProposer0R0: { - symbols.NT_NonProposer, 0, 0, - symbols.Symbols{ - symbols.NT_ProcessProposal, - }, - NonProposer0R0, - }, - NonProposer0R1: { - symbols.NT_NonProposer, 0, 1, - symbols.Symbols{ - symbols.NT_ProcessProposal, - }, - NonProposer0R1, - }, - OfferSnapshot0R0: { - symbols.NT_OfferSnapshot, 0, 0, - symbols.Symbols{ - symbols.T_4, - }, - OfferSnapshot0R0, - }, - OfferSnapshot0R1: { - symbols.NT_OfferSnapshot, 0, 1, - symbols.Symbols{ - symbols.T_4, - }, - OfferSnapshot0R1, - }, - PrepareProposal0R0: { - symbols.NT_PrepareProposal, 0, 0, - symbols.Symbols{ - symbols.T_5, - }, - PrepareProposal0R0, - }, - PrepareProposal0R1: { - symbols.NT_PrepareProposal, 0, 1, - symbols.Symbols{ - symbols.T_5, - }, - PrepareProposal0R1, - }, - ProcessProposal0R0: { - symbols.NT_ProcessProposal, 0, 0, - symbols.Symbols{ - symbols.T_6, - }, - ProcessProposal0R0, - }, - ProcessProposal0R1: { - symbols.NT_ProcessProposal, 0, 1, - symbols.Symbols{ - symbols.T_6, - }, - ProcessProposal0R1, - }, - Proposer0R0: { - symbols.NT_Proposer, 0, 0, - symbols.Symbols{ - symbols.NT_PrepareProposal, - }, - Proposer0R0, - }, - Proposer0R1: { - symbols.NT_Proposer, 0, 1, - symbols.Symbols{ - symbols.NT_PrepareProposal, - }, - Proposer0R1, - }, - Proposer1R0: { - symbols.NT_Proposer, 1, 0, - symbols.Symbols{ - symbols.NT_PrepareProposal, - symbols.NT_ProcessProposal, - }, - Proposer1R0, - }, - Proposer1R1: { - symbols.NT_Proposer, 1, 1, - symbols.Symbols{ - symbols.NT_PrepareProposal, - symbols.NT_ProcessProposal, - }, - Proposer1R1, - }, - Proposer1R2: { - symbols.NT_Proposer, 1, 2, - symbols.Symbols{ - symbols.NT_PrepareProposal, - symbols.NT_ProcessProposal, - }, - Proposer1R2, - }, - Start0R0: { - symbols.NT_Start, 0, 0, - symbols.Symbols{ - symbols.NT_CleanStart, - }, - Start0R0, - }, - Start0R1: { - symbols.NT_Start, 0, 1, - symbols.Symbols{ - symbols.NT_CleanStart, - }, - Start0R1, - }, - StateSync0R0: { - symbols.NT_StateSync, 0, 0, - symbols.Symbols{ - symbols.NT_StateSyncAttempts, - symbols.NT_SuccessSync, - }, - StateSync0R0, - }, - StateSync0R1: { - symbols.NT_StateSync, 0, 1, - symbols.Symbols{ - symbols.NT_StateSyncAttempts, - symbols.NT_SuccessSync, - }, - StateSync0R1, - }, - StateSync0R2: { - symbols.NT_StateSync, 0, 2, - symbols.Symbols{ - symbols.NT_StateSyncAttempts, - symbols.NT_SuccessSync, - }, - StateSync0R2, - }, - StateSync1R0: { - symbols.NT_StateSync, 1, 0, - symbols.Symbols{ - symbols.NT_SuccessSync, - }, - StateSync1R0, - }, - StateSync1R1: { - symbols.NT_StateSync, 1, 1, - symbols.Symbols{ - symbols.NT_SuccessSync, - }, - StateSync1R1, - }, - StateSyncAttempt0R0: { - symbols.NT_StateSyncAttempt, 0, 0, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - symbols.NT_ApplyChunks, - }, - StateSyncAttempt0R0, - }, - StateSyncAttempt0R1: { - symbols.NT_StateSyncAttempt, 0, 1, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - symbols.NT_ApplyChunks, - }, - StateSyncAttempt0R1, - }, - StateSyncAttempt0R2: { - symbols.NT_StateSyncAttempt, 0, 2, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - symbols.NT_ApplyChunks, - }, - StateSyncAttempt0R2, - }, - StateSyncAttempt1R0: 
{ - symbols.NT_StateSyncAttempt, 1, 0, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - }, - StateSyncAttempt1R0, - }, - StateSyncAttempt1R1: { - symbols.NT_StateSyncAttempt, 1, 1, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - }, - StateSyncAttempt1R1, - }, - StateSyncAttempts0R0: { - symbols.NT_StateSyncAttempts, 0, 0, - symbols.Symbols{ - symbols.NT_StateSyncAttempt, - }, - StateSyncAttempts0R0, - }, - StateSyncAttempts0R1: { - symbols.NT_StateSyncAttempts, 0, 1, - symbols.Symbols{ - symbols.NT_StateSyncAttempt, - }, - StateSyncAttempts0R1, - }, - StateSyncAttempts1R0: { - symbols.NT_StateSyncAttempts, 1, 0, - symbols.Symbols{ - symbols.NT_StateSyncAttempt, - symbols.NT_StateSyncAttempts, - }, - StateSyncAttempts1R0, - }, - StateSyncAttempts1R1: { - symbols.NT_StateSyncAttempts, 1, 1, - symbols.Symbols{ - symbols.NT_StateSyncAttempt, - symbols.NT_StateSyncAttempts, - }, - StateSyncAttempts1R1, - }, - StateSyncAttempts1R2: { - symbols.NT_StateSyncAttempts, 1, 2, - symbols.Symbols{ - symbols.NT_StateSyncAttempt, - symbols.NT_StateSyncAttempts, - }, - StateSyncAttempts1R2, - }, - SuccessSync0R0: { - symbols.NT_SuccessSync, 0, 0, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - symbols.NT_ApplyChunks, - }, - SuccessSync0R0, - }, - SuccessSync0R1: { - symbols.NT_SuccessSync, 0, 1, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - symbols.NT_ApplyChunks, - }, - SuccessSync0R1, - }, - SuccessSync0R2: { - symbols.NT_SuccessSync, 0, 2, - symbols.Symbols{ - symbols.NT_OfferSnapshot, - symbols.NT_ApplyChunks, - }, - SuccessSync0R2, - }, -} - -var slotIndex = map[Index]Label { - Index{ symbols.NT_ApplyChunk,0,0 }: ApplyChunk0R0, - Index{ symbols.NT_ApplyChunk,0,1 }: ApplyChunk0R1, - Index{ symbols.NT_ApplyChunks,0,0 }: ApplyChunks0R0, - Index{ symbols.NT_ApplyChunks,0,1 }: ApplyChunks0R1, - Index{ symbols.NT_ApplyChunks,1,0 }: ApplyChunks1R0, - Index{ symbols.NT_ApplyChunks,1,1 }: ApplyChunks1R1, - Index{ symbols.NT_ApplyChunks,1,2 }: ApplyChunks1R2, - Index{ symbols.NT_CleanStart,0,0 }: CleanStart0R0, - Index{ symbols.NT_CleanStart,0,1 }: CleanStart0R1, - Index{ symbols.NT_CleanStart,0,2 }: CleanStart0R2, - Index{ symbols.NT_CleanStart,0,3 }: CleanStart0R3, - Index{ symbols.NT_CleanStart,1,0 }: CleanStart1R0, - Index{ symbols.NT_CleanStart,1,1 }: CleanStart1R1, - Index{ symbols.NT_CleanStart,1,2 }: CleanStart1R2, - Index{ symbols.NT_Commit,0,0 }: Commit0R0, - Index{ symbols.NT_Commit,0,1 }: Commit0R1, - Index{ symbols.NT_ConsensusExec,0,0 }: ConsensusExec0R0, - Index{ symbols.NT_ConsensusExec,0,1 }: ConsensusExec0R1, - Index{ symbols.NT_ConsensusHeight,0,0 }: ConsensusHeight0R0, - Index{ symbols.NT_ConsensusHeight,0,1 }: ConsensusHeight0R1, - Index{ symbols.NT_ConsensusHeight,0,2 }: ConsensusHeight0R2, - Index{ symbols.NT_ConsensusHeight,0,3 }: ConsensusHeight0R3, - Index{ symbols.NT_ConsensusHeight,1,0 }: ConsensusHeight1R0, - Index{ symbols.NT_ConsensusHeight,1,1 }: ConsensusHeight1R1, - Index{ symbols.NT_ConsensusHeight,1,2 }: ConsensusHeight1R2, - Index{ symbols.NT_ConsensusHeights,0,0 }: ConsensusHeights0R0, - Index{ symbols.NT_ConsensusHeights,0,1 }: ConsensusHeights0R1, - Index{ symbols.NT_ConsensusHeights,1,0 }: ConsensusHeights1R0, - Index{ symbols.NT_ConsensusHeights,1,1 }: ConsensusHeights1R1, - Index{ symbols.NT_ConsensusHeights,1,2 }: ConsensusHeights1R2, - Index{ symbols.NT_ConsensusRound,0,0 }: ConsensusRound0R0, - Index{ symbols.NT_ConsensusRound,0,1 }: ConsensusRound0R1, - Index{ symbols.NT_ConsensusRound,1,0 }: ConsensusRound1R0, - Index{ symbols.NT_ConsensusRound,1,1 }: 
ConsensusRound1R1, - Index{ symbols.NT_ConsensusRounds,0,0 }: ConsensusRounds0R0, - Index{ symbols.NT_ConsensusRounds,0,1 }: ConsensusRounds0R1, - Index{ symbols.NT_ConsensusRounds,1,0 }: ConsensusRounds1R0, - Index{ symbols.NT_ConsensusRounds,1,1 }: ConsensusRounds1R1, - Index{ symbols.NT_ConsensusRounds,1,2 }: ConsensusRounds1R2, - Index{ symbols.NT_FinalizeBlock,0,0 }: FinalizeBlock0R0, - Index{ symbols.NT_FinalizeBlock,0,1 }: FinalizeBlock0R1, - Index{ symbols.NT_InitChain,0,0 }: InitChain0R0, - Index{ symbols.NT_InitChain,0,1 }: InitChain0R1, - Index{ symbols.NT_NonProposer,0,0 }: NonProposer0R0, - Index{ symbols.NT_NonProposer,0,1 }: NonProposer0R1, - Index{ symbols.NT_OfferSnapshot,0,0 }: OfferSnapshot0R0, - Index{ symbols.NT_OfferSnapshot,0,1 }: OfferSnapshot0R1, - Index{ symbols.NT_PrepareProposal,0,0 }: PrepareProposal0R0, - Index{ symbols.NT_PrepareProposal,0,1 }: PrepareProposal0R1, - Index{ symbols.NT_ProcessProposal,0,0 }: ProcessProposal0R0, - Index{ symbols.NT_ProcessProposal,0,1 }: ProcessProposal0R1, - Index{ symbols.NT_Proposer,0,0 }: Proposer0R0, - Index{ symbols.NT_Proposer,0,1 }: Proposer0R1, - Index{ symbols.NT_Proposer,1,0 }: Proposer1R0, - Index{ symbols.NT_Proposer,1,1 }: Proposer1R1, - Index{ symbols.NT_Proposer,1,2 }: Proposer1R2, - Index{ symbols.NT_Start,0,0 }: Start0R0, - Index{ symbols.NT_Start,0,1 }: Start0R1, - Index{ symbols.NT_StateSync,0,0 }: StateSync0R0, - Index{ symbols.NT_StateSync,0,1 }: StateSync0R1, - Index{ symbols.NT_StateSync,0,2 }: StateSync0R2, - Index{ symbols.NT_StateSync,1,0 }: StateSync1R0, - Index{ symbols.NT_StateSync,1,1 }: StateSync1R1, - Index{ symbols.NT_StateSyncAttempt,0,0 }: StateSyncAttempt0R0, - Index{ symbols.NT_StateSyncAttempt,0,1 }: StateSyncAttempt0R1, - Index{ symbols.NT_StateSyncAttempt,0,2 }: StateSyncAttempt0R2, - Index{ symbols.NT_StateSyncAttempt,1,0 }: StateSyncAttempt1R0, - Index{ symbols.NT_StateSyncAttempt,1,1 }: StateSyncAttempt1R1, - Index{ symbols.NT_StateSyncAttempts,0,0 }: StateSyncAttempts0R0, - Index{ symbols.NT_StateSyncAttempts,0,1 }: StateSyncAttempts0R1, - Index{ symbols.NT_StateSyncAttempts,1,0 }: StateSyncAttempts1R0, - Index{ symbols.NT_StateSyncAttempts,1,1 }: StateSyncAttempts1R1, - Index{ symbols.NT_StateSyncAttempts,1,2 }: StateSyncAttempts1R2, - Index{ symbols.NT_SuccessSync,0,0 }: SuccessSync0R0, - Index{ symbols.NT_SuccessSync,0,1 }: SuccessSync0R1, - Index{ symbols.NT_SuccessSync,0,2 }: SuccessSync0R2, -} - -var alternates = map[symbols.NT][]Label{ - symbols.NT_Start:[]Label{ Start0R0 }, - symbols.NT_CleanStart:[]Label{ CleanStart0R0,CleanStart1R0 }, - symbols.NT_StateSync:[]Label{ StateSync0R0,StateSync1R0 }, - symbols.NT_StateSyncAttempts:[]Label{ StateSyncAttempts0R0,StateSyncAttempts1R0 }, - symbols.NT_StateSyncAttempt:[]Label{ StateSyncAttempt0R0,StateSyncAttempt1R0 }, - symbols.NT_SuccessSync:[]Label{ SuccessSync0R0 }, - symbols.NT_ApplyChunks:[]Label{ ApplyChunks0R0,ApplyChunks1R0 }, - symbols.NT_ConsensusExec:[]Label{ ConsensusExec0R0 }, - symbols.NT_ConsensusHeights:[]Label{ ConsensusHeights0R0,ConsensusHeights1R0 }, - symbols.NT_ConsensusHeight:[]Label{ ConsensusHeight0R0,ConsensusHeight1R0 }, - symbols.NT_ConsensusRounds:[]Label{ ConsensusRounds0R0,ConsensusRounds1R0 }, - symbols.NT_ConsensusRound:[]Label{ ConsensusRound0R0,ConsensusRound1R0 }, - symbols.NT_Proposer:[]Label{ Proposer0R0,Proposer1R0 }, - symbols.NT_NonProposer:[]Label{ NonProposer0R0 }, - symbols.NT_InitChain:[]Label{ InitChain0R0 }, - symbols.NT_FinalizeBlock:[]Label{ FinalizeBlock0R0 }, - 
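For orientation, the `alternates` table (read together with the `slots` table above) encodes the following grammar. This listing is reconstructed from the generated tables, with `|` separating alternates:

```go
// Clean-start grammar implied by the slot/alternates tables (reconstruction):
//
//	Start             : CleanStart
//	CleanStart        : InitChain StateSync ConsensusExec | InitChain ConsensusExec
//	StateSync         : StateSyncAttempts SuccessSync | SuccessSync
//	StateSyncAttempts : StateSyncAttempt | StateSyncAttempt StateSyncAttempts
//	StateSyncAttempt  : OfferSnapshot ApplyChunks | OfferSnapshot
//	SuccessSync       : OfferSnapshot ApplyChunks
//	ApplyChunks       : ApplyChunk | ApplyChunk ApplyChunks
//	ConsensusExec     : ConsensusHeights
//	ConsensusHeights  : ConsensusHeight | ConsensusHeight ConsensusHeights
//	ConsensusHeight   : ConsensusRounds FinalizeBlock Commit | FinalizeBlock Commit
//	ConsensusRounds   : ConsensusRound | ConsensusRound ConsensusRounds
//	ConsensusRound    : Proposer | NonProposer
//	Proposer          : PrepareProposal | PrepareProposal ProcessProposal
//	NonProposer       : ProcessProposal
//	InitChain         : init_chain
//	FinalizeBlock     : finalize_block
//	Commit            : commit
//	OfferSnapshot     : offer_snapshot
//	ApplyChunk        : apply_snapshot_chunk
//	PrepareProposal   : prepare_proposal
//	ProcessProposal   : process_proposal
```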
symbols.NT_Commit:[]Label{ Commit0R0 }, - symbols.NT_OfferSnapshot:[]Label{ OfferSnapshot0R0 }, - symbols.NT_ApplyChunk:[]Label{ ApplyChunk0R0 }, - symbols.NT_PrepareProposal:[]Label{ PrepareProposal0R0 }, - symbols.NT_ProcessProposal:[]Label{ ProcessProposal0R0 }, -} - diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols/symbols.go b/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols/symbols.go deleted file mode 100644 index 03d9f16d448..00000000000 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols/symbols.go +++ /dev/null @@ -1,177 +0,0 @@ - -// Package symbols is generated by gogll. Do not edit. -package symbols - -import( - "bytes" - "fmt" -) - -type Symbol interface{ - isSymbol() - IsNonTerminal() bool - String() string -} - -func (NT) isSymbol() {} -func (T) isSymbol() {} - -// NT is the type of non-terminals symbols -type NT int -const( - NT_ApplyChunk NT = iota - NT_ApplyChunks - NT_CleanStart - NT_Commit - NT_ConsensusExec - NT_ConsensusHeight - NT_ConsensusHeights - NT_ConsensusRound - NT_ConsensusRounds - NT_FinalizeBlock - NT_InitChain - NT_NonProposer - NT_OfferSnapshot - NT_PrepareProposal - NT_ProcessProposal - NT_Proposer - NT_Start - NT_StateSync - NT_StateSyncAttempt - NT_StateSyncAttempts - NT_SuccessSync -) - -// T is the type of terminals symbols -type T int -const( - T_0 T = iota // apply_snapshot_chunk - T_1 // commit - T_2 // finalize_block - T_3 // init_chain - T_4 // offer_snapshot - T_5 // prepare_proposal - T_6 // process_proposal -) - -type Symbols []Symbol - -func (ss Symbols) Equal(ss1 Symbols) bool { - if len(ss) != len(ss1) { - return false - } - for i, s := range ss { - if s.String() != ss1[i].String() { - return false - } - } - return true -} - -func (ss Symbols) String() string { - w := new(bytes.Buffer) - for i, s := range ss { - if i > 0 { - fmt.Fprint(w, " ") - } - fmt.Fprintf(w, "%s", s) - } - return w.String() -} - -func (ss Symbols) Strings() []string { - strs := make([]string, len(ss)) - for i, s := range ss { - strs[i] = s.String() - } - return strs -} - -func (NT) IsNonTerminal() bool { - return true -} - -func (T) IsNonTerminal() bool { - return false -} - -func (nt NT) String() string { - return ntToString[nt] -} - -func (t T) String() string { - return tToString[t] -} - -// IsNT returns true iff sym is a non-terminal symbol of the grammar -func IsNT(sym string) bool { - _, exist := stringNT[sym] - return exist -} - -// ToNT returns the NT value of sym or panics if sym is not a non-terminal of the grammar -func ToNT(sym string) NT { - nt, exist := stringNT[sym] - if !exist { - panic(fmt.Sprintf("No NT: %s", sym)) - } - return nt -} - -var ntToString = []string { - "ApplyChunk", /* NT_ApplyChunk */ - "ApplyChunks", /* NT_ApplyChunks */ - "CleanStart", /* NT_CleanStart */ - "Commit", /* NT_Commit */ - "ConsensusExec", /* NT_ConsensusExec */ - "ConsensusHeight", /* NT_ConsensusHeight */ - "ConsensusHeights", /* NT_ConsensusHeights */ - "ConsensusRound", /* NT_ConsensusRound */ - "ConsensusRounds", /* NT_ConsensusRounds */ - "FinalizeBlock", /* NT_FinalizeBlock */ - "InitChain", /* NT_InitChain */ - "NonProposer", /* NT_NonProposer */ - "OfferSnapshot", /* NT_OfferSnapshot */ - "PrepareProposal", /* NT_PrepareProposal */ - "ProcessProposal", /* NT_ProcessProposal */ - "Proposer", /* NT_Proposer */ - "Start", /* NT_Start */ - "StateSync", /* NT_StateSync */ - "StateSyncAttempt", /* NT_StateSyncAttempt */ - "StateSyncAttempts", /* NT_StateSyncAttempts */ - "SuccessSync", /* NT_SuccessSync */ -} - -var 
tToString = []string { - "apply_snapshot_chunk", /* T_0 */ - "commit", /* T_1 */ - "finalize_block", /* T_2 */ - "init_chain", /* T_3 */ - "offer_snapshot", /* T_4 */ - "prepare_proposal", /* T_5 */ - "process_proposal", /* T_6 */ -} - -var stringNT = map[string]NT{ - "ApplyChunk":NT_ApplyChunk, - "ApplyChunks":NT_ApplyChunks, - "CleanStart":NT_CleanStart, - "Commit":NT_Commit, - "ConsensusExec":NT_ConsensusExec, - "ConsensusHeight":NT_ConsensusHeight, - "ConsensusHeights":NT_ConsensusHeights, - "ConsensusRound":NT_ConsensusRound, - "ConsensusRounds":NT_ConsensusRounds, - "FinalizeBlock":NT_FinalizeBlock, - "InitChain":NT_InitChain, - "NonProposer":NT_NonProposer, - "OfferSnapshot":NT_OfferSnapshot, - "PrepareProposal":NT_PrepareProposal, - "ProcessProposal":NT_ProcessProposal, - "Proposer":NT_Proposer, - "Start":NT_Start, - "StateSync":NT_StateSync, - "StateSyncAttempt":NT_StateSyncAttempt, - "StateSyncAttempts":NT_StateSyncAttempts, - "SuccessSync":NT_SuccessSync, -} diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/sppf/sppf.go b/test/e2e/pkg/grammar/clean-start/grammar-auto/sppf/sppf.go deleted file mode 100644 index dd08257dc37..00000000000 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/sppf/sppf.go +++ /dev/null @@ -1,208 +0,0 @@ -// Package sppf is generated by gogll. Do not edit. - -/* -Package sppf implements a Shared Packed Parse Forest as defined in: - - Elizabeth Scott, Adrian Johnstone - GLL parse-tree generation - Science of Computer Programming (2012), doi:10.1016/j.scico.2012.03.005 -*/ -package sppf - -import ( - "fmt" - "bytes" - "github.com/goccmack/goutil/ioutil" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/parser/symbols" -) - -type Node interface { - isNode() - dot(*dotBuilder) - Label() string - String() string -} - -type IntermediateNode struct { - NT symbols.NT - Body symbols.Symbols - Pos int - Lext, Rext int - Children []*PackedNode -} - -type SymbolNode struct { - Symbol string - Lext, Rext int - Children []*PackedNode -} - -type PackedNode struct { - NT symbols.NT - Body symbols.Symbols - Pos int - Lext, Pivot, Rext int - - LeftChild Node // Either an intermediate or Symbol node - RightChild *SymbolNode -} - -func (*IntermediateNode) isNode() {} -func (*SymbolNode) isNode() {} -func (*PackedNode) isNode() {} - -func slotString(nt symbols.NT, body symbols.Symbols, pos int) string { - w := new(bytes.Buffer) - fmt.Fprintf(w, "%s:", nt) - for i, sym := range body { - fmt.Fprint(w, " ") - if i == pos { - fmt.Fprint(w, "•") - } - fmt.Fprint(w, sym) - } - if len(body) == pos { - fmt.Fprint(w, "•") - } - return w.String() -} - -func (n *IntermediateNode) Label() string { - return fmt.Sprintf("\"%s:,%d,%d\"", slotString(n.NT, n.Body, n.Pos), n.Lext, n.Rext) -} - -func (n *SymbolNode) Label() string { - return fmt.Sprintf("\"%s,%d,%d\"", n.Symbol, n.Lext, n.Rext) -} - -func (n *PackedNode) Label() string { - return fmt.Sprintf("\"%s,%d,%d,%d\"", slotString(n.NT, n.Body, n.Pos), n.Lext, n.Pivot, n.Rext) -} - -func (n *IntermediateNode) String() string { - return "IN: " + n.Label() -} - -func (n *SymbolNode) String() string { - return "SN: " + n.Label() -} - -func (n *PackedNode) String() string { - return "PN: " + n.Label() -} - -//---- Dot ---- - -type dotBuilder struct { - nodes map[string]bool // index = node.Label() - w *bytes.Buffer -} - -func (bld *dotBuilder) add(n Node) { - // fmt.Printf("dotBuilder.add: %s\n", n.Label()) - if bld.done(n) { - panic(fmt.Sprintf("duplicate %s", n.Label())) - } - // fmt.Println(" 
Before:") - // bld.dumpNodes() - - bld.nodes[n.Label()] = true - - // fmt.Println(" After:") - // bld.dumpNodes() - // fmt.Println() -} - -func (bld *dotBuilder) done(n Node) bool { - return bld.nodes[n.Label()] -} - -func (bld *dotBuilder) dumpNodes() { - for n, t := range bld.nodes { - fmt.Printf(" %s = %t\n", n, t) - } -} - -// DotFile writes a graph representation of the SPPF in dot notation to file -func (root *SymbolNode) DotFile(file string) { - bld := &dotBuilder{ - nodes: make(map[string]bool), - w: new(bytes.Buffer), - } - fmt.Fprintln(bld.w, "digraph SPPF {") - root.dot(bld) - fmt.Fprintln(bld.w, "}") - ioutil.WriteFile(file, bld.w.Bytes()) -} - -func (n *IntermediateNode) dot(bld *dotBuilder) { - // fmt.Println("in.dot", n.Label()) - - if bld.done(n) { - return - } - bld.add(n) - - fmt.Fprintf(bld.w, "%s [shape=box]\n", n.Label()) - - for _, c := range n.Children { - fmt.Fprintf(bld.w, "%s -> %s\n", n.Label(), c.Label()) - if !bld.done(c) { - c.dot(bld) - } - } -} - -func (n *PackedNode) dot(bld *dotBuilder) { - // fmt.Println("pn.dot", n.Label(), "exist", bld.nodes[n.Label()]) - - if bld.done(n) { - return - } - bld.add(n) - - fmt.Fprintf(bld.w, "%s [shape=box,style=rounded,penwidth=3]\n", n.Label()) - if n.LeftChild != nil { - if !bld.done(n.LeftChild) { - n.LeftChild.dot(bld) - } - fmt.Fprintf(bld.w, "%s -> %s\n", n.Label(), n.LeftChild.Label()) - } - if n.RightChild != nil { - if !bld.done(n.RightChild) { - n.RightChild.dot(bld) - } - fmt.Fprintf(bld.w, "%s -> %s\n", n.Label(), n.RightChild.Label()) - } - if n.LeftChild != nil && n.RightChild != nil { - fmt.Fprintf(bld.w, "%s,%s\n", n.LeftChild.Label(), n.RightChild.Label()) - } -} - -func (n *SymbolNode) dot(bld *dotBuilder) { - // fmt.Println("sn.dot", n.Label(), "done=", bld.done(n)) - - if bld.done(n) { - return - } - bld.add(n) - - fmt.Fprintln(bld.w, n.Label()) - for _, pn := range n.Children { - // fmt.Printf(" child: %s\n", pn.Label()) - fmt.Fprintf(bld.w, "%s -> %s\n", n.Label(), pn.Label()) - if !bld.done(pn) { - pn.dot(bld) - } - } - for i, pn := range n.Children { - if i > 0 { - fmt.Fprint(bld.w, ";") - } - fmt.Fprintf(bld.w, "%s", pn.Label()) - } - fmt.Fprintln(bld.w) - -} - diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/token/token.go b/test/e2e/pkg/grammar/clean-start/grammar-auto/token/token.go deleted file mode 100644 index 5a7c8e97a99..00000000000 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/token/token.go +++ /dev/null @@ -1,206 +0,0 @@ - -// Package token is generated by GoGLL. Do not edit -package token - -import( - "fmt" -) - -// Token is returned by the lexer for every scanned lexical token -type Token struct { - typ Type - lext, rext int - input []rune -} - -/* -New returns a new token. -lext is the left extent and rext the right extent of the token in the input. -input is the input slice scanned by the lexer. -*/ -func New(t Type, lext, rext int, input []rune) *Token { - return &Token{ - typ: t, - lext: lext, - rext: rext, - input: input, - } -} - -// GetLineColumn returns the line and column of the left extent of t -func (t *Token) GetLineColumn() (line, col int) { - line, col = 1, 1 - for j := 0; j < t.lext; j++ { - switch t.input[j] { - case '\n': - line++ - col = 1 - case '\t': - col += 4 - default: - col++ - } - } - return -} - -// GetInput returns the input from which t was parsed. 
-func (t *Token) GetInput() []rune { - return t.input -} - -// Lext returns the left extent of t in the input stream of runes -func (t *Token) Lext() int { - return t.lext -} - -// Literal returns the literal runes of t scanned by the lexer -func (t *Token) Literal() []rune { - return t.input[t.lext:t.rext] -} - -// LiteralString returns string(t.Literal()) -func (t *Token) LiteralString() string { - return string(t.Literal()) -} - -// LiteralStripEscape returns the literal runes of t scanned by the lexer -func (t *Token) LiteralStripEscape() []rune { - lit := t.Literal() - strip := make([]rune, 0, len(lit)) - for i := 0; i < len(lit); i++ { - if lit[i] == '\\' { - i++ - switch lit[i] { - case 't': - strip = append(strip, '\t') - case 'r': - strip = append(strip, '\r') - case 'n': - strip = append(strip, '\r') - default: - strip = append(strip, lit[i]) - } - } else { - strip = append(strip, lit[i]) - } - } - return strip -} - -// LiteralStringStripEscape returns string(t.LiteralStripEscape()) -func (t *Token) LiteralStringStripEscape() string { - return string(t.LiteralStripEscape()) -} - -// Rext returns the right extent of t in the input stream of runes -func (t *Token) Rext() int { - return t.rext -} - -func (t *Token) String() string { - return fmt.Sprintf("%s (%d,%d) %s", - t.TypeID(), t.lext, t.rext, t.LiteralString()) -} - -// Suppress returns true iff t is suppressed by the lexer -func (t *Token) Suppress() bool { - return Suppress[t.typ] -} - -// Type returns the token Type of t -func (t *Token) Type() Type { - return t.typ -} - -// TypeID returns the token Type ID of t. -// This may be different from the literal of token t. -func (t *Token) TypeID() string { - return t.Type().ID() -} - -// Type is the token type -type Type int - -func (t Type) String() string { - return TypeToString[t] -} - -// ID returns the token type ID of token Type t -func (t Type) ID() string { - return TypeToID[t] -} - - -const( - Error Type = iota // Error - EOF // $ - T_0 // apply_snapshot_chunk - T_1 // commit - T_2 // finalize_block - T_3 // init_chain - T_4 // offer_snapshot - T_5 // prepare_proposal - T_6 // process_proposal -) - -var TypeToString = []string{ - "Error", - "EOF", - "T_0", - "T_1", - "T_2", - "T_3", - "T_4", - "T_5", - "T_6", -} - -var StringToType = map[string] Type { - "Error" : Error, - "EOF" : EOF, - "T_0" : T_0, - "T_1" : T_1, - "T_2" : T_2, - "T_3" : T_3, - "T_4" : T_4, - "T_5" : T_5, - "T_6" : T_6, -} - -var TypeToID = []string { - "Error", - "$", - "apply_snapshot_chunk", - "commit", - "finalize_block", - "init_chain", - "offer_snapshot", - "prepare_proposal", - "process_proposal", -} - -var IDToType = map[string]Type { - "Error": 0, - "$": 1, - "apply_snapshot_chunk": 2, - "commit": 3, - "finalize_block": 4, - "init_chain": 5, - "offer_snapshot": 6, - "prepare_proposal": 7, - "process_proposal": 8, -} - -var Suppress = []bool { - false, - false, - false, - false, - false, - false, - false, - false, - false, -} - diff --git a/test/e2e/pkg/grammar/clean-start/grammar-auto/lexer/lexer.go b/test/e2e/pkg/grammar/grammar-auto/lexer/lexer.go similarity index 71% rename from test/e2e/pkg/grammar/clean-start/grammar-auto/lexer/lexer.go rename to test/e2e/pkg/grammar/grammar-auto/lexer/lexer.go index 8209831da6d..8015f118165 100644 --- a/test/e2e/pkg/grammar/clean-start/grammar-auto/lexer/lexer.go +++ b/test/e2e/pkg/grammar/grammar-auto/lexer/lexer.go @@ -1,4 +1,3 @@ - // Package lexer is generated by GoGLL. Do not edit. 
package lexer @@ -8,7 +7,7 @@ import ( "strings" "unicode" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/clean-start/grammar-auto/token" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/token" ) type state int @@ -109,14 +108,14 @@ var ( // parsed from the input type Lexer struct { // I is the input slice of runes - I []rune + I []rune // Tokens is the slice of tokens constructed by the lexer from I Tokens []*token.Token } /* -NewFile constructs a Lexer created from the input file, fname. +NewFile constructs a Lexer created from the input file, fname. If the input file is a markdown file NewFile process treats all text outside code blocks as whitespace. All text inside code blocks are treated as input text. @@ -161,7 +160,7 @@ func loadMd(input []rune) { } /* -New constructs a Lexer from a slice of runes. +New constructs a Lexer from a slice of runes. All contents of the input slice are treated as input text. */ @@ -281,861 +280,1149 @@ func not(r rune, set []rune) bool { return true } -var accept = []token.Type{ - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_1, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_3, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_2, - token.T_4, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_5, - token.T_6, - token.Error, - token.Error, - token.Error, - token.T_0, +var accept = []token.Type{ + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.T_1, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, 
+ token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.T_4, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.T_2, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.T_3, + token.T_5, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.T_6, + token.T_7, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.Error, + token.T_0, + token.Error, + token.T_8, } -var nextState = []func(r rune) state{ +var nextState = []func(r rune) state{ // Set0 func(r rune) state { - switch { + switch { case r == 'a': - return 1 + return 1 case r == 'c': - return 2 + return 2 + case r == 'e': + return 3 case r == 'f': - return 3 + return 4 case r == 'i': - return 4 + return 5 case r == 'o': - return 5 + return 6 case r == 'p': - return 6 + return 7 + case r == 'v': + return 8 } return nullState - }, + }, // Set1 func(r rune) state { - switch { + switch { case r == 'p': - return 7 + return 9 } return nullState - }, + }, // Set2 func(r rune) state { - switch { + switch { case r == 'o': - return 8 + return 10 } return nullState - }, + }, // Set3 func(r rune) state { - switch { - case r == 'i': - return 9 + switch { + case r == 'x': + return 11 } return nullState - }, + }, // Set4 func(r rune) state { - switch { - case r == 'n': - return 10 + switch { + case r == 'i': + return 12 } return nullState - }, + }, // Set5 func(r rune) state { - switch { - case r == 'f': - return 11 + switch { + case r == 'n': + return 13 } return nullState - }, + }, // Set6 func(r rune) state { - switch { - case r == 'r': - return 12 + switch { + case r == 'f': + return 14 } return nullState - }, + }, // Set7 func(r rune) state { - switch { - case r == 'p': - return 13 + switch { + case r == 'r': + return 15 } return nullState - }, + }, // Set8 func(r rune) state { - switch { - case r == 'm': - return 14 + switch { + case r == 'e': + return 16 } return nullState - }, + }, // Set9 func(r rune) state { - switch { - case r == 'n': - return 15 + switch { + case r == 'p': + return 17 } return nullState - }, + }, // Set10 func(r rune) state { - switch { - case r == 'i': - return 16 + switch { + case r == 'm': + return 18 } return nullState - }, + }, // Set11 func(r rune) state { - switch { - case r == 'f': - return 17 + switch { + case r == 't': + return 19 } return nullState - }, + }, // Set12 func(r rune) state { - switch { - case r == 'e': - return 18 - case r == 'o': - return 19 + switch { + case r == 'n': + return 20 } return nullState - }, + }, // Set13 func(r rune) state { - switch { - case r == 'l': - return 20 + switch { + case r == 'i': + return 21 } return nullState - }, + }, // Set14 func(r rune) state { - switch { - case r == 'm': - return 21 + switch { + case r == 'f': + return 22 } return nullState - }, + }, // Set15 func(r rune) state { - switch { - case r == 'a': - return 22 + switch { + case r == 'e': + return 23 + case r == 'o': + return 24 } return nullState - }, + }, // Set16 func(r rune) state { - switch { - case r == 't': - return 23 + switch { + case r == 'r': + return 25 } return nullState - }, + }, // Set17 func(r rune) state { - switch { - case r == 'e': - 
return 24 + switch { + case r == 'l': + return 26 } return nullState - }, + }, // Set18 func(r rune) state { - switch { - case r == 'p': - return 25 + switch { + case r == 'm': + return 27 } return nullState - }, + }, // Set19 func(r rune) state { - switch { - case r == 'c': - return 26 + switch { + case r == 'e': + return 28 } return nullState - }, + }, // Set20 func(r rune) state { - switch { - case r == 'y': - return 27 + switch { + case r == 'a': + return 29 } return nullState - }, + }, // Set21 func(r rune) state { - switch { - case r == 'i': - return 28 + switch { + case r == 't': + return 30 } return nullState - }, + }, // Set22 func(r rune) state { - switch { - case r == 'l': - return 29 + switch { + case r == 'e': + return 31 } return nullState - }, + }, // Set23 func(r rune) state { - switch { - case r == '_': - return 30 + switch { + case r == 'p': + return 32 } return nullState - }, + }, // Set24 func(r rune) state { - switch { - case r == 'r': - return 31 + switch { + case r == 'c': + return 33 } return nullState - }, + }, // Set25 func(r rune) state { - switch { - case r == 'a': - return 32 + switch { + case r == 'i': + return 34 } return nullState - }, + }, // Set26 func(r rune) state { - switch { - case r == 'e': - return 33 + switch { + case r == 'y': + return 35 } return nullState - }, + }, // Set27 func(r rune) state { - switch { - case r == '_': - return 34 + switch { + case r == 'i': + return 36 } return nullState - }, + }, // Set28 func(r rune) state { - switch { - case r == 't': - return 35 + switch { + case r == 'n': + return 37 } return nullState - }, + }, // Set29 func(r rune) state { - switch { - case r == 'i': - return 36 + switch { + case r == 'l': + return 38 } return nullState - }, + }, // Set30 func(r rune) state { - switch { - case r == 'c': - return 37 + switch { + case r == '_': + return 39 } return nullState - }, + }, // Set31 func(r rune) state { - switch { - case r == '_': - return 38 + switch { + case r == 'r': + return 40 } return nullState - }, + }, // Set32 func(r rune) state { - switch { - case r == 'r': - return 39 + switch { + case r == 'a': + return 41 } return nullState - }, + }, // Set33 func(r rune) state { - switch { - case r == 's': - return 40 + switch { + case r == 'e': + return 42 } return nullState - }, + }, // Set34 func(r rune) state { - switch { - case r == 's': - return 41 + switch { + case r == 'f': + return 43 } return nullState - }, + }, // Set35 func(r rune) state { - switch { + switch { + case r == '_': + return 44 } return nullState - }, + }, // Set36 func(r rune) state { - switch { - case r == 'z': - return 42 + switch { + case r == 't': + return 45 } return nullState - }, + }, // Set37 func(r rune) state { - switch { - case r == 'h': - return 43 + switch { + case r == 'd': + return 46 } return nullState - }, + }, // Set38 func(r rune) state { - switch { - case r == 's': - return 44 + switch { + case r == 'i': + return 47 } return nullState - }, + }, // Set39 func(r rune) state { - switch { - case r == 'e': - return 45 + switch { + case r == 'c': + return 48 } return nullState - }, + }, // Set40 func(r rune) state { - switch { - case r == 's': - return 46 + switch { + case r == '_': + return 49 } return nullState - }, + }, // Set41 func(r rune) state { - switch { - case r == 'n': - return 47 + switch { + case r == 'r': + return 50 } return nullState - }, + }, // Set42 func(r rune) state { - switch { - case r == 'e': - return 48 + switch { + case r == 's': + return 51 } return nullState - }, + }, // Set43 func(r rune) state { 
- switch { - case r == 'a': - return 49 + switch { + case r == 'y': + return 52 } return nullState - }, + }, // Set44 func(r rune) state { - switch { - case r == 'n': - return 50 + switch { + case r == 's': + return 53 } return nullState - }, + }, // Set45 func(r rune) state { - switch { - case r == '_': - return 51 + switch { } return nullState - }, + }, // Set46 func(r rune) state { - switch { + switch { case r == '_': - return 52 + return 54 } return nullState - }, + }, // Set47 func(r rune) state { - switch { - case r == 'a': - return 53 + switch { + case r == 'z': + return 55 } return nullState - }, + }, // Set48 func(r rune) state { - switch { - case r == '_': - return 54 + switch { + case r == 'h': + return 56 } return nullState - }, + }, // Set49 func(r rune) state { - switch { - case r == 'i': - return 55 + switch { + case r == 's': + return 57 } return nullState - }, + }, // Set50 func(r rune) state { - switch { - case r == 'a': - return 56 + switch { + case r == 'e': + return 58 } return nullState - }, + }, // Set51 func(r rune) state { - switch { - case r == 'p': - return 57 + switch { + case r == 's': + return 59 } return nullState - }, + }, // Set52 func(r rune) state { - switch { - case r == 'p': - return 58 + switch { + case r == '_': + return 60 } return nullState - }, + }, // Set53 func(r rune) state { - switch { - case r == 'p': - return 59 + switch { + case r == 'n': + return 61 } return nullState - }, + }, // Set54 func(r rune) state { - switch { - case r == 'b': - return 60 + switch { + case r == 'v': + return 62 } return nullState - }, + }, // Set55 func(r rune) state { - switch { - case r == 'n': - return 61 + switch { + case r == 'e': + return 63 } return nullState - }, + }, // Set56 func(r rune) state { - switch { - case r == 'p': - return 62 + switch { + case r == 'a': + return 64 } return nullState - }, + }, // Set57 func(r rune) state { - switch { - case r == 'r': - return 63 + switch { + case r == 'n': + return 65 } return nullState - }, + }, // Set58 func(r rune) state { - switch { - case r == 'r': - return 64 + switch { + case r == '_': + return 66 } return nullState - }, + }, // Set59 func(r rune) state { - switch { - case r == 's': - return 65 + switch { + case r == '_': + return 67 } return nullState - }, + }, // Set60 func(r rune) state { - switch { - case r == 'l': - return 66 + switch { + case r == 'v': + return 68 } return nullState - }, + }, // Set61 func(r rune) state { - switch { + switch { + case r == 'a': + return 69 } return nullState - }, + }, // Set62 func(r rune) state { - switch { - case r == 's': - return 67 + switch { + case r == 'o': + return 70 } return nullState - }, + }, // Set63 func(r rune) state { - switch { - case r == 'o': - return 68 + switch { + case r == '_': + return 71 } return nullState - }, + }, // Set64 func(r rune) state { - switch { - case r == 'o': - return 69 + switch { + case r == 'i': + return 72 } return nullState - }, + }, // Set65 func(r rune) state { - switch { - case r == 'h': - return 70 + switch { + case r == 'a': + return 73 } return nullState - }, + }, // Set66 func(r rune) state { - switch { - case r == 'o': - return 71 + switch { + case r == 'p': + return 74 } return nullState - }, + }, // Set67 func(r rune) state { - switch { - case r == 'h': - return 72 + switch { + case r == 'p': + return 75 } return nullState - }, + }, // Set68 func(r rune) state { - switch { - case r == 'p': - return 73 + switch { + case r == 'o': + return 76 } return nullState - }, + }, // Set69 func(r rune) state { - switch { + 
switch { case r == 'p': - return 74 + return 77 } return nullState - }, + }, // Set70 func(r rune) state { - switch { - case r == 'o': - return 75 + switch { + case r == 't': + return 78 } return nullState - }, + }, // Set71 func(r rune) state { - switch { - case r == 'c': - return 76 + switch { + case r == 'b': + return 79 } return nullState - }, + }, // Set72 func(r rune) state { - switch { - case r == 'o': - return 77 + switch { + case r == 'n': + return 80 } return nullState - }, + }, // Set73 func(r rune) state { - switch { - case r == 'o': - return 78 + switch { + case r == 'p': + return 81 } return nullState - }, + }, // Set74 func(r rune) state { - switch { - case r == 'o': - return 79 + switch { + case r == 'r': + return 82 } return nullState - }, + }, // Set75 func(r rune) state { - switch { - case r == 't': - return 80 + switch { + case r == 'r': + return 83 } return nullState - }, + }, // Set76 func(r rune) state { - switch { - case r == 'k': - return 81 + switch { + case r == 't': + return 84 } return nullState - }, + }, // Set77 func(r rune) state { - switch { - case r == 't': - return 82 + switch { + case r == 's': + return 85 } return nullState - }, + }, // Set78 func(r rune) state { - switch { - case r == 's': - return 83 + switch { + case r == 'e': + return 86 } return nullState - }, + }, // Set79 func(r rune) state { - switch { - case r == 's': - return 84 + switch { + case r == 'l': + return 87 } return nullState - }, + }, // Set80 func(r rune) state { - switch { - case r == '_': - return 85 + switch { } return nullState - }, + }, // Set81 func(r rune) state { - switch { + switch { + case r == 's': + return 88 } return nullState - }, + }, // Set82 func(r rune) state { - switch { + switch { + case r == 'o': + return 89 } return nullState - }, + }, // Set83 func(r rune) state { - switch { - case r == 'a': - return 86 + switch { + case r == 'o': + return 90 } return nullState - }, + }, // Set84 func(r rune) state { - switch { - case r == 'a': - return 87 + switch { + case r == 'e': + return 91 } return nullState - }, + }, // Set85 func(r rune) state { - switch { - case r == 'c': - return 88 + switch { + case r == 'h': + return 92 } return nullState - }, + }, // Set86 func(r rune) state { - switch { - case r == 'l': - return 89 + switch { } return nullState - }, + }, // Set87 func(r rune) state { - switch { - case r == 'l': - return 90 + switch { + case r == 'o': + return 93 } return nullState - }, + }, // Set88 func(r rune) state { - switch { + switch { case r == 'h': - return 91 + return 94 } return nullState - }, + }, // Set89 func(r rune) state { - switch { + switch { + case r == 'p': + return 95 } return nullState - }, + }, // Set90 func(r rune) state { - switch { + switch { + case r == 'p': + return 96 } return nullState - }, + }, // Set91 func(r rune) state { - switch { - case r == 'u': - return 92 + switch { + case r == '_': + return 97 } return nullState - }, + }, // Set92 func(r rune) state { - switch { - case r == 'n': - return 93 + switch { + case r == 'o': + return 98 } return nullState - }, + }, // Set93 func(r rune) state { - switch { - case r == 'k': - return 94 + switch { + case r == 'c': + return 99 } return nullState - }, + }, // Set94 func(r rune) state { - switch { + switch { + case r == 'o': + return 100 + } + return nullState + }, + // Set95 + func(r rune) state { + switch { + case r == 'o': + return 101 + } + return nullState + }, + // Set96 + func(r rune) state { + switch { + case r == 'o': + return 102 + } + return nullState + }, + // Set97 + 
func(r rune) state { + switch { + case r == 'e': + return 103 + } + return nullState + }, + // Set98 + func(r rune) state { + switch { + case r == 't': + return 104 + } + return nullState + }, + // Set99 + func(r rune) state { + switch { + case r == 'k': + return 105 + } + return nullState + }, + // Set100 + func(r rune) state { + switch { + case r == 't': + return 106 + } + return nullState + }, + // Set101 + func(r rune) state { + switch { + case r == 's': + return 107 + } + return nullState + }, + // Set102 + func(r rune) state { + switch { + case r == 's': + return 108 + } + return nullState + }, + // Set103 + func(r rune) state { + switch { + case r == 'x': + return 109 + } + return nullState + }, + // Set104 + func(r rune) state { + switch { + case r == '_': + return 110 + } + return nullState + }, + // Set105 + func(r rune) state { + switch { + } + return nullState + }, + // Set106 + func(r rune) state { + switch { + } + return nullState + }, + // Set107 + func(r rune) state { + switch { + case r == 'a': + return 111 + } + return nullState + }, + // Set108 + func(r rune) state { + switch { + case r == 'a': + return 112 + } + return nullState + }, + // Set109 + func(r rune) state { + switch { + case r == 't': + return 113 + } + return nullState + }, + // Set110 + func(r rune) state { + switch { + case r == 'c': + return 114 + } + return nullState + }, + // Set111 + func(r rune) state { + switch { + case r == 'l': + return 115 + } + return nullState + }, + // Set112 + func(r rune) state { + switch { + case r == 'l': + return 116 + } + return nullState + }, + // Set113 + func(r rune) state { + switch { + case r == 'e': + return 117 + } + return nullState + }, + // Set114 + func(r rune) state { + switch { + case r == 'h': + return 118 + } + return nullState + }, + // Set115 + func(r rune) state { + switch { + } + return nullState + }, + // Set116 + func(r rune) state { + switch { + } + return nullState + }, + // Set117 + func(r rune) state { + switch { + case r == 'n': + return 119 + } + return nullState + }, + // Set118 + func(r rune) state { + switch { + case r == 'u': + return 120 + } + return nullState + }, + // Set119 + func(r rune) state { + switch { + case r == 's': + return 121 + } + return nullState + }, + // Set120 + func(r rune) state { + switch { + case r == 'n': + return 122 + } + return nullState + }, + // Set121 + func(r rune) state { + switch { + case r == 'i': + return 123 + } + return nullState + }, + // Set122 + func(r rune) state { + switch { + case r == 'k': + return 124 + } + return nullState + }, + // Set123 + func(r rune) state { + switch { + case r == 'o': + return 125 + } + return nullState + }, + // Set124 + func(r rune) state { + switch { + } + return nullState + }, + // Set125 + func(r rune) state { + switch { + case r == 'n': + return 126 + } + return nullState + }, + // Set126 + func(r rune) state { + switch { } return nullState - }, + }, } diff --git a/test/e2e/pkg/grammar/grammar-auto/parser/bsr/bsr.go b/test/e2e/pkg/grammar/grammar-auto/parser/bsr/bsr.go new file mode 100644 index 00000000000..e262b41338c --- /dev/null +++ b/test/e2e/pkg/grammar/grammar-auto/parser/bsr/bsr.go @@ -0,0 +1,685 @@ +// Package bsr is generated by gogll. Do not edit. 
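// The accept and nextState tables above encode the lexer DFA: nextState[s](r)
// is the transition function, and accept[s] is the token type recognised if
// the scan stops in state s. A hypothetical maximal-munch driver (scanOne is
// not part of the generated file; it only illustrates how the tables are
// read):
func scanOne(input []rune, start int) (token.Type, int) {
	s := state(0)
	typ, end := token.Error, start
	for i := start; i < len(input); i++ {
		s = nextState[s](input[i])
		if s == nullState {
			break // no transition: stop at the longest match seen so far
		}
		if accept[s] != token.Error {
			typ, end = accept[s], i+1 // remember the longest accepting prefix
		}
	}
	return typ, end
}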
+ +/* +Package bsr implements a Binary Subtree Representation set as defined in + + Scott et al + Derivation representation using binary subtree sets, + Science of Computer Programming 175 (2019) +*/ +package bsr + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/lexer" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/slot" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/symbols" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/sppf" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/token" +) + +type bsr interface { + LeftExtent() int + RightExtent() int + Pivot() int +} + +/* +Set contains the set of Binary Subtree Representations (BSR). +*/ +type Set struct { + slotEntries map[BSR]bool + ntSlotEntries map[ntSlot][]BSR + stringEntries map[stringKey]*stringBSR + rightExtent int + lex *lexer.Lexer + + startSym symbols.NT +} + +type ntSlot struct { + nt symbols.NT + leftExtent int + rightExtent int +} + +// BSR is the binary subtree representation of a parsed nonterminal +type BSR struct { + Label slot.Label + leftExtent int + pivot int + rightExtent int + set *Set +} + +type BSRs []BSR + +type stringBSR struct { + Symbols symbols.Symbols + leftExtent int + pivot int + rightExtent int + set *Set +} + +type stringBSRs []*stringBSR + +type stringKey string + +// New returns a new initialised BSR Set +func New(startSymbol symbols.NT, l *lexer.Lexer) *Set { + return &Set{ + slotEntries: make(map[BSR]bool), + ntSlotEntries: make(map[ntSlot][]BSR), + stringEntries: make(map[stringKey]*stringBSR), + rightExtent: 0, + lex: l, + startSym: startSymbol, + } +} + +/* +Add a bsr to the set. (i,j) is the extent. k is the pivot. +*/ +func (s *Set) Add(l slot.Label, i, k, j int) { + // fmt.Printf("bsr.Add(%s,%d,%d,%d l.Pos %d)\n", l, i, k, j, l.Pos()) + if l.EoR() { + s.insert(BSR{l, i, k, j, s}) + } else { + if l.Pos() > 1 { + s.insert(&stringBSR{l.Symbols()[:l.Pos()], i, k, j, s}) + } + } +} + +// AddEmpty adds a grammar slot: X : ϵ• +func (s *Set) AddEmpty(l slot.Label, i int) { + s.insert(BSR{l, i, i, i, s}) +} + +/* +Contain returns true iff the BSR Set contains the NT symbol with left and +right extent. +*/ +func (s *Set) Contain(nt symbols.NT, left, right int) bool { + // fmt.Printf("bsr.Contain(%s,%d,%d)\n",nt,left,right) + for e := range s.slotEntries { + // fmt.Printf(" (%s,%d,%d)\n",e.Label.Head(),e.leftExtent,e.rightExtent) + if e.Label.Head() == nt && e.leftExtent == left && e.rightExtent == right { + // fmt.Println(" true") + return true + } + } + // fmt.Println(" false") + return false +} + +// Dump prints all the NT and string elements of the BSR set +func (s *Set) Dump() { + fmt.Println("Roots:") + for _, rt := range s.GetRoots() { + fmt.Println(rt) + } + fmt.Println() + + fmt.Println("NT BSRs:") + for _, bsr := range s.getNTBSRs() { + fmt.Println(bsr) + } + fmt.Println() + + fmt.Println("string BSRs:") + for _, bsr := range s.getStringBSRs() { + fmt.Println(bsr) + } + fmt.Println() +} + +// GetAll returns all BSR grammar slot entries +func (s *Set) GetAll() (bsrs []BSR) { + for b := range s.slotEntries { + bsrs = append(bsrs, b) + } + return +} + +// GetRightExtent returns the right extent of the BSR set +func (s *Set) GetRightExtent() int { + return s.rightExtent +} + +// GetRoot returns the root of the parse tree of an unambiguous parse. +// GetRoot fails if the parse was ambiguous. Use GetRoots() for ambiguous parses. 
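// Caller-side usage sketch for GetRoot/GetRoots above (the input string is
// illustrative; any trace accepted by the grammar works, e.g. an init_chain
// followed by a single consensus height):
lex := lexer.New([]rune("init_chain finalize_block commit"))
set, errs := parser.Parse(lex)
if errs == nil {
	if roots := set.GetRoots(); len(roots) == 1 {
		root := set.GetRoot()          // safe: exactly one parse tree exists
		fmt.Println(root.Label.Head()) // prints the start symbol
	}
}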
+func (s *Set) GetRoot() BSR { + rts := s.GetRoots() + if len(rts) != 1 { + failf("%d parse trees exist for start symbol %s", len(rts), s.startSym) + } + return rts[0] +} + +// GetRoots returns all the roots of parse trees of the start symbol of the grammar. +func (s *Set) GetRoots() (roots []BSR) { + for b := range s.slotEntries { + if b.Label.Head() == s.startSym && b.leftExtent == 0 && s.rightExtent == b.rightExtent { + roots = append(roots, b) + } + } + return +} + +// GetAllStrings returns all string elements with symbols = str, +// left extent = lext and right extent = rext +func (s *Set) GetAllStrings(str symbols.Symbols, lext, rext int) (strs []*stringBSR) { + for _, s := range s.stringEntries { + if s.Symbols.Equal(str) && s.leftExtent == lext && s.rightExtent == rext { + strs = append(strs, s) + } + } + return +} + +func (s *Set) getNTBSRs() BSRs { + bsrs := make(BSRs, 0, len(s.ntSlotEntries)) + for _, bsrl := range s.ntSlotEntries { + for _, bsr := range bsrl { + bsrs = append(bsrs, bsr) + } + } + sort.Sort(bsrs) + return bsrs +} + +func (s *Set) getStringBSRs() stringBSRs { + bsrs := make(stringBSRs, 0, len(s.stringEntries)) + for _, bsr := range s.stringEntries { + bsrs = append(bsrs, bsr) + } + sort.Sort(bsrs) + return bsrs +} + +func (s *Set) getString(symbols symbols.Symbols, leftExtent, rightExtent int) *stringBSR { + // fmt.Printf("Set.getString(%s,%d,%d)\n", symbols, leftExtent, rightExtent) + + strBsr, exist := s.stringEntries[getStringKey(symbols, leftExtent, rightExtent)] + if exist { + return strBsr + } + + panic(fmt.Sprintf("Error: no string %s left extent=%d right extent=%d\n", + symbols, leftExtent, rightExtent)) +} + +func (s *Set) insert(bsr bsr) { + if bsr.RightExtent() > s.rightExtent { + s.rightExtent = bsr.RightExtent() + } + switch b := bsr.(type) { + case BSR: + s.slotEntries[b] = true + nt := ntSlot{b.Label.Head(), b.leftExtent, b.rightExtent} + s.ntSlotEntries[nt] = append(s.ntSlotEntries[nt], b) + case *stringBSR: + s.stringEntries[b.key()] = b + default: + panic(fmt.Sprintf("Invalid type %T", bsr)) + } +} + +func (s *stringBSR) key() stringKey { + return getStringKey(s.Symbols, s.leftExtent, s.rightExtent) +} + +func getStringKey(symbols symbols.Symbols, lext, rext int) stringKey { + return stringKey(fmt.Sprintf("%s,%d,%d", symbols, lext, rext)) +} + +// Alternate returns the index of the grammar rule alternate. +func (b BSR) Alternate() int { + return b.Label.Alternate() +} + +// GetAllNTChildren returns all the NT Children of b. If an NT child of b has +// ambiguous parses then all parses of that child are returned. +func (b BSR) GetAllNTChildren() [][]BSR { + children := [][]BSR{} + for i, s := range b.Label.Symbols() { + if s.IsNonTerminal() { + sChildren := b.GetNTChildrenI(i) + children = append(children, sChildren) + } + } + return children +} + +// GetNTChild returns the BSR of occurrence i of nt in s. +// GetNTChild fails if s has ambiguous subtrees of occurrence i of nt. +func (b BSR) GetNTChild(nt symbols.NT, i int) BSR { + bsrs := b.GetNTChildren(nt, i) + if len(bsrs) != 1 { + ambiguousSlots := []string{} + for _, c := range bsrs { + ambiguousSlots = append(ambiguousSlots, c.String()) + } + b.set.fail(b, "%s is ambiguous in %s\n %s", nt, b, strings.Join(ambiguousSlots, "\n ")) + } + return bsrs[0] +} + +// GetNTChildI returns the BSR of NT symbol[i] in the BSR set. +// GetNTChildI fails if the BSR set has ambiguous subtrees of NT i. 
+func (b BSR) GetNTChildI(i int) BSR { + bsrs := b.GetNTChildrenI(i) + if len(bsrs) != 1 { + b.set.fail(b, "NT %d is ambiguous in %s", i, b) + } + return bsrs[0] +} + +// GetNTChildren returns all the BSRs of occurrence i of nt in s +func (b BSR) GetNTChildren(nt symbols.NT, i int) []BSR { + // fmt.Printf("GetNTChild(%s,%d) %s\n", nt, i, b) + positions := []int{} + for j, s := range b.Label.Symbols() { + if s == nt { + positions = append(positions, j) + } + } + if len(positions) == 0 { + b.set.fail(b, "Error: %s has no NT %s", b, nt) + } + return b.GetNTChildrenI(positions[i]) +} + +// GetNTChildrenI returns all the BSRs of NT symbol[i] in s +func (b BSR) GetNTChildrenI(i int) []BSR { + // fmt.Printf("bsr.GetNTChildI(%d) %s Pos %d\n", i, b, b.Label.Pos()) + + if i >= len(b.Label.Symbols()) { + b.set.fail(b, "Error: cannot get NT child %d of %s", i, b) + } + if len(b.Label.Symbols()) == 1 { + return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) + } + if len(b.Label.Symbols()) == 2 { + if i == 0 { + return b.set.getNTSlot(b.Label.Symbols()[i], b.leftExtent, b.pivot) + } + return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) + } + if b.Label.Pos() == i+1 { + return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) + } + + // Walk to pos i from the right + symbols := b.Label.Symbols()[:b.Label.Pos()-1] + str := b.set.getString(symbols, b.leftExtent, b.pivot) + for len(symbols) > i+1 && len(symbols) > 2 { + symbols = symbols[:len(symbols)-1] + str = b.set.getString(symbols, str.leftExtent, str.pivot) + } + + bsrs := []BSR{} + if i == 0 { + bsrs = b.set.getNTSlot(b.Label.Symbols()[i], str.leftExtent, str.pivot) + } else { + bsrs = b.set.getNTSlot(b.Label.Symbols()[i], str.pivot, str.rightExtent) + } + + // fmt.Println(bsrs) + + return bsrs +} + +// GetTChildI returns the terminal symbol at position i in b. 
+// GetTChildI panics if symbol i is not a valid terminal +func (b BSR) GetTChildI(i int) *token.Token { + symbols := b.Label.Symbols() + + if i >= len(symbols) { + panic(fmt.Sprintf("%s has no T child %d", b, i)) + } + if symbols[i].IsNonTerminal() { + panic(fmt.Sprintf("symbol %d in %s is an NT", i, b)) + } + + lext := b.leftExtent + for j := 0; j < i; j++ { + if symbols[j].IsNonTerminal() { + nt := b.GetNTChildI(j) + lext += nt.rightExtent - nt.leftExtent + } else { + lext++ + } + } + return b.set.lex.Tokens[lext] +} + +// LeftExtent returns the left extent of the BSR in the stream of tokens +func (b BSR) LeftExtent() int { + return b.leftExtent +} + +// RightExtent returns the right extent of the BSR in the stream of tokens +func (b BSR) RightExtent() int { + return b.rightExtent +} + +// Pivot returns the pivot of the BSR +func (b BSR) Pivot() int { + return b.pivot +} + +func (b BSR) String() string { + srcStr := "ℇ" + if b.leftExtent < b.rightExtent { + srcStr = b.set.lex.GetString(b.LeftExtent(), b.RightExtent()-1) + } + return fmt.Sprintf("%s,%d,%d,%d - %s", + b.Label, b.leftExtent, b.pivot, b.rightExtent, srcStr) +} + +// BSRs Sort interface +func (bs BSRs) Len() int { + return len(bs) +} + +func (bs BSRs) Less(i, j int) bool { + if bs[i].Label < bs[j].Label { + return true + } + if bs[i].Label > bs[j].Label { + return false + } + if bs[i].leftExtent < bs[j].leftExtent { + return true + } + if bs[i].leftExtent > bs[j].leftExtent { + return false + } + return bs[i].rightExtent < bs[j].rightExtent +} + +func (bs BSRs) Swap(i, j int) { + bs[i], bs[j] = bs[j], bs[i] +} + +// stringBSRs Sort interface +func (sbs stringBSRs) Len() int { + return len(sbs) +} + +func (sbs stringBSRs) Less(i, j int) bool { + if sbs[i].Symbols.String() < sbs[j].Symbols.String() { + return true + } + if sbs[i].Symbols.String() > sbs[j].Symbols.String() { + return false + } + if sbs[i].leftExtent < sbs[j].leftExtent { + return true + } + if sbs[i].leftExtent > sbs[j].leftExtent { + return false + } + return sbs[i].rightExtent < sbs[j].rightExtent +} + +func (sbs stringBSRs) Swap(i, j int) { + sbs[i], sbs[j] = sbs[j], sbs[i] +} + +func (s stringBSR) LeftExtent() int { + return s.leftExtent +} + +func (s stringBSR) RightExtent() int { + return s.rightExtent +} + +func (s stringBSR) Pivot() int { + return s.pivot +} + +func (s stringBSR) Empty() bool { + return s.leftExtent == s.pivot && s.pivot == s.rightExtent +} + +// String returns a string representation of s +func (s stringBSR) String() string { + return fmt.Sprintf("%s,%d,%d,%d - %s", &s.Symbols, s.leftExtent, s.pivot, + s.rightExtent, s.set.lex.GetString(s.LeftExtent(), s.RightExtent())) +} + +func (s *Set) getNTSlot(sym symbols.Symbol, leftExtent, rightExtent int) (bsrs []BSR) { + nt, ok := sym.(symbols.NT) + if !ok { + line, col := s.getLineColumn(leftExtent) + failf("%s is not an NT at line %d col %d", sym, line, col) + } + return s.ntSlotEntries[ntSlot{nt, leftExtent, rightExtent}] +} + +func (s *Set) fail(b BSR, format string, a ...any) { + msg := fmt.Sprintf(format, a...) 
+ line, col := s.getLineColumn(b.LeftExtent()) + panic(fmt.Sprintf("Error in BSR: %s at line %d col %d\n", msg, line, col)) +} + +func failf(format string, args ...any) { + panic(fmt.Sprintf("Error in BSR: %s\n", fmt.Sprintf(format, args...))) +} + +func (s *Set) getLineColumn(cI int) (line, col int) { + return s.lex.GetLineColumnOfToken(cI) +} + +// ReportAmbiguous lists the ambiguous subtrees of the parse forest +func (s *Set) ReportAmbiguous() { + fmt.Println("Ambiguous BSR Subtrees:") + rts := s.GetRoots() + if len(rts) != 1 { + fmt.Printf("BSR has %d ambigous roots\n", len(rts)) + } + for i, b := range s.GetRoots() { + fmt.Println("In root", i) + if !s.report(b) { + fmt.Println("No ambiguous BSRs") + } + } +} + +// report return true iff at least one ambigous BSR was found +func (s *Set) report(b BSR) bool { + ambiguous := false + for i, sym := range b.Label.Symbols() { + ln, col := s.getLineColumn(b.LeftExtent()) + if sym.IsNonTerminal() { + if len(b.GetNTChildrenI(i)) != 1 { + ambiguous = true + fmt.Printf(" Ambigous: in %s: NT %s (%d) at line %d col %d \n", + b, sym, i, ln, col) + fmt.Println(" Children:") + for _, c := range b.GetNTChildrenI(i) { + fmt.Printf(" %s\n", c) + } + } + for _, b1 := range b.GetNTChildrenI(i) { + s.report(b1) + } + } + } + return ambiguous +} + +// IsAmbiguous returns true if the BSR set does not have exactly one root, or +// if any BSR in the set has an NT symbol, which does not have exactly one +// sub-tree. +func (s *Set) IsAmbiguous() bool { + if len(s.GetRoots()) != 1 { + return true + } + return isAmbiguous(s.GetRoot()) +} + +// isAmbiguous returns true if b or any of its NT children is ambiguous. +// A BSR is ambiguous if any of its NT symbols does not have exactly one +// subtrees (children). +func isAmbiguous(b BSR) bool { + for i, s := range b.Label.Symbols() { + if s.IsNonTerminal() { + if len(b.GetNTChildrenI(i)) != 1 { + return true + } + for _, b1 := range b.GetNTChildrenI(i) { + if isAmbiguous(b1) { + return true + } + } + } + } + return false +} + +//---- SPPF ------------ + +type bldSPPF struct { + root *sppf.SymbolNode + extLeafNodes []sppf.Node + pNodes map[string]*sppf.PackedNode + sNodes map[string]*sppf.SymbolNode // Index is Node.Label() +} + +func (pf *Set) ToSPPF() *sppf.SymbolNode { + bld := &bldSPPF{ + pNodes: map[string]*sppf.PackedNode{}, + sNodes: map[string]*sppf.SymbolNode{}, + } + rt := pf.GetRoots()[0] + bld.root = bld.mkSN(rt.Label.Head().String(), rt.leftExtent, rt.rightExtent) + + for len(bld.extLeafNodes) > 0 { + // let w = (μ, i, j) be an extendable leaf node of G + w := bld.extLeafNodes[len(bld.extLeafNodes)-1] + bld.extLeafNodes = bld.extLeafNodes[:len(bld.extLeafNodes)-1] + + // μ is a nonterminal X in Γ + if nt, ok := w.(*sppf.SymbolNode); ok && symbols.IsNT(nt.Symbol) { + bsts := pf.getNTSlot(symbols.ToNT(nt.Symbol), nt.Lext, nt.Rext) + // for each (X ::=γ,i,k, j)∈Υ { mkPN(X ::=γ·,i,k, j,G) } } + for _, bst := range bsts { + slt := bst.Label.Slot() + nt.Children = append(nt.Children, + bld.mkPN(slt.NT, slt.Symbols, slt.Pos, + bst.leftExtent, bst.pivot, bst.rightExtent)) + } + } else { // w is an intermediate node + // suppose μ is X ::=α·δ + in := w.(*sppf.IntermediateNode) + if in.Pos == 1 { + in.Children = append(in.Children, bld.mkPN(in.NT, in.Body, in.Pos, + in.Lext, in.Lext, in.Rext)) + } else { + // for each (α,i,k, j)∈Υ { mkPN(X ::=α·δ,i,k, j,G) } } } } + alpha, delta := in.Body[:in.Pos], in.Body[in.Pos:] + for _, str := range pf.GetAllStrings(alpha, in.Lext, in.Rext) { + body := append(str.Symbols, 
delta...) + in.Children = append(in.Children, + bld.mkPN(in.NT, body, in.Pos, str.leftExtent, str.pivot, str.rightExtent)) + } + } + } + } + return bld.root +} + +func (bld *bldSPPF) mkIN(nt symbols.NT, body symbols.Symbols, pos int, + lext, rext int, +) *sppf.IntermediateNode { + in := &sppf.IntermediateNode{ + NT: nt, + Body: body, + Pos: pos, + Lext: lext, + Rext: rext, + } + bld.extLeafNodes = append(bld.extLeafNodes, in) + return in +} + +func (bld *bldSPPF) mkPN(nt symbols.NT, body symbols.Symbols, pos int, + lext, pivot, rext int, +) *sppf.PackedNode { + // fmt.Printf("mkPN %s,%d,%d,%d\n", slotString(nt, body, pos), lext, pivot, rext) + + // X ::= ⍺ • β, k + pn := &sppf.PackedNode{ + NT: nt, + Body: body, + Pos: pos, + Lext: lext, + Rext: rext, + Pivot: pivot, + LeftChild: nil, + RightChild: nil, + } + if pn1, exist := bld.pNodes[pn.Label()]; exist { + return pn1 + } + bld.pNodes[pn.Label()] = pn + + if len(body) == 0 { // ⍺ = ϵ + pn.RightChild = bld.mkSN("ϵ", lext, lext) + } else { // if ( α=βx, where |x|=1) { + // mkN(x,k, j, y,G) + pn.RightChild = bld.mkSN(pn.Body[pn.Pos-1].String(), pivot, rext) + + // if (|β|=1) mkN(β,i,k,y,G) + if pos == 2 { + pn.LeftChild = bld.mkSN(pn.Body[pn.Pos-2].String(), lext, pivot) + } + // if (|β|>1) mkN(X ::=β·xδ,i,k,y,G) + if pos > 2 { + pn.LeftChild = bld.mkIN(pn.NT, pn.Body, pn.Pos-1, lext, pivot) + } + } + + return pn +} + +func (bld *bldSPPF) mkSN(symbol string, lext, rext int) *sppf.SymbolNode { + sn := &sppf.SymbolNode{ + Symbol: symbol, + Lext: lext, + Rext: rext, + } + if sn1, exist := bld.sNodes[sn.Label()]; exist { + return sn1 + } + bld.sNodes[sn.Label()] = sn + if symbols.IsNT(symbol) { + bld.extLeafNodes = append(bld.extLeafNodes, sn) + } + return sn +} + +func slotString(nt symbols.NT, body symbols.Symbols, pos int) string { + w := new(bytes.Buffer) + fmt.Fprintf(w, "%s:", nt) + for i, sym := range body { + fmt.Fprint(w, " ") + if i == pos { + fmt.Fprint(w, "•") + } + fmt.Fprint(w, sym) + } + if len(body) == pos { + fmt.Fprint(w, "•") + } + return w.String() +} diff --git a/test/e2e/pkg/grammar/grammar-auto/parser/parser.go b/test/e2e/pkg/grammar/grammar-auto/parser/parser.go new file mode 100644 index 00000000000..8063d2dd061 --- /dev/null +++ b/test/e2e/pkg/grammar/grammar-auto/parser/parser.go @@ -0,0 +1,2107 @@ +// Package parser is generated by gogll. Do not edit. +package parser + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/lexer" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/bsr" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/slot" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/symbols" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/token" +) + +type parser struct { + cI int + + R *descriptors + U *descriptors + + popped map[poppedNode]bool + crf map[clusterNode][]*crfNode + crfNodes map[crfNode]*crfNode + + lex *lexer.Lexer + parseErrors []*Error + + bsrSet *bsr.Set +} + +func newParser(l *lexer.Lexer) *parser { + return &parser{ + cI: 0, + lex: l, + R: &descriptors{}, + U: &descriptors{}, + popped: make(map[poppedNode]bool), + crf: map[clusterNode][]*crfNode{ + {symbols.NT_Start, 0}: {}, + }, + crfNodes: map[crfNode]*crfNode{}, + bsrSet: bsr.New(symbols.NT_Start, l), + parseErrors: nil, + } +} + +// Parse returns the BSR set containing the parse forest. 
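// Reviewer note on ToSPPF above: `body := append(str.Symbols, delta...)` can
// reuse the backing array of str.Symbols when spare capacity exists, silently
// mutating the stringBSR stored in the set. If that aliasing is unintended, a
// defensive copy avoids it:
body := make(symbols.Symbols, 0, len(str.Symbols)+len(delta))
body = append(body, str.Symbols...)
body = append(body, delta...)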
+// If the parse was successfull []*Error is nil +func Parse(l *lexer.Lexer) (*bsr.Set, []*Error) { + return newParser(l).parse() +} + +func (p *parser) parse() (*bsr.Set, []*Error) { + var L slot.Label + m, cU := len(p.lex.Tokens)-1, 0 + p.ntAdd(symbols.NT_Start, 0) + // p.DumpDescriptors() + for !p.R.empty() { + L, cU, p.cI = p.R.remove() + + // fmt.Println() + // fmt.Printf("L:%s, cI:%d, I[p.cI]:%s, cU:%d\n", L, p.cI, p.lex.Tokens[p.cI], cU) + // p.DumpDescriptors() + + switch L { + case slot.ApplyChunk0R0: // ApplyChunk : ∙apply_snapshot_chunk + + p.bsrSet.Add(slot.ApplyChunk0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_ApplyChunk) { + p.rtn(symbols.NT_ApplyChunk, cU, p.cI) + } else { + p.parseError(slot.ApplyChunk0R0, p.cI, followSets[symbols.NT_ApplyChunk]) + } + case slot.ApplyChunks0R0: // ApplyChunks : ∙ApplyChunk + + p.call(slot.ApplyChunks0R1, cU, p.cI) + case slot.ApplyChunks0R1: // ApplyChunks : ApplyChunk ∙ + + if p.follow(symbols.NT_ApplyChunks) { + p.rtn(symbols.NT_ApplyChunks, cU, p.cI) + } else { + p.parseError(slot.ApplyChunks0R0, p.cI, followSets[symbols.NT_ApplyChunks]) + } + case slot.ApplyChunks1R0: // ApplyChunks : ∙ApplyChunk ApplyChunks + + p.call(slot.ApplyChunks1R1, cU, p.cI) + case slot.ApplyChunks1R1: // ApplyChunks : ApplyChunk ∙ApplyChunks + + if !p.testSelect(slot.ApplyChunks1R1) { + p.parseError(slot.ApplyChunks1R1, p.cI, first[slot.ApplyChunks1R1]) + break + } + + p.call(slot.ApplyChunks1R2, cU, p.cI) + case slot.ApplyChunks1R2: // ApplyChunks : ApplyChunk ApplyChunks ∙ + + if p.follow(symbols.NT_ApplyChunks) { + p.rtn(symbols.NT_ApplyChunks, cU, p.cI) + } else { + p.parseError(slot.ApplyChunks1R0, p.cI, followSets[symbols.NT_ApplyChunks]) + } + case slot.CleanStart0R0: // CleanStart : ∙InitChain ConsensusExec + + p.call(slot.CleanStart0R1, cU, p.cI) + case slot.CleanStart0R1: // CleanStart : InitChain ∙ConsensusExec + + if !p.testSelect(slot.CleanStart0R1) { + p.parseError(slot.CleanStart0R1, p.cI, first[slot.CleanStart0R1]) + break + } + + p.call(slot.CleanStart0R2, cU, p.cI) + case slot.CleanStart0R2: // CleanStart : InitChain ConsensusExec ∙ + + if p.follow(symbols.NT_CleanStart) { + p.rtn(symbols.NT_CleanStart, cU, p.cI) + } else { + p.parseError(slot.CleanStart0R0, p.cI, followSets[symbols.NT_CleanStart]) + } + case slot.CleanStart1R0: // CleanStart : ∙StateSync ConsensusExec + + p.call(slot.CleanStart1R1, cU, p.cI) + case slot.CleanStart1R1: // CleanStart : StateSync ∙ConsensusExec + + if !p.testSelect(slot.CleanStart1R1) { + p.parseError(slot.CleanStart1R1, p.cI, first[slot.CleanStart1R1]) + break + } + + p.call(slot.CleanStart1R2, cU, p.cI) + case slot.CleanStart1R2: // CleanStart : StateSync ConsensusExec ∙ + + if p.follow(symbols.NT_CleanStart) { + p.rtn(symbols.NT_CleanStart, cU, p.cI) + } else { + p.parseError(slot.CleanStart1R0, p.cI, followSets[symbols.NT_CleanStart]) + } + case slot.Commit0R0: // Commit : ∙commit + + p.bsrSet.Add(slot.Commit0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_Commit) { + p.rtn(symbols.NT_Commit, cU, p.cI) + } else { + p.parseError(slot.Commit0R0, p.cI, followSets[symbols.NT_Commit]) + } + case slot.ConsensusExec0R0: // ConsensusExec : ∙ConsensusHeights + + p.call(slot.ConsensusExec0R1, cU, p.cI) + case slot.ConsensusExec0R1: // ConsensusExec : ConsensusHeights ∙ + + if p.follow(symbols.NT_ConsensusExec) { + p.rtn(symbols.NT_ConsensusExec, cU, p.cI) + } else { + p.parseError(slot.ConsensusExec0R0, p.cI, followSets[symbols.NT_ConsensusExec]) + } + case slot.ConsensusHeight0R0: // 
ConsensusHeight : ∙ConsensusRounds FinalizeBlock Commit + + p.call(slot.ConsensusHeight0R1, cU, p.cI) + case slot.ConsensusHeight0R1: // ConsensusHeight : ConsensusRounds ∙FinalizeBlock Commit + + if !p.testSelect(slot.ConsensusHeight0R1) { + p.parseError(slot.ConsensusHeight0R1, p.cI, first[slot.ConsensusHeight0R1]) + break + } + + p.call(slot.ConsensusHeight0R2, cU, p.cI) + case slot.ConsensusHeight0R2: // ConsensusHeight : ConsensusRounds FinalizeBlock ∙Commit + + if !p.testSelect(slot.ConsensusHeight0R2) { + p.parseError(slot.ConsensusHeight0R2, p.cI, first[slot.ConsensusHeight0R2]) + break + } + + p.call(slot.ConsensusHeight0R3, cU, p.cI) + case slot.ConsensusHeight0R3: // ConsensusHeight : ConsensusRounds FinalizeBlock Commit ∙ + + if p.follow(symbols.NT_ConsensusHeight) { + p.rtn(symbols.NT_ConsensusHeight, cU, p.cI) + } else { + p.parseError(slot.ConsensusHeight0R0, p.cI, followSets[symbols.NT_ConsensusHeight]) + } + case slot.ConsensusHeight1R0: // ConsensusHeight : ∙FinalizeBlock Commit + + p.call(slot.ConsensusHeight1R1, cU, p.cI) + case slot.ConsensusHeight1R1: // ConsensusHeight : FinalizeBlock ∙Commit + + if !p.testSelect(slot.ConsensusHeight1R1) { + p.parseError(slot.ConsensusHeight1R1, p.cI, first[slot.ConsensusHeight1R1]) + break + } + + p.call(slot.ConsensusHeight1R2, cU, p.cI) + case slot.ConsensusHeight1R2: // ConsensusHeight : FinalizeBlock Commit ∙ + + if p.follow(symbols.NT_ConsensusHeight) { + p.rtn(symbols.NT_ConsensusHeight, cU, p.cI) + } else { + p.parseError(slot.ConsensusHeight1R0, p.cI, followSets[symbols.NT_ConsensusHeight]) + } + case slot.ConsensusHeights0R0: // ConsensusHeights : ∙ConsensusHeight + + p.call(slot.ConsensusHeights0R1, cU, p.cI) + case slot.ConsensusHeights0R1: // ConsensusHeights : ConsensusHeight ∙ + + if p.follow(symbols.NT_ConsensusHeights) { + p.rtn(symbols.NT_ConsensusHeights, cU, p.cI) + } else { + p.parseError(slot.ConsensusHeights0R0, p.cI, followSets[symbols.NT_ConsensusHeights]) + } + case slot.ConsensusHeights1R0: // ConsensusHeights : ∙ConsensusHeight ConsensusHeights + + p.call(slot.ConsensusHeights1R1, cU, p.cI) + case slot.ConsensusHeights1R1: // ConsensusHeights : ConsensusHeight ∙ConsensusHeights + + if !p.testSelect(slot.ConsensusHeights1R1) { + p.parseError(slot.ConsensusHeights1R1, p.cI, first[slot.ConsensusHeights1R1]) + break + } + + p.call(slot.ConsensusHeights1R2, cU, p.cI) + case slot.ConsensusHeights1R2: // ConsensusHeights : ConsensusHeight ConsensusHeights ∙ + + if p.follow(symbols.NT_ConsensusHeights) { + p.rtn(symbols.NT_ConsensusHeights, cU, p.cI) + } else { + p.parseError(slot.ConsensusHeights1R0, p.cI, followSets[symbols.NT_ConsensusHeights]) + } + case slot.ConsensusRound0R0: // ConsensusRound : ∙Proposer + + p.call(slot.ConsensusRound0R1, cU, p.cI) + case slot.ConsensusRound0R1: // ConsensusRound : Proposer ∙ + + if p.follow(symbols.NT_ConsensusRound) { + p.rtn(symbols.NT_ConsensusRound, cU, p.cI) + } else { + p.parseError(slot.ConsensusRound0R0, p.cI, followSets[symbols.NT_ConsensusRound]) + } + case slot.ConsensusRound1R0: // ConsensusRound : ∙NonProposer + + p.call(slot.ConsensusRound1R1, cU, p.cI) + case slot.ConsensusRound1R1: // ConsensusRound : NonProposer ∙ + + if p.follow(symbols.NT_ConsensusRound) { + p.rtn(symbols.NT_ConsensusRound, cU, p.cI) + } else { + p.parseError(slot.ConsensusRound1R0, p.cI, followSets[symbols.NT_ConsensusRound]) + } + case slot.ConsensusRounds0R0: // ConsensusRounds : ∙ConsensusRound + + p.call(slot.ConsensusRounds0R1, cU, p.cI) + case slot.ConsensusRounds0R1: // 
ConsensusRounds : ConsensusRound ∙ + + if p.follow(symbols.NT_ConsensusRounds) { + p.rtn(symbols.NT_ConsensusRounds, cU, p.cI) + } else { + p.parseError(slot.ConsensusRounds0R0, p.cI, followSets[symbols.NT_ConsensusRounds]) + } + case slot.ConsensusRounds1R0: // ConsensusRounds : ∙ConsensusRound ConsensusRounds + + p.call(slot.ConsensusRounds1R1, cU, p.cI) + case slot.ConsensusRounds1R1: // ConsensusRounds : ConsensusRound ∙ConsensusRounds + + if !p.testSelect(slot.ConsensusRounds1R1) { + p.parseError(slot.ConsensusRounds1R1, p.cI, first[slot.ConsensusRounds1R1]) + break + } + + p.call(slot.ConsensusRounds1R2, cU, p.cI) + case slot.ConsensusRounds1R2: // ConsensusRounds : ConsensusRound ConsensusRounds ∙ + + if p.follow(symbols.NT_ConsensusRounds) { + p.rtn(symbols.NT_ConsensusRounds, cU, p.cI) + } else { + p.parseError(slot.ConsensusRounds1R0, p.cI, followSets[symbols.NT_ConsensusRounds]) + } + case slot.Extend0R0: // Extend : ∙ExtendVote + + p.call(slot.Extend0R1, cU, p.cI) + case slot.Extend0R1: // Extend : ExtendVote ∙ + + if p.follow(symbols.NT_Extend) { + p.rtn(symbols.NT_Extend, cU, p.cI) + } else { + p.parseError(slot.Extend0R0, p.cI, followSets[symbols.NT_Extend]) + } + case slot.Extend1R0: // Extend : ∙GotVotes ExtendVote + + p.call(slot.Extend1R1, cU, p.cI) + case slot.Extend1R1: // Extend : GotVotes ∙ExtendVote + + if !p.testSelect(slot.Extend1R1) { + p.parseError(slot.Extend1R1, p.cI, first[slot.Extend1R1]) + break + } + + p.call(slot.Extend1R2, cU, p.cI) + case slot.Extend1R2: // Extend : GotVotes ExtendVote ∙ + + if p.follow(symbols.NT_Extend) { + p.rtn(symbols.NT_Extend, cU, p.cI) + } else { + p.parseError(slot.Extend1R0, p.cI, followSets[symbols.NT_Extend]) + } + case slot.Extend2R0: // Extend : ∙ExtendVote GotVotes + + p.call(slot.Extend2R1, cU, p.cI) + case slot.Extend2R1: // Extend : ExtendVote ∙GotVotes + + if !p.testSelect(slot.Extend2R1) { + p.parseError(slot.Extend2R1, p.cI, first[slot.Extend2R1]) + break + } + + p.call(slot.Extend2R2, cU, p.cI) + case slot.Extend2R2: // Extend : ExtendVote GotVotes ∙ + + if p.follow(symbols.NT_Extend) { + p.rtn(symbols.NT_Extend, cU, p.cI) + } else { + p.parseError(slot.Extend2R0, p.cI, followSets[symbols.NT_Extend]) + } + case slot.Extend3R0: // Extend : ∙GotVotes ExtendVote GotVotes + + p.call(slot.Extend3R1, cU, p.cI) + case slot.Extend3R1: // Extend : GotVotes ∙ExtendVote GotVotes + + if !p.testSelect(slot.Extend3R1) { + p.parseError(slot.Extend3R1, p.cI, first[slot.Extend3R1]) + break + } + + p.call(slot.Extend3R2, cU, p.cI) + case slot.Extend3R2: // Extend : GotVotes ExtendVote ∙GotVotes + + if !p.testSelect(slot.Extend3R2) { + p.parseError(slot.Extend3R2, p.cI, first[slot.Extend3R2]) + break + } + + p.call(slot.Extend3R3, cU, p.cI) + case slot.Extend3R3: // Extend : GotVotes ExtendVote GotVotes ∙ + + if p.follow(symbols.NT_Extend) { + p.rtn(symbols.NT_Extend, cU, p.cI) + } else { + p.parseError(slot.Extend3R0, p.cI, followSets[symbols.NT_Extend]) + } + case slot.ExtendVote0R0: // ExtendVote : ∙extend_vote + + p.bsrSet.Add(slot.ExtendVote0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_ExtendVote) { + p.rtn(symbols.NT_ExtendVote, cU, p.cI) + } else { + p.parseError(slot.ExtendVote0R0, p.cI, followSets[symbols.NT_ExtendVote]) + } + case slot.FinalizeBlock0R0: // FinalizeBlock : ∙finalize_block + + p.bsrSet.Add(slot.FinalizeBlock0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_FinalizeBlock) { + p.rtn(symbols.NT_FinalizeBlock, cU, p.cI) + } else { + p.parseError(slot.FinalizeBlock0R0, p.cI, 
followSets[symbols.NT_FinalizeBlock]) + } + case slot.GotVote0R0: // GotVote : ∙verify_vote_extension + + p.bsrSet.Add(slot.GotVote0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_GotVote) { + p.rtn(symbols.NT_GotVote, cU, p.cI) + } else { + p.parseError(slot.GotVote0R0, p.cI, followSets[symbols.NT_GotVote]) + } + case slot.GotVotes0R0: // GotVotes : ∙GotVote + + p.call(slot.GotVotes0R1, cU, p.cI) + case slot.GotVotes0R1: // GotVotes : GotVote ∙ + + if p.follow(symbols.NT_GotVotes) { + p.rtn(symbols.NT_GotVotes, cU, p.cI) + } else { + p.parseError(slot.GotVotes0R0, p.cI, followSets[symbols.NT_GotVotes]) + } + case slot.GotVotes1R0: // GotVotes : ∙GotVote GotVotes + + p.call(slot.GotVotes1R1, cU, p.cI) + case slot.GotVotes1R1: // GotVotes : GotVote ∙GotVotes + + if !p.testSelect(slot.GotVotes1R1) { + p.parseError(slot.GotVotes1R1, p.cI, first[slot.GotVotes1R1]) + break + } + + p.call(slot.GotVotes1R2, cU, p.cI) + case slot.GotVotes1R2: // GotVotes : GotVote GotVotes ∙ + + if p.follow(symbols.NT_GotVotes) { + p.rtn(symbols.NT_GotVotes, cU, p.cI) + } else { + p.parseError(slot.GotVotes1R0, p.cI, followSets[symbols.NT_GotVotes]) + } + case slot.InitChain0R0: // InitChain : ∙init_chain + + p.bsrSet.Add(slot.InitChain0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_InitChain) { + p.rtn(symbols.NT_InitChain, cU, p.cI) + } else { + p.parseError(slot.InitChain0R0, p.cI, followSets[symbols.NT_InitChain]) + } + case slot.NonProposer0R0: // NonProposer : ∙GotVotes + + p.call(slot.NonProposer0R1, cU, p.cI) + case slot.NonProposer0R1: // NonProposer : GotVotes ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + p.parseError(slot.NonProposer0R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.NonProposer1R0: // NonProposer : ∙ProcessProposal + + p.call(slot.NonProposer1R1, cU, p.cI) + case slot.NonProposer1R1: // NonProposer : ProcessProposal ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + p.parseError(slot.NonProposer1R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.NonProposer2R0: // NonProposer : ∙Extend + + p.call(slot.NonProposer2R1, cU, p.cI) + case slot.NonProposer2R1: // NonProposer : Extend ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + p.parseError(slot.NonProposer2R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.NonProposer3R0: // NonProposer : ∙GotVotes ProcessProposal + + p.call(slot.NonProposer3R1, cU, p.cI) + case slot.NonProposer3R1: // NonProposer : GotVotes ∙ProcessProposal + + if !p.testSelect(slot.NonProposer3R1) { + p.parseError(slot.NonProposer3R1, p.cI, first[slot.NonProposer3R1]) + break + } + + p.call(slot.NonProposer3R2, cU, p.cI) + case slot.NonProposer3R2: // NonProposer : GotVotes ProcessProposal ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + p.parseError(slot.NonProposer3R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.NonProposer4R0: // NonProposer : ∙GotVotes Extend + + p.call(slot.NonProposer4R1, cU, p.cI) + case slot.NonProposer4R1: // NonProposer : GotVotes ∙Extend + + if !p.testSelect(slot.NonProposer4R1) { + p.parseError(slot.NonProposer4R1, p.cI, first[slot.NonProposer4R1]) + break + } + + p.call(slot.NonProposer4R2, cU, p.cI) + case slot.NonProposer4R2: // NonProposer : GotVotes Extend ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + 
p.parseError(slot.NonProposer4R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.NonProposer5R0: // NonProposer : ∙ProcessProposal Extend + + p.call(slot.NonProposer5R1, cU, p.cI) + case slot.NonProposer5R1: // NonProposer : ProcessProposal ∙Extend + + if !p.testSelect(slot.NonProposer5R1) { + p.parseError(slot.NonProposer5R1, p.cI, first[slot.NonProposer5R1]) + break + } + + p.call(slot.NonProposer5R2, cU, p.cI) + case slot.NonProposer5R2: // NonProposer : ProcessProposal Extend ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + p.parseError(slot.NonProposer5R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.NonProposer6R0: // NonProposer : ∙GotVotes ProcessProposal Extend + + p.call(slot.NonProposer6R1, cU, p.cI) + case slot.NonProposer6R1: // NonProposer : GotVotes ∙ProcessProposal Extend + + if !p.testSelect(slot.NonProposer6R1) { + p.parseError(slot.NonProposer6R1, p.cI, first[slot.NonProposer6R1]) + break + } + + p.call(slot.NonProposer6R2, cU, p.cI) + case slot.NonProposer6R2: // NonProposer : GotVotes ProcessProposal ∙Extend + + if !p.testSelect(slot.NonProposer6R2) { + p.parseError(slot.NonProposer6R2, p.cI, first[slot.NonProposer6R2]) + break + } + + p.call(slot.NonProposer6R3, cU, p.cI) + case slot.NonProposer6R3: // NonProposer : GotVotes ProcessProposal Extend ∙ + + if p.follow(symbols.NT_NonProposer) { + p.rtn(symbols.NT_NonProposer, cU, p.cI) + } else { + p.parseError(slot.NonProposer6R0, p.cI, followSets[symbols.NT_NonProposer]) + } + case slot.OfferSnapshot0R0: // OfferSnapshot : ∙offer_snapshot + + p.bsrSet.Add(slot.OfferSnapshot0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_OfferSnapshot) { + p.rtn(symbols.NT_OfferSnapshot, cU, p.cI) + } else { + p.parseError(slot.OfferSnapshot0R0, p.cI, followSets[symbols.NT_OfferSnapshot]) + } + case slot.PrepareProposal0R0: // PrepareProposal : ∙prepare_proposal + + p.bsrSet.Add(slot.PrepareProposal0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_PrepareProposal) { + p.rtn(symbols.NT_PrepareProposal, cU, p.cI) + } else { + p.parseError(slot.PrepareProposal0R0, p.cI, followSets[symbols.NT_PrepareProposal]) + } + case slot.ProcessProposal0R0: // ProcessProposal : ∙process_proposal + + p.bsrSet.Add(slot.ProcessProposal0R1, cU, p.cI, p.cI+1) + p.cI++ + if p.follow(symbols.NT_ProcessProposal) { + p.rtn(symbols.NT_ProcessProposal, cU, p.cI) + } else { + p.parseError(slot.ProcessProposal0R0, p.cI, followSets[symbols.NT_ProcessProposal]) + } + case slot.Proposer0R0: // Proposer : ∙GotVotes + + p.call(slot.Proposer0R1, cU, p.cI) + case slot.Proposer0R1: // Proposer : GotVotes ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer0R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.Proposer1R0: // Proposer : ∙ProposerSimple + + p.call(slot.Proposer1R1, cU, p.cI) + case slot.Proposer1R1: // Proposer : ProposerSimple ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer1R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.Proposer2R0: // Proposer : ∙Extend + + p.call(slot.Proposer2R1, cU, p.cI) + case slot.Proposer2R1: // Proposer : Extend ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer2R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.Proposer3R0: // Proposer : ∙GotVotes ProposerSimple + + p.call(slot.Proposer3R1, cU, p.cI) + case 
slot.Proposer3R1: // Proposer : GotVotes ∙ProposerSimple + + if !p.testSelect(slot.Proposer3R1) { + p.parseError(slot.Proposer3R1, p.cI, first[slot.Proposer3R1]) + break + } + + p.call(slot.Proposer3R2, cU, p.cI) + case slot.Proposer3R2: // Proposer : GotVotes ProposerSimple ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer3R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.Proposer4R0: // Proposer : ∙GotVotes Extend + + p.call(slot.Proposer4R1, cU, p.cI) + case slot.Proposer4R1: // Proposer : GotVotes ∙Extend + + if !p.testSelect(slot.Proposer4R1) { + p.parseError(slot.Proposer4R1, p.cI, first[slot.Proposer4R1]) + break + } + + p.call(slot.Proposer4R2, cU, p.cI) + case slot.Proposer4R2: // Proposer : GotVotes Extend ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer4R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.Proposer5R0: // Proposer : ∙ProposerSimple Extend + + p.call(slot.Proposer5R1, cU, p.cI) + case slot.Proposer5R1: // Proposer : ProposerSimple ∙Extend + + if !p.testSelect(slot.Proposer5R1) { + p.parseError(slot.Proposer5R1, p.cI, first[slot.Proposer5R1]) + break + } + + p.call(slot.Proposer5R2, cU, p.cI) + case slot.Proposer5R2: // Proposer : ProposerSimple Extend ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer5R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.Proposer6R0: // Proposer : ∙GotVotes ProposerSimple Extend + + p.call(slot.Proposer6R1, cU, p.cI) + case slot.Proposer6R1: // Proposer : GotVotes ∙ProposerSimple Extend + + if !p.testSelect(slot.Proposer6R1) { + p.parseError(slot.Proposer6R1, p.cI, first[slot.Proposer6R1]) + break + } + + p.call(slot.Proposer6R2, cU, p.cI) + case slot.Proposer6R2: // Proposer : GotVotes ProposerSimple ∙Extend + + if !p.testSelect(slot.Proposer6R2) { + p.parseError(slot.Proposer6R2, p.cI, first[slot.Proposer6R2]) + break + } + + p.call(slot.Proposer6R3, cU, p.cI) + case slot.Proposer6R3: // Proposer : GotVotes ProposerSimple Extend ∙ + + if p.follow(symbols.NT_Proposer) { + p.rtn(symbols.NT_Proposer, cU, p.cI) + } else { + p.parseError(slot.Proposer6R0, p.cI, followSets[symbols.NT_Proposer]) + } + case slot.ProposerSimple0R0: // ProposerSimple : ∙PrepareProposal + + p.call(slot.ProposerSimple0R1, cU, p.cI) + case slot.ProposerSimple0R1: // ProposerSimple : PrepareProposal ∙ + + if p.follow(symbols.NT_ProposerSimple) { + p.rtn(symbols.NT_ProposerSimple, cU, p.cI) + } else { + p.parseError(slot.ProposerSimple0R0, p.cI, followSets[symbols.NT_ProposerSimple]) + } + case slot.ProposerSimple1R0: // ProposerSimple : ∙PrepareProposal ProcessProposal + + p.call(slot.ProposerSimple1R1, cU, p.cI) + case slot.ProposerSimple1R1: // ProposerSimple : PrepareProposal ∙ProcessProposal + + if !p.testSelect(slot.ProposerSimple1R1) { + p.parseError(slot.ProposerSimple1R1, p.cI, first[slot.ProposerSimple1R1]) + break + } + + p.call(slot.ProposerSimple1R2, cU, p.cI) + case slot.ProposerSimple1R2: // ProposerSimple : PrepareProposal ProcessProposal ∙ + + if p.follow(symbols.NT_ProposerSimple) { + p.rtn(symbols.NT_ProposerSimple, cU, p.cI) + } else { + p.parseError(slot.ProposerSimple1R0, p.cI, followSets[symbols.NT_ProposerSimple]) + } + case slot.Recovery0R0: // Recovery : ∙InitChain ConsensusExec + + p.call(slot.Recovery0R1, cU, p.cI) + case slot.Recovery0R1: // Recovery : InitChain ∙ConsensusExec + + if 
!p.testSelect(slot.Recovery0R1) { + p.parseError(slot.Recovery0R1, p.cI, first[slot.Recovery0R1]) + break + } + + p.call(slot.Recovery0R2, cU, p.cI) + case slot.Recovery0R2: // Recovery : InitChain ConsensusExec ∙ + + if p.follow(symbols.NT_Recovery) { + p.rtn(symbols.NT_Recovery, cU, p.cI) + } else { + p.parseError(slot.Recovery0R0, p.cI, followSets[symbols.NT_Recovery]) + } + case slot.Recovery1R0: // Recovery : ∙ConsensusExec + + p.call(slot.Recovery1R1, cU, p.cI) + case slot.Recovery1R1: // Recovery : ConsensusExec ∙ + + if p.follow(symbols.NT_Recovery) { + p.rtn(symbols.NT_Recovery, cU, p.cI) + } else { + p.parseError(slot.Recovery1R0, p.cI, followSets[symbols.NT_Recovery]) + } + case slot.Start0R0: // Start : ∙CleanStart + + p.call(slot.Start0R1, cU, p.cI) + case slot.Start0R1: // Start : CleanStart ∙ + + if p.follow(symbols.NT_Start) { + p.rtn(symbols.NT_Start, cU, p.cI) + } else { + p.parseError(slot.Start0R0, p.cI, followSets[symbols.NT_Start]) + } + case slot.Start1R0: // Start : ∙Recovery + + p.call(slot.Start1R1, cU, p.cI) + case slot.Start1R1: // Start : Recovery ∙ + + if p.follow(symbols.NT_Start) { + p.rtn(symbols.NT_Start, cU, p.cI) + } else { + p.parseError(slot.Start1R0, p.cI, followSets[symbols.NT_Start]) + } + case slot.StateSync0R0: // StateSync : ∙StateSyncAttempts SuccessSync + + p.call(slot.StateSync0R1, cU, p.cI) + case slot.StateSync0R1: // StateSync : StateSyncAttempts ∙SuccessSync + + if !p.testSelect(slot.StateSync0R1) { + p.parseError(slot.StateSync0R1, p.cI, first[slot.StateSync0R1]) + break + } + + p.call(slot.StateSync0R2, cU, p.cI) + case slot.StateSync0R2: // StateSync : StateSyncAttempts SuccessSync ∙ + + if p.follow(symbols.NT_StateSync) { + p.rtn(symbols.NT_StateSync, cU, p.cI) + } else { + p.parseError(slot.StateSync0R0, p.cI, followSets[symbols.NT_StateSync]) + } + case slot.StateSync1R0: // StateSync : ∙SuccessSync + + p.call(slot.StateSync1R1, cU, p.cI) + case slot.StateSync1R1: // StateSync : SuccessSync ∙ + + if p.follow(symbols.NT_StateSync) { + p.rtn(symbols.NT_StateSync, cU, p.cI) + } else { + p.parseError(slot.StateSync1R0, p.cI, followSets[symbols.NT_StateSync]) + } + case slot.StateSyncAttempt0R0: // StateSyncAttempt : ∙OfferSnapshot ApplyChunks + + p.call(slot.StateSyncAttempt0R1, cU, p.cI) + case slot.StateSyncAttempt0R1: // StateSyncAttempt : OfferSnapshot ∙ApplyChunks + + if !p.testSelect(slot.StateSyncAttempt0R1) { + p.parseError(slot.StateSyncAttempt0R1, p.cI, first[slot.StateSyncAttempt0R1]) + break + } + + p.call(slot.StateSyncAttempt0R2, cU, p.cI) + case slot.StateSyncAttempt0R2: // StateSyncAttempt : OfferSnapshot ApplyChunks ∙ + + if p.follow(symbols.NT_StateSyncAttempt) { + p.rtn(symbols.NT_StateSyncAttempt, cU, p.cI) + } else { + p.parseError(slot.StateSyncAttempt0R0, p.cI, followSets[symbols.NT_StateSyncAttempt]) + } + case slot.StateSyncAttempt1R0: // StateSyncAttempt : ∙OfferSnapshot + + p.call(slot.StateSyncAttempt1R1, cU, p.cI) + case slot.StateSyncAttempt1R1: // StateSyncAttempt : OfferSnapshot ∙ + + if p.follow(symbols.NT_StateSyncAttempt) { + p.rtn(symbols.NT_StateSyncAttempt, cU, p.cI) + } else { + p.parseError(slot.StateSyncAttempt1R0, p.cI, followSets[symbols.NT_StateSyncAttempt]) + } + case slot.StateSyncAttempts0R0: // StateSyncAttempts : ∙StateSyncAttempt + + p.call(slot.StateSyncAttempts0R1, cU, p.cI) + case slot.StateSyncAttempts0R1: // StateSyncAttempts : StateSyncAttempt ∙ + + if p.follow(symbols.NT_StateSyncAttempts) { + p.rtn(symbols.NT_StateSyncAttempts, cU, p.cI) + } else { + 
p.parseError(slot.StateSyncAttempts0R0, p.cI, followSets[symbols.NT_StateSyncAttempts])
+		}
+	case slot.StateSyncAttempts1R0: // StateSyncAttempts : ∙StateSyncAttempt StateSyncAttempts
+
+		p.call(slot.StateSyncAttempts1R1, cU, p.cI)
+	case slot.StateSyncAttempts1R1: // StateSyncAttempts : StateSyncAttempt ∙StateSyncAttempts
+
+		if !p.testSelect(slot.StateSyncAttempts1R1) {
+			p.parseError(slot.StateSyncAttempts1R1, p.cI, first[slot.StateSyncAttempts1R1])
+			break
+		}
+
+		p.call(slot.StateSyncAttempts1R2, cU, p.cI)
+	case slot.StateSyncAttempts1R2: // StateSyncAttempts : StateSyncAttempt StateSyncAttempts ∙
+
+		if p.follow(symbols.NT_StateSyncAttempts) {
+			p.rtn(symbols.NT_StateSyncAttempts, cU, p.cI)
+		} else {
+			p.parseError(slot.StateSyncAttempts1R0, p.cI, followSets[symbols.NT_StateSyncAttempts])
+		}
+	case slot.SuccessSync0R0: // SuccessSync : ∙OfferSnapshot ApplyChunks
+
+		p.call(slot.SuccessSync0R1, cU, p.cI)
+	case slot.SuccessSync0R1: // SuccessSync : OfferSnapshot ∙ApplyChunks
+
+		if !p.testSelect(slot.SuccessSync0R1) {
+			p.parseError(slot.SuccessSync0R1, p.cI, first[slot.SuccessSync0R1])
+			break
+		}
+
+		p.call(slot.SuccessSync0R2, cU, p.cI)
+	case slot.SuccessSync0R2: // SuccessSync : OfferSnapshot ApplyChunks ∙
+
+		if p.follow(symbols.NT_SuccessSync) {
+			p.rtn(symbols.NT_SuccessSync, cU, p.cI)
+		} else {
+			p.parseError(slot.SuccessSync0R0, p.cI, followSets[symbols.NT_SuccessSync])
+		}
+
+	default:
+		panic("This must not happen")
+	}
+	}
+	if !p.bsrSet.Contain(symbols.NT_Start, 0, m) {
+		p.sortParseErrors()
+		return nil, p.parseErrors
+	}
+	return p.bsrSet, nil
+}
+
+func (p *parser) ntAdd(nt symbols.NT, j int) {
+	// fmt.Printf("p.ntAdd(%s, %d)\n", nt, j)
+	failed := true
+	expected := map[token.Type]string{}
+	for _, l := range slot.GetAlternates(nt) {
+		if p.testSelect(l) {
+			p.dscAdd(l, j, j)
+			failed = false
+		} else {
+			for k, v := range first[l] {
+				expected[k] = v
+			}
+		}
+	}
+	if failed {
+		for _, l := range slot.GetAlternates(nt) {
+			p.parseError(l, j, expected)
+		}
+	}
+}
+
+/*** Call Return Forest ***/
+
+type poppedNode struct {
+	X    symbols.NT
+	k, j int
+}
+
+type clusterNode struct {
+	X symbols.NT
+	k int
+}
+
+type crfNode struct {
+	L slot.Label
+	i int
+}
+
+/*
+Suppose that L is Y ::= α X ∙ β.
+
+If there is no CRF node labelled (L, i),
+
+	create one; let u be the CRF node labelled (L, i).
+
+if there is no CRF node labelled (X, j) {
+
+	create a CRF node v labelled (X, j)
+	create an edge from v to u
+	ntAdd(X, j)
+} else {
+
+	let v be the CRF node labelled (X, j)
+	if there is not an edge from v to u {
+		create an edge from v to u
+		for all ((X, j, h) ∈ P) {
+			dscAdd(L, i, h)
+			bsrAdd(L, i, j, h)
+		}
+	}
+}
+*/
+func (p *parser) call(L slot.Label, i, j int) {
+	// fmt.Printf("p.call(%s,%d,%d)\n", L, i, j)
+	u, exist := p.crfNodes[crfNode{L, i}]
+	// fmt.Printf("  u exist=%t\n", exist)
+	if !exist {
+		u = &crfNode{L, i}
+		p.crfNodes[*u] = u
+	}
+	X := L.Symbols()[L.Pos()-1].(symbols.NT)
+	ndV := clusterNode{X, j}
+	v, exist := p.crf[ndV]
+	if !exist {
+		// fmt.Println("  v !exist")
+		p.crf[ndV] = []*crfNode{u}
+		p.ntAdd(X, j)
+	} else {
+		// fmt.Println("  v exist")
+		if !existEdge(v, u) {
+			// fmt.Printf("  !existEdge(%v)\n", u)
+			p.crf[ndV] = append(v, u)
+			// fmt.Printf("|popped|=%d\n", len(popped))
+			for pnd := range p.popped {
+				if pnd.X == X && pnd.k == j {
+					p.dscAdd(L, i, pnd.j)
+					p.bsrSet.Add(L, i, j, pnd.j)
+				}
+			}
+		}
+	}
+}
+
+func existEdge(nds []*crfNode, nd *crfNode) bool {
+	for _, nd1 := range nds {
+		if nd1 == nd {
+			return true
+		}
+	}
+	return false
+}
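+
+// NOTE (editorial, not emitted by gogll): call above and rtn below together
+// implement the call-return forest (CRF) sketched in the comment preceding
+// call; it plays the role of the graph-structured stack in Scott &
+// Johnstone's GLL algorithm. As an illustrative sketch, when the parser sits
+// at the grammar slot
+//
+//	CleanStart : ∙InitChain ConsensusExec
+//
+// it invokes call with the return slot CleanStart : InitChain ∙ConsensusExec.
+// The nonterminal just before the dot (InitChain) is the callee X; the
+// cluster node (X, j) groups every call of X at input position j, and the
+// CRF edge back to (L, i) records where to resume. If X has already been
+// popped over some extent (X, j, h), the pending return is replayed at once
+// via dscAdd and bsrSet.Add rather than re-parsing X, which is how GLL shares
+// work across alternates and stays within cubic worst-case time.
+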
+func (p *parser) rtn(X symbols.NT, k, j int) { + // fmt.Printf("p.rtn(%s,%d,%d)\n", X,k,j) + pn := poppedNode{X, k, j} + if _, exist := p.popped[pn]; !exist { + p.popped[pn] = true + for _, nd := range p.crf[clusterNode{X, k}] { + p.dscAdd(nd.L, nd.i, j) + p.bsrSet.Add(nd.L, nd.i, k, j) + } + } +} + +// func CRFString() string { +// buf := new(bytes.Buffer) +// buf.WriteString("CRF: {") +// for cn, nds := range crf{ +// for _, nd := range nds { +// fmt.Fprintf(buf, "%s->%s, ", cn, nd) +// } +// } +// buf.WriteString("}") +// return buf.String() +// } + +func (cn clusterNode) String() string { + return fmt.Sprintf("(%s,%d)", cn.X, cn.k) +} + +func (n crfNode) String() string { + return fmt.Sprintf("(%s,%d)", n.L.String(), n.i) +} + +// func PoppedString() string { +// buf := new(bytes.Buffer) +// buf.WriteString("Popped: {") +// for p, _ := range popped { +// fmt.Fprintf(buf, "(%s,%d,%d) ", p.X, p.k, p.j) +// } +// buf.WriteString("}") +// return buf.String() +// } + +/*** descriptors ***/ + +type descriptors struct { + set []*descriptor +} + +func (ds *descriptors) contain(d *descriptor) bool { + for _, d1 := range ds.set { + if d1 == d { + return true + } + } + return false +} + +func (ds *descriptors) empty() bool { + return len(ds.set) == 0 +} + +func (ds *descriptors) String() string { + buf := new(bytes.Buffer) + buf.WriteString("{") + for i, d := range ds.set { + if i > 0 { + buf.WriteString("; ") + } + fmt.Fprintf(buf, "%s", d) + } + buf.WriteString("}") + return buf.String() +} + +type descriptor struct { + L slot.Label + k int + i int +} + +func (d *descriptor) String() string { + return fmt.Sprintf("%s,%d,%d", d.L, d.k, d.i) +} + +func (p *parser) dscAdd(L slot.Label, k, i int) { + // fmt.Printf("p.dscAdd(%s,%d,%d)\n", L, k, i) + d := &descriptor{L, k, i} + if !p.U.contain(d) { + p.R.set = append(p.R.set, d) + p.U.set = append(p.U.set, d) + } +} + +func (ds *descriptors) remove() (L slot.Label, k, i int) { + d := ds.set[len(ds.set)-1] + ds.set = ds.set[:len(ds.set)-1] + // fmt.Printf("remove: %s,%d,%d\n", d.L, d.k, d.i) + return d.L, d.k, d.i +} + +func (p *parser) DumpDescriptors() { + p.DumpR() + p.DumpU() +} + +func (p *parser) DumpR() { + fmt.Println("R:") + for _, d := range p.R.set { + fmt.Printf(" %s\n", d) + } +} + +func (p *parser) DumpU() { + fmt.Println("U:") + for _, d := range p.U.set { + fmt.Printf(" %s\n", d) + } +} + +/*** TestSelect ***/ + +func (p *parser) follow(nt symbols.NT) bool { + _, exist := followSets[nt][p.lex.Tokens[p.cI].Type()] + return exist +} + +func (p *parser) testSelect(l slot.Label) bool { + _, exist := first[l][p.lex.Tokens[p.cI].Type()] + // fmt.Printf("testSelect(%s) = %t\n", l, exist) + return exist +} + +var first = []map[token.Type]string{ + // ApplyChunk : ∙apply_snapshot_chunk + { + token.T_0: "apply_snapshot_chunk", + }, + // ApplyChunk : apply_snapshot_chunk ∙ + { + token.T_0: "apply_snapshot_chunk", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_5: "offer_snapshot", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ApplyChunks : ∙ApplyChunk + { + token.T_0: "apply_snapshot_chunk", + }, + // ApplyChunks : ApplyChunk ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_5: "offer_snapshot", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ApplyChunks : ∙ApplyChunk ApplyChunks + { + token.T_0: "apply_snapshot_chunk", + }, + // ApplyChunks : ApplyChunk ∙ApplyChunks + 
{ + token.T_0: "apply_snapshot_chunk", + }, + // ApplyChunks : ApplyChunk ApplyChunks ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_5: "offer_snapshot", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // CleanStart : ∙InitChain ConsensusExec + { + token.T_4: "init_chain", + }, + // CleanStart : InitChain ∙ConsensusExec + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // CleanStart : InitChain ConsensusExec ∙ + { + token.EOF: "$", + }, + // CleanStart : ∙StateSync ConsensusExec + { + token.T_5: "offer_snapshot", + }, + // CleanStart : StateSync ∙ConsensusExec + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // CleanStart : StateSync ConsensusExec ∙ + { + token.EOF: "$", + }, + // Commit : ∙commit + { + token.T_1: "commit", + }, + // Commit : commit ∙ + { + token.EOF: "$", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusExec : ∙ConsensusHeights + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusExec : ConsensusHeights ∙ + { + token.EOF: "$", + }, + // ConsensusHeight : ∙ConsensusRounds FinalizeBlock Commit + { + token.T_2: "extend_vote", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeight : ConsensusRounds ∙FinalizeBlock Commit + { + token.T_3: "finalize_block", + }, + // ConsensusHeight : ConsensusRounds FinalizeBlock ∙Commit + { + token.T_1: "commit", + }, + // ConsensusHeight : ConsensusRounds FinalizeBlock Commit ∙ + { + token.EOF: "$", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeight : ∙FinalizeBlock Commit + { + token.T_3: "finalize_block", + }, + // ConsensusHeight : FinalizeBlock ∙Commit + { + token.T_1: "commit", + }, + // ConsensusHeight : FinalizeBlock Commit ∙ + { + token.EOF: "$", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeights : ∙ConsensusHeight + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeights : ConsensusHeight ∙ + { + token.EOF: "$", + }, + // ConsensusHeights : ∙ConsensusHeight ConsensusHeights + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeights : ConsensusHeight ∙ConsensusHeights + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeights : ConsensusHeight ConsensusHeights ∙ + { + token.EOF: "$", + }, + // ConsensusRound : ∙Proposer + { + token.T_2: "extend_vote", + token.T_6: "prepare_proposal", + token.T_8: 
"verify_vote_extension", + }, + // ConsensusRound : Proposer ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRound : ∙NonProposer + { + token.T_2: "extend_vote", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRound : NonProposer ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRounds : ∙ConsensusRound + { + token.T_2: "extend_vote", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRounds : ConsensusRound ∙ + { + token.T_3: "finalize_block", + }, + // ConsensusRounds : ∙ConsensusRound ConsensusRounds + { + token.T_2: "extend_vote", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRounds : ConsensusRound ∙ConsensusRounds + { + token.T_2: "extend_vote", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRounds : ConsensusRound ConsensusRounds ∙ + { + token.T_3: "finalize_block", + }, + // Extend : ∙ExtendVote + { + token.T_2: "extend_vote", + }, + // Extend : ExtendVote ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Extend : ∙GotVotes ExtendVote + { + token.T_8: "verify_vote_extension", + }, + // Extend : GotVotes ∙ExtendVote + { + token.T_2: "extend_vote", + }, + // Extend : GotVotes ExtendVote ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Extend : ∙ExtendVote GotVotes + { + token.T_2: "extend_vote", + }, + // Extend : ExtendVote ∙GotVotes + { + token.T_8: "verify_vote_extension", + }, + // Extend : ExtendVote GotVotes ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Extend : ∙GotVotes ExtendVote GotVotes + { + token.T_8: "verify_vote_extension", + }, + // Extend : GotVotes ∙ExtendVote GotVotes + { + token.T_2: "extend_vote", + }, + // Extend : GotVotes ExtendVote ∙GotVotes + { + token.T_8: "verify_vote_extension", + }, + // Extend : GotVotes ExtendVote GotVotes ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ExtendVote : ∙extend_vote + { + token.T_2: "extend_vote", + }, + // ExtendVote : extend_vote ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // FinalizeBlock : ∙finalize_block + { + token.T_3: "finalize_block", + }, + // FinalizeBlock : finalize_block ∙ + { + token.T_1: "commit", + }, + // GotVote : ∙verify_vote_extension + { + token.T_8: "verify_vote_extension", + }, + // GotVote : verify_vote_extension ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // GotVotes : ∙GotVote + { + 
token.T_8: "verify_vote_extension", + }, + // GotVotes : GotVote ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // GotVotes : ∙GotVote GotVotes + { + token.T_8: "verify_vote_extension", + }, + // GotVotes : GotVote ∙GotVotes + { + token.T_8: "verify_vote_extension", + }, + // GotVotes : GotVote GotVotes ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // InitChain : ∙init_chain + { + token.T_4: "init_chain", + }, + // InitChain : init_chain ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙GotVotes + { + token.T_8: "verify_vote_extension", + }, + // NonProposer : GotVotes ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙ProcessProposal + { + token.T_7: "process_proposal", + }, + // NonProposer : ProcessProposal ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // NonProposer : Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙GotVotes ProcessProposal + { + token.T_8: "verify_vote_extension", + }, + // NonProposer : GotVotes ∙ProcessProposal + { + token.T_7: "process_proposal", + }, + // NonProposer : GotVotes ProcessProposal ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙GotVotes Extend + { + token.T_8: "verify_vote_extension", + }, + // NonProposer : GotVotes ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // NonProposer : GotVotes Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙ProcessProposal Extend + { + token.T_7: "process_proposal", + }, + // NonProposer : ProcessProposal ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ProcessProposal Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer : ∙GotVotes ProcessProposal Extend + { + token.T_8: "verify_vote_extension", + }, + // NonProposer : GotVotes ∙ProcessProposal Extend + { + token.T_7: "process_proposal", + }, + // NonProposer : GotVotes ProcessProposal ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // NonProposer : GotVotes ProcessProposal Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // OfferSnapshot : ∙offer_snapshot + { + 
token.T_5: "offer_snapshot", + }, + // OfferSnapshot : offer_snapshot ∙ + { + token.T_0: "apply_snapshot_chunk", + token.T_5: "offer_snapshot", + }, + // PrepareProposal : ∙prepare_proposal + { + token.T_6: "prepare_proposal", + }, + // PrepareProposal : prepare_proposal ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ProcessProposal : ∙process_proposal + { + token.T_7: "process_proposal", + }, + // ProcessProposal : process_proposal ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙GotVotes + { + token.T_8: "verify_vote_extension", + }, + // Proposer : GotVotes ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙ProposerSimple + { + token.T_6: "prepare_proposal", + }, + // Proposer : ProposerSimple ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // Proposer : Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙GotVotes ProposerSimple + { + token.T_8: "verify_vote_extension", + }, + // Proposer : GotVotes ∙ProposerSimple + { + token.T_6: "prepare_proposal", + }, + // Proposer : GotVotes ProposerSimple ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙GotVotes Extend + { + token.T_8: "verify_vote_extension", + }, + // Proposer : GotVotes ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // Proposer : GotVotes Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙ProposerSimple Extend + { + token.T_6: "prepare_proposal", + }, + // Proposer : ProposerSimple ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // Proposer : ProposerSimple Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer : ∙GotVotes ProposerSimple Extend + { + token.T_8: "verify_vote_extension", + }, + // Proposer : GotVotes ∙ProposerSimple Extend + { + token.T_6: "prepare_proposal", + }, + // Proposer : GotVotes ProposerSimple ∙Extend + { + token.T_2: "extend_vote", + token.T_8: "verify_vote_extension", + }, + // Proposer : GotVotes ProposerSimple Extend ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ProposerSimple : ∙PrepareProposal + { + token.T_6: "prepare_proposal", + }, + // ProposerSimple : PrepareProposal ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", 
+ token.T_8: "verify_vote_extension", + }, + // ProposerSimple : ∙PrepareProposal ProcessProposal + { + token.T_6: "prepare_proposal", + }, + // ProposerSimple : PrepareProposal ∙ProcessProposal + { + token.T_7: "process_proposal", + }, + // ProposerSimple : PrepareProposal ProcessProposal ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Recovery : ∙InitChain ConsensusExec + { + token.T_4: "init_chain", + }, + // Recovery : InitChain ∙ConsensusExec + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Recovery : InitChain ConsensusExec ∙ + { + token.EOF: "$", + }, + // Recovery : ∙ConsensusExec + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Recovery : ConsensusExec ∙ + { + token.EOF: "$", + }, + // Start : ∙CleanStart + { + token.T_4: "init_chain", + token.T_5: "offer_snapshot", + }, + // Start : CleanStart ∙ + { + token.EOF: "$", + }, + // Start : ∙Recovery + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_4: "init_chain", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Start : Recovery ∙ + { + token.EOF: "$", + }, + // StateSync : ∙StateSyncAttempts SuccessSync + { + token.T_5: "offer_snapshot", + }, + // StateSync : StateSyncAttempts ∙SuccessSync + { + token.T_5: "offer_snapshot", + }, + // StateSync : StateSyncAttempts SuccessSync ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // StateSync : ∙SuccessSync + { + token.T_5: "offer_snapshot", + }, + // StateSync : SuccessSync ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // StateSyncAttempt : ∙OfferSnapshot ApplyChunks + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempt : OfferSnapshot ∙ApplyChunks + { + token.T_0: "apply_snapshot_chunk", + }, + // StateSyncAttempt : OfferSnapshot ApplyChunks ∙ + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempt : ∙OfferSnapshot + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempt : OfferSnapshot ∙ + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempts : ∙StateSyncAttempt + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempts : StateSyncAttempt ∙ + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempts : ∙StateSyncAttempt StateSyncAttempts + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempts : StateSyncAttempt ∙StateSyncAttempts + { + token.T_5: "offer_snapshot", + }, + // StateSyncAttempts : StateSyncAttempt StateSyncAttempts ∙ + { + token.T_5: "offer_snapshot", + }, + // SuccessSync : ∙OfferSnapshot ApplyChunks + { + token.T_5: "offer_snapshot", + }, + // SuccessSync : OfferSnapshot ∙ApplyChunks + { + token.T_0: "apply_snapshot_chunk", + }, + // SuccessSync : OfferSnapshot ApplyChunks ∙ + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, +} + +var followSets = []map[token.Type]string{ + // ApplyChunk + { + 
token.T_0: "apply_snapshot_chunk", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_5: "offer_snapshot", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ApplyChunks + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_5: "offer_snapshot", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // CleanStart + { + token.EOF: "$", + }, + // Commit + { + token.EOF: "$", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusExec + { + token.EOF: "$", + }, + // ConsensusHeight + { + token.EOF: "$", + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusHeights + { + token.EOF: "$", + }, + // ConsensusRound + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ConsensusRounds + { + token.T_3: "finalize_block", + }, + // Extend + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ExtendVote + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // FinalizeBlock + { + token.T_1: "commit", + }, + // GotVote + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // GotVotes + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // InitChain + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // NonProposer + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // OfferSnapshot + { + token.T_0: "apply_snapshot_chunk", + token.T_5: "offer_snapshot", + }, + // PrepareProposal + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ProcessProposal + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Proposer + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // ProposerSimple + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, + // Recovery + { + token.EOF: "$", + }, + // Start + { + token.EOF: "$", + }, + // StateSync + { + token.T_2: "extend_vote", + token.T_3: "finalize_block", + token.T_6: "prepare_proposal", + token.T_7: "process_proposal", + token.T_8: "verify_vote_extension", + }, 
+	// StateSyncAttempt
+	{
+		token.T_5: "offer_snapshot",
+	},
+	// StateSyncAttempts
+	{
+		token.T_5: "offer_snapshot",
+	},
+	// SuccessSync
+	{
+		token.T_2: "extend_vote",
+		token.T_3: "finalize_block",
+		token.T_6: "prepare_proposal",
+		token.T_7: "process_proposal",
+		token.T_8: "verify_vote_extension",
+	},
+}
+
+/*** Errors ***/
+
+/*
+Error is returned by Parse at every point at which the parser fails to parse
+a grammar production. For non-LL-1 grammars there will be an error for each
+alternate attempted by the parser.
+
+The errors are sorted in descending order of input position (index of token in
+the stream of tokens).
+
+Normally the error of interest is the one that has parsed the largest number of
+tokens.
+*/
+type Error struct {
+	// Index of token that caused the error.
+	cI int
+
+	// Grammar slot at which the error occurred.
+	Slot slot.Label
+
+	// The token at which the error occurred.
+	Token *token.Token
+
+	// The line and column in the input text at which the error occurred.
+	Line, Column int
+
+	// The tokens expected at the point where the error occurred.
+	Expected map[token.Type]string
+}
+
+func (pe *Error) String() string {
+	w := new(bytes.Buffer)
+	fmt.Fprintf(w, "Parse Error: %s I[%d]=%s at line %d col %d\n",
+		pe.Slot, pe.cI, pe.Token, pe.Line, pe.Column)
+	exp := []string{}
+	for _, e := range pe.Expected {
+		exp = append(exp, e)
+	}
+	fmt.Fprintf(w, "Expected one of: [%s]", strings.Join(exp, ","))
+	return w.String()
+}
+
+func (p *parser) parseError(slot slot.Label, i int, expected map[token.Type]string) {
+	pe := &Error{cI: i, Slot: slot, Token: p.lex.Tokens[i], Expected: expected}
+	p.parseErrors = append(p.parseErrors, pe)
+}
+
+func (p *parser) sortParseErrors() {
+	sort.Slice(p.parseErrors,
+		func(i, j int) bool {
+			return p.parseErrors[j].Token.Lext() < p.parseErrors[i].Token.Lext()
+		})
+	for _, pe := range p.parseErrors {
+		pe.Line, pe.Column = p.lex.GetLineColumn(pe.Token.Lext())
+	}
+}
diff --git a/test/e2e/pkg/grammar/grammar-auto/parser/slot/slot.go b/test/e2e/pkg/grammar/grammar-auto/parser/slot/slot.go
new file mode 100644
index 00000000000..e5f69183944
--- /dev/null
+++ b/test/e2e/pkg/grammar/grammar-auto/parser/slot/slot.go
@@ -0,0 +1,1482 @@
+// Package slot is generated by gogll. Do not edit.
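+//
+// Editorial note, not part of the gogll output: a grammar slot is a dotted
+// position inside one alternate of a production. For example, the slot
+//
+//	Proposer : GotVotes ∙Extend
+//
+// records that GotVotes has already been matched and Extend is expected
+// next. Each Label constant below names one such position; its Slot carries
+// the head nonterminal (NT), the alternate index (Alt), the dot position
+// (Pos), and the symbols of that alternate.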
+package slot + +import ( + "bytes" + "fmt" + + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/symbols" +) + +type Label int + +const ( + ApplyChunk0R0 Label = iota + ApplyChunk0R1 + ApplyChunks0R0 + ApplyChunks0R1 + ApplyChunks1R0 + ApplyChunks1R1 + ApplyChunks1R2 + CleanStart0R0 + CleanStart0R1 + CleanStart0R2 + CleanStart1R0 + CleanStart1R1 + CleanStart1R2 + Commit0R0 + Commit0R1 + ConsensusExec0R0 + ConsensusExec0R1 + ConsensusHeight0R0 + ConsensusHeight0R1 + ConsensusHeight0R2 + ConsensusHeight0R3 + ConsensusHeight1R0 + ConsensusHeight1R1 + ConsensusHeight1R2 + ConsensusHeights0R0 + ConsensusHeights0R1 + ConsensusHeights1R0 + ConsensusHeights1R1 + ConsensusHeights1R2 + ConsensusRound0R0 + ConsensusRound0R1 + ConsensusRound1R0 + ConsensusRound1R1 + ConsensusRounds0R0 + ConsensusRounds0R1 + ConsensusRounds1R0 + ConsensusRounds1R1 + ConsensusRounds1R2 + Extend0R0 + Extend0R1 + Extend1R0 + Extend1R1 + Extend1R2 + Extend2R0 + Extend2R1 + Extend2R2 + Extend3R0 + Extend3R1 + Extend3R2 + Extend3R3 + ExtendVote0R0 + ExtendVote0R1 + FinalizeBlock0R0 + FinalizeBlock0R1 + GotVote0R0 + GotVote0R1 + GotVotes0R0 + GotVotes0R1 + GotVotes1R0 + GotVotes1R1 + GotVotes1R2 + InitChain0R0 + InitChain0R1 + NonProposer0R0 + NonProposer0R1 + NonProposer1R0 + NonProposer1R1 + NonProposer2R0 + NonProposer2R1 + NonProposer3R0 + NonProposer3R1 + NonProposer3R2 + NonProposer4R0 + NonProposer4R1 + NonProposer4R2 + NonProposer5R0 + NonProposer5R1 + NonProposer5R2 + NonProposer6R0 + NonProposer6R1 + NonProposer6R2 + NonProposer6R3 + OfferSnapshot0R0 + OfferSnapshot0R1 + PrepareProposal0R0 + PrepareProposal0R1 + ProcessProposal0R0 + ProcessProposal0R1 + Proposer0R0 + Proposer0R1 + Proposer1R0 + Proposer1R1 + Proposer2R0 + Proposer2R1 + Proposer3R0 + Proposer3R1 + Proposer3R2 + Proposer4R0 + Proposer4R1 + Proposer4R2 + Proposer5R0 + Proposer5R1 + Proposer5R2 + Proposer6R0 + Proposer6R1 + Proposer6R2 + Proposer6R3 + ProposerSimple0R0 + ProposerSimple0R1 + ProposerSimple1R0 + ProposerSimple1R1 + ProposerSimple1R2 + Recovery0R0 + Recovery0R1 + Recovery0R2 + Recovery1R0 + Recovery1R1 + Start0R0 + Start0R1 + Start1R0 + Start1R1 + StateSync0R0 + StateSync0R1 + StateSync0R2 + StateSync1R0 + StateSync1R1 + StateSyncAttempt0R0 + StateSyncAttempt0R1 + StateSyncAttempt0R2 + StateSyncAttempt1R0 + StateSyncAttempt1R1 + StateSyncAttempts0R0 + StateSyncAttempts0R1 + StateSyncAttempts1R0 + StateSyncAttempts1R1 + StateSyncAttempts1R2 + SuccessSync0R0 + SuccessSync0R1 + SuccessSync0R2 +) + +type Slot struct { + NT symbols.NT + Alt int + Pos int + Symbols symbols.Symbols + Label Label +} + +type Index struct { + NT symbols.NT + Alt int + Pos int +} + +func GetAlternates(nt symbols.NT) []Label { + alts, exist := alternates[nt] + if !exist { + panic(fmt.Sprintf("Invalid NT %s", nt)) + } + return alts +} + +func GetLabel(nt symbols.NT, alt, pos int) Label { + l, exist := slotIndex[Index{nt, alt, pos}] + if exist { + return l + } + panic(fmt.Sprintf("Error: no slot label for NT=%s, alt=%d, pos=%d", nt, alt, pos)) +} + +func (l Label) EoR() bool { + return l.Slot().EoR() +} + +func (l Label) Head() symbols.NT { + return l.Slot().NT +} + +func (l Label) Index() Index { + s := l.Slot() + return Index{s.NT, s.Alt, s.Pos} +} + +func (l Label) Alternate() int { + return l.Slot().Alt +} + +func (l Label) Pos() int { + return l.Slot().Pos +} + +func (l Label) Slot() *Slot { + s, exist := slots[l] + if !exist { + panic(fmt.Sprintf("Invalid slot label %d", l)) + } + return s +} + +func (l Label) String() string { + return 
l.Slot().String() +} + +func (l Label) Symbols() symbols.Symbols { + return l.Slot().Symbols +} + +func (s *Slot) EoR() bool { + return s.Pos >= len(s.Symbols) +} + +func (s *Slot) String() string { + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "%s : ", s.NT) + for i, sym := range s.Symbols { + if i == s.Pos { + fmt.Fprintf(buf, "∙") + } + fmt.Fprintf(buf, "%s ", sym) + } + if s.Pos >= len(s.Symbols) { + fmt.Fprintf(buf, "∙") + } + return buf.String() +} + +var slots = map[Label]*Slot{ + ApplyChunk0R0: { + symbols.NT_ApplyChunk, 0, 0, + symbols.Symbols{ + symbols.T_0, + }, + ApplyChunk0R0, + }, + ApplyChunk0R1: { + symbols.NT_ApplyChunk, 0, 1, + symbols.Symbols{ + symbols.T_0, + }, + ApplyChunk0R1, + }, + ApplyChunks0R0: { + symbols.NT_ApplyChunks, 0, 0, + symbols.Symbols{ + symbols.NT_ApplyChunk, + }, + ApplyChunks0R0, + }, + ApplyChunks0R1: { + symbols.NT_ApplyChunks, 0, 1, + symbols.Symbols{ + symbols.NT_ApplyChunk, + }, + ApplyChunks0R1, + }, + ApplyChunks1R0: { + symbols.NT_ApplyChunks, 1, 0, + symbols.Symbols{ + symbols.NT_ApplyChunk, + symbols.NT_ApplyChunks, + }, + ApplyChunks1R0, + }, + ApplyChunks1R1: { + symbols.NT_ApplyChunks, 1, 1, + symbols.Symbols{ + symbols.NT_ApplyChunk, + symbols.NT_ApplyChunks, + }, + ApplyChunks1R1, + }, + ApplyChunks1R2: { + symbols.NT_ApplyChunks, 1, 2, + symbols.Symbols{ + symbols.NT_ApplyChunk, + symbols.NT_ApplyChunks, + }, + ApplyChunks1R2, + }, + CleanStart0R0: { + symbols.NT_CleanStart, 0, 0, + symbols.Symbols{ + symbols.NT_InitChain, + symbols.NT_ConsensusExec, + }, + CleanStart0R0, + }, + CleanStart0R1: { + symbols.NT_CleanStart, 0, 1, + symbols.Symbols{ + symbols.NT_InitChain, + symbols.NT_ConsensusExec, + }, + CleanStart0R1, + }, + CleanStart0R2: { + symbols.NT_CleanStart, 0, 2, + symbols.Symbols{ + symbols.NT_InitChain, + symbols.NT_ConsensusExec, + }, + CleanStart0R2, + }, + CleanStart1R0: { + symbols.NT_CleanStart, 1, 0, + symbols.Symbols{ + symbols.NT_StateSync, + symbols.NT_ConsensusExec, + }, + CleanStart1R0, + }, + CleanStart1R1: { + symbols.NT_CleanStart, 1, 1, + symbols.Symbols{ + symbols.NT_StateSync, + symbols.NT_ConsensusExec, + }, + CleanStart1R1, + }, + CleanStart1R2: { + symbols.NT_CleanStart, 1, 2, + symbols.Symbols{ + symbols.NT_StateSync, + symbols.NT_ConsensusExec, + }, + CleanStart1R2, + }, + Commit0R0: { + symbols.NT_Commit, 0, 0, + symbols.Symbols{ + symbols.T_1, + }, + Commit0R0, + }, + Commit0R1: { + symbols.NT_Commit, 0, 1, + symbols.Symbols{ + symbols.T_1, + }, + Commit0R1, + }, + ConsensusExec0R0: { + symbols.NT_ConsensusExec, 0, 0, + symbols.Symbols{ + symbols.NT_ConsensusHeights, + }, + ConsensusExec0R0, + }, + ConsensusExec0R1: { + symbols.NT_ConsensusExec, 0, 1, + symbols.Symbols{ + symbols.NT_ConsensusHeights, + }, + ConsensusExec0R1, + }, + ConsensusHeight0R0: { + symbols.NT_ConsensusHeight, 0, 0, + symbols.Symbols{ + symbols.NT_ConsensusRounds, + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight0R0, + }, + ConsensusHeight0R1: { + symbols.NT_ConsensusHeight, 0, 1, + symbols.Symbols{ + symbols.NT_ConsensusRounds, + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight0R1, + }, + ConsensusHeight0R2: { + symbols.NT_ConsensusHeight, 0, 2, + symbols.Symbols{ + symbols.NT_ConsensusRounds, + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight0R2, + }, + ConsensusHeight0R3: { + symbols.NT_ConsensusHeight, 0, 3, + symbols.Symbols{ + symbols.NT_ConsensusRounds, + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight0R3, + }, + ConsensusHeight1R0: { + 
symbols.NT_ConsensusHeight, 1, 0, + symbols.Symbols{ + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight1R0, + }, + ConsensusHeight1R1: { + symbols.NT_ConsensusHeight, 1, 1, + symbols.Symbols{ + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight1R1, + }, + ConsensusHeight1R2: { + symbols.NT_ConsensusHeight, 1, 2, + symbols.Symbols{ + symbols.NT_FinalizeBlock, + symbols.NT_Commit, + }, + ConsensusHeight1R2, + }, + ConsensusHeights0R0: { + symbols.NT_ConsensusHeights, 0, 0, + symbols.Symbols{ + symbols.NT_ConsensusHeight, + }, + ConsensusHeights0R0, + }, + ConsensusHeights0R1: { + symbols.NT_ConsensusHeights, 0, 1, + symbols.Symbols{ + symbols.NT_ConsensusHeight, + }, + ConsensusHeights0R1, + }, + ConsensusHeights1R0: { + symbols.NT_ConsensusHeights, 1, 0, + symbols.Symbols{ + symbols.NT_ConsensusHeight, + symbols.NT_ConsensusHeights, + }, + ConsensusHeights1R0, + }, + ConsensusHeights1R1: { + symbols.NT_ConsensusHeights, 1, 1, + symbols.Symbols{ + symbols.NT_ConsensusHeight, + symbols.NT_ConsensusHeights, + }, + ConsensusHeights1R1, + }, + ConsensusHeights1R2: { + symbols.NT_ConsensusHeights, 1, 2, + symbols.Symbols{ + symbols.NT_ConsensusHeight, + symbols.NT_ConsensusHeights, + }, + ConsensusHeights1R2, + }, + ConsensusRound0R0: { + symbols.NT_ConsensusRound, 0, 0, + symbols.Symbols{ + symbols.NT_Proposer, + }, + ConsensusRound0R0, + }, + ConsensusRound0R1: { + symbols.NT_ConsensusRound, 0, 1, + symbols.Symbols{ + symbols.NT_Proposer, + }, + ConsensusRound0R1, + }, + ConsensusRound1R0: { + symbols.NT_ConsensusRound, 1, 0, + symbols.Symbols{ + symbols.NT_NonProposer, + }, + ConsensusRound1R0, + }, + ConsensusRound1R1: { + symbols.NT_ConsensusRound, 1, 1, + symbols.Symbols{ + symbols.NT_NonProposer, + }, + ConsensusRound1R1, + }, + ConsensusRounds0R0: { + symbols.NT_ConsensusRounds, 0, 0, + symbols.Symbols{ + symbols.NT_ConsensusRound, + }, + ConsensusRounds0R0, + }, + ConsensusRounds0R1: { + symbols.NT_ConsensusRounds, 0, 1, + symbols.Symbols{ + symbols.NT_ConsensusRound, + }, + ConsensusRounds0R1, + }, + ConsensusRounds1R0: { + symbols.NT_ConsensusRounds, 1, 0, + symbols.Symbols{ + symbols.NT_ConsensusRound, + symbols.NT_ConsensusRounds, + }, + ConsensusRounds1R0, + }, + ConsensusRounds1R1: { + symbols.NT_ConsensusRounds, 1, 1, + symbols.Symbols{ + symbols.NT_ConsensusRound, + symbols.NT_ConsensusRounds, + }, + ConsensusRounds1R1, + }, + ConsensusRounds1R2: { + symbols.NT_ConsensusRounds, 1, 2, + symbols.Symbols{ + symbols.NT_ConsensusRound, + symbols.NT_ConsensusRounds, + }, + ConsensusRounds1R2, + }, + Extend0R0: { + symbols.NT_Extend, 0, 0, + symbols.Symbols{ + symbols.NT_ExtendVote, + }, + Extend0R0, + }, + Extend0R1: { + symbols.NT_Extend, 0, 1, + symbols.Symbols{ + symbols.NT_ExtendVote, + }, + Extend0R1, + }, + Extend1R0: { + symbols.NT_Extend, 1, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + }, + Extend1R0, + }, + Extend1R1: { + symbols.NT_Extend, 1, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + }, + Extend1R1, + }, + Extend1R2: { + symbols.NT_Extend, 1, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + }, + Extend1R2, + }, + Extend2R0: { + symbols.NT_Extend, 2, 0, + symbols.Symbols{ + symbols.NT_ExtendVote, + symbols.NT_GotVotes, + }, + Extend2R0, + }, + Extend2R1: { + symbols.NT_Extend, 2, 1, + symbols.Symbols{ + symbols.NT_ExtendVote, + symbols.NT_GotVotes, + }, + Extend2R1, + }, + Extend2R2: { + symbols.NT_Extend, 2, 2, + symbols.Symbols{ + symbols.NT_ExtendVote, + 
symbols.NT_GotVotes, + }, + Extend2R2, + }, + Extend3R0: { + symbols.NT_Extend, 3, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + symbols.NT_GotVotes, + }, + Extend3R0, + }, + Extend3R1: { + symbols.NT_Extend, 3, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + symbols.NT_GotVotes, + }, + Extend3R1, + }, + Extend3R2: { + symbols.NT_Extend, 3, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + symbols.NT_GotVotes, + }, + Extend3R2, + }, + Extend3R3: { + symbols.NT_Extend, 3, 3, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ExtendVote, + symbols.NT_GotVotes, + }, + Extend3R3, + }, + ExtendVote0R0: { + symbols.NT_ExtendVote, 0, 0, + symbols.Symbols{ + symbols.T_2, + }, + ExtendVote0R0, + }, + ExtendVote0R1: { + symbols.NT_ExtendVote, 0, 1, + symbols.Symbols{ + symbols.T_2, + }, + ExtendVote0R1, + }, + FinalizeBlock0R0: { + symbols.NT_FinalizeBlock, 0, 0, + symbols.Symbols{ + symbols.T_3, + }, + FinalizeBlock0R0, + }, + FinalizeBlock0R1: { + symbols.NT_FinalizeBlock, 0, 1, + symbols.Symbols{ + symbols.T_3, + }, + FinalizeBlock0R1, + }, + GotVote0R0: { + symbols.NT_GotVote, 0, 0, + symbols.Symbols{ + symbols.T_8, + }, + GotVote0R0, + }, + GotVote0R1: { + symbols.NT_GotVote, 0, 1, + symbols.Symbols{ + symbols.T_8, + }, + GotVote0R1, + }, + GotVotes0R0: { + symbols.NT_GotVotes, 0, 0, + symbols.Symbols{ + symbols.NT_GotVote, + }, + GotVotes0R0, + }, + GotVotes0R1: { + symbols.NT_GotVotes, 0, 1, + symbols.Symbols{ + symbols.NT_GotVote, + }, + GotVotes0R1, + }, + GotVotes1R0: { + symbols.NT_GotVotes, 1, 0, + symbols.Symbols{ + symbols.NT_GotVote, + symbols.NT_GotVotes, + }, + GotVotes1R0, + }, + GotVotes1R1: { + symbols.NT_GotVotes, 1, 1, + symbols.Symbols{ + symbols.NT_GotVote, + symbols.NT_GotVotes, + }, + GotVotes1R1, + }, + GotVotes1R2: { + symbols.NT_GotVotes, 1, 2, + symbols.Symbols{ + symbols.NT_GotVote, + symbols.NT_GotVotes, + }, + GotVotes1R2, + }, + InitChain0R0: { + symbols.NT_InitChain, 0, 0, + symbols.Symbols{ + symbols.T_4, + }, + InitChain0R0, + }, + InitChain0R1: { + symbols.NT_InitChain, 0, 1, + symbols.Symbols{ + symbols.T_4, + }, + InitChain0R1, + }, + NonProposer0R0: { + symbols.NT_NonProposer, 0, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + }, + NonProposer0R0, + }, + NonProposer0R1: { + symbols.NT_NonProposer, 0, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + }, + NonProposer0R1, + }, + NonProposer1R0: { + symbols.NT_NonProposer, 1, 0, + symbols.Symbols{ + symbols.NT_ProcessProposal, + }, + NonProposer1R0, + }, + NonProposer1R1: { + symbols.NT_NonProposer, 1, 1, + symbols.Symbols{ + symbols.NT_ProcessProposal, + }, + NonProposer1R1, + }, + NonProposer2R0: { + symbols.NT_NonProposer, 2, 0, + symbols.Symbols{ + symbols.NT_Extend, + }, + NonProposer2R0, + }, + NonProposer2R1: { + symbols.NT_NonProposer, 2, 1, + symbols.Symbols{ + symbols.NT_Extend, + }, + NonProposer2R1, + }, + NonProposer3R0: { + symbols.NT_NonProposer, 3, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + }, + NonProposer3R0, + }, + NonProposer3R1: { + symbols.NT_NonProposer, 3, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + }, + NonProposer3R1, + }, + NonProposer3R2: { + symbols.NT_NonProposer, 3, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + }, + NonProposer3R2, + }, + NonProposer4R0: { + symbols.NT_NonProposer, 4, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_Extend, + }, + NonProposer4R0, + }, + NonProposer4R1: { + 
symbols.NT_NonProposer, 4, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_Extend, + }, + NonProposer4R1, + }, + NonProposer4R2: { + symbols.NT_NonProposer, 4, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_Extend, + }, + NonProposer4R2, + }, + NonProposer5R0: { + symbols.NT_NonProposer, 5, 0, + symbols.Symbols{ + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer5R0, + }, + NonProposer5R1: { + symbols.NT_NonProposer, 5, 1, + symbols.Symbols{ + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer5R1, + }, + NonProposer5R2: { + symbols.NT_NonProposer, 5, 2, + symbols.Symbols{ + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer5R2, + }, + NonProposer6R0: { + symbols.NT_NonProposer, 6, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer6R0, + }, + NonProposer6R1: { + symbols.NT_NonProposer, 6, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer6R1, + }, + NonProposer6R2: { + symbols.NT_NonProposer, 6, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer6R2, + }, + NonProposer6R3: { + symbols.NT_NonProposer, 6, 3, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProcessProposal, + symbols.NT_Extend, + }, + NonProposer6R3, + }, + OfferSnapshot0R0: { + symbols.NT_OfferSnapshot, 0, 0, + symbols.Symbols{ + symbols.T_5, + }, + OfferSnapshot0R0, + }, + OfferSnapshot0R1: { + symbols.NT_OfferSnapshot, 0, 1, + symbols.Symbols{ + symbols.T_5, + }, + OfferSnapshot0R1, + }, + PrepareProposal0R0: { + symbols.NT_PrepareProposal, 0, 0, + symbols.Symbols{ + symbols.T_6, + }, + PrepareProposal0R0, + }, + PrepareProposal0R1: { + symbols.NT_PrepareProposal, 0, 1, + symbols.Symbols{ + symbols.T_6, + }, + PrepareProposal0R1, + }, + ProcessProposal0R0: { + symbols.NT_ProcessProposal, 0, 0, + symbols.Symbols{ + symbols.T_7, + }, + ProcessProposal0R0, + }, + ProcessProposal0R1: { + symbols.NT_ProcessProposal, 0, 1, + symbols.Symbols{ + symbols.T_7, + }, + ProcessProposal0R1, + }, + Proposer0R0: { + symbols.NT_Proposer, 0, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + }, + Proposer0R0, + }, + Proposer0R1: { + symbols.NT_Proposer, 0, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + }, + Proposer0R1, + }, + Proposer1R0: { + symbols.NT_Proposer, 1, 0, + symbols.Symbols{ + symbols.NT_ProposerSimple, + }, + Proposer1R0, + }, + Proposer1R1: { + symbols.NT_Proposer, 1, 1, + symbols.Symbols{ + symbols.NT_ProposerSimple, + }, + Proposer1R1, + }, + Proposer2R0: { + symbols.NT_Proposer, 2, 0, + symbols.Symbols{ + symbols.NT_Extend, + }, + Proposer2R0, + }, + Proposer2R1: { + symbols.NT_Proposer, 2, 1, + symbols.Symbols{ + symbols.NT_Extend, + }, + Proposer2R1, + }, + Proposer3R0: { + symbols.NT_Proposer, 3, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + }, + Proposer3R0, + }, + Proposer3R1: { + symbols.NT_Proposer, 3, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + }, + Proposer3R1, + }, + Proposer3R2: { + symbols.NT_Proposer, 3, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + }, + Proposer3R2, + }, + Proposer4R0: { + symbols.NT_Proposer, 4, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_Extend, + }, + Proposer4R0, + }, + Proposer4R1: { + symbols.NT_Proposer, 4, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_Extend, + }, + Proposer4R1, + }, + Proposer4R2: { + 
symbols.NT_Proposer, 4, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_Extend, + }, + Proposer4R2, + }, + Proposer5R0: { + symbols.NT_Proposer, 5, 0, + symbols.Symbols{ + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer5R0, + }, + Proposer5R1: { + symbols.NT_Proposer, 5, 1, + symbols.Symbols{ + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer5R1, + }, + Proposer5R2: { + symbols.NT_Proposer, 5, 2, + symbols.Symbols{ + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer5R2, + }, + Proposer6R0: { + symbols.NT_Proposer, 6, 0, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer6R0, + }, + Proposer6R1: { + symbols.NT_Proposer, 6, 1, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer6R1, + }, + Proposer6R2: { + symbols.NT_Proposer, 6, 2, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer6R2, + }, + Proposer6R3: { + symbols.NT_Proposer, 6, 3, + symbols.Symbols{ + symbols.NT_GotVotes, + symbols.NT_ProposerSimple, + symbols.NT_Extend, + }, + Proposer6R3, + }, + ProposerSimple0R0: { + symbols.NT_ProposerSimple, 0, 0, + symbols.Symbols{ + symbols.NT_PrepareProposal, + }, + ProposerSimple0R0, + }, + ProposerSimple0R1: { + symbols.NT_ProposerSimple, 0, 1, + symbols.Symbols{ + symbols.NT_PrepareProposal, + }, + ProposerSimple0R1, + }, + ProposerSimple1R0: { + symbols.NT_ProposerSimple, 1, 0, + symbols.Symbols{ + symbols.NT_PrepareProposal, + symbols.NT_ProcessProposal, + }, + ProposerSimple1R0, + }, + ProposerSimple1R1: { + symbols.NT_ProposerSimple, 1, 1, + symbols.Symbols{ + symbols.NT_PrepareProposal, + symbols.NT_ProcessProposal, + }, + ProposerSimple1R1, + }, + ProposerSimple1R2: { + symbols.NT_ProposerSimple, 1, 2, + symbols.Symbols{ + symbols.NT_PrepareProposal, + symbols.NT_ProcessProposal, + }, + ProposerSimple1R2, + }, + Recovery0R0: { + symbols.NT_Recovery, 0, 0, + symbols.Symbols{ + symbols.NT_InitChain, + symbols.NT_ConsensusExec, + }, + Recovery0R0, + }, + Recovery0R1: { + symbols.NT_Recovery, 0, 1, + symbols.Symbols{ + symbols.NT_InitChain, + symbols.NT_ConsensusExec, + }, + Recovery0R1, + }, + Recovery0R2: { + symbols.NT_Recovery, 0, 2, + symbols.Symbols{ + symbols.NT_InitChain, + symbols.NT_ConsensusExec, + }, + Recovery0R2, + }, + Recovery1R0: { + symbols.NT_Recovery, 1, 0, + symbols.Symbols{ + symbols.NT_ConsensusExec, + }, + Recovery1R0, + }, + Recovery1R1: { + symbols.NT_Recovery, 1, 1, + symbols.Symbols{ + symbols.NT_ConsensusExec, + }, + Recovery1R1, + }, + Start0R0: { + symbols.NT_Start, 0, 0, + symbols.Symbols{ + symbols.NT_CleanStart, + }, + Start0R0, + }, + Start0R1: { + symbols.NT_Start, 0, 1, + symbols.Symbols{ + symbols.NT_CleanStart, + }, + Start0R1, + }, + Start1R0: { + symbols.NT_Start, 1, 0, + symbols.Symbols{ + symbols.NT_Recovery, + }, + Start1R0, + }, + Start1R1: { + symbols.NT_Start, 1, 1, + symbols.Symbols{ + symbols.NT_Recovery, + }, + Start1R1, + }, + StateSync0R0: { + symbols.NT_StateSync, 0, 0, + symbols.Symbols{ + symbols.NT_StateSyncAttempts, + symbols.NT_SuccessSync, + }, + StateSync0R0, + }, + StateSync0R1: { + symbols.NT_StateSync, 0, 1, + symbols.Symbols{ + symbols.NT_StateSyncAttempts, + symbols.NT_SuccessSync, + }, + StateSync0R1, + }, + StateSync0R2: { + symbols.NT_StateSync, 0, 2, + symbols.Symbols{ + symbols.NT_StateSyncAttempts, + symbols.NT_SuccessSync, + }, + StateSync0R2, + }, + StateSync1R0: { + symbols.NT_StateSync, 1, 0, + 
symbols.Symbols{ + symbols.NT_SuccessSync, + }, + StateSync1R0, + }, + StateSync1R1: { + symbols.NT_StateSync, 1, 1, + symbols.Symbols{ + symbols.NT_SuccessSync, + }, + StateSync1R1, + }, + StateSyncAttempt0R0: { + symbols.NT_StateSyncAttempt, 0, 0, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + symbols.NT_ApplyChunks, + }, + StateSyncAttempt0R0, + }, + StateSyncAttempt0R1: { + symbols.NT_StateSyncAttempt, 0, 1, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + symbols.NT_ApplyChunks, + }, + StateSyncAttempt0R1, + }, + StateSyncAttempt0R2: { + symbols.NT_StateSyncAttempt, 0, 2, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + symbols.NT_ApplyChunks, + }, + StateSyncAttempt0R2, + }, + StateSyncAttempt1R0: { + symbols.NT_StateSyncAttempt, 1, 0, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + }, + StateSyncAttempt1R0, + }, + StateSyncAttempt1R1: { + symbols.NT_StateSyncAttempt, 1, 1, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + }, + StateSyncAttempt1R1, + }, + StateSyncAttempts0R0: { + symbols.NT_StateSyncAttempts, 0, 0, + symbols.Symbols{ + symbols.NT_StateSyncAttempt, + }, + StateSyncAttempts0R0, + }, + StateSyncAttempts0R1: { + symbols.NT_StateSyncAttempts, 0, 1, + symbols.Symbols{ + symbols.NT_StateSyncAttempt, + }, + StateSyncAttempts0R1, + }, + StateSyncAttempts1R0: { + symbols.NT_StateSyncAttempts, 1, 0, + symbols.Symbols{ + symbols.NT_StateSyncAttempt, + symbols.NT_StateSyncAttempts, + }, + StateSyncAttempts1R0, + }, + StateSyncAttempts1R1: { + symbols.NT_StateSyncAttempts, 1, 1, + symbols.Symbols{ + symbols.NT_StateSyncAttempt, + symbols.NT_StateSyncAttempts, + }, + StateSyncAttempts1R1, + }, + StateSyncAttempts1R2: { + symbols.NT_StateSyncAttempts, 1, 2, + symbols.Symbols{ + symbols.NT_StateSyncAttempt, + symbols.NT_StateSyncAttempts, + }, + StateSyncAttempts1R2, + }, + SuccessSync0R0: { + symbols.NT_SuccessSync, 0, 0, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + symbols.NT_ApplyChunks, + }, + SuccessSync0R0, + }, + SuccessSync0R1: { + symbols.NT_SuccessSync, 0, 1, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + symbols.NT_ApplyChunks, + }, + SuccessSync0R1, + }, + SuccessSync0R2: { + symbols.NT_SuccessSync, 0, 2, + symbols.Symbols{ + symbols.NT_OfferSnapshot, + symbols.NT_ApplyChunks, + }, + SuccessSync0R2, + }, +} + +var slotIndex = map[Index]Label{ + {symbols.NT_ApplyChunk, 0, 0}: ApplyChunk0R0, + {symbols.NT_ApplyChunk, 0, 1}: ApplyChunk0R1, + {symbols.NT_ApplyChunks, 0, 0}: ApplyChunks0R0, + {symbols.NT_ApplyChunks, 0, 1}: ApplyChunks0R1, + {symbols.NT_ApplyChunks, 1, 0}: ApplyChunks1R0, + {symbols.NT_ApplyChunks, 1, 1}: ApplyChunks1R1, + {symbols.NT_ApplyChunks, 1, 2}: ApplyChunks1R2, + {symbols.NT_CleanStart, 0, 0}: CleanStart0R0, + {symbols.NT_CleanStart, 0, 1}: CleanStart0R1, + {symbols.NT_CleanStart, 0, 2}: CleanStart0R2, + {symbols.NT_CleanStart, 1, 0}: CleanStart1R0, + {symbols.NT_CleanStart, 1, 1}: CleanStart1R1, + {symbols.NT_CleanStart, 1, 2}: CleanStart1R2, + {symbols.NT_Commit, 0, 0}: Commit0R0, + {symbols.NT_Commit, 0, 1}: Commit0R1, + {symbols.NT_ConsensusExec, 0, 0}: ConsensusExec0R0, + {symbols.NT_ConsensusExec, 0, 1}: ConsensusExec0R1, + {symbols.NT_ConsensusHeight, 0, 0}: ConsensusHeight0R0, + {symbols.NT_ConsensusHeight, 0, 1}: ConsensusHeight0R1, + {symbols.NT_ConsensusHeight, 0, 2}: ConsensusHeight0R2, + {symbols.NT_ConsensusHeight, 0, 3}: ConsensusHeight0R3, + {symbols.NT_ConsensusHeight, 1, 0}: ConsensusHeight1R0, + {symbols.NT_ConsensusHeight, 1, 1}: ConsensusHeight1R1, + {symbols.NT_ConsensusHeight, 1, 2}: ConsensusHeight1R2, + 
{symbols.NT_ConsensusHeights, 0, 0}: ConsensusHeights0R0, + {symbols.NT_ConsensusHeights, 0, 1}: ConsensusHeights0R1, + {symbols.NT_ConsensusHeights, 1, 0}: ConsensusHeights1R0, + {symbols.NT_ConsensusHeights, 1, 1}: ConsensusHeights1R1, + {symbols.NT_ConsensusHeights, 1, 2}: ConsensusHeights1R2, + {symbols.NT_ConsensusRound, 0, 0}: ConsensusRound0R0, + {symbols.NT_ConsensusRound, 0, 1}: ConsensusRound0R1, + {symbols.NT_ConsensusRound, 1, 0}: ConsensusRound1R0, + {symbols.NT_ConsensusRound, 1, 1}: ConsensusRound1R1, + {symbols.NT_ConsensusRounds, 0, 0}: ConsensusRounds0R0, + {symbols.NT_ConsensusRounds, 0, 1}: ConsensusRounds0R1, + {symbols.NT_ConsensusRounds, 1, 0}: ConsensusRounds1R0, + {symbols.NT_ConsensusRounds, 1, 1}: ConsensusRounds1R1, + {symbols.NT_ConsensusRounds, 1, 2}: ConsensusRounds1R2, + {symbols.NT_Extend, 0, 0}: Extend0R0, + {symbols.NT_Extend, 0, 1}: Extend0R1, + {symbols.NT_Extend, 1, 0}: Extend1R0, + {symbols.NT_Extend, 1, 1}: Extend1R1, + {symbols.NT_Extend, 1, 2}: Extend1R2, + {symbols.NT_Extend, 2, 0}: Extend2R0, + {symbols.NT_Extend, 2, 1}: Extend2R1, + {symbols.NT_Extend, 2, 2}: Extend2R2, + {symbols.NT_Extend, 3, 0}: Extend3R0, + {symbols.NT_Extend, 3, 1}: Extend3R1, + {symbols.NT_Extend, 3, 2}: Extend3R2, + {symbols.NT_Extend, 3, 3}: Extend3R3, + {symbols.NT_ExtendVote, 0, 0}: ExtendVote0R0, + {symbols.NT_ExtendVote, 0, 1}: ExtendVote0R1, + {symbols.NT_FinalizeBlock, 0, 0}: FinalizeBlock0R0, + {symbols.NT_FinalizeBlock, 0, 1}: FinalizeBlock0R1, + {symbols.NT_GotVote, 0, 0}: GotVote0R0, + {symbols.NT_GotVote, 0, 1}: GotVote0R1, + {symbols.NT_GotVotes, 0, 0}: GotVotes0R0, + {symbols.NT_GotVotes, 0, 1}: GotVotes0R1, + {symbols.NT_GotVotes, 1, 0}: GotVotes1R0, + {symbols.NT_GotVotes, 1, 1}: GotVotes1R1, + {symbols.NT_GotVotes, 1, 2}: GotVotes1R2, + {symbols.NT_InitChain, 0, 0}: InitChain0R0, + {symbols.NT_InitChain, 0, 1}: InitChain0R1, + {symbols.NT_NonProposer, 0, 0}: NonProposer0R0, + {symbols.NT_NonProposer, 0, 1}: NonProposer0R1, + {symbols.NT_NonProposer, 1, 0}: NonProposer1R0, + {symbols.NT_NonProposer, 1, 1}: NonProposer1R1, + {symbols.NT_NonProposer, 2, 0}: NonProposer2R0, + {symbols.NT_NonProposer, 2, 1}: NonProposer2R1, + {symbols.NT_NonProposer, 3, 0}: NonProposer3R0, + {symbols.NT_NonProposer, 3, 1}: NonProposer3R1, + {symbols.NT_NonProposer, 3, 2}: NonProposer3R2, + {symbols.NT_NonProposer, 4, 0}: NonProposer4R0, + {symbols.NT_NonProposer, 4, 1}: NonProposer4R1, + {symbols.NT_NonProposer, 4, 2}: NonProposer4R2, + {symbols.NT_NonProposer, 5, 0}: NonProposer5R0, + {symbols.NT_NonProposer, 5, 1}: NonProposer5R1, + {symbols.NT_NonProposer, 5, 2}: NonProposer5R2, + {symbols.NT_NonProposer, 6, 0}: NonProposer6R0, + {symbols.NT_NonProposer, 6, 1}: NonProposer6R1, + {symbols.NT_NonProposer, 6, 2}: NonProposer6R2, + {symbols.NT_NonProposer, 6, 3}: NonProposer6R3, + {symbols.NT_OfferSnapshot, 0, 0}: OfferSnapshot0R0, + {symbols.NT_OfferSnapshot, 0, 1}: OfferSnapshot0R1, + {symbols.NT_PrepareProposal, 0, 0}: PrepareProposal0R0, + {symbols.NT_PrepareProposal, 0, 1}: PrepareProposal0R1, + {symbols.NT_ProcessProposal, 0, 0}: ProcessProposal0R0, + {symbols.NT_ProcessProposal, 0, 1}: ProcessProposal0R1, + {symbols.NT_Proposer, 0, 0}: Proposer0R0, + {symbols.NT_Proposer, 0, 1}: Proposer0R1, + {symbols.NT_Proposer, 1, 0}: Proposer1R0, + {symbols.NT_Proposer, 1, 1}: Proposer1R1, + {symbols.NT_Proposer, 2, 0}: Proposer2R0, + {symbols.NT_Proposer, 2, 1}: Proposer2R1, + {symbols.NT_Proposer, 3, 0}: Proposer3R0, + {symbols.NT_Proposer, 3, 1}: Proposer3R1, + 
{symbols.NT_Proposer, 3, 2}: Proposer3R2, + {symbols.NT_Proposer, 4, 0}: Proposer4R0, + {symbols.NT_Proposer, 4, 1}: Proposer4R1, + {symbols.NT_Proposer, 4, 2}: Proposer4R2, + {symbols.NT_Proposer, 5, 0}: Proposer5R0, + {symbols.NT_Proposer, 5, 1}: Proposer5R1, + {symbols.NT_Proposer, 5, 2}: Proposer5R2, + {symbols.NT_Proposer, 6, 0}: Proposer6R0, + {symbols.NT_Proposer, 6, 1}: Proposer6R1, + {symbols.NT_Proposer, 6, 2}: Proposer6R2, + {symbols.NT_Proposer, 6, 3}: Proposer6R3, + {symbols.NT_ProposerSimple, 0, 0}: ProposerSimple0R0, + {symbols.NT_ProposerSimple, 0, 1}: ProposerSimple0R1, + {symbols.NT_ProposerSimple, 1, 0}: ProposerSimple1R0, + {symbols.NT_ProposerSimple, 1, 1}: ProposerSimple1R1, + {symbols.NT_ProposerSimple, 1, 2}: ProposerSimple1R2, + {symbols.NT_Recovery, 0, 0}: Recovery0R0, + {symbols.NT_Recovery, 0, 1}: Recovery0R1, + {symbols.NT_Recovery, 0, 2}: Recovery0R2, + {symbols.NT_Recovery, 1, 0}: Recovery1R0, + {symbols.NT_Recovery, 1, 1}: Recovery1R1, + {symbols.NT_Start, 0, 0}: Start0R0, + {symbols.NT_Start, 0, 1}: Start0R1, + {symbols.NT_Start, 1, 0}: Start1R0, + {symbols.NT_Start, 1, 1}: Start1R1, + {symbols.NT_StateSync, 0, 0}: StateSync0R0, + {symbols.NT_StateSync, 0, 1}: StateSync0R1, + {symbols.NT_StateSync, 0, 2}: StateSync0R2, + {symbols.NT_StateSync, 1, 0}: StateSync1R0, + {symbols.NT_StateSync, 1, 1}: StateSync1R1, + {symbols.NT_StateSyncAttempt, 0, 0}: StateSyncAttempt0R0, + {symbols.NT_StateSyncAttempt, 0, 1}: StateSyncAttempt0R1, + {symbols.NT_StateSyncAttempt, 0, 2}: StateSyncAttempt0R2, + {symbols.NT_StateSyncAttempt, 1, 0}: StateSyncAttempt1R0, + {symbols.NT_StateSyncAttempt, 1, 1}: StateSyncAttempt1R1, + {symbols.NT_StateSyncAttempts, 0, 0}: StateSyncAttempts0R0, + {symbols.NT_StateSyncAttempts, 0, 1}: StateSyncAttempts0R1, + {symbols.NT_StateSyncAttempts, 1, 0}: StateSyncAttempts1R0, + {symbols.NT_StateSyncAttempts, 1, 1}: StateSyncAttempts1R1, + {symbols.NT_StateSyncAttempts, 1, 2}: StateSyncAttempts1R2, + {symbols.NT_SuccessSync, 0, 0}: SuccessSync0R0, + {symbols.NT_SuccessSync, 0, 1}: SuccessSync0R1, + {symbols.NT_SuccessSync, 0, 2}: SuccessSync0R2, +} + +var alternates = map[symbols.NT][]Label{ + symbols.NT_Start: {Start0R0, Start1R0}, + symbols.NT_CleanStart: {CleanStart0R0, CleanStart1R0}, + symbols.NT_StateSync: {StateSync0R0, StateSync1R0}, + symbols.NT_StateSyncAttempts: {StateSyncAttempts0R0, StateSyncAttempts1R0}, + symbols.NT_StateSyncAttempt: {StateSyncAttempt0R0, StateSyncAttempt1R0}, + symbols.NT_SuccessSync: {SuccessSync0R0}, + symbols.NT_ApplyChunks: {ApplyChunks0R0, ApplyChunks1R0}, + symbols.NT_Recovery: {Recovery0R0, Recovery1R0}, + symbols.NT_ConsensusExec: {ConsensusExec0R0}, + symbols.NT_ConsensusHeights: {ConsensusHeights0R0, ConsensusHeights1R0}, + symbols.NT_ConsensusHeight: {ConsensusHeight0R0, ConsensusHeight1R0}, + symbols.NT_ConsensusRounds: {ConsensusRounds0R0, ConsensusRounds1R0}, + symbols.NT_ConsensusRound: {ConsensusRound0R0, ConsensusRound1R0}, + symbols.NT_Proposer: {Proposer0R0, Proposer1R0, Proposer2R0, Proposer3R0, Proposer4R0, Proposer5R0, Proposer6R0}, + symbols.NT_ProposerSimple: {ProposerSimple0R0, ProposerSimple1R0}, + symbols.NT_NonProposer: {NonProposer0R0, NonProposer1R0, NonProposer2R0, NonProposer3R0, NonProposer4R0, NonProposer5R0, NonProposer6R0}, + symbols.NT_Extend: {Extend0R0, Extend1R0, Extend2R0, Extend3R0}, + symbols.NT_GotVotes: {GotVotes0R0, GotVotes1R0}, + symbols.NT_InitChain: {InitChain0R0}, + symbols.NT_FinalizeBlock: {FinalizeBlock0R0}, + symbols.NT_Commit: {Commit0R0}, + 
symbols.NT_OfferSnapshot: {OfferSnapshot0R0}, + symbols.NT_ApplyChunk: {ApplyChunk0R0}, + symbols.NT_PrepareProposal: {PrepareProposal0R0}, + symbols.NT_ProcessProposal: {ProcessProposal0R0}, + symbols.NT_ExtendVote: {ExtendVote0R0}, + symbols.NT_GotVote: {GotVote0R0}, +} diff --git a/test/e2e/pkg/grammar/grammar-auto/parser/symbols/symbols.go b/test/e2e/pkg/grammar/grammar-auto/parser/symbols/symbols.go new file mode 100644 index 00000000000..f889b46e453 --- /dev/null +++ b/test/e2e/pkg/grammar/grammar-auto/parser/symbols/symbols.go @@ -0,0 +1,200 @@ +// Package symbols is generated by gogll. Do not edit. +package symbols + +import ( + "bytes" + "fmt" +) + +type Symbol interface { + isSymbol() + IsNonTerminal() bool + String() string +} + +func (NT) isSymbol() {} +func (T) isSymbol() {} + +// NT is the type of non-terminals symbols +type NT int + +const ( + NT_ApplyChunk NT = iota + NT_ApplyChunks + NT_CleanStart + NT_Commit + NT_ConsensusExec + NT_ConsensusHeight + NT_ConsensusHeights + NT_ConsensusRound + NT_ConsensusRounds + NT_Extend + NT_ExtendVote + NT_FinalizeBlock + NT_GotVote + NT_GotVotes + NT_InitChain + NT_NonProposer + NT_OfferSnapshot + NT_PrepareProposal + NT_ProcessProposal + NT_Proposer + NT_ProposerSimple + NT_Recovery + NT_Start + NT_StateSync + NT_StateSyncAttempt + NT_StateSyncAttempts + NT_SuccessSync +) + +// T is the type of terminals symbols +type T int + +const ( + T_0 T = iota // apply_snapshot_chunk + T_1 // commit + T_2 // extend_vote + T_3 // finalize_block + T_4 // init_chain + T_5 // offer_snapshot + T_6 // prepare_proposal + T_7 // process_proposal + T_8 // verify_vote_extension +) + +type Symbols []Symbol + +func (ss Symbols) Equal(ss1 Symbols) bool { + if len(ss) != len(ss1) { + return false + } + for i, s := range ss { + if s.String() != ss1[i].String() { + return false + } + } + return true +} + +func (ss Symbols) String() string { + w := new(bytes.Buffer) + for i, s := range ss { + if i > 0 { + fmt.Fprint(w, " ") + } + fmt.Fprintf(w, "%s", s) + } + return w.String() +} + +func (ss Symbols) Strings() []string { + strs := make([]string, len(ss)) + for i, s := range ss { + strs[i] = s.String() + } + return strs +} + +func (NT) IsNonTerminal() bool { + return true +} + +func (T) IsNonTerminal() bool { + return false +} + +func (nt NT) String() string { + return ntToString[nt] +} + +func (t T) String() string { + return tToString[t] +} + +// IsNT returns true iff sym is a non-terminal symbol of the grammar +func IsNT(sym string) bool { + _, exist := stringNT[sym] + return exist +} + +// ToNT returns the NT value of sym or panics if sym is not a non-terminal of the grammar +func ToNT(sym string) NT { + nt, exist := stringNT[sym] + if !exist { + panic(fmt.Sprintf("No NT: %s", sym)) + } + return nt +} + +var ntToString = []string{ + "ApplyChunk", /* NT_ApplyChunk */ + "ApplyChunks", /* NT_ApplyChunks */ + "CleanStart", /* NT_CleanStart */ + "Commit", /* NT_Commit */ + "ConsensusExec", /* NT_ConsensusExec */ + "ConsensusHeight", /* NT_ConsensusHeight */ + "ConsensusHeights", /* NT_ConsensusHeights */ + "ConsensusRound", /* NT_ConsensusRound */ + "ConsensusRounds", /* NT_ConsensusRounds */ + "Extend", /* NT_Extend */ + "ExtendVote", /* NT_ExtendVote */ + "FinalizeBlock", /* NT_FinalizeBlock */ + "GotVote", /* NT_GotVote */ + "GotVotes", /* NT_GotVotes */ + "InitChain", /* NT_InitChain */ + "NonProposer", /* NT_NonProposer */ + "OfferSnapshot", /* NT_OfferSnapshot */ + "PrepareProposal", /* NT_PrepareProposal */ + "ProcessProposal", /* NT_ProcessProposal */ + 
"Proposer", /* NT_Proposer */ + "ProposerSimple", /* NT_ProposerSimple */ + "Recovery", /* NT_Recovery */ + "Start", /* NT_Start */ + "StateSync", /* NT_StateSync */ + "StateSyncAttempt", /* NT_StateSyncAttempt */ + "StateSyncAttempts", /* NT_StateSyncAttempts */ + "SuccessSync", /* NT_SuccessSync */ +} + +var tToString = []string{ + "apply_snapshot_chunk", /* T_0 */ + "commit", /* T_1 */ + "extend_vote", /* T_2 */ + "finalize_block", /* T_3 */ + "init_chain", /* T_4 */ + "offer_snapshot", /* T_5 */ + "prepare_proposal", /* T_6 */ + "process_proposal", /* T_7 */ + "verify_vote_extension", /* T_8 */ +} + +var stringNT = map[string]NT{ + "ApplyChunk": NT_ApplyChunk, + "ApplyChunks": NT_ApplyChunks, + "CleanStart": NT_CleanStart, + "Commit": NT_Commit, + "ConsensusExec": NT_ConsensusExec, + "ConsensusHeight": NT_ConsensusHeight, + "ConsensusHeights": NT_ConsensusHeights, + "ConsensusRound": NT_ConsensusRound, + "ConsensusRounds": NT_ConsensusRounds, + "Extend": NT_Extend, + "ExtendVote": NT_ExtendVote, + "FinalizeBlock": NT_FinalizeBlock, + "GotVote": NT_GotVote, + "GotVotes": NT_GotVotes, + "InitChain": NT_InitChain, + "NonProposer": NT_NonProposer, + "OfferSnapshot": NT_OfferSnapshot, + "PrepareProposal": NT_PrepareProposal, + "ProcessProposal": NT_ProcessProposal, + "Proposer": NT_Proposer, + "ProposerSimple": NT_ProposerSimple, + "Recovery": NT_Recovery, + "Start": NT_Start, + "StateSync": NT_StateSync, + "StateSyncAttempt": NT_StateSyncAttempt, + "StateSyncAttempts": NT_StateSyncAttempts, + "SuccessSync": NT_SuccessSync, +} diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/sppf/sppf.go b/test/e2e/pkg/grammar/grammar-auto/sppf/sppf.go similarity index 97% rename from test/e2e/pkg/grammar/recovery/grammar-auto/sppf/sppf.go rename to test/e2e/pkg/grammar/grammar-auto/sppf/sppf.go index b41eb40d4a0..d6f7ef00cd6 100644 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/sppf/sppf.go +++ b/test/e2e/pkg/grammar/grammar-auto/sppf/sppf.go @@ -10,11 +10,12 @@ Package sppf implements a Shared Packed Parse Forest as defined in: package sppf import ( - "fmt" "bytes" + "fmt" + "github.com/goccmack/goutil/ioutil" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols" + "github.com/cometbft/cometbft/test/e2e/pkg/grammar/grammar-auto/parser/symbols" ) type Node interface { @@ -92,7 +93,7 @@ func (n *PackedNode) String() string { return "PN: " + n.Label() } -//---- Dot ---- +// ---- Dot ---- type dotBuilder struct { nodes map[string]bool // index = node.Label() @@ -203,6 +204,4 @@ func (n *SymbolNode) dot(bld *dotBuilder) { fmt.Fprintf(bld.w, "%s", pn.Label()) } fmt.Fprintln(bld.w) - } - diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/token/token.go b/test/e2e/pkg/grammar/grammar-auto/token/token.go similarity index 51% rename from test/e2e/pkg/grammar/recovery/grammar-auto/token/token.go rename to test/e2e/pkg/grammar/grammar-auto/token/token.go index e85f54ded11..01dcf380590 100644 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/token/token.go +++ b/test/e2e/pkg/grammar/grammar-auto/token/token.go @@ -1,16 +1,15 @@ - // Package token is generated by GoGLL. Do not edit package token -import( - "fmt" +import ( + "fmt" ) // Token is returned by the lexer for every scanned lexical token type Token struct { - typ Type - lext, rext int - input []rune + typ Type + lext, rext int + input []rune } /* @@ -19,49 +18,49 @@ lext is the left extent and rext the right extent of the token in the input. input is the input slice scanned by the lexer. 
*/ func New(t Type, lext, rext int, input []rune) *Token { - return &Token{ - typ: t, - lext: lext, - rext: rext, - input: input, - } + return &Token{ + typ: t, + lext: lext, + rext: rext, + input: input, + } } // GetLineColumn returns the line and column of the left extent of t func (t *Token) GetLineColumn() (line, col int) { - line, col = 1, 1 - for j := 0; j < t.lext; j++ { - switch t.input[j] { - case '\n': - line++ - col = 1 - case '\t': - col += 4 - default: - col++ - } - } - return + line, col = 1, 1 + for j := 0; j < t.lext; j++ { + switch t.input[j] { + case '\n': + line++ + col = 1 + case '\t': + col += 4 + default: + col++ + } + } + return } // GetInput returns the input from which t was parsed. func (t *Token) GetInput() []rune { - return t.input + return t.input } // Lext returns the left extent of t in the input stream of runes func (t *Token) Lext() int { - return t.lext + return t.lext } // Literal returns the literal runes of t scanned by the lexer func (t *Token) Literal() []rune { - return t.input[t.lext:t.rext] + return t.input[t.lext:t.rext] } // LiteralString returns string(t.Literal()) func (t *Token) LiteralString() string { - return string(t.Literal()) + return string(t.Literal()) } // LiteralStripEscape returns the literal runes of t scanned by the lexer @@ -95,12 +94,12 @@ func (t *Token) LiteralStringStripEscape() string { // Rext returns the right extent of t in the input stream of runes func (t *Token) Rext() int { - return t.rext + return t.rext } func (t *Token) String() string { - return fmt.Sprintf("%s (%d,%d) %s", - t.TypeID(), t.lext, t.rext, t.LiteralString()) + return fmt.Sprintf("%s (%d,%d) %s", + t.TypeID(), t.lext, t.rext, t.LiteralString()) } // Suppress returns true iff t is suppressed by the lexer @@ -110,79 +109,107 @@ func (t *Token) Suppress() bool { // Type returns the token Type of t func (t *Token) Type() Type { - return t.typ + return t.typ } -// TypeID returns the token Type ID of t. +// TypeID returns the token Type ID of t. // This may be different from the literal of token t. 
func (t *Token) TypeID() string { - return t.Type().ID() + return t.Type().ID() } // Type is the token type type Type int func (t Type) String() string { - return TypeToString[t] + return TypeToString[t] } // ID returns the token type ID of token Type t func (t Type) ID() string { - return TypeToID[t] -} - - -const( - Error Type = iota // Error - EOF // $ - T_0 // commit - T_1 // finalize_block - T_2 // prepare_proposal - T_3 // process_proposal + return TypeToID[t] +} + +const ( + Error Type = iota // Error + EOF // $ + T_0 // apply_snapshot_chunk + T_1 // commit + T_2 // extend_vote + T_3 // finalize_block + T_4 // init_chain + T_5 // offer_snapshot + T_6 // prepare_proposal + T_7 // process_proposal + T_8 // verify_vote_extension ) -var TypeToString = []string{ - "Error", - "EOF", - "T_0", - "T_1", - "T_2", - "T_3", -} - -var StringToType = map[string] Type { - "Error" : Error, - "EOF" : EOF, - "T_0" : T_0, - "T_1" : T_1, - "T_2" : T_2, - "T_3" : T_3, -} - -var TypeToID = []string { - "Error", - "$", - "commit", - "finalize_block", - "prepare_proposal", - "process_proposal", -} - -var IDToType = map[string]Type { - "Error": 0, - "$": 1, - "commit": 2, - "finalize_block": 3, - "prepare_proposal": 4, - "process_proposal": 5, -} - -var Suppress = []bool { - false, - false, - false, - false, - false, - false, +var TypeToString = []string{ + "Error", + "EOF", + "T_0", + "T_1", + "T_2", + "T_3", + "T_4", + "T_5", + "T_6", + "T_7", + "T_8", +} + +var StringToType = map[string]Type{ + "Error": Error, + "EOF": EOF, + "T_0": T_0, + "T_1": T_1, + "T_2": T_2, + "T_3": T_3, + "T_4": T_4, + "T_5": T_5, + "T_6": T_6, + "T_7": T_7, + "T_8": T_8, +} + +var TypeToID = []string{ + "Error", + "$", + "apply_snapshot_chunk", + "commit", + "extend_vote", + "finalize_block", + "init_chain", + "offer_snapshot", + "prepare_proposal", + "process_proposal", + "verify_vote_extension", +} + +var IDToType = map[string]Type{ + "Error": 0, + "$": 1, + "apply_snapshot_chunk": 2, + "commit": 3, + "extend_vote": 4, + "finalize_block": 5, + "init_chain": 6, + "offer_snapshot": 7, + "prepare_proposal": 8, + "process_proposal": 9, + "verify_vote_extension": 10, +} + +var Suppress = []bool{ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, } - diff --git a/test/e2e/pkg/grammar/recovery/abci_grammar_recovery.md b/test/e2e/pkg/grammar/recovery/abci_grammar_recovery.md deleted file mode 100644 index f1e87cf0a08..00000000000 --- a/test/e2e/pkg/grammar/recovery/abci_grammar_recovery.md +++ /dev/null @@ -1,42 +0,0 @@ -``` -package "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto" - -Start : Recovery ; - -Recovery : ConsensusExec ; - -ConsensusExec : ConsensusHeights ; -ConsensusHeights : ConsensusHeight | ConsensusHeight ConsensusHeights ; -ConsensusHeight : ConsensusRounds FinalizeBlock Commit | FinalizeBlock Commit ; -ConsensusRounds : ConsensusRound | ConsensusRound ConsensusRounds ; -ConsensusRound : Proposer | NonProposer ; - -Proposer : PrepareProposal | PrepareProposal ProcessProposal ; -NonProposer: ProcessProposal ; - -FinalizeBlock : "finalize_block" ; -Commit : "commit" ; -PrepareProposal : "prepare_proposal" ; -ProcessProposal : "process_proposal" ; - -``` - -The part of the original grammar (https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_comet_expected_behavior.md) the grammar above -refers to is below: - -start = recovery - -recovery = info consensus-exec - -consensus-exec = (inf)consensus-height -consensus-height = 
*consensus-round decide commit -consensus-round = proposer / non-proposer - -proposer = [prepare-proposal [process-proposal]] -non-proposer = [process-proposal] - -decide = %s"" -commit = %s"" -info = %s"" -prepare-proposal = %s"" -process-proposal = %s"" diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/lexer/lexer.go b/test/e2e/pkg/grammar/recovery/grammar-auto/lexer/lexer.go deleted file mode 100644 index 82ba6080526..00000000000 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/lexer/lexer.go +++ /dev/null @@ -1,745 +0,0 @@ - -// Package lexer is generated by GoGLL. Do not edit. -package lexer - -import ( - // "fmt" - "io/ioutil" - "strings" - "unicode" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/token" -) - -type state int - -const nullState state = -1 - -// Unicode categories -var ( - _Cc = unicode.Cc // Cc is the set of Unicode characters in category Cc (Other, control). - _Cf = unicode.Cf // Cf is the set of Unicode characters in category Cf (Other, format). - _Co = unicode.Co // Co is the set of Unicode characters in category Co (Other, private use). - _Cs = unicode.Cs // Cs is the set of Unicode characters in category Cs (Other, surrogate). - _Digit = unicode.Digit // Digit is the set of Unicode characters with the "decimal digit" property. - _Nd = unicode.Nd // Nd is the set of Unicode characters in category Nd (Number, decimal digit). - _Letter = unicode.Letter // Letter/L is the set of Unicode letters, category L. - _L = unicode.L - _Lm = unicode.Lm // Lm is the set of Unicode characters in category Lm (Letter, modifier). - _Lo = unicode.Lo // Lo is the set of Unicode characters in category Lo (Letter, other). - _Lower = unicode.Lower // Lower is the set of Unicode lower case letters. - _Ll = unicode.Ll // Ll is the set of Unicode characters in category Ll (Letter, lowercase). - _Mark = unicode.Mark // Mark/M is the set of Unicode mark characters, category M. - _M = unicode.M - _Mc = unicode.Mc // Mc is the set of Unicode characters in category Mc (Mark, spacing combining). - _Me = unicode.Me // Me is the set of Unicode characters in category Me (Mark, enclosing). - _Mn = unicode.Mn // Mn is the set of Unicode characters in category Mn (Mark, nonspacing). - _Nl = unicode.Nl // Nl is the set of Unicode characters in category Nl (Number, letter). - _No = unicode.No // No is the set of Unicode characters in category No (Number, other). - _Number = unicode.Number // Number/N is the set of Unicode number characters, category N. - _N = unicode.N - _Other = unicode.Other // Other/C is the set of Unicode control and special characters, category C. - _C = unicode.C - _Pc = unicode.Pc // Pc is the set of Unicode characters in category Pc (Punctuation, connector). - _Pd = unicode.Pd // Pd is the set of Unicode characters in category Pd (Punctuation, dash). - _Pe = unicode.Pe // Pe is the set of Unicode characters in category Pe (Punctuation, close). - _Pf = unicode.Pf // Pf is the set of Unicode characters in category Pf (Punctuation, final quote). - _Pi = unicode.Pi // Pi is the set of Unicode characters in category Pi (Punctuation, initial quote). - _Po = unicode.Po // Po is the set of Unicode characters in category Po (Punctuation, other). - _Ps = unicode.Ps // Ps is the set of Unicode characters in category Ps (Punctuation, open). - _Punct = unicode.Punct // Punct/P is the set of Unicode punctuation characters, category P. - _P = unicode.P - _Sc = unicode.Sc // Sc is the set of Unicode characters in category Sc (Symbol, currency). 
- _Sk = unicode.Sk // Sk is the set of Unicode characters in category Sk (Symbol, modifier). - _Sm = unicode.Sm // Sm is the set of Unicode characters in category Sm (Symbol, math). - _So = unicode.So // So is the set of Unicode characters in category So (Symbol, other). - _Space = unicode.Space // Space/Z is the set of Unicode space characters, category Z. - _Z = unicode.Z - _Symbol = unicode.Symbol // Symbol/S is the set of Unicode symbol characters, category S. - _S = unicode.S - _Title = unicode.Title // Title is the set of Unicode title case letters. - _Lt = unicode.Lt // Lt is the set of Unicode characters in category Lt (Letter, titlecase). - _Upper = unicode.Upper // Upper is the set of Unicode upper case letters. - _Lu = unicode.Lu // Lu is the set of Unicode characters in category Lu (Letter, uppercase). - _Zl = unicode.Zl // Zl is the set of Unicode characters in category Zl (Separator, line). - _Zp = unicode.Zp // Zp is the set of Unicode characters in category Zp (Separator, paragraph). - _Zs = unicode.Zs // Zs is the set of Unicode characters in category Zs (Separator, space). -) - -// Unicode properties -var ( - _ASCII_Hex_Digit = unicode.ASCII_Hex_Digit // ASCII_Hex_Digit is the set of Unicode characters with property ASCII_Hex_Digit. - _Bidi_Control = unicode.Bidi_Control // Bidi_Control is the set of Unicode characters with property Bidi_Control. - _Dash = unicode.Dash // Dash is the set of Unicode characters with property Dash. - _Deprecated = unicode.Deprecated // Deprecated is the set of Unicode characters with property Deprecated. - _Diacritic = unicode.Diacritic // Diacritic is the set of Unicode characters with property Diacritic. - _Extender = unicode.Extender // Extender is the set of Unicode characters with property Extender. - _Hex_Digit = unicode.Hex_Digit // Hex_Digit is the set of Unicode characters with property Hex_Digit. - _Hyphen = unicode.Hyphen // Hyphen is the set of Unicode characters with property Hyphen. - _IDS_Binary_Operator = unicode.IDS_Binary_Operator // IDS_Binary_Operator is the set of Unicode characters with property IDS_Binary_Operator. - _IDS_Trinary_Operator = unicode.IDS_Trinary_Operator // IDS_Trinary_Operator is the set of Unicode characters with property IDS_Trinary_Operator. - _Ideographic = unicode.Ideographic // Ideographic is the set of Unicode characters with property Ideographic. - _Join_Control = unicode.Join_Control // Join_Control is the set of Unicode characters with property Join_Control. - _Logical_Order_Exception = unicode.Logical_Order_Exception // Logical_Order_Exception is the set of Unicode characters with property Logical_Order_Exception. - _Noncharacter_Code_Point = unicode.Noncharacter_Code_Point // Noncharacter_Code_Point is the set of Unicode characters with property Noncharacter_Code_Point. - _Other_Alphabetic = unicode.Other_Alphabetic // Other_Alphabetic is the set of Unicode characters with property Other_Alphabetic. - _Other_Default_Ignorable_Code_Point = unicode.Other_Default_Ignorable_Code_Point // Other_Default_Ignorable_Code_Point is the set of Unicode characters with property Other_Default_Ignorable_Code_Point. - _Other_Grapheme_Extend = unicode.Other_Grapheme_Extend // Other_Grapheme_Extend is the set of Unicode characters with property Other_Grapheme_Extend. - _Other_ID_Continue = unicode.Other_ID_Continue // Other_ID_Continue is the set of Unicode characters with property Other_ID_Continue. 
- _Other_ID_Start = unicode.Other_ID_Start // Other_ID_Start is the set of Unicode characters with property Other_ID_Start. - _Other_Lowercase = unicode.Other_Lowercase // Other_Lowercase is the set of Unicode characters with property Other_Lowercase. - _Other_Math = unicode.Other_Math // Other_Math is the set of Unicode characters with property Other_Math. - _Other_Uppercase = unicode.Other_Uppercase // Other_Uppercase is the set of Unicode characters with property Other_Uppercase. - _Pattern_Syntax = unicode.Pattern_Syntax // Pattern_Syntax is the set of Unicode characters with property Pattern_Syntax. - _Pattern_White_Space = unicode.Pattern_White_Space // Pattern_White_Space is the set of Unicode characters with property Pattern_White_Space. - _Prepended_Concatenation_Mark = unicode.Prepended_Concatenation_Mark // Prepended_Concatenation_Mark is the set of Unicode characters with property Prepended_Concatenation_Mark. - _Quotation_Mark = unicode.Quotation_Mark // Quotation_Mark is the set of Unicode characters with property Quotation_Mark. - _Radical = unicode.Radical // Radical is the set of Unicode characters with property Radical. - _Regional_Indicator = unicode.Regional_Indicator // Regional_Indicator is the set of Unicode characters with property Regional_Indicator. - _STerm = unicode.STerm // STerm is an alias for Sentence_Terminal. - _Sentence_Terminal = unicode.Sentence_Terminal // Sentence_Terminal is the set of Unicode characters with property Sentence_Terminal. - _Soft_Dotted = unicode.Soft_Dotted // Soft_Dotted is the set of Unicode characters with property Soft_Dotted. - _Terminal_Punctuation = unicode.Terminal_Punctuation // Terminal_Punctuation is the set of Unicode characters with property Terminal_Punctuation. - _Unified_Ideograph = unicode.Unified_Ideograph // Unified_Ideograph is the set of Unicode characters with property Unified_Ideograph. - _Variation_Selector = unicode.Variation_Selector // Variation_Selector is the set of Unicode characters with property Variation_Selector. - _White_Space = unicode.White_Space // White_Space is the set of Unicode characters with property White_Space. -) - -// Lexer contains both the input slice of runes and the slice of tokens -// parsed from the input -type Lexer struct { - // I is the input slice of runes - I []rune - - // Tokens is the slice of tokens constructed by the lexer from I - Tokens []*token.Token -} - -/* -NewFile constructs a Lexer created from the input file, fname. - -If the input file is a markdown file NewFile process treats all text outside -code blocks as whitespace. All text inside code blocks are treated as input text. - -If the input file is a normal text file NewFile treats all text in the inputfile -as input text. -*/ -func NewFile(fname string) *Lexer { - buf, err := ioutil.ReadFile(fname) - if err != nil { - panic(err) - } - input := []rune(string(buf)) - if strings.HasSuffix(fname, ".md") { - loadMd(input) - } - return New(input) -} - -func loadMd(input []rune) { - i := 0 - text := true - for i < len(input) { - if i <= len(input)-3 && input[i] == '`' && input[i+1] == '`' && input[i+2] == '`' { - text = !text - for j := 0; j < 3; j++ { - input[i+j] = ' ' - } - i += 3 - } - if i < len(input) { - if text { - if input[i] == '\n' { - input[i] = '\n' - } else { - input[i] = ' ' - } - } - i += 1 - } - } -} - -/* -New constructs a Lexer from a slice of runes. - -All contents of the input slice are treated as input text. 
-*/ -func New(input []rune) *Lexer { - lex := &Lexer{ - I: input, - Tokens: make([]*token.Token, 0, 2048), - } - lext := 0 - for lext < len(lex.I) { - for lext < len(lex.I) && unicode.IsSpace(lex.I[lext]) { - lext++ - } - if lext < len(lex.I) { - tok := lex.scan(lext) - lext = tok.Rext() - if !tok.Suppress() { - lex.addToken(tok) - } - } - } - lex.add(token.EOF, len(input), len(input)) - return lex -} - -func (l *Lexer) scan(i int) *token.Token { - // fmt.Printf("lexer.scan(%d)\n", i) - s, typ, rext := nullState, token.Error, i+1 - if i < len(l.I) { - // fmt.Printf(" rext %d, i %d\n", rext, i) - s = nextState[0](l.I[i]) - } - for s != nullState { - if rext >= len(l.I) { - typ = accept[s] - s = nullState - } else { - typ = accept[s] - s = nextState[s](l.I[rext]) - if s != nullState || typ == token.Error { - rext++ - } - } - } - tok := token.New(typ, i, rext, l.I) - // fmt.Printf(" %s\n", tok) - return tok -} - -func escape(r rune) string { - switch r { - case '"': - return "\"" - case '\\': - return "\\\\" - case '\r': - return "\\r" - case '\n': - return "\\n" - case '\t': - return "\\t" - } - return string(r) -} - -// GetLineColumn returns the line and column of rune[i] in the input -func (l *Lexer) GetLineColumn(i int) (line, col int) { - line, col = 1, 1 - for j := 0; j < i; j++ { - switch l.I[j] { - case '\n': - line++ - col = 1 - case '\t': - col += 4 - default: - col++ - } - } - return -} - -// GetLineColumnOfToken returns the line and column of token[i] in the imput -func (l *Lexer) GetLineColumnOfToken(i int) (line, col int) { - return l.GetLineColumn(l.Tokens[i].Lext()) -} - -// GetString returns the input string from the left extent of Token[lext] to -// the right extent of Token[rext] -func (l *Lexer) GetString(lext, rext int) string { - return string(l.I[l.Tokens[lext].Lext():l.Tokens[rext].Rext()]) -} - -func (l *Lexer) add(t token.Type, lext, rext int) { - l.addToken(token.New(t, lext, rext, l.I)) -} - -func (l *Lexer) addToken(tok *token.Token) { - l.Tokens = append(l.Tokens, tok) -} - -func any(r rune, set []rune) bool { - for _, r1 := range set { - if r == r1 { - return true - } - } - return false -} - -func not(r rune, set []rune) bool { - for _, r1 := range set { - if r == r1 { - return false - } - } - return true -} - -var accept = []token.Type{ - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_0, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_1, - token.Error, - token.Error, - token.Error, - token.Error, - token.T_2, - token.T_3, -} - -var nextState = []func(r rune) state{ - // Set0 - func(r rune) state { - switch { - case r == 'c': - return 1 - case r == 'f': - return 2 - case r == 'p': - return 3 - } - return nullState - }, - // Set1 - func(r rune) state { - switch { - case r == 'o': - return 4 - } - return nullState - }, - // Set2 - func(r rune) state { - switch { - case r == 'i': - return 5 - } - return nullState - }, - // Set3 - func(r rune) state { - switch { - case r == 'r': - return 6 - } - return 
nullState - }, - // Set4 - func(r rune) state { - switch { - case r == 'm': - return 7 - } - return nullState - }, - // Set5 - func(r rune) state { - switch { - case r == 'n': - return 8 - } - return nullState - }, - // Set6 - func(r rune) state { - switch { - case r == 'e': - return 9 - case r == 'o': - return 10 - } - return nullState - }, - // Set7 - func(r rune) state { - switch { - case r == 'm': - return 11 - } - return nullState - }, - // Set8 - func(r rune) state { - switch { - case r == 'a': - return 12 - } - return nullState - }, - // Set9 - func(r rune) state { - switch { - case r == 'p': - return 13 - } - return nullState - }, - // Set10 - func(r rune) state { - switch { - case r == 'c': - return 14 - } - return nullState - }, - // Set11 - func(r rune) state { - switch { - case r == 'i': - return 15 - } - return nullState - }, - // Set12 - func(r rune) state { - switch { - case r == 'l': - return 16 - } - return nullState - }, - // Set13 - func(r rune) state { - switch { - case r == 'a': - return 17 - } - return nullState - }, - // Set14 - func(r rune) state { - switch { - case r == 'e': - return 18 - } - return nullState - }, - // Set15 - func(r rune) state { - switch { - case r == 't': - return 19 - } - return nullState - }, - // Set16 - func(r rune) state { - switch { - case r == 'i': - return 20 - } - return nullState - }, - // Set17 - func(r rune) state { - switch { - case r == 'r': - return 21 - } - return nullState - }, - // Set18 - func(r rune) state { - switch { - case r == 's': - return 22 - } - return nullState - }, - // Set19 - func(r rune) state { - switch { - } - return nullState - }, - // Set20 - func(r rune) state { - switch { - case r == 'z': - return 23 - } - return nullState - }, - // Set21 - func(r rune) state { - switch { - case r == 'e': - return 24 - } - return nullState - }, - // Set22 - func(r rune) state { - switch { - case r == 's': - return 25 - } - return nullState - }, - // Set23 - func(r rune) state { - switch { - case r == 'e': - return 26 - } - return nullState - }, - // Set24 - func(r rune) state { - switch { - case r == '_': - return 27 - } - return nullState - }, - // Set25 - func(r rune) state { - switch { - case r == '_': - return 28 - } - return nullState - }, - // Set26 - func(r rune) state { - switch { - case r == '_': - return 29 - } - return nullState - }, - // Set27 - func(r rune) state { - switch { - case r == 'p': - return 30 - } - return nullState - }, - // Set28 - func(r rune) state { - switch { - case r == 'p': - return 31 - } - return nullState - }, - // Set29 - func(r rune) state { - switch { - case r == 'b': - return 32 - } - return nullState - }, - // Set30 - func(r rune) state { - switch { - case r == 'r': - return 33 - } - return nullState - }, - // Set31 - func(r rune) state { - switch { - case r == 'r': - return 34 - } - return nullState - }, - // Set32 - func(r rune) state { - switch { - case r == 'l': - return 35 - } - return nullState - }, - // Set33 - func(r rune) state { - switch { - case r == 'o': - return 36 - } - return nullState - }, - // Set34 - func(r rune) state { - switch { - case r == 'o': - return 37 - } - return nullState - }, - // Set35 - func(r rune) state { - switch { - case r == 'o': - return 38 - } - return nullState - }, - // Set36 - func(r rune) state { - switch { - case r == 'p': - return 39 - } - return nullState - }, - // Set37 - func(r rune) state { - switch { - case r == 'p': - return 40 - } - return nullState - }, - // Set38 - func(r rune) state { - switch { - case r == 'c': - return 41 - } - 
return nullState - }, - // Set39 - func(r rune) state { - switch { - case r == 'o': - return 42 - } - return nullState - }, - // Set40 - func(r rune) state { - switch { - case r == 'o': - return 43 - } - return nullState - }, - // Set41 - func(r rune) state { - switch { - case r == 'k': - return 44 - } - return nullState - }, - // Set42 - func(r rune) state { - switch { - case r == 's': - return 45 - } - return nullState - }, - // Set43 - func(r rune) state { - switch { - case r == 's': - return 46 - } - return nullState - }, - // Set44 - func(r rune) state { - switch { - } - return nullState - }, - // Set45 - func(r rune) state { - switch { - case r == 'a': - return 47 - } - return nullState - }, - // Set46 - func(r rune) state { - switch { - case r == 'a': - return 48 - } - return nullState - }, - // Set47 - func(r rune) state { - switch { - case r == 'l': - return 49 - } - return nullState - }, - // Set48 - func(r rune) state { - switch { - case r == 'l': - return 50 - } - return nullState - }, - // Set49 - func(r rune) state { - switch { - } - return nullState - }, - // Set50 - func(r rune) state { - switch { - } - return nullState - }, -} diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/bsr/bsr.go b/test/e2e/pkg/grammar/recovery/grammar-auto/parser/bsr/bsr.go deleted file mode 100644 index 9550d403aaa..00000000000 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/bsr/bsr.go +++ /dev/null @@ -1,685 +0,0 @@ -// Package bsr is generated by gogll. Do not edit. - -/* -Package bsr implements a Binary Subtree Representation set as defined in - - Scott et al - Derivation representation using binary subtree sets, - Science of Computer Programming 175 (2019) -*/ -package bsr - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/lexer" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/slot" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/sppf" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/token" -) - -type bsr interface { - LeftExtent() int - RightExtent() int - Pivot() int -} - -/* -Set contains the set of Binary Subtree Representations (BSR). -*/ -type Set struct { - slotEntries map[BSR]bool - ntSlotEntries map[ntSlot][]BSR - stringEntries map[stringKey]*stringBSR - rightExtent int - lex *lexer.Lexer - - startSym symbols.NT -} - -type ntSlot struct { - nt symbols.NT - leftExtent int - rightExtent int -} - -// BSR is the binary subtree representation of a parsed nonterminal -type BSR struct { - Label slot.Label - leftExtent int - pivot int - rightExtent int - set *Set -} - -type BSRs []BSR - -type stringBSR struct { - Symbols symbols.Symbols - leftExtent int - pivot int - rightExtent int - set *Set -} - -type stringBSRs []*stringBSR - -type stringKey string - -// New returns a new initialised BSR Set -func New(startSymbol symbols.NT, l *lexer.Lexer) *Set { - return &Set{ - slotEntries: make(map[BSR]bool), - ntSlotEntries: make(map[ntSlot][]BSR), - stringEntries: make(map[stringKey]*stringBSR), - rightExtent: 0, - lex: l, - startSym: startSymbol, - } -} - -/* -Add a bsr to the set. (i,j) is the extent. k is the pivot. 
-*/ -func (s *Set) Add(l slot.Label, i, k, j int) { - // fmt.Printf("bsr.Add(%s,%d,%d,%d l.Pos %d)\n", l, i, k, j, l.Pos()) - if l.EoR() { - s.insert(BSR{l, i, k, j, s}) - } else { - if l.Pos() > 1 { - s.insert(&stringBSR{l.Symbols()[:l.Pos()], i, k, j, s}) - } - } -} - -// AddEmpty adds a grammar slot: X : ϵ• -func (s *Set) AddEmpty(l slot.Label, i int) { - s.insert(BSR{l, i, i, i, s}) -} - -/* -Contain returns true iff the BSR Set contains the NT symbol with left and -right extent. -*/ -func (s *Set) Contain(nt symbols.NT, left, right int) bool { - // fmt.Printf("bsr.Contain(%s,%d,%d)\n",nt,left,right) - for e := range s.slotEntries { - // fmt.Printf(" (%s,%d,%d)\n",e.Label.Head(),e.leftExtent,e.rightExtent) - if e.Label.Head() == nt && e.leftExtent == left && e.rightExtent == right { - // fmt.Println(" true") - return true - } - } - // fmt.Println(" false") - return false -} - -// Dump prints all the NT and string elements of the BSR set -func (s *Set) Dump() { - fmt.Println("Roots:") - for _, rt := range s.GetRoots() { - fmt.Println(rt) - } - fmt.Println() - - fmt.Println("NT BSRs:") - for _, bsr := range s.getNTBSRs() { - fmt.Println(bsr) - } - fmt.Println() - - fmt.Println("string BSRs:") - for _, bsr := range s.getStringBSRs() { - fmt.Println(bsr) - } - fmt.Println() -} - -// GetAll returns all BSR grammar slot entries -func (s *Set) GetAll() (bsrs []BSR) { - for b := range s.slotEntries { - bsrs = append(bsrs, b) - } - return -} - -// GetRightExtent returns the right extent of the BSR set -func (s *Set) GetRightExtent() int { - return s.rightExtent -} - -// GetRoot returns the root of the parse tree of an unambiguous parse. -// GetRoot fails if the parse was ambiguous. Use GetRoots() for ambiguous parses. -func (s *Set) GetRoot() BSR { - rts := s.GetRoots() - if len(rts) != 1 { - failf("%d parse trees exist for start symbol %s", len(rts), s.startSym) - } - return rts[0] -} - -// GetRoots returns all the roots of parse trees of the start symbol of the grammar. 
-func (s *Set) GetRoots() (roots []BSR) { - for b := range s.slotEntries { - if b.Label.Head() == s.startSym && b.leftExtent == 0 && s.rightExtent == b.rightExtent { - roots = append(roots, b) - } - } - return -} - -// GetAllStrings returns all string elements with symbols = str, -// left extent = lext and right extent = rext -func (s *Set) GetAllStrings(str symbols.Symbols, lext, rext int) (strs []*stringBSR) { - for _, s := range s.stringEntries { - if s.Symbols.Equal(str) && s.leftExtent == lext && s.rightExtent == rext { - strs = append(strs, s) - } - } - return -} - -func (s *Set) getNTBSRs() BSRs { - bsrs := make(BSRs, 0, len(s.ntSlotEntries)) - for _, bsrl := range s.ntSlotEntries { - for _, bsr := range bsrl { - bsrs = append(bsrs, bsr) - } - } - sort.Sort(bsrs) - return bsrs -} - -func (s *Set) getStringBSRs() stringBSRs { - bsrs := make(stringBSRs, 0, len(s.stringEntries)) - for _, bsr := range s.stringEntries { - bsrs = append(bsrs, bsr) - } - sort.Sort(bsrs) - return bsrs -} - -func (s *Set) getString(symbols symbols.Symbols, leftExtent, rightExtent int) *stringBSR { - // fmt.Printf("Set.getString(%s,%d,%d)\n", symbols, leftExtent, rightExtent) - - strBsr, exist := s.stringEntries[getStringKey(symbols, leftExtent, rightExtent)] - if exist { - return strBsr - } - - panic(fmt.Sprintf("Error: no string %s left extent=%d right extent=%d\n", - symbols, leftExtent, rightExtent)) -} - -func (s *Set) insert(bsr bsr) { - if bsr.RightExtent() > s.rightExtent { - s.rightExtent = bsr.RightExtent() - } - switch b := bsr.(type) { - case BSR: - s.slotEntries[b] = true - nt := ntSlot{b.Label.Head(), b.leftExtent, b.rightExtent} - s.ntSlotEntries[nt] = append(s.ntSlotEntries[nt], b) - case *stringBSR: - s.stringEntries[b.key()] = b - default: - panic(fmt.Sprintf("Invalid type %T", bsr)) - } -} - -func (s *stringBSR) key() stringKey { - return getStringKey(s.Symbols, s.leftExtent, s.rightExtent) -} - -func getStringKey(symbols symbols.Symbols, lext, rext int) stringKey { - return stringKey(fmt.Sprintf("%s,%d,%d", symbols, lext, rext)) -} - -// Alternate returns the index of the grammar rule alternate. -func (b BSR) Alternate() int { - return b.Label.Alternate() -} - -// GetAllNTChildren returns all the NT Children of b. If an NT child of b has -// ambiguous parses then all parses of that child are returned. -func (b BSR) GetAllNTChildren() [][]BSR { - children := [][]BSR{} - for i, s := range b.Label.Symbols() { - if s.IsNonTerminal() { - sChildren := b.GetNTChildrenI(i) - children = append(children, sChildren) - } - } - return children -} - -// GetNTChild returns the BSR of occurrence i of nt in s. -// GetNTChild fails if s has ambiguous subtrees of occurrence i of nt. -func (b BSR) GetNTChild(nt symbols.NT, i int) BSR { - bsrs := b.GetNTChildren(nt, i) - if len(bsrs) != 1 { - ambiguousSlots := []string{} - for _, c := range bsrs { - ambiguousSlots = append(ambiguousSlots, c.String()) - } - b.set.fail(b, "%s is ambiguous in %s\n %s", nt, b, strings.Join(ambiguousSlots, "\n ")) - } - return bsrs[0] -} - -// GetNTChildI returns the BSR of NT symbol[i] in the BSR set. -// GetNTChildI fails if the BSR set has ambiguous subtrees of NT i. 
-func (b BSR) GetNTChildI(i int) BSR { - bsrs := b.GetNTChildrenI(i) - if len(bsrs) != 1 { - b.set.fail(b, "NT %d is ambiguous in %s", i, b) - } - return bsrs[0] -} - -// GetNTChildren returns all the BSRs of occurrence i of nt in s -func (b BSR) GetNTChildren(nt symbols.NT, i int) []BSR { - // fmt.Printf("GetNTChild(%s,%d) %s\n", nt, i, b) - positions := []int{} - for j, s := range b.Label.Symbols() { - if s == nt { - positions = append(positions, j) - } - } - if len(positions) == 0 { - b.set.fail(b, "Error: %s has no NT %s", b, nt) - } - return b.GetNTChildrenI(positions[i]) -} - -// GetNTChildrenI returns all the BSRs of NT symbol[i] in s -func (b BSR) GetNTChildrenI(i int) []BSR { - // fmt.Printf("bsr.GetNTChildI(%d) %s Pos %d\n", i, b, b.Label.Pos()) - - if i >= len(b.Label.Symbols()) { - b.set.fail(b, "Error: cannot get NT child %d of %s", i, b) - } - if len(b.Label.Symbols()) == 1 { - return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) - } - if len(b.Label.Symbols()) == 2 { - if i == 0 { - return b.set.getNTSlot(b.Label.Symbols()[i], b.leftExtent, b.pivot) - } - return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) - } - if b.Label.Pos() == i+1 { - return b.set.getNTSlot(b.Label.Symbols()[i], b.pivot, b.rightExtent) - } - - // Walk to pos i from the right - symbols := b.Label.Symbols()[:b.Label.Pos()-1] - str := b.set.getString(symbols, b.leftExtent, b.pivot) - for len(symbols) > i+1 && len(symbols) > 2 { - symbols = symbols[:len(symbols)-1] - str = b.set.getString(symbols, str.leftExtent, str.pivot) - } - - bsrs := []BSR{} - if i == 0 { - bsrs = b.set.getNTSlot(b.Label.Symbols()[i], str.leftExtent, str.pivot) - } else { - bsrs = b.set.getNTSlot(b.Label.Symbols()[i], str.pivot, str.rightExtent) - } - - // fmt.Println(bsrs) - - return bsrs -} - -// GetTChildI returns the terminal symbol at position i in b. 
-// GetTChildI panics if symbol i is not a valid terminal -func (b BSR) GetTChildI(i int) *token.Token { - symbols := b.Label.Symbols() - - if i >= len(symbols) { - panic(fmt.Sprintf("%s has no T child %d", b, i)) - } - if symbols[i].IsNonTerminal() { - panic(fmt.Sprintf("symbol %d in %s is an NT", i, b)) - } - - lext := b.leftExtent - for j := 0; j < i; j++ { - if symbols[j].IsNonTerminal() { - nt := b.GetNTChildI(j) - lext += nt.rightExtent - nt.leftExtent - } else { - lext++ - } - } - return b.set.lex.Tokens[lext] -} - -// LeftExtent returns the left extent of the BSR in the stream of tokens -func (b BSR) LeftExtent() int { - return b.leftExtent -} - -// RightExtent returns the right extent of the BSR in the stream of tokens -func (b BSR) RightExtent() int { - return b.rightExtent -} - -// Pivot returns the pivot of the BSR -func (b BSR) Pivot() int { - return b.pivot -} - -func (b BSR) String() string { - srcStr := "ℇ" - if b.leftExtent < b.rightExtent { - srcStr = b.set.lex.GetString(b.LeftExtent(), b.RightExtent()-1) - } - return fmt.Sprintf("%s,%d,%d,%d - %s", - b.Label, b.leftExtent, b.pivot, b.rightExtent, srcStr) -} - -// BSRs Sort interface -func (bs BSRs) Len() int { - return len(bs) -} - -func (bs BSRs) Less(i, j int) bool { - if bs[i].Label < bs[j].Label { - return true - } - if bs[i].Label > bs[j].Label { - return false - } - if bs[i].leftExtent < bs[j].leftExtent { - return true - } - if bs[i].leftExtent > bs[j].leftExtent { - return false - } - return bs[i].rightExtent < bs[j].rightExtent -} - -func (bs BSRs) Swap(i, j int) { - bs[i], bs[j] = bs[j], bs[i] -} - -// stringBSRs Sort interface -func (sbs stringBSRs) Len() int { - return len(sbs) -} - -func (sbs stringBSRs) Less(i, j int) bool { - if sbs[i].Symbols.String() < sbs[j].Symbols.String() { - return true - } - if sbs[i].Symbols.String() > sbs[j].Symbols.String() { - return false - } - if sbs[i].leftExtent < sbs[j].leftExtent { - return true - } - if sbs[i].leftExtent > sbs[j].leftExtent { - return false - } - return sbs[i].rightExtent < sbs[j].rightExtent -} - -func (sbs stringBSRs) Swap(i, j int) { - sbs[i], sbs[j] = sbs[j], sbs[i] -} - -func (s stringBSR) LeftExtent() int { - return s.leftExtent -} - -func (s stringBSR) RightExtent() int { - return s.rightExtent -} - -func (s stringBSR) Pivot() int { - return s.pivot -} - -func (s stringBSR) Empty() bool { - return s.leftExtent == s.pivot && s.pivot == s.rightExtent -} - -// String returns a string representation of s -func (s stringBSR) String() string { - return fmt.Sprintf("%s,%d,%d,%d - %s", &s.Symbols, s.leftExtent, s.pivot, - s.rightExtent, s.set.lex.GetString(s.LeftExtent(), s.RightExtent())) -} - -func (s *Set) getNTSlot(sym symbols.Symbol, leftExtent, rightExtent int) (bsrs []BSR) { - nt, ok := sym.(symbols.NT) - if !ok { - line, col := s.getLineColumn(leftExtent) - failf("%s is not an NT at line %d col %d", sym, line, col) - } - return s.ntSlotEntries[ntSlot{nt, leftExtent, rightExtent}] -} - -func (s *Set) fail(b BSR, format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) 
- line, col := s.getLineColumn(b.LeftExtent()) - panic(fmt.Sprintf("Error in BSR: %s at line %d col %d\n", msg, line, col)) -} - -func failf(format string, args ...interface{}) { - panic(fmt.Sprintf("Error in BSR: %s\n", fmt.Sprintf(format, args...))) -} - -func (s *Set) getLineColumn(cI int) (line, col int) { - return s.lex.GetLineColumnOfToken(cI) -} - -// ReportAmbiguous lists the ambiguous subtrees of the parse forest -func (s *Set) ReportAmbiguous() { - fmt.Println("Ambiguous BSR Subtrees:") - rts := s.GetRoots() - if len(rts) != 1 { - fmt.Printf("BSR has %d ambigous roots\n", len(rts)) - } - for i, b := range s.GetRoots() { - fmt.Println("In root", i) - if !s.report(b) { - fmt.Println("No ambiguous BSRs") - } - } -} - -// report return true iff at least one ambigous BSR was found -func (s *Set) report(b BSR) bool { - ambiguous := false - for i, sym := range b.Label.Symbols() { - ln, col := s.getLineColumn(b.LeftExtent()) - if sym.IsNonTerminal() { - if len(b.GetNTChildrenI(i)) != 1 { - ambiguous = true - fmt.Printf(" Ambigous: in %s: NT %s (%d) at line %d col %d \n", - b, sym, i, ln, col) - fmt.Println(" Children:") - for _, c := range b.GetNTChildrenI(i) { - fmt.Printf(" %s\n", c) - } - } - for _, b1 := range b.GetNTChildrenI(i) { - s.report(b1) - } - } - } - return ambiguous -} - -// IsAmbiguous returns true if the BSR set does not have exactly one root, or -// if any BSR in the set has an NT symbol, which does not have exactly one -// sub-tree. -func (s *Set) IsAmbiguous() bool { - if len(s.GetRoots()) != 1 { - return true - } - return isAmbiguous(s.GetRoot()) -} - -// isAmbiguous returns true if b or any of its NT children is ambiguous. -// A BSR is ambiguous if any of its NT symbols does not have exactly one -// subtrees (children). -func isAmbiguous(b BSR) bool { - for i, s := range b.Label.Symbols() { - if s.IsNonTerminal() { - if len(b.GetNTChildrenI(i)) != 1 { - return true - } - for _, b1 := range b.GetNTChildrenI(i) { - if isAmbiguous(b1) { - return true - } - } - } - } - return false -} - -//---- SPPF ------------ - -type bldSPPF struct { - root *sppf.SymbolNode - extLeafNodes []sppf.Node - pNodes map[string]*sppf.PackedNode - sNodes map[string]*sppf.SymbolNode // Index is Node.Label() -} - -func (pf *Set) ToSPPF() *sppf.SymbolNode { - bld := &bldSPPF{ - pNodes: map[string]*sppf.PackedNode{}, - sNodes: map[string]*sppf.SymbolNode{}, - } - rt := pf.GetRoots()[0] - bld.root = bld.mkSN(rt.Label.Head().String(), rt.leftExtent, rt.rightExtent) - - for len(bld.extLeafNodes) > 0 { - // let w = (μ, i, j) be an extendable leaf node of G - w := bld.extLeafNodes[len(bld.extLeafNodes)-1] - bld.extLeafNodes = bld.extLeafNodes[:len(bld.extLeafNodes)-1] - - // μ is a nonterminal X in Γ - if nt, ok := w.(*sppf.SymbolNode); ok && symbols.IsNT(nt.Symbol) { - bsts := pf.getNTSlot(symbols.ToNT(nt.Symbol), nt.Lext, nt.Rext) - // for each (X ::=γ,i,k, j)∈Υ { mkPN(X ::=γ·,i,k, j,G) } } - for _, bst := range bsts { - slt := bst.Label.Slot() - nt.Children = append(nt.Children, - bld.mkPN(slt.NT, slt.Symbols, slt.Pos, - bst.leftExtent, bst.pivot, bst.rightExtent)) - } - } else { // w is an intermediate node - // suppose μ is X ::=α·δ - in := w.(*sppf.IntermediateNode) - if in.Pos == 1 { - in.Children = append(in.Children, bld.mkPN(in.NT, in.Body, in.Pos, - in.Lext, in.Lext, in.Rext)) - } else { - // for each (α,i,k, j)∈Υ { mkPN(X ::=α·δ,i,k, j,G) } } } } - alpha, delta := in.Body[:in.Pos], in.Body[in.Pos:] - for _, str := range pf.GetAllStrings(alpha, in.Lext, in.Rext) { - body := 
append(str.Symbols, delta...) - in.Children = append(in.Children, - bld.mkPN(in.NT, body, in.Pos, str.leftExtent, str.pivot, str.rightExtent)) - } - } - } - } - return bld.root -} - -func (bld *bldSPPF) mkIN(nt symbols.NT, body symbols.Symbols, pos int, - lext, rext int) *sppf.IntermediateNode { - - in := &sppf.IntermediateNode{ - NT: nt, - Body: body, - Pos: pos, - Lext: lext, - Rext: rext, - } - bld.extLeafNodes = append(bld.extLeafNodes, in) - return in -} - -func (bld *bldSPPF) mkPN(nt symbols.NT, body symbols.Symbols, pos int, - lext, pivot, rext int) *sppf.PackedNode { - // fmt.Printf("mkPN %s,%d,%d,%d\n", slotString(nt, body, pos), lext, pivot, rext) - - // X ::= ⍺ • β, k - pn := &sppf.PackedNode{ - NT: nt, - Body: body, - Pos: pos, - Lext: lext, - Rext: rext, - Pivot: pivot, - LeftChild: nil, - RightChild: nil, - } - if pn1, exist := bld.pNodes[pn.Label()]; exist { - return pn1 - } - bld.pNodes[pn.Label()] = pn - - if len(body) == 0 { // ⍺ = ϵ - pn.RightChild = bld.mkSN("ϵ", lext, lext) - } else { // if ( α=βx, where |x|=1) { - // mkN(x,k, j, y,G) - pn.RightChild = bld.mkSN(pn.Body[pn.Pos-1].String(), pivot, rext) - - // if (|β|=1) mkN(β,i,k,y,G) - if pos == 2 { - pn.LeftChild = bld.mkSN(pn.Body[pn.Pos-2].String(), lext, pivot) - } - // if (|β|>1) mkN(X ::=β·xδ,i,k,y,G) - if pos > 2 { - pn.LeftChild = bld.mkIN(pn.NT, pn.Body, pn.Pos-1, lext, pivot) - } - } - - return pn -} - -func (bld *bldSPPF) mkSN(symbol string, lext, rext int) *sppf.SymbolNode { - sn := &sppf.SymbolNode{ - Symbol: symbol, - Lext: lext, - Rext: rext, - } - if sn1, exist := bld.sNodes[sn.Label()]; exist { - return sn1 - } - bld.sNodes[sn.Label()] = sn - if symbols.IsNT(symbol) { - bld.extLeafNodes = append(bld.extLeafNodes, sn) - } - return sn -} - -func slotString(nt symbols.NT, body symbols.Symbols, pos int) string { - w := new(bytes.Buffer) - fmt.Fprintf(w, "%s:", nt) - for i, sym := range body { - fmt.Fprint(w, " ") - if i == pos { - fmt.Fprint(w, "•") - } - fmt.Fprint(w, sym) - } - if len(body) == pos { - fmt.Fprint(w, "•") - } - return w.String() -} - diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/parser.go b/test/e2e/pkg/grammar/recovery/grammar-auto/parser/parser.go deleted file mode 100644 index 1081063e31a..00000000000 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/parser.go +++ /dev/null @@ -1,873 +0,0 @@ -// Package parser is generated by gogll. Do not edit. -package parser - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/lexer" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/bsr" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/slot" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols" - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/token" -) - -type parser struct { - cI int - - R *descriptors - U *descriptors - - popped map[poppedNode]bool - crf map[clusterNode][]*crfNode - crfNodes map[crfNode]*crfNode - - lex *lexer.Lexer - parseErrors []*Error - - bsrSet *bsr.Set -} - -func newParser(l *lexer.Lexer) *parser { - return &parser{ - cI: 0, - lex: l, - R: &descriptors{}, - U: &descriptors{}, - popped: make(map[poppedNode]bool), - crf: map[clusterNode][]*crfNode{ - {symbols.NT_Start, 0}: {}, - }, - crfNodes: map[crfNode]*crfNode{}, - bsrSet: bsr.New(symbols.NT_Start, l), - parseErrors: nil, - } -} - -// Parse returns the BSR set containing the parse forest. 
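For orientation, a minimal sketch of how the deleted generated packages fit together: the lexer produces the token stream, parser.Parse fills a bsr.Set, and the BSR methods above expose ambiguity checks and SPPF conversion. This is a hedged reconstruction, not code from this PR; lexer.New is assumed to be the rune-slice constructor that gogll emits next to this parser, and the input tokens are illustrative.

	package main

	import (
		"fmt"

		"github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/lexer"
		"github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser"
	)

	func main() {
		// One height: a proposer round, then finalize_block and commit.
		lex := lexer.New([]rune("prepare_proposal process_proposal finalize_block commit")) // assumed constructor
		bsrSet, errs := parser.Parse(lex)
		if errs != nil {
			// Errors arrive sorted by descending input position; errs[0] parsed furthest.
			fmt.Println(errs[0])
			return
		}
		if bsrSet.IsAmbiguous() {
			bsrSet.ReportAmbiguous() // print every ambiguous subtree of the forest
		}
		root := bsrSet.ToSPPF() // share the binarised forest as an SPPF rooted at Start
		fmt.Println(root.Symbol, root.Lext, root.Rext)
	}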
-// If the parse was successfull []*Error is nil -func Parse(l *lexer.Lexer) (*bsr.Set, []*Error) { - return newParser(l).parse() -} - -func (p *parser) parse() (*bsr.Set, []*Error) { - var L slot.Label - m, cU := len(p.lex.Tokens)-1, 0 - p.ntAdd(symbols.NT_Start, 0) - // p.DumpDescriptors() - for !p.R.empty() { - L, cU, p.cI = p.R.remove() - - // fmt.Println() - // fmt.Printf("L:%s, cI:%d, I[p.cI]:%s, cU:%d\n", L, p.cI, p.lex.Tokens[p.cI], cU) - // p.DumpDescriptors() - - switch L { - case slot.Commit0R0: // Commit : ∙commit - - p.bsrSet.Add(slot.Commit0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_Commit) { - p.rtn(symbols.NT_Commit, cU, p.cI) - } else { - p.parseError(slot.Commit0R0, p.cI, followSets[symbols.NT_Commit]) - } - case slot.ConsensusExec0R0: // ConsensusExec : ∙ConsensusHeights - - p.call(slot.ConsensusExec0R1, cU, p.cI) - case slot.ConsensusExec0R1: // ConsensusExec : ConsensusHeights ∙ - - if p.follow(symbols.NT_ConsensusExec) { - p.rtn(symbols.NT_ConsensusExec, cU, p.cI) - } else { - p.parseError(slot.ConsensusExec0R0, p.cI, followSets[symbols.NT_ConsensusExec]) - } - case slot.ConsensusHeight0R0: // ConsensusHeight : ∙ConsensusRounds FinalizeBlock Commit - - p.call(slot.ConsensusHeight0R1, cU, p.cI) - case slot.ConsensusHeight0R1: // ConsensusHeight : ConsensusRounds ∙FinalizeBlock Commit - - if !p.testSelect(slot.ConsensusHeight0R1) { - p.parseError(slot.ConsensusHeight0R1, p.cI, first[slot.ConsensusHeight0R1]) - break - } - - p.call(slot.ConsensusHeight0R2, cU, p.cI) - case slot.ConsensusHeight0R2: // ConsensusHeight : ConsensusRounds FinalizeBlock ∙Commit - - if !p.testSelect(slot.ConsensusHeight0R2) { - p.parseError(slot.ConsensusHeight0R2, p.cI, first[slot.ConsensusHeight0R2]) - break - } - - p.call(slot.ConsensusHeight0R3, cU, p.cI) - case slot.ConsensusHeight0R3: // ConsensusHeight : ConsensusRounds FinalizeBlock Commit ∙ - - if p.follow(symbols.NT_ConsensusHeight) { - p.rtn(symbols.NT_ConsensusHeight, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeight0R0, p.cI, followSets[symbols.NT_ConsensusHeight]) - } - case slot.ConsensusHeight1R0: // ConsensusHeight : ∙FinalizeBlock Commit - - p.call(slot.ConsensusHeight1R1, cU, p.cI) - case slot.ConsensusHeight1R1: // ConsensusHeight : FinalizeBlock ∙Commit - - if !p.testSelect(slot.ConsensusHeight1R1) { - p.parseError(slot.ConsensusHeight1R1, p.cI, first[slot.ConsensusHeight1R1]) - break - } - - p.call(slot.ConsensusHeight1R2, cU, p.cI) - case slot.ConsensusHeight1R2: // ConsensusHeight : FinalizeBlock Commit ∙ - - if p.follow(symbols.NT_ConsensusHeight) { - p.rtn(symbols.NT_ConsensusHeight, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeight1R0, p.cI, followSets[symbols.NT_ConsensusHeight]) - } - case slot.ConsensusHeights0R0: // ConsensusHeights : ∙ConsensusHeight - - p.call(slot.ConsensusHeights0R1, cU, p.cI) - case slot.ConsensusHeights0R1: // ConsensusHeights : ConsensusHeight ∙ - - if p.follow(symbols.NT_ConsensusHeights) { - p.rtn(symbols.NT_ConsensusHeights, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeights0R0, p.cI, followSets[symbols.NT_ConsensusHeights]) - } - case slot.ConsensusHeights1R0: // ConsensusHeights : ∙ConsensusHeight ConsensusHeights - - p.call(slot.ConsensusHeights1R1, cU, p.cI) - case slot.ConsensusHeights1R1: // ConsensusHeights : ConsensusHeight ∙ConsensusHeights - - if !p.testSelect(slot.ConsensusHeights1R1) { - p.parseError(slot.ConsensusHeights1R1, p.cI, first[slot.ConsensusHeights1R1]) - break - } - - p.call(slot.ConsensusHeights1R2, cU, p.cI) - case 
slot.ConsensusHeights1R2: // ConsensusHeights : ConsensusHeight ConsensusHeights ∙ - - if p.follow(symbols.NT_ConsensusHeights) { - p.rtn(symbols.NT_ConsensusHeights, cU, p.cI) - } else { - p.parseError(slot.ConsensusHeights1R0, p.cI, followSets[symbols.NT_ConsensusHeights]) - } - case slot.ConsensusRound0R0: // ConsensusRound : ∙Proposer - - p.call(slot.ConsensusRound0R1, cU, p.cI) - case slot.ConsensusRound0R1: // ConsensusRound : Proposer ∙ - - if p.follow(symbols.NT_ConsensusRound) { - p.rtn(symbols.NT_ConsensusRound, cU, p.cI) - } else { - p.parseError(slot.ConsensusRound0R0, p.cI, followSets[symbols.NT_ConsensusRound]) - } - case slot.ConsensusRound1R0: // ConsensusRound : ∙NonProposer - - p.call(slot.ConsensusRound1R1, cU, p.cI) - case slot.ConsensusRound1R1: // ConsensusRound : NonProposer ∙ - - if p.follow(symbols.NT_ConsensusRound) { - p.rtn(symbols.NT_ConsensusRound, cU, p.cI) - } else { - p.parseError(slot.ConsensusRound1R0, p.cI, followSets[symbols.NT_ConsensusRound]) - } - case slot.ConsensusRounds0R0: // ConsensusRounds : ∙ConsensusRound - - p.call(slot.ConsensusRounds0R1, cU, p.cI) - case slot.ConsensusRounds0R1: // ConsensusRounds : ConsensusRound ∙ - - if p.follow(symbols.NT_ConsensusRounds) { - p.rtn(symbols.NT_ConsensusRounds, cU, p.cI) - } else { - p.parseError(slot.ConsensusRounds0R0, p.cI, followSets[symbols.NT_ConsensusRounds]) - } - case slot.ConsensusRounds1R0: // ConsensusRounds : ∙ConsensusRound ConsensusRounds - - p.call(slot.ConsensusRounds1R1, cU, p.cI) - case slot.ConsensusRounds1R1: // ConsensusRounds : ConsensusRound ∙ConsensusRounds - - if !p.testSelect(slot.ConsensusRounds1R1) { - p.parseError(slot.ConsensusRounds1R1, p.cI, first[slot.ConsensusRounds1R1]) - break - } - - p.call(slot.ConsensusRounds1R2, cU, p.cI) - case slot.ConsensusRounds1R2: // ConsensusRounds : ConsensusRound ConsensusRounds ∙ - - if p.follow(symbols.NT_ConsensusRounds) { - p.rtn(symbols.NT_ConsensusRounds, cU, p.cI) - } else { - p.parseError(slot.ConsensusRounds1R0, p.cI, followSets[symbols.NT_ConsensusRounds]) - } - case slot.FinalizeBlock0R0: // FinalizeBlock : ∙finalize_block - - p.bsrSet.Add(slot.FinalizeBlock0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_FinalizeBlock) { - p.rtn(symbols.NT_FinalizeBlock, cU, p.cI) - } else { - p.parseError(slot.FinalizeBlock0R0, p.cI, followSets[symbols.NT_FinalizeBlock]) - } - case slot.NonProposer0R0: // NonProposer : ∙ProcessProposal - - p.call(slot.NonProposer0R1, cU, p.cI) - case slot.NonProposer0R1: // NonProposer : ProcessProposal ∙ - - if p.follow(symbols.NT_NonProposer) { - p.rtn(symbols.NT_NonProposer, cU, p.cI) - } else { - p.parseError(slot.NonProposer0R0, p.cI, followSets[symbols.NT_NonProposer]) - } - case slot.PrepareProposal0R0: // PrepareProposal : ∙prepare_proposal - - p.bsrSet.Add(slot.PrepareProposal0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_PrepareProposal) { - p.rtn(symbols.NT_PrepareProposal, cU, p.cI) - } else { - p.parseError(slot.PrepareProposal0R0, p.cI, followSets[symbols.NT_PrepareProposal]) - } - case slot.ProcessProposal0R0: // ProcessProposal : ∙process_proposal - - p.bsrSet.Add(slot.ProcessProposal0R1, cU, p.cI, p.cI+1) - p.cI++ - if p.follow(symbols.NT_ProcessProposal) { - p.rtn(symbols.NT_ProcessProposal, cU, p.cI) - } else { - p.parseError(slot.ProcessProposal0R0, p.cI, followSets[symbols.NT_ProcessProposal]) - } - case slot.Proposer0R0: // Proposer : ∙PrepareProposal - - p.call(slot.Proposer0R1, cU, p.cI) - case slot.Proposer0R1: // Proposer : PrepareProposal ∙ - - if 
p.follow(symbols.NT_Proposer) { - p.rtn(symbols.NT_Proposer, cU, p.cI) - } else { - p.parseError(slot.Proposer0R0, p.cI, followSets[symbols.NT_Proposer]) - } - case slot.Proposer1R0: // Proposer : ∙PrepareProposal ProcessProposal - - p.call(slot.Proposer1R1, cU, p.cI) - case slot.Proposer1R1: // Proposer : PrepareProposal ∙ProcessProposal - - if !p.testSelect(slot.Proposer1R1) { - p.parseError(slot.Proposer1R1, p.cI, first[slot.Proposer1R1]) - break - } - - p.call(slot.Proposer1R2, cU, p.cI) - case slot.Proposer1R2: // Proposer : PrepareProposal ProcessProposal ∙ - - if p.follow(symbols.NT_Proposer) { - p.rtn(symbols.NT_Proposer, cU, p.cI) - } else { - p.parseError(slot.Proposer1R0, p.cI, followSets[symbols.NT_Proposer]) - } - case slot.Recovery0R0: // Recovery : ∙ConsensusExec - - p.call(slot.Recovery0R1, cU, p.cI) - case slot.Recovery0R1: // Recovery : ConsensusExec ∙ - - if p.follow(symbols.NT_Recovery) { - p.rtn(symbols.NT_Recovery, cU, p.cI) - } else { - p.parseError(slot.Recovery0R0, p.cI, followSets[symbols.NT_Recovery]) - } - case slot.Start0R0: // Start : ∙Recovery - - p.call(slot.Start0R1, cU, p.cI) - case slot.Start0R1: // Start : Recovery ∙ - - if p.follow(symbols.NT_Start) { - p.rtn(symbols.NT_Start, cU, p.cI) - } else { - p.parseError(slot.Start0R0, p.cI, followSets[symbols.NT_Start]) - } - - default: - panic("This must not happen") - } - } - if !p.bsrSet.Contain(symbols.NT_Start, 0, m) { - p.sortParseErrors() - return nil, p.parseErrors - } - return p.bsrSet, nil -} - -func (p *parser) ntAdd(nt symbols.NT, j int) { - // fmt.Printf("p.ntAdd(%s, %d)\n", nt, j) - failed := true - expected := map[token.Type]string{} - for _, l := range slot.GetAlternates(nt) { - if p.testSelect(l) { - p.dscAdd(l, j, j) - failed = false - } else { - for k, v := range first[l] { - expected[k] = v - } - } - } - if failed { - for _, l := range slot.GetAlternates(nt) { - p.parseError(l, j, expected) - } - } -} - -/*** Call Return Forest ***/ - -type poppedNode struct { - X symbols.NT - k, j int -} - -type clusterNode struct { - X symbols.NT - k int -} - -type crfNode struct { - L slot.Label - i int -} - -/* -suppose that L is Y ::=αX ·β -if there is no CRF node labelled (L,i) - - create one let u be the CRF node labelled (L,i) - -if there is no CRF node labelled (X, j) { - - create a CRF node v labelled (X, j) - create an edge from v to u - ntAdd(X, j) - } else { - - let v be the CRF node labelled (X, j) - if there is not an edge from v to u { - create an edge from v to u - for all ((X, j,h)∈P) { - dscAdd(L, i, h); - bsrAdd(L, i, j, h) - } - } - } -*/ -func (p *parser) call(L slot.Label, i, j int) { - // fmt.Printf("p.call(%s,%d,%d)\n", L,i,j) - u, exist := p.crfNodes[crfNode{L, i}] - // fmt.Printf(" u exist=%t\n", exist) - if !exist { - u = &crfNode{L, i} - p.crfNodes[*u] = u - } - X := L.Symbols()[L.Pos()-1].(symbols.NT) - ndV := clusterNode{X, j} - v, exist := p.crf[ndV] - if !exist { - // fmt.Println(" v !exist") - p.crf[ndV] = []*crfNode{u} - p.ntAdd(X, j) - } else { - // fmt.Println(" v exist") - if !existEdge(v, u) { - // fmt.Printf(" !existEdge(%v)\n", u) - p.crf[ndV] = append(v, u) - // fmt.Printf("|popped|=%d\n", len(popped)) - for pnd := range p.popped { - if pnd.X == X && pnd.k == j { - p.dscAdd(L, i, pnd.j) - p.bsrSet.Add(L, i, j, pnd.j) - } - } - } - } -} - -func existEdge(nds []*crfNode, nd *crfNode) bool { - for _, nd1 := range nds { - if nd1 == nd { - return true - } - } - return false -} - -func (p *parser) rtn(X symbols.NT, k, j int) { - // fmt.Printf("p.rtn(%s,%d,%d)\n", X,k,j) - 
pn := poppedNode{X, k, j} - if _, exist := p.popped[pn]; !exist { - p.popped[pn] = true - for _, nd := range p.crf[clusterNode{X, k}] { - p.dscAdd(nd.L, nd.i, j) - p.bsrSet.Add(nd.L, nd.i, k, j) - } - } -} - -// func CRFString() string { -// buf := new(bytes.Buffer) -// buf.WriteString("CRF: {") -// for cn, nds := range crf{ -// for _, nd := range nds { -// fmt.Fprintf(buf, "%s->%s, ", cn, nd) -// } -// } -// buf.WriteString("}") -// return buf.String() -// } - -func (cn clusterNode) String() string { - return fmt.Sprintf("(%s,%d)", cn.X, cn.k) -} - -func (n crfNode) String() string { - return fmt.Sprintf("(%s,%d)", n.L.String(), n.i) -} - -// func PoppedString() string { -// buf := new(bytes.Buffer) -// buf.WriteString("Popped: {") -// for p, _ := range popped { -// fmt.Fprintf(buf, "(%s,%d,%d) ", p.X, p.k, p.j) -// } -// buf.WriteString("}") -// return buf.String() -// } - -/*** descriptors ***/ - -type descriptors struct { - set []*descriptor -} - -func (ds *descriptors) contain(d *descriptor) bool { - for _, d1 := range ds.set { - if d1 == d { - return true - } - } - return false -} - -func (ds *descriptors) empty() bool { - return len(ds.set) == 0 -} - -func (ds *descriptors) String() string { - buf := new(bytes.Buffer) - buf.WriteString("{") - for i, d := range ds.set { - if i > 0 { - buf.WriteString("; ") - } - fmt.Fprintf(buf, "%s", d) - } - buf.WriteString("}") - return buf.String() -} - -type descriptor struct { - L slot.Label - k int - i int -} - -func (d *descriptor) String() string { - return fmt.Sprintf("%s,%d,%d", d.L, d.k, d.i) -} - -func (p *parser) dscAdd(L slot.Label, k, i int) { - // fmt.Printf("p.dscAdd(%s,%d,%d)\n", L, k, i) - d := &descriptor{L, k, i} - if !p.U.contain(d) { - p.R.set = append(p.R.set, d) - p.U.set = append(p.U.set, d) - } -} - -func (ds *descriptors) remove() (L slot.Label, k, i int) { - d := ds.set[len(ds.set)-1] - ds.set = ds.set[:len(ds.set)-1] - // fmt.Printf("remove: %s,%d,%d\n", d.L, d.k, d.i) - return d.L, d.k, d.i -} - -func (p *parser) DumpDescriptors() { - p.DumpR() - p.DumpU() -} - -func (p *parser) DumpR() { - fmt.Println("R:") - for _, d := range p.R.set { - fmt.Printf(" %s\n", d) - } -} - -func (p *parser) DumpU() { - fmt.Println("U:") - for _, d := range p.U.set { - fmt.Printf(" %s\n", d) - } -} - -/*** TestSelect ***/ - -func (p *parser) follow(nt symbols.NT) bool { - _, exist := followSets[nt][p.lex.Tokens[p.cI].Type()] - return exist -} - -func (p *parser) testSelect(l slot.Label) bool { - _, exist := first[l][p.lex.Tokens[p.cI].Type()] - // fmt.Printf("testSelect(%s) = %t\n", l, exist) - return exist -} - -var first = []map[token.Type]string{ - // Commit : ∙commit - { - token.T_0: "commit", - }, - // Commit : commit ∙ - { - token.EOF: "$", - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusExec : ∙ConsensusHeights - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusExec : ConsensusHeights ∙ - { - token.EOF: "$", - }, - // ConsensusHeight : ∙ConsensusRounds FinalizeBlock Commit - { - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusHeight : ConsensusRounds ∙FinalizeBlock Commit - { - token.T_1: "finalize_block", - }, - // ConsensusHeight : ConsensusRounds FinalizeBlock ∙Commit - { - token.T_0: "commit", - }, - // ConsensusHeight : ConsensusRounds FinalizeBlock Commit ∙ - { - token.EOF: "$", - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: 
"process_proposal", - }, - // ConsensusHeight : ∙FinalizeBlock Commit - { - token.T_1: "finalize_block", - }, - // ConsensusHeight : FinalizeBlock ∙Commit - { - token.T_0: "commit", - }, - // ConsensusHeight : FinalizeBlock Commit ∙ - { - token.EOF: "$", - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusHeights : ∙ConsensusHeight - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusHeights : ConsensusHeight ∙ - { - token.EOF: "$", - }, - // ConsensusHeights : ∙ConsensusHeight ConsensusHeights - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusHeights : ConsensusHeight ∙ConsensusHeights - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusHeights : ConsensusHeight ConsensusHeights ∙ - { - token.EOF: "$", - }, - // ConsensusRound : ∙Proposer - { - token.T_2: "prepare_proposal", - }, - // ConsensusRound : Proposer ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusRound : ∙NonProposer - { - token.T_3: "process_proposal", - }, - // ConsensusRound : NonProposer ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusRounds : ∙ConsensusRound - { - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusRounds : ConsensusRound ∙ - { - token.T_1: "finalize_block", - }, - // ConsensusRounds : ∙ConsensusRound ConsensusRounds - { - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusRounds : ConsensusRound ∙ConsensusRounds - { - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusRounds : ConsensusRound ConsensusRounds ∙ - { - token.T_1: "finalize_block", - }, - // FinalizeBlock : ∙finalize_block - { - token.T_1: "finalize_block", - }, - // FinalizeBlock : finalize_block ∙ - { - token.T_0: "commit", - }, - // NonProposer : ∙ProcessProposal - { - token.T_3: "process_proposal", - }, - // NonProposer : ProcessProposal ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // PrepareProposal : ∙prepare_proposal - { - token.T_2: "prepare_proposal", - }, - // PrepareProposal : prepare_proposal ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ProcessProposal : ∙process_proposal - { - token.T_3: "process_proposal", - }, - // ProcessProposal : process_proposal ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Proposer : ∙PrepareProposal - { - token.T_2: "prepare_proposal", - }, - // Proposer : PrepareProposal ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Proposer : ∙PrepareProposal ProcessProposal - { - token.T_2: "prepare_proposal", - }, - // Proposer : PrepareProposal ∙ProcessProposal - { - token.T_3: "process_proposal", - }, - // Proposer : PrepareProposal ProcessProposal ∙ - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Recovery : ∙ConsensusExec - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Recovery : ConsensusExec ∙ - { - token.EOF: "$", - }, - // 
Start : ∙Recovery - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Start : Recovery ∙ - { - token.EOF: "$", - }, -} - -var followSets = []map[token.Type]string{ - // Commit - { - token.EOF: "$", - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusExec - { - token.EOF: "$", - }, - // ConsensusHeight - { - token.EOF: "$", - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusHeights - { - token.EOF: "$", - }, - // ConsensusRound - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ConsensusRounds - { - token.T_1: "finalize_block", - }, - // FinalizeBlock - { - token.T_0: "commit", - }, - // NonProposer - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // PrepareProposal - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // ProcessProposal - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Proposer - { - token.T_1: "finalize_block", - token.T_2: "prepare_proposal", - token.T_3: "process_proposal", - }, - // Recovery - { - token.EOF: "$", - }, - // Start - { - token.EOF: "$", - }, -} - -/*** Errors ***/ - -/* -Error is returned by Parse at every point at which the parser fails to parse -a grammar production. For non-LL-1 grammars there will be an error for each -alternate attempted by the parser. - -The errors are sorted in descending order of input position (index of token in -the stream of tokens). - -Normally the error of interest is the one that has parsed the largest number of -tokens. -*/ -type Error struct { - // Index of token that caused the error. - cI int - - // Grammar slot at which the error occured. - Slot slot.Label - - // The token at which the error occurred. - Token *token.Token - - // The line and column in the input text at which the error occurred - Line, Column int - - // The tokens expected at the point where the error occurred - Expected map[token.Type]string -} - -func (pe *Error) String() string { - w := new(bytes.Buffer) - fmt.Fprintf(w, "Parse Error: %s I[%d]=%s at line %d col %d\n", - pe.Slot, pe.cI, pe.Token, pe.Line, pe.Column) - exp := []string{} - for _, e := range pe.Expected { - exp = append(exp, e) - } - fmt.Fprintf(w, "Expected one of: [%s]", strings.Join(exp, ",")) - return w.String() -} - -func (p *parser) parseError(slot slot.Label, i int, expected map[token.Type]string) { - pe := &Error{cI: i, Slot: slot, Token: p.lex.Tokens[i], Expected: expected} - p.parseErrors = append(p.parseErrors, pe) -} - -func (p *parser) sortParseErrors() { - sort.Slice(p.parseErrors, - func(i, j int) bool { - return p.parseErrors[j].Token.Lext() < p.parseErrors[i].Token.Lext() - }) - for _, pe := range p.parseErrors { - pe.Line, pe.Column = p.lex.GetLineColumn(pe.Token.Lext()) - } -} diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/slot/slot.go b/test/e2e/pkg/grammar/recovery/grammar-auto/parser/slot/slot.go deleted file mode 100644 index 34a693478bc..00000000000 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/slot/slot.go +++ /dev/null @@ -1,522 +0,0 @@ - -// Package slot is generated by gogll. Do not edit. 
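The slot package deleted below encodes each grammar slot (a production with a dot position) as a dense Label constant, with lookup tables in both directions. A short illustration, grounded in the definitions that follow (the fmt import and surrounding function are omitted):

	l := slot.GetLabel(symbols.NT_Proposer, 1, 2)
	fmt.Println(l)       // Proposer : PrepareProposal ProcessProposal ∙
	fmt.Println(l.EoR()) // true: the dot sits at the end of the rule body
	for _, alt := range slot.GetAlternates(symbols.NT_Proposer) {
		fmt.Println(alt) // the two Proposer alternates, each with the dot at position 0
	}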
-package slot - -import( - "bytes" - "fmt" - - "github.com/cometbft/cometbft/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols" -) - -type Label int - -const( - Commit0R0 Label = iota - Commit0R1 - ConsensusExec0R0 - ConsensusExec0R1 - ConsensusHeight0R0 - ConsensusHeight0R1 - ConsensusHeight0R2 - ConsensusHeight0R3 - ConsensusHeight1R0 - ConsensusHeight1R1 - ConsensusHeight1R2 - ConsensusHeights0R0 - ConsensusHeights0R1 - ConsensusHeights1R0 - ConsensusHeights1R1 - ConsensusHeights1R2 - ConsensusRound0R0 - ConsensusRound0R1 - ConsensusRound1R0 - ConsensusRound1R1 - ConsensusRounds0R0 - ConsensusRounds0R1 - ConsensusRounds1R0 - ConsensusRounds1R1 - ConsensusRounds1R2 - FinalizeBlock0R0 - FinalizeBlock0R1 - NonProposer0R0 - NonProposer0R1 - PrepareProposal0R0 - PrepareProposal0R1 - ProcessProposal0R0 - ProcessProposal0R1 - Proposer0R0 - Proposer0R1 - Proposer1R0 - Proposer1R1 - Proposer1R2 - Recovery0R0 - Recovery0R1 - Start0R0 - Start0R1 -) - -type Slot struct { - NT symbols.NT - Alt int - Pos int - Symbols symbols.Symbols - Label Label -} - -type Index struct { - NT symbols.NT - Alt int - Pos int -} - -func GetAlternates(nt symbols.NT) []Label { - alts, exist := alternates[nt] - if !exist { - panic(fmt.Sprintf("Invalid NT %s", nt)) - } - return alts -} - -func GetLabel(nt symbols.NT, alt, pos int) Label { - l, exist := slotIndex[Index{nt,alt,pos}] - if exist { - return l - } - panic(fmt.Sprintf("Error: no slot label for NT=%s, alt=%d, pos=%d", nt, alt, pos)) -} - -func (l Label) EoR() bool { - return l.Slot().EoR() -} - -func (l Label) Head() symbols.NT { - return l.Slot().NT -} - -func (l Label) Index() Index { - s := l.Slot() - return Index{s.NT, s.Alt, s.Pos} -} - -func (l Label) Alternate() int { - return l.Slot().Alt -} - -func (l Label) Pos() int { - return l.Slot().Pos -} - -func (l Label) Slot() *Slot { - s, exist := slots[l] - if !exist { - panic(fmt.Sprintf("Invalid slot label %d", l)) - } - return s -} - -func (l Label) String() string { - return l.Slot().String() -} - -func (l Label) Symbols() symbols.Symbols { - return l.Slot().Symbols -} - -func (s *Slot) EoR() bool { - return s.Pos >= len(s.Symbols) -} - -func (s *Slot) String() string { - buf := new(bytes.Buffer) - fmt.Fprintf(buf, "%s : ", s.NT) - for i, sym := range s.Symbols { - if i == s.Pos { - fmt.Fprintf(buf, "∙") - } - fmt.Fprintf(buf, "%s ", sym) - } - if s.Pos >= len(s.Symbols) { - fmt.Fprintf(buf, "∙") - } - return buf.String() -} - -var slots = map[Label]*Slot{ - Commit0R0: { - symbols.NT_Commit, 0, 0, - symbols.Symbols{ - symbols.T_0, - }, - Commit0R0, - }, - Commit0R1: { - symbols.NT_Commit, 0, 1, - symbols.Symbols{ - symbols.T_0, - }, - Commit0R1, - }, - ConsensusExec0R0: { - symbols.NT_ConsensusExec, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusHeights, - }, - ConsensusExec0R0, - }, - ConsensusExec0R1: { - symbols.NT_ConsensusExec, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusHeights, - }, - ConsensusExec0R1, - }, - ConsensusHeight0R0: { - symbols.NT_ConsensusHeight, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R0, - }, - ConsensusHeight0R1: { - symbols.NT_ConsensusHeight, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R1, - }, - ConsensusHeight0R2: { - symbols.NT_ConsensusHeight, 0, 2, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R2, - }, - ConsensusHeight0R3: 
{ - symbols.NT_ConsensusHeight, 0, 3, - symbols.Symbols{ - symbols.NT_ConsensusRounds, - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight0R3, - }, - ConsensusHeight1R0: { - symbols.NT_ConsensusHeight, 1, 0, - symbols.Symbols{ - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight1R0, - }, - ConsensusHeight1R1: { - symbols.NT_ConsensusHeight, 1, 1, - symbols.Symbols{ - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight1R1, - }, - ConsensusHeight1R2: { - symbols.NT_ConsensusHeight, 1, 2, - symbols.Symbols{ - symbols.NT_FinalizeBlock, - symbols.NT_Commit, - }, - ConsensusHeight1R2, - }, - ConsensusHeights0R0: { - symbols.NT_ConsensusHeights, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - }, - ConsensusHeights0R0, - }, - ConsensusHeights0R1: { - symbols.NT_ConsensusHeights, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - }, - ConsensusHeights0R1, - }, - ConsensusHeights1R0: { - symbols.NT_ConsensusHeights, 1, 0, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - symbols.NT_ConsensusHeights, - }, - ConsensusHeights1R0, - }, - ConsensusHeights1R1: { - symbols.NT_ConsensusHeights, 1, 1, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - symbols.NT_ConsensusHeights, - }, - ConsensusHeights1R1, - }, - ConsensusHeights1R2: { - symbols.NT_ConsensusHeights, 1, 2, - symbols.Symbols{ - symbols.NT_ConsensusHeight, - symbols.NT_ConsensusHeights, - }, - ConsensusHeights1R2, - }, - ConsensusRound0R0: { - symbols.NT_ConsensusRound, 0, 0, - symbols.Symbols{ - symbols.NT_Proposer, - }, - ConsensusRound0R0, - }, - ConsensusRound0R1: { - symbols.NT_ConsensusRound, 0, 1, - symbols.Symbols{ - symbols.NT_Proposer, - }, - ConsensusRound0R1, - }, - ConsensusRound1R0: { - symbols.NT_ConsensusRound, 1, 0, - symbols.Symbols{ - symbols.NT_NonProposer, - }, - ConsensusRound1R0, - }, - ConsensusRound1R1: { - symbols.NT_ConsensusRound, 1, 1, - symbols.Symbols{ - symbols.NT_NonProposer, - }, - ConsensusRound1R1, - }, - ConsensusRounds0R0: { - symbols.NT_ConsensusRounds, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusRound, - }, - ConsensusRounds0R0, - }, - ConsensusRounds0R1: { - symbols.NT_ConsensusRounds, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusRound, - }, - ConsensusRounds0R1, - }, - ConsensusRounds1R0: { - symbols.NT_ConsensusRounds, 1, 0, - symbols.Symbols{ - symbols.NT_ConsensusRound, - symbols.NT_ConsensusRounds, - }, - ConsensusRounds1R0, - }, - ConsensusRounds1R1: { - symbols.NT_ConsensusRounds, 1, 1, - symbols.Symbols{ - symbols.NT_ConsensusRound, - symbols.NT_ConsensusRounds, - }, - ConsensusRounds1R1, - }, - ConsensusRounds1R2: { - symbols.NT_ConsensusRounds, 1, 2, - symbols.Symbols{ - symbols.NT_ConsensusRound, - symbols.NT_ConsensusRounds, - }, - ConsensusRounds1R2, - }, - FinalizeBlock0R0: { - symbols.NT_FinalizeBlock, 0, 0, - symbols.Symbols{ - symbols.T_1, - }, - FinalizeBlock0R0, - }, - FinalizeBlock0R1: { - symbols.NT_FinalizeBlock, 0, 1, - symbols.Symbols{ - symbols.T_1, - }, - FinalizeBlock0R1, - }, - NonProposer0R0: { - symbols.NT_NonProposer, 0, 0, - symbols.Symbols{ - symbols.NT_ProcessProposal, - }, - NonProposer0R0, - }, - NonProposer0R1: { - symbols.NT_NonProposer, 0, 1, - symbols.Symbols{ - symbols.NT_ProcessProposal, - }, - NonProposer0R1, - }, - PrepareProposal0R0: { - symbols.NT_PrepareProposal, 0, 0, - symbols.Symbols{ - symbols.T_2, - }, - PrepareProposal0R0, - }, - PrepareProposal0R1: { - symbols.NT_PrepareProposal, 0, 1, - symbols.Symbols{ - symbols.T_2, - }, - PrepareProposal0R1, - }, - ProcessProposal0R0: { 
- symbols.NT_ProcessProposal, 0, 0, - symbols.Symbols{ - symbols.T_3, - }, - ProcessProposal0R0, - }, - ProcessProposal0R1: { - symbols.NT_ProcessProposal, 0, 1, - symbols.Symbols{ - symbols.T_3, - }, - ProcessProposal0R1, - }, - Proposer0R0: { - symbols.NT_Proposer, 0, 0, - symbols.Symbols{ - symbols.NT_PrepareProposal, - }, - Proposer0R0, - }, - Proposer0R1: { - symbols.NT_Proposer, 0, 1, - symbols.Symbols{ - symbols.NT_PrepareProposal, - }, - Proposer0R1, - }, - Proposer1R0: { - symbols.NT_Proposer, 1, 0, - symbols.Symbols{ - symbols.NT_PrepareProposal, - symbols.NT_ProcessProposal, - }, - Proposer1R0, - }, - Proposer1R1: { - symbols.NT_Proposer, 1, 1, - symbols.Symbols{ - symbols.NT_PrepareProposal, - symbols.NT_ProcessProposal, - }, - Proposer1R1, - }, - Proposer1R2: { - symbols.NT_Proposer, 1, 2, - symbols.Symbols{ - symbols.NT_PrepareProposal, - symbols.NT_ProcessProposal, - }, - Proposer1R2, - }, - Recovery0R0: { - symbols.NT_Recovery, 0, 0, - symbols.Symbols{ - symbols.NT_ConsensusExec, - }, - Recovery0R0, - }, - Recovery0R1: { - symbols.NT_Recovery, 0, 1, - symbols.Symbols{ - symbols.NT_ConsensusExec, - }, - Recovery0R1, - }, - Start0R0: { - symbols.NT_Start, 0, 0, - symbols.Symbols{ - symbols.NT_Recovery, - }, - Start0R0, - }, - Start0R1: { - symbols.NT_Start, 0, 1, - symbols.Symbols{ - symbols.NT_Recovery, - }, - Start0R1, - }, -} - -var slotIndex = map[Index]Label { - Index{ symbols.NT_Commit,0,0 }: Commit0R0, - Index{ symbols.NT_Commit,0,1 }: Commit0R1, - Index{ symbols.NT_ConsensusExec,0,0 }: ConsensusExec0R0, - Index{ symbols.NT_ConsensusExec,0,1 }: ConsensusExec0R1, - Index{ symbols.NT_ConsensusHeight,0,0 }: ConsensusHeight0R0, - Index{ symbols.NT_ConsensusHeight,0,1 }: ConsensusHeight0R1, - Index{ symbols.NT_ConsensusHeight,0,2 }: ConsensusHeight0R2, - Index{ symbols.NT_ConsensusHeight,0,3 }: ConsensusHeight0R3, - Index{ symbols.NT_ConsensusHeight,1,0 }: ConsensusHeight1R0, - Index{ symbols.NT_ConsensusHeight,1,1 }: ConsensusHeight1R1, - Index{ symbols.NT_ConsensusHeight,1,2 }: ConsensusHeight1R2, - Index{ symbols.NT_ConsensusHeights,0,0 }: ConsensusHeights0R0, - Index{ symbols.NT_ConsensusHeights,0,1 }: ConsensusHeights0R1, - Index{ symbols.NT_ConsensusHeights,1,0 }: ConsensusHeights1R0, - Index{ symbols.NT_ConsensusHeights,1,1 }: ConsensusHeights1R1, - Index{ symbols.NT_ConsensusHeights,1,2 }: ConsensusHeights1R2, - Index{ symbols.NT_ConsensusRound,0,0 }: ConsensusRound0R0, - Index{ symbols.NT_ConsensusRound,0,1 }: ConsensusRound0R1, - Index{ symbols.NT_ConsensusRound,1,0 }: ConsensusRound1R0, - Index{ symbols.NT_ConsensusRound,1,1 }: ConsensusRound1R1, - Index{ symbols.NT_ConsensusRounds,0,0 }: ConsensusRounds0R0, - Index{ symbols.NT_ConsensusRounds,0,1 }: ConsensusRounds0R1, - Index{ symbols.NT_ConsensusRounds,1,0 }: ConsensusRounds1R0, - Index{ symbols.NT_ConsensusRounds,1,1 }: ConsensusRounds1R1, - Index{ symbols.NT_ConsensusRounds,1,2 }: ConsensusRounds1R2, - Index{ symbols.NT_FinalizeBlock,0,0 }: FinalizeBlock0R0, - Index{ symbols.NT_FinalizeBlock,0,1 }: FinalizeBlock0R1, - Index{ symbols.NT_NonProposer,0,0 }: NonProposer0R0, - Index{ symbols.NT_NonProposer,0,1 }: NonProposer0R1, - Index{ symbols.NT_PrepareProposal,0,0 }: PrepareProposal0R0, - Index{ symbols.NT_PrepareProposal,0,1 }: PrepareProposal0R1, - Index{ symbols.NT_ProcessProposal,0,0 }: ProcessProposal0R0, - Index{ symbols.NT_ProcessProposal,0,1 }: ProcessProposal0R1, - Index{ symbols.NT_Proposer,0,0 }: Proposer0R0, - Index{ symbols.NT_Proposer,0,1 }: Proposer0R1, - Index{ symbols.NT_Proposer,1,0 }: 
Proposer1R0, - Index{ symbols.NT_Proposer,1,1 }: Proposer1R1, - Index{ symbols.NT_Proposer,1,2 }: Proposer1R2, - Index{ symbols.NT_Recovery,0,0 }: Recovery0R0, - Index{ symbols.NT_Recovery,0,1 }: Recovery0R1, - Index{ symbols.NT_Start,0,0 }: Start0R0, - Index{ symbols.NT_Start,0,1 }: Start0R1, -} - -var alternates = map[symbols.NT][]Label{ - symbols.NT_Start:[]Label{ Start0R0 }, - symbols.NT_Recovery:[]Label{ Recovery0R0 }, - symbols.NT_ConsensusExec:[]Label{ ConsensusExec0R0 }, - symbols.NT_ConsensusHeights:[]Label{ ConsensusHeights0R0,ConsensusHeights1R0 }, - symbols.NT_ConsensusHeight:[]Label{ ConsensusHeight0R0,ConsensusHeight1R0 }, - symbols.NT_ConsensusRounds:[]Label{ ConsensusRounds0R0,ConsensusRounds1R0 }, - symbols.NT_ConsensusRound:[]Label{ ConsensusRound0R0,ConsensusRound1R0 }, - symbols.NT_Proposer:[]Label{ Proposer0R0,Proposer1R0 }, - symbols.NT_NonProposer:[]Label{ NonProposer0R0 }, - symbols.NT_FinalizeBlock:[]Label{ FinalizeBlock0R0 }, - symbols.NT_Commit:[]Label{ Commit0R0 }, - symbols.NT_PrepareProposal:[]Label{ PrepareProposal0R0 }, - symbols.NT_ProcessProposal:[]Label{ ProcessProposal0R0 }, -} - diff --git a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols/symbols.go b/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols/symbols.go deleted file mode 100644 index 48c0c3c0dca..00000000000 --- a/test/e2e/pkg/grammar/recovery/grammar-auto/parser/symbols/symbols.go +++ /dev/null @@ -1,147 +0,0 @@ - -// Package symbols is generated by gogll. Do not edit. -package symbols - -import( - "bytes" - "fmt" -) - -type Symbol interface{ - isSymbol() - IsNonTerminal() bool - String() string -} - -func (NT) isSymbol() {} -func (T) isSymbol() {} - -// NT is the type of non-terminals symbols -type NT int -const( - NT_Commit NT = iota - NT_ConsensusExec - NT_ConsensusHeight - NT_ConsensusHeights - NT_ConsensusRound - NT_ConsensusRounds - NT_FinalizeBlock - NT_NonProposer - NT_PrepareProposal - NT_ProcessProposal - NT_Proposer - NT_Recovery - NT_Start -) - -// T is the type of terminals symbols -type T int -const( - T_0 T = iota // commit - T_1 // finalize_block - T_2 // prepare_proposal - T_3 // process_proposal -) - -type Symbols []Symbol - -func (ss Symbols) Equal(ss1 Symbols) bool { - if len(ss) != len(ss1) { - return false - } - for i, s := range ss { - if s.String() != ss1[i].String() { - return false - } - } - return true -} - -func (ss Symbols) String() string { - w := new(bytes.Buffer) - for i, s := range ss { - if i > 0 { - fmt.Fprint(w, " ") - } - fmt.Fprintf(w, "%s", s) - } - return w.String() -} - -func (ss Symbols) Strings() []string { - strs := make([]string, len(ss)) - for i, s := range ss { - strs[i] = s.String() - } - return strs -} - -func (NT) IsNonTerminal() bool { - return true -} - -func (T) IsNonTerminal() bool { - return false -} - -func (nt NT) String() string { - return ntToString[nt] -} - -func (t T) String() string { - return tToString[t] -} - -// IsNT returns true iff sym is a non-terminal symbol of the grammar -func IsNT(sym string) bool { - _, exist := stringNT[sym] - return exist -} - -// ToNT returns the NT value of sym or panics if sym is not a non-terminal of the grammar -func ToNT(sym string) NT { - nt, exist := stringNT[sym] - if !exist { - panic(fmt.Sprintf("No NT: %s", sym)) - } - return nt -} - -var ntToString = []string { - "Commit", /* NT_Commit */ - "ConsensusExec", /* NT_ConsensusExec */ - "ConsensusHeight", /* NT_ConsensusHeight */ - "ConsensusHeights", /* NT_ConsensusHeights */ - "ConsensusRound", /* NT_ConsensusRound */ - 
"ConsensusRounds", /* NT_ConsensusRounds */ - "FinalizeBlock", /* NT_FinalizeBlock */ - "NonProposer", /* NT_NonProposer */ - "PrepareProposal", /* NT_PrepareProposal */ - "ProcessProposal", /* NT_ProcessProposal */ - "Proposer", /* NT_Proposer */ - "Recovery", /* NT_Recovery */ - "Start", /* NT_Start */ -} - -var tToString = []string { - "commit", /* T_0 */ - "finalize_block", /* T_1 */ - "prepare_proposal", /* T_2 */ - "process_proposal", /* T_3 */ -} - -var stringNT = map[string]NT{ - "Commit":NT_Commit, - "ConsensusExec":NT_ConsensusExec, - "ConsensusHeight":NT_ConsensusHeight, - "ConsensusHeights":NT_ConsensusHeights, - "ConsensusRound":NT_ConsensusRound, - "ConsensusRounds":NT_ConsensusRounds, - "FinalizeBlock":NT_FinalizeBlock, - "NonProposer":NT_NonProposer, - "PrepareProposal":NT_PrepareProposal, - "ProcessProposal":NT_ProcessProposal, - "Proposer":NT_Proposer, - "Recovery":NT_Recovery, - "Start":NT_Start, -} diff --git a/test/e2e/pkg/infra/digitalocean/digitalocean.go b/test/e2e/pkg/infra/digitalocean/digitalocean.go index d9af0c25228..db6d753aa8a 100644 --- a/test/e2e/pkg/infra/digitalocean/digitalocean.go +++ b/test/e2e/pkg/infra/digitalocean/digitalocean.go @@ -3,6 +3,7 @@ package digitalocean import ( "context" "fmt" + "net" "os" "path/filepath" "strconv" @@ -20,8 +21,13 @@ type Provider struct { infra.ProviderData } -// Noop currently. Setup is performed externally to the e2e test tool. func (p *Provider) Setup() error { + for _, n := range p.Testnet.Nodes { + if n.ClockSkew != 0 { + return fmt.Errorf("node %q contains clock skew configuration (not supported on DO)", n.Name) + } + } + return nil } @@ -46,6 +52,7 @@ func (p Provider) StartNodes(ctx context.Context, nodes ...*e2e.Node) error { return execAnsible(ctx, p.Testnet.Dir, playbookFile, nodeIPs) } + func (p Provider) StopTestnet(ctx context.Context) error { nodeIPs := make([]string, len(p.Testnet.Nodes)) for i, n := range p.Testnet.Nodes { @@ -59,6 +66,7 @@ func (p Provider) StopTestnet(ctx context.Context) error { } return execAnsible(ctx, p.Testnet.Dir, playbookFile, nodeIPs) } + func (p Provider) Disconnect(ctx context.Context, _ string, ip string) error { playbook := ansiblePerturbConnectionBytes(true) playbookFile := getNextPlaybookFilename() @@ -67,6 +75,7 @@ func (p Provider) Disconnect(ctx context.Context, _ string, ip string) error { } return execAnsible(ctx, p.Testnet.Dir, playbookFile, []string{ip}) } + func (p Provider) Reconnect(ctx context.Context, _ string, ip string) error { playbook := ansiblePerturbConnectionBytes(false) playbookFile := getNextPlaybookFilename() @@ -76,11 +85,15 @@ func (p Provider) Reconnect(ctx context.Context, _ string, ip string) error { return execAnsible(ctx, p.Testnet.Dir, playbookFile, []string{ip}) } -func (p Provider) CheckUpgraded(_ context.Context, node *e2e.Node) (string, bool, error) { +func (Provider) CheckUpgraded(_ context.Context, node *e2e.Node) (string, bool, error) { // Upgrade not supported yet by DO provider return node.Name, false, nil } +func (Provider) NodeIP(node *e2e.Node) net.IP { + return node.ExternalIP +} + func (p Provider) writePlaybook(yaml, playbook string) error { //nolint: gosec // G306: Expect WriteFile permissions to be 0600 or less @@ -101,7 +114,7 @@ const basePlaybook = `- name: e2e custom playbook ` func ansibleAddTask(playbook, name, contents string) string { - return playbook + " - name: " + name + "\n" + contents + return playbook + " - name: " + name + "\n" + contents + "\n" } func ansibleAddSystemdTask(playbook string, starting bool) 
string { @@ -109,6 +122,7 @@ func ansibleAddSystemdTask(playbook string, starting bool) string { if starting { startStop = "started" } + // testappd is the name of the daemon running the node in the ansible scripts in the qa-infra repo. contents := fmt.Sprintf(` ansible.builtin.systemd: name: testappd state: %s @@ -126,7 +140,7 @@ func ansibleAddShellTasks(playbook, name string, shells ...string) string { } -// ansibleStartBytes generates an Ansible playbook to start the network +// ansibleSystemdBytes generates an Ansible playbook to start or stop the network. func ansibleSystemdBytes(starting bool) string { return ansibleAddSystemdTask(basePlaybook, starting) } @@ -147,7 +161,7 @@ func ansiblePerturbConnectionBytes(disconnect bool) string { } // execAnsible runs an Ansible playbook for a testnet. -func execAnsible(ctx context.Context, dir, playbook string, nodeIPs []string, args ...string) error { +func execAnsible(ctx context.Context, dir, playbook string, nodeIPs []string, args ...string) error { //nolint:unparam playbook = filepath.Join(dir, playbook) return exec.CommandVerbose(ctx, append( []string{"ansible-playbook", playbook, "-f", "50", "-u", "root", "--inventory", strings.Join(nodeIPs, ",") + ","}, diff --git a/test/e2e/pkg/infra/docker/docker.go b/test/e2e/pkg/infra/docker/docker.go index e1710ba5890..90d9b1ced57 100644 --- a/test/e2e/pkg/infra/docker/docker.go +++ b/test/e2e/pkg/infra/docker/docker.go @@ -3,6 +3,7 @@ package docker import ( "bytes" "context" + "net" "os" "path/filepath" "text/template" @@ -12,6 +13,8 @@ import ( "github.com/cometbft/cometbft/test/e2e/pkg/infra" ) +const DockerComposeFile = "compose.yaml" + var _ infra.Provider = (*Provider)(nil) // Provider implements a docker-compose backed infrastructure provider. type Provider struct { @@ -26,12 +29,12 @@ func (p *Provider) Setup() error { if err != nil { return err } - //nolint: gosec - // G306: Expect WriteFile permissions to be 0600 or less - err = os.WriteFile(filepath.Join(p.Testnet.Dir, "docker-compose.yml"), compose, 0o644) + //nolint: gosec // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(filepath.Join(p.Testnet.Dir, DockerComposeFile), compose, 0o644) if err != nil { return err } + return nil } @@ -55,7 +58,7 @@ func (p Provider) Reconnect(ctx context.Context, name string, _ string) error { return Exec(ctx, "network", "connect", p.Testnet.Name+"_"+p.Testnet.Name, name) } -func (p Provider) CheckUpgraded(ctx context.Context, node *e2e.Node) (string, bool, error) { +func (Provider) CheckUpgraded(ctx context.Context, node *e2e.Node) (string, bool, error) { testnet := node.Testnet out, err := ExecComposeOutput(ctx, testnet.Dir, "ps", "-q", node.Name) if err != nil { @@ -64,17 +67,21 @@ func (p Provider) CheckUpgraded(ctx context.Context, node *e2e.Node) (string, bo name := node.Name upgraded := false if len(out) == 0 { - name = name + "_u" + name += "_u" upgraded = true } return name, upgraded, nil } +func (Provider) NodeIP(node *e2e.Node) net.IP { + return node.InternalIP +} + // dockerComposeBytes generates a Docker Compose config file for a testnet and returns the // file as bytes to be written out to disk. func dockerComposeBytes(testnet *e2e.Testnet) ([]byte, error) { // The deprecated top-level 'version' key is omitted; current Docker Compose supports IPv6 without it.
- tmpl, err := template.New("docker-compose").Parse(`version: '2.4' + tmpl, err := template.New("docker-compose").Parse(` networks: {{ .Name }}: labels: @@ -98,6 +105,12 @@ services: {{- if or (eq .ABCIProtocol "builtin") (eq .ABCIProtocol "builtin_connsync") }} entrypoint: /usr/bin/entrypoint-builtin {{- end }} +{{- if .ClockSkew }} + environment: + - COMETBFT_CLOCK_SKEW={{ .ClockSkew }} +{{- end }} + cap_add: + - NET_ADMIN init: true ports: - 26656 @@ -112,7 +125,6 @@ services: - 2346 volumes: - ./{{ .Name }}:/cometbft - - ./{{ .Name }}:/tendermint networks: {{ $.Name }}: ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .InternalIP }} @@ -126,6 +138,12 @@ services: {{- if or (eq .ABCIProtocol "builtin") (eq .ABCIProtocol "builtin_connsync") }} entrypoint: /usr/bin/entrypoint-builtin {{- end }} +{{- if .ClockSkew }} + environment: + - COMETBFT_CLOCK_SKEW={{ .ClockSkew }} +{{- end }} + cap_add: + - NET_ADMIN init: true ports: - 26656 @@ -140,7 +158,6 @@ services: - 2346 volumes: - ./{{ .Name }}:/cometbft - - ./{{ .Name }}:/tendermint networks: {{ $.Name }}: ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .InternalIP }} @@ -161,21 +178,21 @@ services: // ExecCompose runs a Docker Compose command for a testnet. func ExecCompose(ctx context.Context, dir string, args ...string) error { return exec.Command(ctx, append( - []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + []string{"docker", "compose", "-f", filepath.Join(dir, DockerComposeFile)}, args...)...) } -// ExecCompose runs a Docker Compose command for a testnet and returns the command's output. +// ExecComposeOutput runs a Docker Compose command for a testnet and returns the command's output. func ExecComposeOutput(ctx context.Context, dir string, args ...string) ([]byte, error) { return exec.CommandOutput(ctx, append( - []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + []string{"docker", "compose", "-f", filepath.Join(dir, DockerComposeFile)}, args...)...) } // ExecComposeVerbose runs a Docker Compose command for a testnet and displays its output. func ExecComposeVerbose(ctx context.Context, dir string, args ...string) error { return exec.CommandVerbose(ctx, append( - []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + []string{"docker", "compose", "-f", filepath.Join(dir, DockerComposeFile)}, args...)...) } @@ -183,3 +200,8 @@ func ExecComposeVerbose(ctx context.Context, dir string, args ...string) error { func Exec(ctx context.Context, args ...string) error { return exec.Command(ctx, append([]string{"docker"}, args...)...) } + +// ExecVerbose runs a Docker command while displaying its output. +func ExecVerbose(ctx context.Context, args ...string) error { + return exec.CommandVerbose(ctx, append([]string{"docker"}, args...)...) +} diff --git a/test/e2e/pkg/infra/latencies.go b/test/e2e/pkg/infra/latencies.go new file mode 100644 index 00000000000..0d2223b5ee8 --- /dev/null +++ b/test/e2e/pkg/infra/latencies.go @@ -0,0 +1,48 @@ +package infra + +import ( + "bytes" + "os" + "text/template" + + e2e "github.com/cometbft/cometbft/test/e2e/pkg" +) + +// GenerateIPZonesTable generates a file with a table mapping IP addresses to geographical zone for latencies. +func GenerateIPZonesTable(nodes []*e2e.Node, zonesPath string, useInternalIP bool) error { + // Generate file with table mapping IP addresses to geographical zone for latencies. 
+ zonesTable, err := zonesTableBytes(nodes, useInternalIP) + if err != nil { + return err + } + //nolint: gosec // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(zonesPath, zonesTable, 0o644) + if err != nil { + return err + } + return nil +} + +func zonesTableBytes(nodes []*e2e.Node, useInternalIP bool) ([]byte, error) { + tmpl, err := template.New("zones").Parse(`Node,IP,Zone +{{- range .Nodes }} +{{- if .Zone }} +{{ .Name }},{{ if $.UseInternalIP }}{{ .InternalIP }}{{ else }}{{ .ExternalIP }}{{ end }},{{ .Zone }} +{{- end }} +{{- end }}`) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, struct { + Nodes []*e2e.Node + UseInternalIP bool + }{ + Nodes: nodes, + UseInternalIP: useInternalIP, + }) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/test/e2e/pkg/infra/provider.go b/test/e2e/pkg/infra/provider.go index ab63f87ba01..aad430f59f0 100644 --- a/test/e2e/pkg/infra/provider.go +++ b/test/e2e/pkg/infra/provider.go @@ -2,6 +2,7 @@ package infra import ( "context" + "net" e2e "github.com/cometbft/cometbft/test/e2e/pkg" ) @@ -9,7 +10,6 @@ import ( // Provider defines an API for manipulating the infrastructure of a // specific testnet. type Provider interface { - // Setup generates any necessary configuration for the infrastructure // provider during testnet setup. Setup() error @@ -17,23 +17,28 @@ type Provider interface { // Starts the nodes passed as parameters. A node MUST NOT // be started twice before calling StopTestnet. // If no nodes are passed, start the whole network. - StartNodes(context.Context, ...*e2e.Node) error + StartNodes(ctx context.Context, nodes ...*e2e.Node) error // Stops the whole network - StopTestnet(context.Context) error + StopTestnet(ctx context.Context) error // Disconnects the node from the network - Disconnect(context.Context, string, string) error + Disconnect(ctx context.Context, name string, ip string) error // Reconnects the node to the network. // This should only be called after Disconnect - Reconnect(context.Context, string, string) error + Reconnect(ctx context.Context, name string, ip string) error - // Returns the the provider's infrastructure data GetInfrastructureData() *e2e.InfrastructureData // Checks whether the node has been upgraded in this run - CheckUpgraded(context.Context, *e2e.Node) (string, bool, error) + CheckUpgraded(ctx context.Context, node *e2e.Node) (string, bool, error) + + // NodeIP returns the IP address of the node that is used to communicate // with other nodes in the network (the internal IP address in case of the // docker infra type and the external IP address otherwise). + NodeIP(node *e2e.Node) net.IP } type ProviderData struct { @@ -41,7 +46,7 @@ type ProviderData struct { InfrastructureData e2e.InfrastructureData } -// Returns the the provider's infrastructure data +// GetInfrastructureData returns the provider's infrastructure data. func (pd ProviderData) GetInfrastructureData() *e2e.InfrastructureData { return &pd.InfrastructureData } diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go index 22f8e9ae0cf..33a0a9009ba 100644 --- a/test/e2e/pkg/infrastructure.go +++ b/test/e2e/pkg/infrastructure.go @@ -18,7 +18,7 @@ const ( // InfrastructureData contains the relevant information for a set of existing // infrastructure that is to be used for running a testnet.
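Taken together, the new Provider.NodeIP hook and infra.GenerateIPZonesTable let the runner emit a zones table holding the addresses peers actually dial. A hedged usage sketch (the zones.csv file name and the surrounding testnet variable are illustrative, not from this PR; assume it runs inside a setup helper that returns an error):

	// Internal IPs apply to the docker infra type, external IPs otherwise,
	// mirroring the NodeIP semantics documented above.
	useInternalIP := true // false for the digitalocean provider
	zonesPath := filepath.Join(testnet.Dir, "zones.csv")
	if err := infra.GenerateIPZonesTable(testnet.Nodes, zonesPath, useInternalIP); err != nil {
		return err
	}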
type InfrastructureData struct { - Path string + Path string `json:"path"` // Provider is the name of infrastructure provider backing the testnet. // For example, 'docker' if it is running locally in a docker network or @@ -39,15 +39,17 @@ type InfrastructureData struct { // InstanceData contains the relevant information for a machine instance backing // one of the nodes in the testnet. type InstanceData struct { - IPAddress net.IP `json:"ip_address"` - ExtIPAddress net.IP `json:"ext_ip_address"` - Port uint32 `json:"port"` + IPAddress net.IP `json:"ip_address"` + ExtIPAddress net.IP `json:"ext_ip_address"` + RPCPort uint32 `json:"rpc_port"` + GRPCPort uint32 `json:"grpc_port"` + PrivilegedGRPCPort uint32 `json:"privileged_grpc_port"` } -func sortNodeNames(m Manifest) []string { +func sortNodeNames(m *Manifest) []string { // Set up nodes, in alphabetical order (IPs and ports get same order). nodeNames := []string{} - for name := range m.Nodes { + for name := range m.NodesMap { nodeNames = append(nodeNames, name) } sort.Strings(nodeNames) @@ -72,13 +74,14 @@ func NewDockerInfrastructureData(m Manifest) (InfrastructureData, error) { Network: netAddress, } localHostIP := net.ParseIP("127.0.0.1") - for _, name := range sortNodeNames(m) { + for _, name := range sortNodeNames(&m) { ifd.Instances[name] = InstanceData{ - IPAddress: ipGen.Next(), - ExtIPAddress: localHostIP, - Port: portGen.Next(), + IPAddress: ipGen.Next(), + ExtIPAddress: localHostIP, + RPCPort: portGen.Next(), + GRPCPort: portGen.Next(), + PrivilegedGRPCPort: portGen.Next(), } - } return ifd, nil } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 02f38f43d4b..254d4876ca2 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -29,9 +29,9 @@ type Manifest struct { // specifying an empty set will start with no validators in genesis, and // the application must return the validator set in InitChain via the // setting validator_update.0 (see below). - Validators *map[string]int64 `toml:"validators"` + Validators map[string]int64 `toml:"validators"` - // ValidatorUpdates is a map of heights to validator names and their power, + // ValidatorUpdatesMap is a map of heights to validator names and their power, // and will be returned by the ABCI application. For example, the following // changes the power of validator01 and validator02 at height 1000: // @@ -43,27 +43,22 @@ type Manifest struct { // application returns the validator updates as-is, i.e. removing a // validator must be done by returning it with power 0, and any validators // not specified are not changed. - ValidatorUpdates map[string]map[string]int64 `toml:"validator_update"` + ValidatorUpdatesMap map[string]map[string]int64 `toml:"validator_update"` - // Nodes specifies the network nodes. At least one node must be given. - Nodes map[string]*ManifestNode `toml:"node"` + // NodesMap specifies the network nodes. At least one node must be given. + NodesMap map[string]*ManifestNode `toml:"node"` // Disable the peer-exchange reactor on all nodes. DisablePexReactor bool `toml:"disable_pex"` // KeyType sets the curve that will be used by validators. - // Options are ed25519 & secp256k1 + // Options are ed25519, secp256k1 and bls12381. KeyType string `toml:"key_type"` // Evidence indicates the amount of evidence that will be injected into the // testnet via the RPC endpoint of a random node. 
Default is 0. Evidence int `toml:"evidence"` - // VoteExtensionsEnableHeight configures the first height during which - // the chain will use and require vote extension data to be present - // in precommit messages. - VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` - // ABCIProtocol specifies the protocol used to communicate with the ABCI // application: "unix", "tcp", "grpc", "builtin" or "builtin_connsync". // @@ -91,28 +86,100 @@ LoadTxSizeBytes int `toml:"load_tx_size_bytes"` LoadTxBatchSize int `toml:"load_tx_batch_size"` LoadTxConnections int `toml:"load_tx_connections"` + LoadMaxSeconds int `toml:"load_max_seconds"` + LoadMaxTxs int `toml:"load_max_txs"` + + // Weight for each lane defined by the app. The transaction loader will + // assign lanes to generated transactions proportionally to their weights. + LoadLaneWeights map[string]uint `toml:"load_lane_weights"` + + // LogLevel specifies the log level to be set on all nodes. + LogLevel string `toml:"log_level"` + + // LogFormat specifies the log format to be set on all nodes. + LogFormat string `toml:"log_format"` // Enable or disable Prometheus metrics on all nodes. // Defaults to false (disabled). Prometheus bool `toml:"prometheus"` + // BlockMaxBytes specifies the maximum size in bytes of a block. This + // value will be written to the genesis file of all nodes. + BlockMaxBytes int64 `toml:"block_max_bytes"` + // Defines a minimum size for the vote extensions. VoteExtensionSize uint `toml:"vote_extension_size"` + // VoteExtensionsEnableHeight configures the first height during which + // the chain will use and require vote extension data to be present + // in precommit messages. + VoteExtensionsEnableHeight int64 `toml:"vote_extensions_enable_height"` + + // VoteExtensionsUpdateHeight configures the height at which consensus + // param VoteExtensionsEnableHeight will be set. + // -1 denotes it is set at genesis. + // 0 denotes it is set at InitChain. + VoteExtensionsUpdateHeight int64 `toml:"vote_extensions_update_height"` + // Upper bound of sleep duration when gossiping votes and block parts PeerGossipIntraloopSleepDuration time.Duration `toml:"peer_gossip_intraloop_sleep_duration"` + // Maximum number of peers to which the node gossips transactions + ExperimentalMaxGossipConnectionsToPersistentPeers uint `toml:"experimental_max_gossip_connections_to_persistent_peers"` + ExperimentalMaxGossipConnectionsToNonPersistentPeers uint `toml:"experimental_max_gossip_connections_to_non_persistent_peers"` + // Enable or disable e2e tests for CometBFT's expected behavior with respect // to ABCI. ABCITestsEnabled bool `toml:"abci_tests_enabled"` + + // Default geographical zone ID for simulating latencies, assigned to nodes that don't have a + // specific zone assigned. + DefaultZone string `toml:"default_zone"` + + // PbtsEnableHeight configures the first height during which + // the chain will start using Proposer-Based Timestamps (PBTS) + // to create and validate new blocks. + PbtsEnableHeight int64 `toml:"pbts_enable_height"` + + // PbtsUpdateHeight configures the height at which consensus + // param PbtsEnableHeight will be set. + // -1 denotes it is set at genesis. + // 0 denotes it is set at InitChain. + PbtsUpdateHeight int64 `toml:"pbts_update_height"` + + // Used to disable lanes for testing behavior of + // networks that upgrade to a version of CometBFT + // that supports lanes but do not opt for using them.
+ NoLanes bool `toml:"no_lanes"` + + // Mapping from lane IDs to lane priorities. These lanes will be used by the + // application for setting up the mempool and for classifying transactions. + Lanes map[string]uint32 `toml:"lanes"` + + // Genesis is a set of key-value config entries to write to the + // produced genesis file. The format is "key = value". + // Example: "consensus_params.evidence.max_bytes = 1024". + Genesis []string `toml:"genesis"` + + // Config is a set of key-value config entries to write to CometBFT's + // configuration files for all nodes. The format is "key = value". + // Example: "p2p.send_rate = 512000". + Config []string `toml:"config"` + + // If true, the application will return validator updates and + // `ConsensusParams` updates at every height. + // This is useful to create a more dynamic testnet. + // * An existing validator will be chosen, and its power will alternate between 0 and 1. + // * `ConsensusParams` will be flipping on and off key types not set at genesis. + ConstantFlip bool `toml:"constant_flip"` } // ManifestNode represents a node in a testnet manifest. type ManifestNode struct { - // Mode specifies the type of node: "validator", "full", "light" or "seed". + // ModeStr specifies the type of node: "validator", "full", "light" or "seed". // Defaults to "validator". Full nodes do not get a signing key (a dummy key // is generated), and seed nodes run in seed mode with the PEX reactor enabled. - Mode string `toml:"mode"` + ModeStr string `toml:"mode"` // Version specifies which version of CometBFT this node is. Specifying different // versions for different nodes allows for testing the interaction of different @@ -121,24 +188,24 @@ type ManifestNode struct { // on the machine where the test is being run. Version string `toml:"version"` - // Seeds is the list of node names to use as P2P seed nodes. Defaults to none. - Seeds []string `toml:"seeds"` + // SeedsList is the list of node names to use as P2P seed nodes. Defaults to none. + SeedsList []string `toml:"seeds"` - // PersistentPeers is a list of node names to maintain persistent P2P + // PersistentPeersList is a list of node names to maintain persistent P2P // connections to. If neither seeds nor persistent peers are specified, // this defaults to all other nodes in the network. For light clients, // this relates to the providers the light client is connected to. - PersistentPeers []string `toml:"persistent_peers"` + PersistentPeersList []string `toml:"persistent_peers"` - // Database specifies the database backend: "goleveldb", "cleveldb", - // "rocksdb", "boltdb", or "badgerdb". Defaults to goleveldb. + // Database specifies the database backend: "goleveldb", "rocksdb", + // "pebbledb" or "badgerdb". Defaults to "goleveldb". Database string `toml:"database"` - // PrivvalProtocol specifies the protocol used to sign consensus messages: + // PrivvalProtocolStr specifies the protocol used to sign consensus messages: // "file", "unix", or "tcp". Defaults to "file". For unix and tcp, the ABCI // application will launch a remote signer client in a separate goroutine. // Only nodes with mode=validator will actually make use of this. - PrivvalProtocol string `toml:"privval_protocol"` + PrivvalProtocolStr string `toml:"privval_protocol"` // StartAt specifies the block height at which the node will be started. The // runner will wait for the network to reach at least this block height. @@ -154,10 +221,10 @@ type ManifestNode struct { // StartAt set to an appropriate height where a snapshot is available. 
StateSync bool `toml:"state_sync"` - // PersistInterval specifies the height interval at which the application + // PersistIntervalPtr specifies the height interval at which the application // will persist state to disk. Defaults to 1 (every height), setting this to // 0 disables state persistence. - PersistInterval *uint64 `toml:"persist_interval"` + PersistIntervalPtr *uint64 `toml:"persist_interval"` // SnapshotInterval specifies the height interval at which the application // will take state sync snapshots. Defaults to 0 (disabled). @@ -185,6 +252,32 @@ type ManifestNode struct { // It defaults to false so unless the configured, the node will // receive load. SendNoLoad bool `toml:"send_no_load"` + + // Geographical zone ID for simulating latencies. + Zone string `toml:"zone"` + + // ExperimentalKeyLayout sets the key representation in the DB + ExperimentalKeyLayout string `toml:"experimental_db_key_layout"` + + // Compact triggers compaction on the DB after pruning + Compact bool `toml:"compact"` + + // CompactionInterval sets the number of blocks at which we trigger compaction + CompactionInterval int64 `toml:"compaction_interval"` + + // DiscardABCIResponses disables storing ABCI responses + DiscardABCIResponses bool `toml:"discard_abci_responses"` + + // Indexer sets the indexer, default kv + Indexer string `toml:"indexer"` + + // Simulated clock skew for this node + ClockSkew time.Duration `toml:"clock_skew"` + + // Config is a set of key-value config entries to write to CometBFT's + // configuration files for this node. The format is "key = value". + // Example: "p2p.send_rate = 512000". + Config []string `toml:"config"` } // Save saves the testnet manifest to a file. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 5767ba37ad1..6c7f7b7eb35 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,28 +1,31 @@ package e2e import ( - "bytes" "context" + "encoding/csv" "errors" "fmt" "io" "math/rand" "net" - "os" "path/filepath" + "slices" "strconv" "strings" - "text/template" "time" + _ "embed" + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/bls12381" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/secp256k1" + cmtrand "github.com/cometbft/cometbft/internal/rand" rpchttp "github.com/cometbft/cometbft/rpc/client/http" grpcclient "github.com/cometbft/cometbft/rpc/grpc/client" grpcprivileged "github.com/cometbft/cometbft/rpc/grpc/client/privileged" - - _ "embed" + "github.com/cometbft/cometbft/test/e2e/app" + "github.com/cometbft/cometbft/types" ) const ( @@ -62,45 +65,43 @@ const ( PerturbationRestart Perturbation = "restart" PerturbationUpgrade Perturbation = "upgrade" - EvidenceAgeHeight int64 = 7 - EvidenceAgeTime time.Duration = 500 * time.Millisecond + EvidenceAgeHeight int64 = 14 + EvidenceAgeTime time.Duration = 1500 * time.Millisecond ) // Testnet represents a single testnet. +// It includes all fields from the associated Manifest instance.
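The embedding announced just above (and the earlier renames such as `Mode` → `ModeStr` and `PersistInterval` → `PersistIntervalPtr`) leans on Go field promotion: the embedded raw manifest field needs a name distinct from the typed field the outer struct exposes. A minimal sketch of the collision this avoids, with simplified types (the struct definition follows below):

```go
package main

import "fmt"

// Sketch of why the manifest fields were renamed before embedding: if the
// embedded struct kept a field literally named Mode, it would clash with
// the typed Mode field the outer struct wants to expose.
type ManifestNode struct {
	ModeStr string // raw value from TOML, e.g. "seed"
}

type Mode string

type Node struct {
	ManifestNode // promoted: n.ModeStr is accessible directly

	Mode Mode // parsed, typed view of ModeStr
}

func main() {
	n := Node{ManifestNode: ManifestNode{ModeStr: "seed"}}
	if n.ModeStr != "" {
		n.Mode = Mode(n.ModeStr)
	}
	fmt.Println(n.Mode, n.ModeStr) // both reachable without a name clash
}
```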
type Testnet struct { - Name string - File string - Dir string - IP *net.IPNet - InitialHeight int64 - InitialState map[string]string - Validators map[*Node]int64 - ValidatorUpdates map[int64]map[*Node]int64 - Nodes []*Node - DisablePexReactor bool - KeyType string - Evidence int - LoadTxSizeBytes int - LoadTxBatchSize int - LoadTxConnections int - ABCIProtocol string - PrepareProposalDelay time.Duration - ProcessProposalDelay time.Duration - CheckTxDelay time.Duration - VoteExtensionDelay time.Duration - FinalizeBlockDelay time.Duration - UpgradeVersion string - Prometheus bool - VoteExtensionsEnableHeight int64 - VoteExtensionSize uint - PeerGossipIntraloopSleepDuration time.Duration - ABCITestsEnabled bool + *Manifest + + Name string + File string + Dir string + + IP *net.IPNet + ValidatorUpdates map[int64]map[string]int64 + Nodes []*Node + + // If not empty, ignore the manifest and send transaction load only to the + // node names in this list. It is set only from a command line flag. + LoadTargetNodes []string + + // Latency Emulation is enabled when all the nodes have a zone assigned. + LatencyEmulationEnabled bool + + // For generating transaction load on lanes proportionally to their + // priorities. + laneIDs []string + laneCumulativeWeights []uint + sumWeights uint } // Node represents a CometBFT node in a testnet. +// It includes all fields from the associated ManifestNode instance. type Node struct { + ManifestNode + Name string - Version string Testnet *Testnet Mode Mode PrivvalKey crypto.PrivKey @@ -110,82 +111,60 @@ type Node struct { RPCProxyPort uint32 GRPCProxyPort uint32 GRPCPrivilegedProxyPort uint32 - StartAt int64 - BlockSyncVersion string - StateSync bool - Database string ABCIProtocol Protocol PrivvalProtocol Protocol PersistInterval uint64 - SnapshotInterval uint64 - RetainBlocks uint64 - EnableCompanionPruning bool Seeds []*Node PersistentPeers []*Node Perturbations []Perturbation - SendNoLoad bool Prometheus bool PrometheusProxyPort uint32 + Zone string } -// LoadTestnet loads a testnet from a manifest file, using the filename to -// determine the testnet name and directory (from the basename of the file). +// LoadTestnet loads a testnet from a manifest file. The testnet files are +// generated in the given directory, which is also used to determine the testnet +// name (the directory's basename). // The testnet generation must be deterministic, since it is generated // separately by the runner and the test cases. For this reason, testnets use a // random seed to generate e.g. keys. -func LoadTestnet(file string, ifd InfrastructureData) (*Testnet, error) { +func LoadTestnet(file string, ifd InfrastructureData, dir string) (*Testnet, error) { manifest, err := LoadManifest(file) if err != nil { return nil, err } - return NewTestnetFromManifest(manifest, file, ifd) + return NewTestnetFromManifest(manifest, file, ifd, dir) } -// NewTestnetFromManifest creates and validates a testnet from a manifest +// NewTestnetFromManifest creates and validates a testnet from a manifest. -func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureData) (*Testnet, error) { - dir := strings.TrimSuffix(file, filepath.Ext(file)) +func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureData, dir string) (*Testnet, error) { + if dir == "" { + // Set default testnet directory.
+ dir = strings.TrimSuffix(file, filepath.Ext(file)) + } keyGen := newKeyGenerator(randomSeed) - proxyPortGen := newPortGenerator(proxyPortFirst) prometheusProxyPortGen := newPortGenerator(prometheusProxyPortFirst) _, ipNet, err := net.ParseCIDR(ifd.Network) if err != nil { return nil, fmt.Errorf("invalid IP network address %q: %w", ifd.Network, err) } - testnet := &Testnet{ - Name: filepath.Base(dir), - File: file, - Dir: dir, - IP: ipNet, - InitialHeight: 1, - InitialState: manifest.InitialState, - Validators: map[*Node]int64{}, - ValidatorUpdates: map[int64]map[*Node]int64{}, - Nodes: []*Node{}, - DisablePexReactor: manifest.DisablePexReactor, - Evidence: manifest.Evidence, - LoadTxSizeBytes: manifest.LoadTxSizeBytes, - LoadTxBatchSize: manifest.LoadTxBatchSize, - LoadTxConnections: manifest.LoadTxConnections, - ABCIProtocol: manifest.ABCIProtocol, - PrepareProposalDelay: manifest.PrepareProposalDelay, - ProcessProposalDelay: manifest.ProcessProposalDelay, - CheckTxDelay: manifest.CheckTxDelay, - VoteExtensionDelay: manifest.VoteExtensionDelay, - FinalizeBlockDelay: manifest.FinalizeBlockDelay, - UpgradeVersion: manifest.UpgradeVersion, - Prometheus: manifest.Prometheus, - VoteExtensionsEnableHeight: manifest.VoteExtensionsEnableHeight, - VoteExtensionSize: manifest.VoteExtensionSize, - PeerGossipIntraloopSleepDuration: manifest.PeerGossipIntraloopSleepDuration, - ABCITestsEnabled: manifest.ABCITestsEnabled, - } - if len(manifest.KeyType) != 0 { - testnet.KeyType = manifest.KeyType - } - if manifest.InitialHeight > 0 { - testnet.InitialHeight = manifest.InitialHeight + Manifest: &manifest, + + Name: filepath.Base(dir), + File: file, + Dir: dir, + + IP: ipNet, + ValidatorUpdates: map[int64]map[string]int64{}, + Nodes: []*Node{}, + } + if testnet.InitialHeight == 0 { + testnet.InitialHeight = 1 + } + if testnet.KeyType == "" { + testnet.KeyType = ed25519.KeyType } if testnet.ABCIProtocol == "" { testnet.ABCIProtocol = string(ProtocolBuiltin) @@ -203,8 +182,39 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa testnet.LoadTxSizeBytes = defaultTxSizeBytes } - for _, name := range sortNodeNames(manifest) { - nodeManifest := manifest.Nodes[name] + if len(testnet.Lanes) == 0 { + testnet.Lanes = app.DefaultLanes() + } + if len(testnet.LoadLaneWeights) == 0 { + // Assign same weight to all lanes. + testnet.LoadLaneWeights = make(map[string]uint, len(testnet.Lanes)) + for id := range testnet.Lanes { + testnet.LoadLaneWeights[id] = 1 + } + } + if len(testnet.Lanes) < 1 { + return nil, errors.New("number of lanes must be greater or equal to one") + } + + // Pre-compute lane data needed for generating transaction load. 
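The precomputation that follows is standard cumulative-weight (inverse-CDF) sampling: lanes with larger weights own proportionally larger intervals of the cumulative array. A self-contained sketch of the technique, with hypothetical lane names and weights, and `math/rand` standing in for the internal `cmtrand` package:

```go
package main

import (
	"fmt"
	"math/rand"
)

// Standalone version of the cumulative-weight sampling that the hunk below
// precomputes and that weightedRandomIndex/WeightedRandomLane later use.
// Lane names and weights are hypothetical.
func main() {
	lanes := []string{"foo", "bar"}
	weights := []uint{1, 3}

	// Cumulative weights: [1, 4]; sum is 4.
	cum := make([]uint, len(weights))
	sum := uint(0)
	for i, w := range weights {
		sum += w
		cum[i] = sum
	}

	counts := map[string]int{}
	for n := 0; n < 100000; n++ {
		r := uint(rand.Int31n(int32(sum))) // in [0, sum)
		for i, c := range cum {
			if r < c {
				counts[lanes[i]]++
				break
			}
		}
	}
	fmt.Println(counts) // roughly foo:25000, bar:75000
}
```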
+ testnet.laneIDs = make([]string, 0, len(testnet.Lanes)) + laneWeights := make([]uint, 0, len(testnet.Lanes)) + for lane := range testnet.Lanes { + testnet.laneIDs = append(testnet.laneIDs, lane) + weight := testnet.LoadLaneWeights[lane] + laneWeights = append(laneWeights, weight) + testnet.sumWeights += weight + } + testnet.laneCumulativeWeights = make([]uint, len(testnet.Lanes)) + testnet.laneCumulativeWeights[0] = laneWeights[0] + for i := 1; i < len(testnet.laneCumulativeWeights); i++ { + testnet.laneCumulativeWeights[i] = testnet.laneCumulativeWeights[i-1] + laneWeights[i] + } + + testnet.LatencyEmulationEnabled = true + + for _, name := range sortNodeNames(&manifest) { + nodeManifest := manifest.NodesMap[name] ind, ok := ifd.Instances[name] if !ok { return nil, fmt.Errorf("information for node '%s' missing from infrastructure data", name) @@ -213,36 +223,29 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa if len(extIP) == 0 { extIP = ind.IPAddress } - v := nodeManifest.Version - if v == "" { - v = localVersion - } node := &Node{ - Name: name, - Version: v, - Testnet: testnet, - PrivvalKey: keyGen.Generate(manifest.KeyType), - NodeKey: keyGen.Generate("ed25519"), + ManifestNode: *nodeManifest, + Name: name, + Testnet: testnet, + + PrivvalKey: keyGen.Generate(testnet.KeyType), + NodeKey: keyGen.Generate(ed25519.KeyType), InternalIP: ind.IPAddress, ExternalIP: extIP, - RPCProxyPort: proxyPortGen.Next(), - GRPCProxyPort: proxyPortGen.Next(), - GRPCPrivilegedProxyPort: proxyPortGen.Next(), + RPCProxyPort: ind.RPCPort, + GRPCProxyPort: ind.GRPCPort, + GRPCPrivilegedProxyPort: ind.PrivilegedGRPCPort, Mode: ModeValidator, - Database: "goleveldb", ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, - StartAt: nodeManifest.StartAt, - BlockSyncVersion: nodeManifest.BlockSyncVersion, - StateSync: nodeManifest.StateSync, PersistInterval: 1, - SnapshotInterval: nodeManifest.SnapshotInterval, - RetainBlocks: nodeManifest.RetainBlocks, - EnableCompanionPruning: nodeManifest.EnableCompanionPruning, Perturbations: []Perturbation{}, - SendNoLoad: nodeManifest.SendNoLoad, Prometheus: testnet.Prometheus, + Zone: nodeManifest.Zone, + } + if node.Version == "" { + node.Version = localVersion } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this @@ -250,20 +253,20 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa if node.BlockSyncVersion == "" { node.BlockSyncVersion = "v0" } - if nodeManifest.Mode != "" { - node.Mode = Mode(nodeManifest.Mode) + if nodeManifest.ModeStr != "" { + node.Mode = Mode(nodeManifest.ModeStr) } if node.Mode == ModeLight { node.ABCIProtocol = ProtocolBuiltin } - if nodeManifest.Database != "" { - node.Database = nodeManifest.Database + if node.Database == "" { + node.Database = "goleveldb" } - if nodeManifest.PrivvalProtocol != "" { - node.PrivvalProtocol = Protocol(nodeManifest.PrivvalProtocol) + if nodeManifest.PrivvalProtocolStr != "" { + node.PrivvalProtocol = Protocol(nodeManifest.PrivvalProtocolStr) } - if nodeManifest.PersistInterval != nil { - node.PersistInterval = *nodeManifest.PersistInterval + if nodeManifest.PersistIntervalPtr != nil { + node.PersistInterval = *nodeManifest.PersistIntervalPtr } if node.Prometheus { node.PrometheusProxyPort = prometheusProxyPortGen.Next() @@ -271,23 +274,38 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa for _, p := range nodeManifest.Perturb 
{ node.Perturbations = append(node.Perturbations, Perturbation(p)) } + if nodeManifest.Zone != "" { + node.Zone = nodeManifest.Zone + } else if testnet.DefaultZone != "" { + node.Zone = testnet.DefaultZone + } + if node.Zone == "" { + testnet.LatencyEmulationEnabled = false + } + + // Configs are applied in order, so a local Config in Node + // should override a global config in Testnet. + if len(manifest.Config) > 0 { + node.Config = append(testnet.Config, node.Config...) + } + testnet.Nodes = append(testnet.Nodes, node) } // We do a second pass to set up seeds and persistent peers, which allows graph cycles. for _, node := range testnet.Nodes { - nodeManifest, ok := manifest.Nodes[node.Name] + nodeManifest, ok := manifest.NodesMap[node.Name] if !ok { return nil, fmt.Errorf("failed to look up manifest for node %q", node.Name) } - for _, seedName := range nodeManifest.Seeds { + for _, seedName := range nodeManifest.SeedsList { seed := testnet.LookupNode(seedName) if seed == nil { return nil, fmt.Errorf("unknown seed %q for node %q", seedName, node.Name) } node.Seeds = append(node.Seeds, seed) } - for _, peerName := range nodeManifest.PersistentPeers { + for _, peerName := range nodeManifest.PersistentPeersList { peer := testnet.LookupNode(peerName) if peer == nil { return nil, fmt.Errorf("unknown persistent peer %q for node %q", peerName, node.Name) @@ -308,39 +326,69 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa } // Set up genesis validators. If not specified explicitly, use all validator nodes. - if manifest.Validators != nil { - for validatorName, power := range *manifest.Validators { - validator := testnet.LookupNode(validatorName) - if validator == nil { - return nil, fmt.Errorf("unknown validator %q", validatorName) - } - testnet.Validators[validator] = power + if len(testnet.Validators) == 0 { + if testnet.Validators == nil { // Can this ever happen? + testnet.Validators = make(map[string]int64) } - } else { for _, node := range testnet.Nodes { if node.Mode == ModeValidator { - testnet.Validators[node] = 100 + testnet.Validators[node.Name] = 100 } } } // Set up validator updates. - for heightStr, validators := range manifest.ValidatorUpdates { + // NOTE: This map traversal is non-deterministic, but that's acceptable because + // the loop only constructs another map. + // We don't rely on traversal order for any side effects. + for heightStr, validators := range manifest.ValidatorUpdatesMap { height, err := strconv.Atoi(heightStr) if err != nil { return nil, fmt.Errorf("invalid validator update height %q: %w", height, err) } - valUpdate := map[*Node]int64{} + valUpdate := map[string]int64{} for name, power := range validators { node := testnet.LookupNode(name) if node == nil { return nil, fmt.Errorf("unknown validator %q for update at height %v", name, height) } - valUpdate[node] = power + valUpdate[node.Name] = power } testnet.ValidatorUpdates[int64(height)] = valUpdate } + if testnet.ConstantFlip && len(testnet.Validators) > 1 { + // Pick "lowest" validator by name + var minNode string + for n := range testnet.Validators { + if len(minNode) == 0 || n < minNode { + minNode = n + } + } + if len(minNode) == 0 { + return nil, errors.New("`testnet.Validators` is empty") + } + + const flipSpan = 3000 + for i := max(1, manifest.InitialHeight); i < manifest.InitialHeight+flipSpan; i++ { + // FIXME: we do not flip the validator when there is + // **any** scheduled validator update for that height. 
+ // We may have a validator update that affects a + // **different** validator here and the height can be + // odd. We add our validator back in odd heights, so we + // are skipping this in this case. Therefore, in the + // next even height we are removing a validator that is + // not present in the validator set. + if _, ok := testnet.ValidatorUpdates[i]; ok { + continue + } + valUpdate := map[string]int64{ + minNode: i % 2, // flipping every height + } + testnet.ValidatorUpdates[i] = valUpdate + } + } + return testnet, testnet.Validate() } @@ -355,11 +403,136 @@ func (t Testnet) Validate() error { if len(t.Nodes) == 0 { return errors.New("network has no nodes") } + if err := t.validateZones(t.Nodes); err != nil { + return err + } + if t.BlockMaxBytes > types.MaxBlockSizeBytes { + return fmt.Errorf("value of BlockMaxBytes cannot be higher than %d", types.MaxBlockSizeBytes) + } + if t.VoteExtensionsUpdateHeight < -1 { + return fmt.Errorf("value of VoteExtensionsUpdateHeight must be positive, 0 (InitChain), "+ + "or -1 (Genesis); update height %d", t.VoteExtensionsUpdateHeight) + } + if t.VoteExtensionsEnableHeight < 0 { + return fmt.Errorf("value of VoteExtensionsEnableHeight must be positive, or 0 (disable); "+ + "enable height %d", t.VoteExtensionsEnableHeight) + } + if t.VoteExtensionsUpdateHeight > 0 && t.VoteExtensionsUpdateHeight < t.InitialHeight { + return fmt.Errorf("a value of VoteExtensionsUpdateHeight greater than 0 "+ + "must not be less than InitialHeight; "+ + "update height %d, initial height %d", + t.VoteExtensionsUpdateHeight, t.InitialHeight, + ) + } + if t.VoteExtensionsEnableHeight > 0 { + if t.VoteExtensionsEnableHeight < t.InitialHeight { + return fmt.Errorf("a value of VoteExtensionsEnableHeight greater than 0 "+ + "must not be less than InitialHeight; "+ + "enable height %d, initial height %d", + t.VoteExtensionsEnableHeight, t.InitialHeight, + ) + } + if t.VoteExtensionsEnableHeight <= t.VoteExtensionsUpdateHeight { + return fmt.Errorf("a value of VoteExtensionsEnableHeight greater than 0 "+ + "must be greater than VoteExtensionsUpdateHeight; "+ + "update height %d, enable height %d", + t.VoteExtensionsUpdateHeight, t.VoteExtensionsEnableHeight, + ) + } + } + if t.PbtsEnableHeight < 0 { + return fmt.Errorf("value of PbtsEnableHeight must be positive, or 0 (disable); "+ + "enable height %d", t.PbtsEnableHeight) + } + if t.PbtsUpdateHeight > 0 && t.PbtsUpdateHeight < t.InitialHeight { + return fmt.Errorf("a value of PbtsUpdateHeight greater than 0 "+ + "must not be less than InitialHeight; "+ + "update height %d, initial height %d", + t.PbtsUpdateHeight, t.InitialHeight, + ) + } + if t.PbtsEnableHeight > 0 { + if t.PbtsEnableHeight < t.InitialHeight { + return fmt.Errorf("a value of PbtsEnableHeight greater than 0 "+ + "must not be less than InitialHeight; "+ + "enable height %d, initial height %d", + t.PbtsEnableHeight, t.InitialHeight, + ) + } + if t.PbtsEnableHeight <= t.PbtsUpdateHeight { + return fmt.Errorf("a value of PbtsEnableHeight greater than 0 "+ + "must be greater than PbtsUpdateHeight; "+ + "update height %d, enable height %d", + t.PbtsUpdateHeight, t.PbtsEnableHeight, + ) + } + } + nodeNames := sortNodeNames(t.Manifest) + for _, nodeName := range t.LoadTargetNodes { + if !slices.Contains(nodeNames, nodeName) { + return fmt.Errorf("%s is not in the list of nodes", nodeName) + } + } + if len(t.LoadLaneWeights) != len(t.Lanes) { + return fmt.Errorf("number of lane weights (%d) must be equal to "+ + "the number of lanes defined by the app (%d)", +
len(t.LoadLaneWeights), len(t.Lanes), + ) + } + for lane := range t.Lanes { + if _, ok := t.LoadLaneWeights[lane]; !ok { + return fmt.Errorf("lane %s not in weights map", lane) + } + } + if t.sumWeights <= 0 { + return errors.New("the sum of all lane weights must be greater than 0") + } for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) } } + for _, field := range t.Genesis { + if _, _, err := ParseKeyValueField("genesis", field); err != nil { + return err + } + } + for _, field := range t.Config { + if _, _, err := ParseKeyValueField("config", field); err != nil { + return err + } + } + return nil +} + +func (t *Testnet) validateZones(nodes []*Node) error { + allZones, _, err := LoadZoneLatenciesMatrix() + if err != nil { + return err + } + + // Check that the zone ids of all nodes are valid when the matrix file exists. + nodesWithoutZone := make([]string, 0, len(nodes)) + for _, node := range nodes { + if node.Zone == "" { + nodesWithoutZone = append(nodesWithoutZone, node.Name) + continue + } + if !slices.Contains(allZones, node.Zone) { + return fmt.Errorf("invalid zone %s for node %s, not present in zone-latencies matrix", + node.Zone, node.Name) + } + } + + // Either all nodes have a zone or none have. + if len(nodesWithoutZone) > 0 && len(nodesWithoutZone) != len(nodes) { + return fmt.Errorf("the following nodes do not have a zone assigned (while other nodes have): %v", strings.Join(nodesWithoutZone, ", ")) + } + + if len(nodesWithoutZone) > 0 && t.LatencyEmulationEnabled { + return fmt.Errorf("latency emulation is enabled but the following nodes do not have a zone assigned: %v", strings.Join(nodesWithoutZone, ", ")) + } + return nil } @@ -399,7 +572,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid block sync setting %q", n.BlockSyncVersion) } switch n.Database { - case "goleveldb", "cleveldb", "boltdb", "rocksdb", "badgerdb": + case "goleveldb", "rocksdb", "badgerdb", "pebbledb": default: return fmt.Errorf("invalid database setting %q", n.Database) } @@ -411,6 +584,9 @@ func (n Node) Validate(testnet Testnet) error { if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin && n.ABCIProtocol != ProtocolBuiltinConnSync { return errors.New("light client must use builtin protocol") } + if n.Mode != ModeFull && n.Mode != ModeValidator && n.ClockSkew != 0 { + return errors.New("clock skew configuration only supported on full nodes") + } switch n.PrivvalProtocol { case ProtocolFile, ProtocolUNIX, ProtocolTCP: default: @@ -443,7 +619,7 @@ func (n Node) Validate(testnet Testnet) error { switch perturbation { case PerturbationUpgrade: if upgradeFound { - return fmt.Errorf("'upgrade' perturbation can appear at most once per node") + return errors.New("'upgrade' perturbation can appear at most once per node") } upgradeFound = true case PerturbationDisconnect, PerturbationKill, PerturbationPause, PerturbationRestart: @@ -451,7 +627,11 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid perturbation %q", perturbation) } } - + for _, entry := range n.Config { + if _, _, err := ParseKeyValueField("config", entry); err != nil { + return err + } + } return nil } @@ -503,32 +683,26 @@ func (t Testnet) HasPerturbations() bool { return false } -//go:embed templates/prometheus-yaml.tmpl -var prometheusYamlTemplate string - -func (t Testnet) prometheusConfigBytes() ([]byte, error) { - tmpl, err := template.New("prometheus-yaml").Parse(prometheusYamlTemplate) - if err != 
nil { - return nil, err - } - var buf bytes.Buffer - err = tmpl.Execute(&buf, t) - if err != nil { - return nil, err +// weightedRandomIndex, given a list of cumulative weights and the sum of all +// weights, it picks one of them randomly and proportionally to its weight, and +// returns its index in the list. +func weightedRandomIndex(cumWeights []uint, sumWeights uint) int { + // Generate a random number in the range [0, sumWeights). + r := cmtrand.Int31n(int32(sumWeights)) + + // Return i when the random number falls in the i'th interval. + for i, cumWeight := range cumWeights { + if r < int32(cumWeight) { + return i + } } - return buf.Bytes(), nil + return -1 // unreachable } -func (t Testnet) WritePrometheusConfig() error { - bytes, err := t.prometheusConfigBytes() - if err != nil { - return err - } - err = os.WriteFile(filepath.Join(t.Dir, "prometheus.yaml"), bytes, 0o644) //nolint:gosec - if err != nil { - return err - } - return nil +// WeightedRandomLane returns an element in the list of lane ids, according to a +// predefined weight for each lane in the list. +func (t *Testnet) WeightedRandomLane() string { + return t.laneIDs[weightedRandomIndex(t.laneCumulativeWeights, t.sumWeights)] } // Address returns a P2P endpoint address for the node. @@ -557,9 +731,17 @@ func (n Node) AddressRPC() string { // Client returns an RPC client for the node. func (n Node) Client() (*rpchttp.HTTP, error) { + //nolint:nosprintfhostport return rpchttp.New(fmt.Sprintf("http://%s:%v/v1", n.ExternalIP, n.RPCProxyPort)) } +// ClientInternalIP returns an RPC client using the node's internal IP. +// This is useful for running the loader from inside a private DO network. +func (n Node) ClientInternalIP() (*rpchttp.HTTP, error) { + //nolint:nosprintfhostport + return rpchttp.New(fmt.Sprintf("http://%s:%v/v1", n.InternalIP, n.RPCProxyPort)) +} + // GRPCClient creates a gRPC client for the node. func (n Node) GRPCClient(ctx context.Context) (grpcclient.Client, error) { return grpcclient.New( @@ -578,7 +760,7 @@ func (n Node) GRPCPrivilegedClient(ctx context.Context) (grpcprivileged.Client, ) } -// Stateless returns true if the node is either a seed node or a light node +// Stateless returns true if the node is either a seed node or a light node. func (n Node) Stateless() bool { return n.Mode == ModeLight || n.Mode == ModeSeed } @@ -602,9 +784,15 @@ func (g *keyGenerator) Generate(keyType string) crypto.PrivKey { panic(err) // this shouldn't happen } switch keyType { - case "secp256k1": + case secp256k1.KeyType: return secp256k1.GenPrivKeySecp256k1(seed) - case "", "ed25519": + case bls12381.KeyType: + pk, err := bls12381.GenPrivKeyFromSecret(seed) + if err != nil { + panic(fmt.Sprintf("unrecoverable error when generating key; key type %s, err %v", bls12381.KeyType, err)) + } + return pk + case ed25519.KeyType: return ed25519.GenPrivKeyFromSecret(seed) default: panic("KeyType not supported") // should not make it this far @@ -667,3 +855,60 @@ func (g *ipGenerator) Next() net.IP { } return ip } + +//go:embed files/aws-latencies.csv +var awsLatenciesMatrixCsvContent string + +// LoadZoneLatenciesMatrix parses the file containing the matrix of latencies +// from each zone to another one. It returns the list of all zone IDs, and a map +// from each zone ID to the latencies to each other zone. 
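`LoadZoneLatenciesMatrix`, implemented just below, turns an embedded CSV into a zone list plus a per-zone latency row. A self-contained sketch of the same shape and parse, with a made-up two-zone matrix:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strconv"
	"strings"
)

// Self-contained sketch of the CSV shape LoadZoneLatenciesMatrix (below)
// expects: a "from/to" header row, then one row per zone with latencies
// in milliseconds. The two-zone matrix here is made up.
func main() {
	data := `from/to,us-east-1,eu-west-1
us-east-1,0,80
eu-west-1,80,0
`
	r := csv.NewReader(strings.NewReader(data))
	records, err := r.ReadAll()
	if err != nil {
		panic(err)
	}

	zones := records[0][1:] // drop the "from/to" cell
	matrix := make(map[string][]uint32, len(records)-1)
	for _, row := range records[1:] {
		lats := make([]uint32, len(row)-1)
		for i, cell := range row[1:] {
			v, err := strconv.ParseUint(cell, 10, 32)
			if err != nil {
				panic(err)
			}
			lats[i] = uint32(v)
		}
		matrix[row[0]] = lats
	}
	fmt.Println(zones, matrix) // [us-east-1 eu-west-1] map[eu-west-1:[80 0] us-east-1:[0 80]]
}
```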
+func LoadZoneLatenciesMatrix() ([]string, map[string][]uint32, error) { + records, err := parseCsv(awsLatenciesMatrixCsvContent) + if err != nil { + return nil, nil, err + } + zones := records[0][1:] // Discard first element in header (value "from/to") + records = records[1:] // Discard header + matrix := make(map[string][]uint32, len(records)) + for _, r := range records { + zoneID := r[0] + matrix[zoneID] = make([]uint32, len(r)-1) + for i, l := range r[1:] { + lat, err := strconv.ParseUint(l, 10, 32) + if err != nil { + return nil, nil, ErrInvalidZoneID{l, err} + } + matrix[zoneID][i] = uint32(lat) + } + } + return zones, matrix, nil +} + +type ErrInvalidZoneID struct { + ZoneID string + Err error +} + +func (e ErrInvalidZoneID) Error() string { + return fmt.Sprintf("invalid zone id (%s): %v", e.ZoneID, e.Err) +} + +func parseCsv(csvString string) ([][]string, error) { + csvReader := csv.NewReader(strings.NewReader(csvString)) + csvReader.Comment = '#' + records, err := csvReader.ReadAll() + if err != nil { + return nil, err + } + + return records, nil +} + +func ParseKeyValueField(name string, field string) (key string, value string, err error) { + tokens := strings.Split(field, "=") + if len(tokens) != 2 { + return key, value, fmt.Errorf("invalid '%s' field: \"%s\", "+ + "expected \"key = value\"", name, field) + } + return strings.TrimSpace(tokens[0]), strings.TrimSpace(tokens[1]), nil +} diff --git a/test/e2e/run-multiple.sh b/test/e2e/run-multiple.sh index 571a78a7faf..6587738174d 100755 --- a/test/e2e/run-multiple.sh +++ b/test/e2e/run-multiple.sh @@ -20,10 +20,11 @@ FAILED=() for MANIFEST in "$@"; do START=$SECONDS echo "==> Running testnet: $MANIFEST" + echo "==> Manifest:" + cat "$MANIFEST" if ! ./build/runner -f "$MANIFEST"; then - echo "==> Testnet $MANIFEST failed, dumping manifest..." - cat "$MANIFEST" + echo "==> Testnet failed: $MANIFEST" echo "==> Dumping container logs for $MANIFEST..." ./build/runner -f "$MANIFEST" logs diff --git a/test/e2e/runner/benchmark.go b/test/e2e/runner/benchmark.go index bd671fc4f58..2f3bfea7be1 100644 --- a/test/e2e/runner/benchmark.go +++ b/test/e2e/runner/benchmark.go @@ -20,7 +20,7 @@ import ( // 4. Min block interval (fastest block) // // Metrics are based of the `benchmarkLength`, the amount of consecutive blocks -// sampled from in the testnet +// sampled from in the testnet. 
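`ParseKeyValueField` above is small but worth pinning down: a standalone copy of its logic shows both the happy path and the deliberate limitation that a value containing `=` is rejected, which keeps the `"key = value"` grammar simple:

```go
package main

import (
	"fmt"
	"strings"
)

// Local copy of the ParseKeyValueField logic above, for illustration only.
// Splitting on every "=" means a value that itself contains "=" is
// rejected; that is a design choice, not an accident.
func parseKeyValueField(name, field string) (string, string, error) {
	tokens := strings.Split(field, "=")
	if len(tokens) != 2 {
		return "", "", fmt.Errorf("invalid '%s' field: %q, expected \"key = value\"", name, field)
	}
	return strings.TrimSpace(tokens[0]), strings.TrimSpace(tokens[1]), nil
}

func main() {
	k, v, err := parseKeyValueField("config", "p2p.send_rate = 512000")
	fmt.Println(k, v, err) // p2p.send_rate 512000 <nil>

	_, _, err = parseKeyValueField("config", "a = b = c") // two "="s: rejected
	fmt.Println(err)
}
```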
func Benchmark(ctx context.Context, testnet *e2e.Testnet, benchmarkLength int64) error { block, _, err := waitForHeight(ctx, testnet, 0) if err != nil { @@ -84,7 +84,7 @@ type testnetStats struct { } func (t *testnetStats) OutputJSON(net *e2e.Testnet) string { - jsn, err := json.Marshal(map[string]interface{}{ + jsn, err := json.Marshal(map[string]any{ "case": filepath.Base(net.File), "start_height": t.startHeight, "end_height": t.endHeight, @@ -97,7 +97,6 @@ func (t *testnetStats) OutputJSON(net *e2e.Testnet) string { "txns": t.numtxns, "dur": t.totalTime.Seconds(), }) - if err != nil { return "" } diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index 852612312be..aadfd76042b 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -4,10 +4,10 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "path/filepath" - "github.com/cometbft/cometbft/libs/log" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/test/e2e/pkg/exec" "github.com/cometbft/cometbft/test/e2e/pkg/infra/docker" @@ -50,20 +50,20 @@ func cleanupDocker() error { return nil } -// cleanupDir cleans up a testnet directory +// cleanupDir cleans up a testnet directory. func cleanupDir(dir string) error { if dir == "" { return errors.New("no directory set") } _, err := os.Stat(dir) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } else if err != nil { return err } - logger.Info("cleanup dir", "msg", log.NewLazySprintf("Removing testnet directory %q", dir)) + logger.Info("Clean up testnet directory", "dir", dir) // On Linux, some local files in the volume will be owned by root since CometBFT // runs as root inside the container, so we need to clean them up from within a @@ -73,7 +73,7 @@ func cleanupDir(dir string) error { return err } err = docker.Exec(context.Background(), "run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), - "cometbft/e2e-node", "sh", "-c", "rm -rf /network/*/") + "cometbft/e2e-node:local-version", "sh", "-c", "rm -rf /network/*/") if err != nil { return err } diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index ce778bb53ae..01d06d77863 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -10,22 +10,21 @@ import ( "path/filepath" "time" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/internal/test" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/privval" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/types" "github.com/cometbft/cometbft/version" ) -// 1 in 4 evidence is light client evidence, the rest is duplicate vote evidence +// 1 in 4 evidence is light client evidence, the rest is duplicate vote evidence. const lightClientEvidenceRatio = 4 -// InjectEvidence takes a running testnet and generates an amount of valid +// InjectEvidence takes a running testnet and generates an amount of valid/invalid // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`. // Evidence is random and can be a mixture of LightClientAttackEvidence and // DuplicateVoteEvidence. 
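The hunk that follows alternates both the kind and the validity of the injected evidence deterministically: every `lightClientEvidenceRatio`-th iteration produces light-client evidence, and every second one of those is made invalid. A sketch of the schedule it yields, assuming `lightClientEvidenceRatio = 4` as defined above:

```go
package main

import "fmt"

// Sketch of the injection schedule produced by the loop in the hunk below,
// for lightClientEvidenceRatio = 4 (the constant above).
func main() {
	const lightClientEvidenceRatio = 4
	for i := 0; i < 12; i++ {
		kind, valid := "duplicate-vote", true
		if i%lightClientEvidenceRatio == 0 {
			kind = "light-client"
			valid = i%(lightClientEvidenceRatio*2) != 0 // every other one is invalid
		}
		fmt.Printf("i=%2d %-14s valid=%v\n", i, kind, valid)
	}
}
```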
@@ -88,16 +87,21 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } var ev types.Evidence - for i := 1; i <= amount; i++ { + for i := 0; i < amount; i++ { + validEv := true if i%lightClientEvidenceRatio == 0 { + validEv = i%(lightClientEvidenceRatio*2) != 0 // Alternate valid and invalid evidence ev, err = generateLightClientAttackEvidence( - ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, + ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, validEv, ) } else { var dve *types.DuplicateVoteEvidence dve, err = generateDuplicateVoteEvidence( privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) + if err != nil { + return err + } if dve.VoteA.Height < testnet.VoteExtensionsEnableHeight { dve.VoteA.Extension = nil dve.VoteA.ExtensionSignature = nil @@ -111,9 +115,18 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } _, err := client.BroadcastEvidence(ctx, ev) - if err != nil { + if !validEv { + // The tests will count committed evidences later on, + // and only valid evidences will make it + amount++ + } + if validEv != (err == nil) { + if err == nil { + return errors.New("submitting invalid evidence didn't return an error") + } return err } + time.Sleep(5 * time.Second / time.Duration(amount)) } // wait for the node to reach the height above the forged height so that @@ -156,6 +169,7 @@ func generateLightClientAttackEvidence( vals *types.ValidatorSet, chainID string, evTime time.Time, + validEvidence bool, ) (*types.LightClientAttackEvidence, error) { // forge a random header forgedHeight := height + 2 @@ -165,7 +179,7 @@ func generateLightClientAttackEvidence( // add a new bogus validator and remove an existing one to // vary the validator set slightly - pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals) + pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals, !validEvidence) if err != nil { return nil, err } @@ -174,12 +188,17 @@ func generateLightClientAttackEvidence( // create a commit for the forged header blockID := makeBlockID(header.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(chainID, forgedHeight, 0, cmtproto.SignedMsgType(2), conflictingVals) + voteSet := types.NewVoteSet(chainID, forgedHeight, 0, types.SignedMsgType(2), conflictingVals) commit, err := test.MakeCommitFromVoteSet(blockID, voteSet, pv, forgedTime) if err != nil { return nil, err } + // malleate the last signature of the commit by adding one to its first byte + if !validEvidence { + commit.Signatures[len(commit.Signatures)-1].Signature[0]++ + } + ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -199,7 +218,7 @@ func generateLightClientAttackEvidence( } // generateDuplicateVoteEvidence picks a random validator from the val set and -// returns duplicate vote evidence against the validator +// returns duplicate vote evidence against the validator. func generateDuplicateVoteEvidence( privVals []types.MockPV, height int64, @@ -228,7 +247,7 @@ func generateDuplicateVoteEvidence( } // getRandomValidatorIndex picks a random validator from a slice of mock PrivVals that's -// also part of the validator set, returning the PrivVal and its index in the validator set +// also part of the validator set, returning the PrivVal and its index in the validator set. 
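`getRandomValidatorIndex`, continued below, scans the private validators in random order and returns the first one present in the validator set. The same `rand.Perm` pattern in a self-contained sketch over plain strings:

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// The function below scans candidates in a random permutation and returns
// the first match; sketched here over strings instead of private validators.
func pickRandomMember(candidates []string, set map[string]bool) (string, error) {
	for _, idx := range rand.Perm(len(candidates)) {
		if c := candidates[idx]; set[c] {
			return c, nil
		}
	}
	return "", errors.New("no candidate is part of the set")
}

func main() {
	vals := map[string]bool{"validator01": true, "validator03": true}
	got, err := pickRandomMember([]string{"validator01", "validator02", "validator03"}, vals)
	fmt.Println(got, err) // validator01 or validator03, chosen at random
}
```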
func getRandomValidatorIndex(privVals []types.MockPV, vals *types.ValidatorSet) (types.MockPV, int32, error) { for _, idx := range rand.Perm(len(privVals)) { pv := privVals[idx] @@ -293,7 +312,11 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc } } -func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *types.ValidatorSet, +func mutateValidatorSet( + ctx context.Context, + privVals []types.MockPV, + vals *types.ValidatorSet, + nop bool, ) ([]types.PrivValidator, *types.ValidatorSet, error) { newVal, newPrivVal, err := test.Validator(ctx, 10) if err != nil { @@ -301,10 +324,14 @@ func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *type } var newVals *types.ValidatorSet - if vals.Size() > 2 { - newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) + if nop { + newVals = types.NewValidatorSet(vals.Copy().Validators) } else { - newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal)) + if vals.Size() > 2 { + newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) + } else { + newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal)) + } } // we need to sort the priv validators with the same index as the validator set diff --git a/test/e2e/runner/latency_emulation.go b/test/e2e/runner/latency_emulation.go new file mode 100644 index 00000000000..8d2042a0078 --- /dev/null +++ b/test/e2e/runner/latency_emulation.go @@ -0,0 +1,85 @@ +package main + +import ( + "fmt" + "slices" + + e2e "github.com/cometbft/cometbft/test/e2e/pkg" + "github.com/cometbft/cometbft/test/e2e/pkg/infra" +) + +// tcCommands generates the content of a shell script that includes a list of tc +// (traffic control) commands to emulate latency from the given node to all +// other nodes in the testnet. +func tcCommands(node *e2e.Node, infp infra.Provider) ([]string, error) { + allZones, zoneMatrix, err := e2e.LoadZoneLatenciesMatrix() + if err != nil { + return nil, err + } + nodeZoneIndex := slices.Index(allZones, node.Zone) + + tcCmds := []string{ + "#!/bin/sh", + "set -e", + + // Delete any existing qdisc on the root of the eth0 interface. + "tc qdisc del dev eth0 root 2> /dev/null || true", + + // Add a new root qdisc of type HTB with a default class of 10. + "tc qdisc add dev eth0 root handle 1: htb default 10", + + // Add a root class with identifier 1:1 and a rate limit of 1 gigabit per second. + "tc class add dev eth0 parent 1: classid 1:1 htb rate 1gbit", + + // Add a default class under the root class with identifier 1:10 and a rate limit of 1 gigabit per second. + "tc class add dev eth0 parent 1:1 classid 1:10 htb rate 1gbit", + + // Add an SFQ qdisc to the default class with handle 10: to manage traffic with fairness. + "tc qdisc add dev eth0 parent 1:10 handle 10: sfq perturb 10", + } + + // handle must be unique for each rule; start from one higher than last handle used above (10). + handle := 11 + for _, targetZone := range allZones { + // Get latency from node's zone to target zone (note that the matrix is symmetric). + latency := zoneMatrix[targetZone][nodeZoneIndex] + if latency <= 0 { + continue + } + + // Assign latency +/- 5% to handle. + delta := latency / 20 + if delta == 0 { + // Zero is not allowed in normal distribution. + delta = 1 + } + + // Add a class with the calculated handle, under the root class, with the specified rate.
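The `delta` above is `latency / 20`, i.e. a deviation of about ±5% of the mean delay, clamped to 1 ms because netem's normal distribution rejects a zero deviation. A tiny sketch of the resulting netem parameters for a few hypothetical latencies (the tc script assembly continues below):

```go
package main

import "fmt"

// Sketch of the jitter math used above: the zone-to-zone latency becomes
// netem's mean delay, and latency/20 (about +/- 5%) its deviation,
// clamped to at least 1ms. Latency values are hypothetical.
func main() {
	for _, latency := range []uint32{3, 80, 250} { // ms
		delta := latency / 20
		if delta == 0 {
			delta = 1
		}
		fmt.Printf("netem delay %dms %dms distribution normal\n", latency, delta)
	}
}
```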
+ tcCmds = append(tcCmds, fmt.Sprintf("tc class add dev eth0 parent 1:1 classid 1:%d htb rate 1gbit", handle)) + + // Add a netem qdisc to simulate the specified delay with normal distribution. + tcCmds = append(tcCmds, fmt.Sprintf("tc qdisc add dev eth0 parent 1:%d handle %d: netem delay %dms %dms distribution normal", handle, handle, latency, delta)) + + // Set emulated latency to nodes in the target zone. + for _, otherNode := range node.Testnet.Nodes { + if otherNode.Zone == targetZone || node.Name == otherNode.Name { + continue + } + otherNodeIP := infp.NodeIP(otherNode) + // Assign latency handle to target node. + tcCmds = append(tcCmds, fmt.Sprintf("tc filter add dev eth0 protocol ip parent 1: prio 1 u32 match ip dst %s/32 flowid 1:%d", otherNodeIP, handle)) + } + + handle++ + } + + // Display tc configuration for debugging. + tcCmds = append(tcCmds, []string{ + fmt.Sprintf("echo Traffic Control configuration on %s:", node.Name), + "tc qdisc show", + "tc class show dev eth0", + // "tc filter show dev eth0", // too verbose + }...) + + return tcCmds, nil +} diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 2dd630f2e7d..f241a70fd92 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "sync" "time" @@ -20,14 +21,17 @@ const workerPoolSize = 16 // Load generates transactions against the network until the given context is // canceled. -func Load(ctx context.Context, testnet *e2e.Testnet) error { +func Load(ctx context.Context, testnet *e2e.Testnet, useInternalIP bool) error { initialTimeout := 1 * time.Minute stallTimeout := 30 * time.Second chSuccess := make(chan struct{}) + chFailed := make(chan error) ctx, cancel := context.WithCancel(ctx) defer cancel() - logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", workerPoolSize)) + logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", workerPoolSize), + "tx/s", testnet.LoadTxBatchSize, "tx-bytes", testnet.LoadTxSizeBytes, "conn", testnet.LoadTxConnections, + "max-seconds", testnet.LoadMaxSeconds, "target-nodes", testnet.LoadTargetNodes) started := time.Now() u := [16]byte(uuid.New()) // generate run ID on startup @@ -35,37 +39,77 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { go loadGenerate(ctx, txCh, testnet, u[:]) for _, n := range testnet.Nodes { - if n.SendNoLoad { + if len(testnet.LoadTargetNodes) == 0 { + if n.SendNoLoad { + continue + } + } else if !slices.Contains(testnet.LoadTargetNodes, n.Name) { continue } for w := 0; w < testnet.LoadTxConnections; w++ { - go loadProcess(ctx, txCh, chSuccess, n) + go loadProcess(ctx, txCh, chSuccess, chFailed, n, useInternalIP) } } - // Monitor successful transactions, and abort on stalls. - success := 0 + maxTimer := time.NewTimer(time.Duration(testnet.LoadMaxSeconds) * time.Second) + if testnet.LoadMaxSeconds <= 0 { + <-maxTimer.C + } + + // Monitor successful and failed transactions, and abort on stalls. 
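The monitoring loop that follows relies on a common Go stall-detection idiom: each success resets the timeout, and `time.After` re-arms on every pass through the `select`, so the loop aborts only after a quiet period. A minimal sketch of the pattern in isolation:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Minimal sketch of the stall-detection pattern the loop below uses:
// every success resets the timeout, and time.After re-arms on each
// select, so the loop only gives up after a quiet period of `stall`.
func monitor(ch <-chan struct{}, initial, stall time.Duration) error {
	timeout := initial
	success := 0
	for {
		select {
		case <-ch:
			success++
			timeout = stall
		case <-time.After(timeout):
			if success == 0 {
				return errors.New("no progress at all")
			}
			return fmt.Errorf("stalled after %d successes", success)
		}
	}
}

func main() {
	ch := make(chan struct{})
	go func() {
		for i := 0; i < 3; i++ {
			ch <- struct{}{}
			time.Sleep(10 * time.Millisecond)
		}
	}()
	fmt.Println(monitor(ch, time.Second, 100*time.Millisecond))
}
```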
+ success, failed := 0, 0 + errorCounter := make(map[string]int) timeout := initialTimeout for { + rate := log.NewLazySprintf("%.1f", float64(success)/time.Since(started).Seconds()) + select { case <-chSuccess: success++ timeout = stallTimeout + case err := <-chFailed: + failed++ + errorCounter[err.Error()]++ case <-time.After(timeout): return fmt.Errorf("unable to submit transactions for %v", timeout) + case <-maxTimer.C: + logger.Info("load", "msg", log.NewLazySprintf("Transaction load finished after reaching %v seconds (%v tx/s)", testnet.LoadMaxSeconds, rate)) + return nil case <-ctx.Done(): if success == 0 { return errors.New("failed to submit any transactions") } - logger.Info("load", "msg", log.NewLazySprintf("Ending transaction load after %v txs (%.1f tx/s)...", - success, float64(success)/time.Since(started).Seconds())) + logger.Info("load", "msg", log.NewLazySprintf("Ending transaction load after %v txs (%v tx/s)...", success, rate)) + return nil + } + + // Log every ~1 second the number of sent transactions. + total := success + failed + if total%testnet.LoadTxBatchSize == 0 { + successRate := float64(success) / float64(total) + logger.Debug("load", "success", success, "failed", failed, "success/total", log.NewLazySprintf("%.2f", successRate), "tx/s", rate) + if len(errorCounter) > 0 { + for err, c := range errorCounter { + if c == 1 { + logger.Error("failed to send transaction", "err", err) + } else { + logger.Error("failed to send multiple transactions", "count", c, "err", err) + } + } + errorCounter = make(map[string]int) + } + } + + // Check if reached max number of allowed transactions to send. + if testnet.LoadMaxTxs > 0 && success >= testnet.LoadMaxTxs { + logger.Info("load", "msg", log.NewLazySprintf("Transaction load finished after reaching %v txs (%v tx/s)", success, rate)) return nil } } } -// loadGenerate generates jobs until the context is canceled +// loadGenerate generates jobs until the context is canceled. func loadGenerate(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testnet, id []byte) { t := time.NewTimer(0) defer t.Stop() @@ -104,6 +148,7 @@ func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testn Size: uint64(testnet.LoadTxSizeBytes), Rate: uint64(testnet.LoadTxBatchSize), Connections: uint64(testnet.LoadTxConnections), + Lane: testnet.WeightedRandomLane(), }) if err != nil { panic(fmt.Sprintf("Failed to generate tx: %v", err)) @@ -117,11 +162,12 @@ func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testn } }() } +FOR_LOOP: for i := 0; i < testnet.LoadTxBatchSize; i++ { select { case genCh <- struct{}{}: case <-ctx.Done(): - break + break FOR_LOOP } } close(genCh) @@ -130,19 +176,24 @@ func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testn // loadProcess processes transactions by sending transactions received on the txCh // to the client. 
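`loadProcess`, continued below, is the consumer half of a plain producer/worker-pool split over `txCh`: `loadGenerate` feeds the channel, one worker per connection drains it, and closing the channel shuts the workers down. The shape of that pattern, reduced to strings:

```go
package main

import (
	"fmt"
	"sync"
)

// Sketch of the producer/worker split used by loadGenerate and loadProcess
// (below): one producer feeds a shared channel, several workers drain it,
// and closing the channel shuts the workers down cleanly.
func main() {
	txCh := make(chan string)
	var wg sync.WaitGroup

	for w := 0; w < 3; w++ { // e.g. one worker per load connection
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for tx := range txCh {
				fmt.Printf("worker %d sent %s\n", id, tx)
			}
		}(w)
	}

	for i := 0; i < 10; i++ { // the producer side (loadGenerate's role)
		txCh <- fmt.Sprintf("tx-%d", i)
	}
	close(txCh)
	wg.Wait()
}
```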
-func loadProcess(ctx context.Context, txCh <-chan types.Tx, chSuccess chan<- struct{}, n *e2e.Node) { +func loadProcess(ctx context.Context, txCh <-chan types.Tx, chSuccess chan<- struct{}, chFailed chan<- error, n *e2e.Node, useInternalIP bool) { var client *rpchttp.HTTP var err error s := struct{}{} for tx := range txCh { if client == nil { - client, err = n.Client() + if useInternalIP { + client, err = n.ClientInternalIP() + } else { + client, err = n.Client() + } if err != nil { logger.Info("non-fatal error creating node client", "error", err) continue } } if _, err = client.BroadcastTxSync(ctx, tx); err != nil { + chFailed <- err continue } chSuccess <- s diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 51a57a8b39a..0cb7ff960b4 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "io/fs" "math/rand" "os" "strconv" + "strings" "github.com/spf13/cobra" @@ -19,7 +21,7 @@ import ( const randomSeed = 2308084734268 -var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +var logger = log.NewLoggerWithColor(os.Stdout, false) func main() { NewCLI().Run() @@ -41,7 +43,7 @@ func NewCLI() *CLI { Short: "End-to-end test runner", SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { file, err := cmd.Flags().GetString("file") if err != nil { return err @@ -80,7 +82,12 @@ func NewCLI() *CLI { return fmt.Errorf("unknown infrastructure type '%s'", inft) } - testnet, err := e2e.LoadTestnet(file, ifd) + testnetDir, err := cmd.Flags().GetString("testnet-dir") + if err != nil { + return err + } + + testnet, err := e2e.LoadTestnet(file, ifd, testnetDir) if err != nil { return fmt.Errorf("loading testnet: %s", err) } @@ -106,7 +113,7 @@ func NewCLI() *CLI { } return nil }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { if err := Cleanup(cli.testnet); err != nil { return err } @@ -120,7 +127,7 @@ func NewCLI() *CLI { ctx, loadCancel := context.WithCancel(context.Background()) defer loadCancel() go func() { - err := Load(ctx, cli.testnet) + err := Load(ctx, cli.testnet, false) if err != nil { logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error())) } @@ -175,6 +182,8 @@ func NewCLI() *CLI { cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest") _ = cli.root.MarkPersistentFlagRequired("file") + cli.root.PersistentFlags().StringP("testnet-dir", "d", "", "Set the directory for the testnet files generated during setup") + cli.root.PersistentFlags().StringP("infrastructure-type", "", "docker", "Backing infrastructure used to run the testnet. Either 'digital-ocean' or 'docker'") cli.root.PersistentFlags().StringP("infrastructure-data", "", "", "path to the json file containing the infrastructure data. 
Only used if the 'infrastructure-type' is set to a value other than 'docker'") @@ -185,7 +194,7 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "setup", Short: "Generates the testnet directory and configuration", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return Setup(cli.testnet, cli.infp) }, }) @@ -193,9 +202,9 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "start", Short: "Starts the testnet, waiting for nodes to become available", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { _, err := os.Stat(cli.testnet.Dir) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { err = Setup(cli.testnet, cli.infp) } if err != nil { @@ -208,7 +217,7 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "perturb", Short: "Perturbs the testnet, e.g. by restarting or disconnecting nodes", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return Perturb(cmd.Context(), cli.testnet, cli.infp) }, }) @@ -216,7 +225,7 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "wait", Short: "Waits for a few blocks to be produced and all nodes to catch up", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return Wait(cmd.Context(), cli.testnet, 5) }, }) @@ -224,19 +233,64 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "stop", Short: "Stops the testnet", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { logger.Info("Stopping testnet") return cli.infp.StopTestnet(context.Background()) }, }) - cli.root.AddCommand(&cobra.Command{ + loadCmd := &cobra.Command{ Use: "load", - Short: "Generates transaction load until the command is canceled", - RunE: func(cmd *cobra.Command, args []string) (err error) { - return Load(context.Background(), cli.testnet) + Short: "Generates transaction load until the command is canceled.", + RunE: func(cmd *cobra.Command, _ []string) (err error) { + useInternalIP, err := cmd.Flags().GetBool("internal-ip") + if err != nil { + return err + } + if loadRate, err := cmd.Flags().GetInt("rate"); err != nil { + return err + } else if loadRate > 0 { + cli.testnet.LoadTxBatchSize = loadRate + } + if loadSize, err := cmd.Flags().GetInt("size"); err != nil { + return err + } else if loadSize > 0 { + cli.testnet.LoadTxSizeBytes = loadSize + } + if loadConnections, err := cmd.Flags().GetInt("conn"); err != nil { + return err + } else if loadConnections > 0 { + cli.testnet.LoadTxConnections = loadConnections + } + if loadTime, err := cmd.Flags().GetInt("time"); err != nil { + return err + } else if loadTime > 0 { + cli.testnet.LoadMaxSeconds = loadTime + } + if loadTargetNodes, err := cmd.Flags().GetStringSlice("nodes"); err != nil { + return err + } else if len(loadTargetNodes) > 0 { + cli.testnet.LoadTargetNodes = loadTargetNodes + } + if err = cli.testnet.Validate(); err != nil { + return err + } + return Load(context.Background(), cli.testnet, useInternalIP) }, - }) + } + loadCmd.PersistentFlags().IntP("rate", "r", -1, + "Number of transactions generated each second (on all connections). Overwrites manifest option load_tx_batch_size.") + loadCmd.PersistentFlags().IntP("size", "s", -1, + "Transaction size in bytes.
Overwrites manifest option load_tx_size_bytes.") + loadCmd.PersistentFlags().IntP("conn", "c", -1, + "Number of connections to open at each target node simultaneously. Overwrites manifest option load_tx_connections.") + loadCmd.PersistentFlags().IntP("time", "t", -1, + "Maximum duration (in seconds) of the load test. Overwrites manifest option load_max_seconds.") + loadCmd.PersistentFlags().StringSliceP("nodes", "n", nil, + "Comma-separated list of node names to send load to. Manifest option send_no_load will be ignored.") + loadCmd.PersistentFlags().BoolP("internal-ip", "i", false, + "Use nodes' internal IP addresses when sending transaction load. For running from inside a DO private network.") + cli.root.AddCommand(loadCmd) cli.root.AddCommand(&cobra.Command{ Use: "evidence [amount]", @@ -264,31 +318,101 @@ func NewCLI() *CLI { cli.root.AddCommand(&cobra.Command{ Use: "test", Short: "Runs test cases against a running testnet", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return Test(cli.testnet, cli.infp.GetInfrastructureData()) }, }) + monitorCmd := cobra.Command{ + Use: "monitor", + Aliases: []string{"mon"}, + Short: "Manage monitoring services such as Prometheus, Grafana, ElasticSearch, etc.", + Long: "Manage monitoring services such as Prometheus, Grafana, ElasticSearch, etc.\n" + + "First run 'setup' to generate a Prometheus config file.", + } + monitorStartCmd := cobra.Command{ + Use: "start", + Aliases: []string{"up"}, + Short: "Start monitoring services.", + RunE: func(cmd *cobra.Command, _ []string) error { + _, err := os.Stat(PrometheusConfigFile) + if errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("file %s not found", PrometheusConfigFile) + } + if err := docker.ExecComposeVerbose(cmd.Context(), "monitoring", "up", "-d"); err != nil { + return err + } + logger.Info("Grafana: http://localhost:3000 ; Prometheus: http://localhost:9090") + return nil + }, + } + monitorStopCmd := cobra.Command{ + Use: "stop", + Aliases: []string{"down"}, + Short: "Stop monitoring services.", + RunE: func(cmd *cobra.Command, _ []string) error { + _, err := os.Stat(PrometheusConfigFile) + if errors.Is(err, fs.ErrNotExist) { + return nil + } + logger.Info("Shutting down monitoring services.") + if err := docker.ExecComposeVerbose(cmd.Context(), "monitoring", "down"); err != nil { + return err + } + // Remove prometheus config only when there is no testnet. + if _, err := os.Stat(cli.testnet.Dir); errors.Is(err, fs.ErrNotExist) { + if err := os.RemoveAll(PrometheusConfigFile); err != nil { + return err + } + } + return nil + }, + } + monitorCmd.AddCommand(&monitorStartCmd) + monitorCmd.AddCommand(&monitorStopCmd) + cli.root.AddCommand(&monitorCmd) + cli.root.AddCommand(&cobra.Command{ - Use: "cleanup", - Short: "Removes the testnet directory", - RunE: func(cmd *cobra.Command, args []string) error { + Use: "cleanup", + Aliases: []string{"clean"}, + Short: "Removes the testnet directory", + RunE: func(cmd *cobra.Command, _ []string) error { + // Alert if monitoring services are still running. 
+ outBytes, err := docker.ExecComposeOutput(cmd.Context(), "monitoring", "ps", "--services", "--filter", "status=running") + out := strings.TrimSpace(string(outBytes)) + if err == nil && len(out) != 0 { + logger.Info("Monitoring services are still running:\n" + out) + } return Cleanup(cli.testnet) }, }) - cli.root.AddCommand(&cobra.Command{ + var splitLogs bool + logCmd := &cobra.Command{ Use: "logs", - Short: "Shows the testnet logs", - RunE: func(cmd *cobra.Command, args []string) error { + Short: "Shows the testnet logs. Use `--split` to split logs into separate files", + RunE: func(cmd *cobra.Command, _ []string) error { + splitLogs, _ = cmd.Flags().GetBool("split") + if splitLogs { + for _, node := range cli.testnet.Nodes { + fmt.Println("Log for", node.Name) + err := docker.ExecComposeVerbose(context.Background(), cli.testnet.Dir, "logs", node.Name) + if err != nil { + return err + } + } + return nil + } return docker.ExecComposeVerbose(context.Background(), cli.testnet.Dir, "logs") }, - }) + } + logCmd.PersistentFlags().BoolVar(&splitLogs, "split", false, "outputs separate logs for each container") + cli.root.AddCommand(logCmd) cli.root.AddCommand(&cobra.Command{ Use: "tail", Short: "Tails the testnet logs", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return docker.ExecComposeVerbose(context.Background(), cli.testnet.Dir, "logs", "--follow") }, }) @@ -302,10 +426,10 @@ func NewCLI() *CLI { Min Block Interval Max Block Interval over a 100 block sampling period. - + Does not run any perturbations. `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { if err := Cleanup(cli.testnet); err != nil { return err } @@ -317,7 +441,7 @@ Does not run any perturbations. 
ctx, loadCancel := context.WithCancel(cmd.Context()) defer loadCancel() go func() { - err := Load(ctx, cli.testnet) + err := Load(ctx, cli.testnet, false) if err != nil { logger.Error(fmt.Sprintf("Transaction load errored: %v", err.Error())) } diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index 3d26060561e..1c9dc81feb2 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -41,6 +41,8 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat node.Name, name)) } + timeout := 20 * time.Second + switch perturbation { case e2e.PerturbationDisconnect: logger.Info("perturb node", "msg", log.NewLazySprintf("Disconnecting node %v...", node.Name)) @@ -60,6 +62,13 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat if err := docker.ExecCompose(context.Background(), testnet.Dir, "start", name); err != nil { return nil, err } + if node.PersistInterval == 0 { + timeout *= 5 + } else { + // still need to give some extra time to the runner + // to wait for the node to restart when killing + timeout *= 2 + } case e2e.PerturbationPause: logger.Info("perturb node", "msg", log.NewLazySprintf("Pausing node %v...", node.Name)) @@ -76,6 +85,9 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat if err := docker.ExecCompose(context.Background(), testnet.Dir, "restart", name); err != nil { return nil, err } + if node.PersistInterval == 0 { + timeout *= 5 + } case e2e.PerturbationUpgrade: oldV := node.Version @@ -101,12 +113,15 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat if err := docker.ExecCompose(context.Background(), testnet.Dir, "up", "-d", name+"_u"); err != nil { return nil, err } + if node.PersistInterval == 0 { + timeout *= 5 + } default: return nil, fmt.Errorf("unexpected perturbation %q", perturbation) } - status, err := waitForNode(ctx, node, 0, 20*time.Second) + status, err := waitForNode(ctx, node, 0, timeout) if err != nil { return nil, err } diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index f6f17384230..b5ac5239ea4 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -46,7 +46,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty subctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() result, err := client.Block(subctx, nil) - if err == context.DeadlineExceeded || err == context.Canceled { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { return nil, nil, ctx.Err() } if err != nil { @@ -72,7 +72,6 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty } timer.Reset(1 * time.Second) } - } } @@ -93,9 +92,11 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64, timeout time return nil, ctx.Err() case <-timer.C: status, err := client.Status(ctx) + sinceLastChanged := time.Since(lastChanged) switch { - case time.Since(lastChanged) > timeout: - return nil, fmt.Errorf("timed out waiting for %v to reach height %v", node.Name, height) + case sinceLastChanged > timeout: + return nil, fmt.Errorf("waiting for node %v timed out: exceeded %v wait timeout after waiting for %v", + node.Name, timeout, sinceLastChanged) case err != nil: case status.SyncInfo.LatestBlockHeight >= height && (height == 0 || !status.SyncInfo.CatchingUp): return status, nil diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 2853a02d0f3..3193cb0ae74 100644 --- a/test/e2e/runner/setup.go +++ 
b/test/e2e/runner/setup.go @@ -10,15 +10,21 @@ import ( "path/filepath" "regexp" "sort" + "strconv" "strings" + "text/template" "time" "github.com/BurntSushi/toml" + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" + + _ "embed" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/libs/log" - "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/nodekey" "github.com/cometbft/cometbft/privval" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/test/e2e/pkg/infra" @@ -35,20 +41,18 @@ const ( PrivvalStateFile = "data/priv_validator_state.json" PrivvalDummyKeyFile = "config/dummy_validator_key.json" PrivvalDummyStateFile = "data/dummy_validator_state.json" + + PrometheusConfigFile = "monitoring/prometheus.yml" ) // Setup sets up the testnet configuration. func Setup(testnet *e2e.Testnet, infp infra.Provider) error { - logger.Info("setup", "msg", log.NewLazySprintf("Generating testnet files in %q", testnet.Dir)) + logger.Info("setup", "msg", log.NewLazySprintf("Generating testnet files in %#q", testnet.Dir)) if err := os.MkdirAll(testnet.Dir, os.ModePerm); err != nil { return err } - if err := infp.Setup(); err != nil { - return err - } - genesis, err := MakeGenesis(testnet) if err != nil { return err @@ -98,7 +102,7 @@ func Setup(testnet *e2e.Testnet, infp infra.Provider) error { return err } - err = (&p2p.NodeKey{PrivKey: node.NodeKey}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json")) + err = (&nodekey.NodeKey{PrivKey: node.NodeKey}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json")) if err != nil { return err } @@ -114,14 +118,39 @@ func Setup(testnet *e2e.Testnet, infp infra.Provider) error { filepath.Join(nodeDir, PrivvalDummyKeyFile), filepath.Join(nodeDir, PrivvalDummyStateFile), )).Save() + + if testnet.LatencyEmulationEnabled { + // Generate a shell script file containing tc (traffic control) commands + // to emulate latency to other nodes. + tcCmds, err := tcCommands(node, infp) + if err != nil { + return err + } + latencyPath := filepath.Join(nodeDir, "emulate-latency.sh") + //nolint: gosec // G306: Expect WriteFile permissions to be 0600 or less + if err = os.WriteFile(latencyPath, []byte(strings.Join(tcCmds, "\n")), 0o755); err != nil { + return err + } + } } if testnet.Prometheus { - if err := testnet.WritePrometheusConfig(); err != nil { + if err := WritePrometheusConfig(testnet, PrometheusConfigFile); err != nil { + return err + } + // Make a copy of the Prometheus config file in the testnet directory. + // This should be temporary to keep it compatible with the qa-infra + // repository. 
+ if err := WritePrometheusConfig(testnet, filepath.Join(testnet.Dir, "prometheus.yml")); err != nil { return err } } + //nolint: revive + if err := infp.Setup(); err != nil { + return err + } + return nil } @@ -137,10 +166,23 @@ func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { genesis.ConsensusParams.Version.App = 1 genesis.ConsensusParams.Evidence.MaxAgeNumBlocks = e2e.EvidenceAgeHeight genesis.ConsensusParams.Evidence.MaxAgeDuration = e2e.EvidenceAgeTime - genesis.ConsensusParams.ABCI.VoteExtensionsEnableHeight = testnet.VoteExtensionsEnableHeight - for validator, power := range testnet.Validators { + genesis.ConsensusParams.Validator.PubKeyTypes = []string{testnet.KeyType} + if testnet.BlockMaxBytes != 0 { + genesis.ConsensusParams.Block.MaxBytes = testnet.BlockMaxBytes + } + if testnet.VoteExtensionsUpdateHeight == -1 { + genesis.ConsensusParams.Feature.VoteExtensionsEnableHeight = testnet.VoteExtensionsEnableHeight + } + if testnet.PbtsUpdateHeight == -1 { + genesis.ConsensusParams.Feature.PbtsEnableHeight = testnet.PbtsEnableHeight + } + for valName, power := range testnet.Validators { + validator := testnet.LookupNode(valName) + if validator == nil { + return types.GenesisDoc{}, fmt.Errorf("unknown validator %q for genesis doc", valName) + } genesis.Validators = append(genesis.Validators, types.GenesisValidator{ - Name: validator.Name, + Name: valName, Address: validator.PrivvalKey.PubKey().Address(), PubKey: validator.PrivvalKey.PubKey(), Power: power, @@ -154,11 +196,40 @@ func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { if len(testnet.InitialState) > 0 { appState, err := json.Marshal(testnet.InitialState) if err != nil { - return genesis, err + return types.GenesisDoc{}, err } genesis.AppState = appState } - return genesis, genesis.ValidateAndComplete() + + // Customized genesis fields provided in the manifest + if len(testnet.Genesis) > 0 { + v := viper.New() + v.SetConfigType("json") + + for _, field := range testnet.Genesis { + key, value, err := e2e.ParseKeyValueField("genesis", field) + if err != nil { + return types.GenesisDoc{}, err + } + logger.Debug("Applying 'genesis' field", key, value) + v.Set(key, value) + } + + // We use viper because it leaves untouched keys that are not set. + // The GenesisDoc does not use the original `mapstructure` tag. + err := v.Unmarshal(&genesis, func(d *mapstructure.DecoderConfig) { + d.TagName = "json" + d.ErrorUnused = true + }) + if err != nil { + return types.GenesisDoc{}, fmt.Errorf("failed parsing 'genesis' field: %v", err) + } + } + + if err := genesis.ValidateAndComplete(); err != nil { + return types.GenesisDoc{}, err + } + return genesis, nil } // MakeConfig generates a CometBFT config for a node. @@ -179,9 +250,10 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.P2P.AddrBookStrict = false cfg.DBBackend = node.Database - cfg.StateSync.DiscoveryTime = 5 * time.Second cfg.BlockSync.Version = node.BlockSyncVersion cfg.Consensus.PeerGossipIntraloopSleepDuration = node.Testnet.PeerGossipIntraloopSleepDuration + cfg.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToNonPersistentPeers) + cfg.Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToPersistentPeers) // Assume that full nodes and validators will have a data companion // attached, which will need access to the privileged gRPC endpoint. 
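The genesis customization above works by overlaying operator-supplied `key = value` pairs onto the genesis document through viper, precisely because viper's decoder leaves keys that were never `Set` untouched, while `ErrorUnused` rejects overrides that match no field. A self-contained sketch of the same overlay technique, with an illustrative struct standing in for `types.GenesisDoc`:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
)

type genesis struct {
	ChainID       string `json:"chain_id"`
	InitialHeight int64  `json:"initial_height"`
}

func main() {
	g := genesis{ChainID: "test-chain", InitialHeight: 1}

	v := viper.New()
	v.Set("initial_height", 100) // an operator-provided override

	// Decode over the existing struct: chain_id was never Set, so it
	// survives; ErrorUnused fails on keys that match no struct field.
	err := v.Unmarshal(&g, func(d *mapstructure.DecoderConfig) {
		d.TagName = "json"
		d.ErrorUnused = true
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", g) // {ChainID:test-chain InitialHeight:100}
}
```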
@@ -201,8 +273,11 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { case e2e.ProtocolGRPC: cfg.ProxyApp = AppAddressTCP cfg.ABCI = "grpc" - case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinConnSync: - cfg.ProxyApp = "" + case e2e.ProtocolBuiltin: + cfg.ProxyApp = "e2e" + cfg.ABCI = "" + case e2e.ProtocolBuiltinConnSync: + cfg.ProxyApp = "e2e_connsync" + cfg.ABCI = "" default: return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) @@ -250,6 +325,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { if len(cfg.StateSync.RPCServers) < 2 { return nil, errors.New("unable to find 2 suitable state sync RPC servers") } + cfg.StateSync.MaxDiscoveryTime = 30 * time.Second } cfg.P2P.Seeds = "" @@ -270,16 +346,64 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.P2P.PexReactor = false } + if node.Testnet.LogLevel != "" { + cfg.LogLevel = node.Testnet.LogLevel + } + + cfg.LogColors = false + if node.Testnet.LogFormat != "" { + cfg.LogFormat = node.Testnet.LogFormat + } + if node.Prometheus { cfg.Instrumentation.Prometheus = true } + if node.ExperimentalKeyLayout != "" { + cfg.Storage.ExperimentalKeyLayout = node.ExperimentalKeyLayout + } + + if node.Compact { + cfg.Storage.Compact = node.Compact + } + + if node.DiscardABCIResponses { + cfg.Storage.DiscardABCIResponses = node.DiscardABCIResponses + } + + if node.Indexer != "" { + cfg.TxIndex.Indexer = node.Indexer + } + + if node.CompactionInterval != 0 && node.Compact { + cfg.Storage.CompactionInterval = node.CompactionInterval + } + + // We currently need viper in order to parse config files. + if len(node.Config) > 0 { + v := viper.New() + for _, field := range node.Config { + key, value, err := e2e.ParseKeyValueField("config", field) + if err != nil { + return nil, err + } + logger.Debug("Applying 'config' field", "node", node.Name, key, value) + v.Set(key, value) + } + err := v.Unmarshal(cfg, func(d *mapstructure.DecoderConfig) { + d.ErrorUnused = true + }) + if err != nil { + return nil, fmt.Errorf("failed parsing 'config' field of node %v: %v", node.Name, err) + } + } + return cfg, nil } // MakeAppConfig generates an ABCI application config for a node. func MakeAppConfig(node *e2e.Node) ([]byte, error) { - cfg := map[string]interface{}{ + cfg := map[string]any{ "chain_id": node.Testnet.Name, "dir": "data/app", "listen": AppAddressUNIX, @@ -295,7 +419,14 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "vote_extension_delay": node.Testnet.VoteExtensionDelay, "finalize_block_delay": node.Testnet.FinalizeBlockDelay, "vote_extension_size": node.Testnet.VoteExtensionSize, + "vote_extensions_enable_height": node.Testnet.VoteExtensionsEnableHeight, + "vote_extensions_update_height": node.Testnet.VoteExtensionsUpdateHeight, "abci_requests_logging_enabled": node.Testnet.ABCITestsEnabled, + "pbts_enable_height": node.Testnet.PbtsEnableHeight, + "pbts_update_height": node.Testnet.PbtsUpdateHeight, + "no_lanes": node.Testnet.Manifest.NoLanes, + "lanes": node.Testnet.Manifest.Lanes, + "constant_flip": node.Testnet.ConstantFlip, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -327,14 +458,24 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { } } + // TODO: check if the produced validator updates are indeed valid. + // This goes to the application configuration file. 
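The application configuration file mentioned in the TODO is the TOML file that MakeAppConfig assembles above as a plain `map[string]any`; the file imports `github.com/BurntSushi/toml`, and serializing such a map is a one-liner with that package. A minimal sketch with illustrative keys only:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	cfg := map[string]any{ // illustrative keys and values
		"chain_id":         "test-chain",
		"dir":              "data/app",
		"persist_interval": 1,
	}
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```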
if len(node.Testnet.ValidatorUpdates) > 0 { validatorUpdates := map[string]map[string]int64{} for height, validators := range node.Testnet.ValidatorUpdates { updateVals := map[string]int64{} - for node, power := range validators { - updateVals[base64.StdEncoding.EncodeToString(node.PrivvalKey.PubKey().Bytes())] = power + for valName, power := range validators { + validator := node.Testnet.LookupNode(valName) + if validator == nil { + return nil, fmt.Errorf("unknown validator %q for validator updates in testnet, height %d", valName, height) + } + updateVals[base64.StdEncoding.EncodeToString(validator.PrivvalKey.PubKey().Bytes())] = power } - validatorUpdates[fmt.Sprintf("%v", height)] = updateVals + // TODO: the validator updates are written to the toml + // file in lexicographical order. This means that + // update 1000 comes after update 1, and much before + // update 2. Consider producing `0001` instead of `1`. + validatorUpdates[strconv.FormatInt(height, 10)] = updateVals } cfg["validator_update"] = validatorUpdates } @@ -347,6 +488,26 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { return buf.Bytes(), nil } +//go:embed templates/prometheus-yml.tmpl +var prometheusYamlTemplate string + +func WritePrometheusConfig(testnet *e2e.Testnet, path string) error { + tmpl, err := template.New("prometheus-yaml").Parse(prometheusYamlTemplate) + if err != nil { + return err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, testnet) + if err != nil { + return err + } + err = os.WriteFile(path, buf.Bytes(), 0o644) //nolint:gosec + if err != nil { + return err + } + return nil +} + // UpdateConfigStateSync updates the state sync config for a node. func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { cfgPath := filepath.Join(node.Testnet.Dir, node.Name, "config", "config.toml") diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index a3a7b072450..c26541ef6b1 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -2,7 +2,7 @@ package main import ( "context" - "fmt" + "errors" "sort" "time" @@ -13,7 +13,7 @@ import ( func Start(ctx context.Context, testnet *e2e.Testnet, p infra.Provider) error { if len(testnet.Nodes) == 0 { - return fmt.Errorf("no nodes in testnet") + return errors.New("no nodes in testnet") } // Nodes are already sorted by name. 
Sort them by name then startAt, @@ -37,7 +37,7 @@ func Start(ctx context.Context, testnet *e2e.Testnet, p infra.Provider) error { }) if nodeQueue[0].StartAt > 0 { - return fmt.Errorf("no initial nodes in testnet") + return errors.New("no initial nodes in testnet") } // Start initial nodes (StartAt: 0) @@ -58,19 +58,11 @@ func Start(ctx context.Context, testnet *e2e.Testnet, p infra.Provider) error { if node.PrometheusProxyPort > 0 { logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v; with Prometheus on http://%s:%v/metrics", - node.Name, - node.ExternalIP, - node.RPCProxyPort, - node.ExternalIP, - node.PrometheusProxyPort, - ), + node.Name, node.ExternalIP, node.RPCProxyPort, node.ExternalIP, node.PrometheusProxyPort), ) } else { logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v", - node.Name, - node.ExternalIP, - node.RPCProxyPort, - )) + node.Name, node.ExternalIP, node.RPCProxyPort)) } } @@ -127,8 +119,13 @@ func Start(ctx context.Context, testnet *e2e.Testnet, p infra.Provider) error { if err != nil { return err } - logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v at height %v", - node.Name, node.ExternalIP, node.RPCProxyPort, status.SyncInfo.LatestBlockHeight)) + if node.PrometheusProxyPort > 0 { + logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v at height %v; with Prometheus on http://%s:%v/metrics", + node.Name, node.ExternalIP, node.RPCProxyPort, status.SyncInfo.LatestBlockHeight, node.ExternalIP, node.PrometheusProxyPort)) + } else { + logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://%s:%v at height %v", + node.Name, node.ExternalIP, node.RPCProxyPort, status.SyncInfo.LatestBlockHeight)) + } } return nil diff --git a/test/e2e/pkg/templates/prometheus-yaml.tmpl b/test/e2e/runner/templates/prometheus-yml.tmpl similarity index 53% rename from test/e2e/pkg/templates/prometheus-yaml.tmpl rename to test/e2e/runner/templates/prometheus-yml.tmpl index 3c7636e0ddc..9fdef8df606 100644 --- a/test/e2e/pkg/templates/prometheus-yaml.tmpl +++ b/test/e2e/runner/templates/prometheus-yml.tmpl @@ -5,5 +5,5 @@ scrape_configs: {{- range .Nodes }} - job_name: '{{ .Name }}' static_configs: - - targets: ['localhost:{{ .PrometheusProxyPort }}'] + - targets: ['localhost:{{ .PrometheusProxyPort }}','host.docker.internal:{{ .PrometheusProxyPort }}'] {{end}} \ No newline at end of file diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index a42e50d2581..f2babb9c7df 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -2,20 +2,23 @@ package main import ( "context" + "fmt" "os" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/test/e2e/pkg/exec" ) -// Test runs test cases under tests/ +// Test runs test cases under tests. 
func Test(testnet *e2e.Testnet, ifd *e2e.InfrastructureData) error { - logger.Info("Running tests in ./tests/...") - err := os.Setenv("E2E_MANIFEST", testnet.File) if err != nil { return err } + err = os.Setenv("E2E_TESTNET_DIR", testnet.Dir) + if err != nil { + return err + } if p := ifd.Path; p != "" { err = os.Setenv("INFRASTRUCTURE_FILE", p) if err != nil { @@ -27,12 +30,21 @@ func Test(testnet *e2e.Testnet, ifd *e2e.InfrastructureData) error { return err } - cmd := []string{"go", "test", "-count", "1"} + cmd := []string{"go", "test", "-tags", "bls12381", "-count", "1"} verbose := os.Getenv("VERBOSE") if verbose == "1" { cmd = append(cmd, "-v") } cmd = append(cmd, "./tests/...") + tests := "all tests" + runTest := os.Getenv("RUN_TEST") + if len(runTest) != 0 { + cmd = append(cmd, "-run", runTest) + tests = fmt.Sprintf("%q", runTest) + } + + logger.Info(fmt.Sprintf("Running %s in ./tests/...", tests)) + return exec.CommandVerbose(context.Background(), cmd...) } diff --git a/test/e2e/tests/abci_test.go b/test/e2e/tests/abci_test.go index 4645f2782c1..82d7fdb8cc2 100644 --- a/test/e2e/tests/abci_test.go +++ b/test/e2e/tests/abci_test.go @@ -3,22 +3,24 @@ package e2e_test import ( "testing" + "github.com/stretchr/testify/require" + e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/test/e2e/pkg/grammar" - "github.com/stretchr/testify/require" ) func TestCheckABCIGrammar(t *testing.T) { checker := grammar.NewGrammarChecker(grammar.DefaultConfig()) testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if !node.Testnet.ABCITestsEnabled { return } - reqs, err := fetchABCIRequests(t, node.Name) + executions, err := fetchABCIRequests(t, node.Name) require.NoError(t, err) - for i, r := range reqs { + for i, e := range executions { isCleanStart := i == 0 - _, err := checker.Verify(r, isCleanStart) + _, err := checker.Verify(e, isCleanStart) require.NoError(t, err) } }) @@ -26,6 +28,7 @@ func TestCheckABCIGrammar(t *testing.T) { func TestNodeNameExtracting(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if !node.Testnet.ABCITestsEnabled { return } diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index d4f0f91561e..3059f36681f 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "bytes" + "encoding/hex" "fmt" "math/rand" "strconv" @@ -18,6 +19,7 @@ import ( // Tests that any initial state given in genesis has made it into the app. func TestApp_InitialState(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if len(node.Testnet.InitialState) == 0 { return } @@ -36,7 +38,9 @@ func TestApp_InitialState(t *testing.T) { // Tests that the app hash (as reported by the app) matches the last // block and the node sync status. 
func TestApp_Hash(t *testing.T) { + t.Helper() testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() client, err := node.Client() require.NoError(t, err) @@ -52,20 +56,22 @@ func TestApp_Hash(t *testing.T) { require.NoError(t, err) require.NotZero(t, status.SyncInfo.LatestBlockHeight) return status.SyncInfo.LatestBlockHeight >= requestedHeight - }, 5*time.Second, 500*time.Millisecond) + }, 30*time.Second, 500*time.Millisecond) block, err := client.Block(ctx, &requestedHeight) require.NoError(t, err) require.Equal(t, - fmt.Sprintf("%x", info.Response.LastBlockAppHash), - fmt.Sprintf("%x", block.Block.AppHash.Bytes()), + hex.EncodeToString(info.Response.LastBlockAppHash), + hex.EncodeToString(block.Block.AppHash.Bytes()), "app hash does not match last block's app hash") }) } // Tests that we can set a value and retrieve it. func TestApp_Tx(t *testing.T) { + t.Helper() testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() client, err := node.Client() require.NoError(t, err) @@ -77,19 +83,22 @@ func TestApp_Tx(t *testing.T) { require.NoError(t, err) key := fmt.Sprintf("testapp-tx-%v", node.Name) - value := fmt.Sprintf("%x", bz) + value := hex.EncodeToString(bz) tx := types.Tx(fmt.Sprintf("%v=%v", key, value)) - _, err = client.BroadcastTxSync(ctx, tx) + res, err := client.BroadcastTxSync(ctx, tx) require.NoError(t, err) + require.NotNil(t, res) + require.Zero(t, res.Code) hash := tx.Hash() - waitTime := 30 * time.Second + require.Equal(t, res.Hash, hash) + waitTime := 1 * time.Minute require.Eventuallyf(t, func() bool { txResp, err := client.Tx(ctx, hash, false) return err == nil && bytes.Equal(txResp.Tx, tx) }, waitTime, time.Second, - "submitted tx wasn't committed after %v", waitTime, + "submitted tx (%X) wasn't committed after %v", hash, waitTime, ) // NOTE: we don't test abci query of the light client @@ -101,12 +110,13 @@ func TestApp_Tx(t *testing.T) { require.NoError(t, err) assert.Equal(t, key, string(abciResp.Response.Key)) assert.Equal(t, value, string(abciResp.Response.Value)) - }) } func TestApp_VoteExtensions(t *testing.T) { + t.Helper() testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() client, err := node.Client() require.NoError(t, err) info, err := client.ABCIInfo(ctx) @@ -120,7 +130,6 @@ func TestApp_VoteExtensions(t *testing.T) { // the app to have any extension value set (via a normal tx). if node.Testnet.VoteExtensionsEnableHeight != 0 && info.Response.LastBlockHeight > node.Testnet.VoteExtensionsEnableHeight { - parts := bytes.Split(resp.Response.Value, []byte("|")) require.Len(t, parts, 2) extSum, err := strconv.Atoi(string(parts[0])) diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go index c713a9feebc..4fecd26a102 100644 --- a/test/e2e/tests/block_test.go +++ b/test/e2e/tests/block_test.go @@ -1,7 +1,9 @@ package e2e_test import ( + "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -11,8 +13,10 @@ import ( // Tests that block headers are identical across nodes where present. func TestBlock_Header(t *testing.T) { + t.Helper() blocks := fetchBlockChain(t) testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if node.Mode == e2e.ModeSeed || node.EnableCompanionPruning { return } @@ -25,7 +29,13 @@ func TestBlock_Header(t *testing.T) { first := status.SyncInfo.EarliestBlockHeight last := status.SyncInfo.LatestBlockHeight if node.RetainBlocks > 0 { - first++ // avoid race conditions with block pruning + // This was done in case pruning is activated. 
+ // As it happens in the background this lowers the chances + // that the block at height=first will be pruned by the time we test + // this. If this test starts to fail often, it is worth revisiting this logic. + // To reproduce this failure locally, it is advised to set the storage.pruning.interval + // to 1s instead of 10s. + first += int64(node.RetainBlocks) // avoid race conditions with block pruning } for _, block := range blocks { @@ -47,14 +57,39 @@ func TestBlock_Header(t *testing.T) { }) } -// Tests that the node contains the expected block range. -func TestBlock_Range(t *testing.T) { +// Tests that nodes configured to prune are actually pruning. +func TestBlock_Pruning(t *testing.T) { + t.Helper() testNode(t, func(t *testing.T, node e2e.Node) { - // We do not run this test on seed nodes or nodes with data - // companion-related pruning enabled. - if node.Mode == e2e.ModeSeed || node.EnableCompanionPruning { + t.Helper() + // We do not run this test on stateless nodes or nodes with data + // companion-related pruning enabled or nodes not pruning. + if node.Stateless() || node.EnableCompanionPruning || node.RetainBlocks == 0 { return } + client, err := node.Client() + require.NoError(t, err) + status, err := client.Status(ctx) + require.NoError(t, err) + first0 := status.SyncInfo.EarliestBlockHeight + + require.Eventually(t, func() bool { + status, err := client.Status(ctx) + require.NoError(t, err) + first := status.SyncInfo.EarliestBlockHeight + last := status.SyncInfo.LatestBlockHeight + pruning := first > first0 + pruningEnough := last-first+1 < int64(node.RetainBlocks)+10 // 10 represents some leeway + return pruning && pruningEnough + }, 1*time.Minute, 3*time.Second, "node %v is not pruning correctly", node.Name) + }) +} + +// Tests that the node contains the expected block range. +func TestBlock_Range(t *testing.T) { + t.Helper() + testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() client, err := node.Client() require.NoError(t, err) @@ -70,13 +105,7 @@ func TestBlock_Range(t *testing.T) { "state synced nodes should not contain network's initial height") case node.RetainBlocks > 0 && int64(node.RetainBlocks) < (last-node.Testnet.InitialHeight+1): - // Delta handles race conditions in reading first/last heights. - // The pruning mechanism is now asynchronous and might have been woken up yet to complete the pruning - // So we have no guarantees that all the blocks will have been pruned by the time we check - // Thus we allow for some flexibility in the difference between the expected retain blocks number - // and the actual retain blocks (which should be greater) - assert.InDelta(t, node.RetainBlocks, last-first+1, 10, - "node not pruning expected blocks") + // we test nodes are actually pruning in `TestBlock_Pruning` assert.GreaterOrEqual(t, uint64(last-first+1), node.RetainBlocks, "node pruned more blocks than it should") default: @@ -85,12 +114,25 @@ func TestBlock_Range(t *testing.T) { } for h := first; h <= last; h++ { - resp, err := client.Block(ctx, &(h)) - if err != nil && node.RetainBlocks > 0 && h == first { - // Ignore errors in first block if node is pruning blocks due to race conditions. + if h < first { continue } + resp, err := client.Block(ctx, &(h)) + if node.RetainBlocks > 0 && !node.EnableCompanionPruning && + (err != nil && strings.Contains(err.Error(), " is not available, lowest height is ") || + resp.Block == nil) { + // If node is pruning and doesn't return a valid block + // compare wanted block to blockstore's base, and update `first`. 
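Stepping back, the new TestBlock_Pruning above leans on testify's `require.Eventually` to poll until pruning becomes observable: record a baseline, then poll until the value has advanced. The pattern in isolation, as a runnable sketch in which an atomic counter stands in for a node's earliest block height:

```go
package e2e_sketch

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyPattern(t *testing.T) {
	var earliest atomic.Int64
	earliest.Store(1)

	go func() { // stands in for background pruning advancing the base height
		for i := 0; i < 10; i++ {
			time.Sleep(20 * time.Millisecond)
			earliest.Add(1)
		}
	}()

	first0 := earliest.Load()
	require.Eventually(t, func() bool {
		return earliest.Load() > first0 // pruning has visibly advanced
	}, 2*time.Second, 20*time.Millisecond, "base height never advanced")
}
```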
+ status, err := client.Status(ctx) + require.NoError(t, err) + first = status.SyncInfo.EarliestBlockHeight + if h < first { + continue + } + } require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Block) assert.Equal(t, h, resp.Block.Height) } @@ -100,3 +142,35 @@ func TestBlock_Range(t *testing.T) { } }) } + +// Tests that time is monotonically increasing, +// and that blocks produced according to BFT Time follow MedianTime calculation. +func TestBlock_Time(t *testing.T) { + t.Helper() + blocks := fetchBlockChain(t) + testnet := loadTestnet(t) + + // blocks are 1-indexed, i.e., blocks[0] = height 1, blocks[1] = height 2, etc. + lastBlock := blocks[0] + valSchedule := newValidatorSchedule(t, &testnet) + for _, block := range blocks[1:] { + require.Less(t, lastBlock.Time, block.Time) + lastBlock = block + + if testnet.PbtsEnableHeight == 0 || block.Height < testnet.PbtsEnableHeight { + expTime := block.LastCommit.MedianTime(valSchedule.Set) + + require.Equal( + t, + expTime, + block.Time, + "height=%d, valSet=%s\n%s", + block.Height, + valSchedule.Set, + block.LastCommit.StringIndented(" "), + ) + } + + valSchedule.IncreaseHeight(t, 1) + } +} diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index 1bd6e798c73..b0df27b68ee 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -3,7 +3,6 @@ package e2e_test import ( "context" "os" - "os/exec" "path/filepath" "strings" "sync" @@ -16,6 +15,7 @@ import ( rpctypes "github.com/cometbft/cometbft/rpc/core/types" "github.com/cometbft/cometbft/test/e2e/app" e2e "github.com/cometbft/cometbft/test/e2e/pkg" + "github.com/cometbft/cometbft/test/e2e/pkg/infra/docker" "github.com/cometbft/cometbft/types" ) @@ -23,6 +23,7 @@ func init() { // This can be used to manually specify a testnet manifest and/or node to // run tests against. The testnet must have been started by the runner first. // os.Setenv("E2E_MANIFEST", "networks/ci.toml") + // os.Setenv("E2E_TESTNET_DIR", "networks/ci") // os.Setenv("E2E_NODE", "validator01") } @@ -60,7 +61,6 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { node := *node t.Run(node.Name, func(t *testing.T) { - t.Parallel() testFunc(t, node) }) } @@ -71,7 +71,7 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { // // If maxNodes is set to 0 or below, all full nodes and validators will be // tested. -func testFullNodesOrValidators(t *testing.T, maxNodes int, testFunc func(*testing.T, e2e.Node)) { +func testFullNodesOrValidators(t *testing.T, maxNodes int, testFunc func(*testing.T, e2e.Node)) { //nolint:unparam // maxNodes always receives 0 but that could change so we should keep the parameter. 
t.Helper() testnet := loadTestnet(t) @@ -92,7 +92,6 @@ func testFullNodesOrValidators(t *testing.T, maxNodes int, testFunc func(*testin if node.Mode == e2e.ModeFull || node.Mode == e2e.ModeValidator { node := *node t.Run(node.Name, func(t *testing.T) { - t.Parallel() testFunc(t, node) }) nodeCount++ @@ -139,7 +138,12 @@ func loadTestnet(t *testing.T) e2e.Testnet { } require.NoError(t, err) - testnet, err := e2e.LoadTestnet(manifestFile, ifd) + testnetDir := os.Getenv("E2E_TESTNET_DIR") + if !filepath.IsAbs(testnetDir) { + testnetDir = filepath.Join("..", testnetDir) + } + + testnet, err := e2e.LoadTestnet(manifestFile, ifd, testnetDir) require.NoError(t, err) testnetCache[manifestFile] = *testnet return *testnet @@ -200,6 +204,7 @@ func fetchBlockChain(t *testing.T) []*types.Block { // fetchABCIRequests goes through the logs and collects all ABCI requests (each slice represents requests from the beginning until the first crash, // and then between two crashes) for a specific node. func fetchABCIRequests(t *testing.T, nodeName string) ([][]*abci.Request, error) { + t.Helper() testnet := loadTestnet(t) logs, err := fetchNodeLogs(testnet) if err != nil { @@ -228,6 +233,5 @@ func fetchABCIRequests(t *testing.T, nodeName string) ([][]*abci.Request, error) } func fetchNodeLogs(testnet e2e.Testnet) ([]byte, error) { - dir := filepath.Join(testnet.Dir, "docker-compose.yml") - return exec.Command("docker-compose", "-f", dir, "logs").Output() + return docker.ExecComposeOutput(context.Background(), testnet.Dir, "logs") } diff --git a/test/e2e/tests/evidence_test.go b/test/e2e/tests/evidence_test.go index f7f2ede7902..34a7e24e150 100644 --- a/test/e2e/tests/evidence_test.go +++ b/test/e2e/tests/evidence_test.go @@ -7,7 +7,7 @@ import ( ) // assert that all nodes that have blocks at the height of a misbehavior have evidence -// for that misbehavior +// for that misbehavior. func TestEvidence_Misbehavior(t *testing.T) { blocks := fetchBlockChain(t) testnet := loadTestnet(t) diff --git a/test/e2e/tests/grpc_test.go b/test/e2e/tests/grpc_test.go index 67a71f7f572..ccb814c9369 100644 --- a/test/e2e/tests/grpc_test.go +++ b/test/e2e/tests/grpc_test.go @@ -2,19 +2,25 @@ package e2e_test import ( "context" - "fmt" + "errors" "testing" "time" - coretypes "github.com/cometbft/cometbft/rpc/core/types" - "github.com/cometbft/cometbft/rpc/grpc/client/privileged" + "github.com/stretchr/testify/require" + e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/version" - "github.com/stretchr/testify/require" ) +// These tests are in place to confirm that both the non-privileged and privileged GRPC services can be called upon +// successfully and produce the expected outcomes. They consist of straightforward method invocations for each service. +// The emphasis is on avoiding complex scenarios and excluding hard-to-test cases like pruning logic. + +// Test the GRPC Version service. Invoke the GetVersion method. 
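Before the gRPC tests, a note on the harness they all share: `testNode` and `testFullNodesOrValidators` iterate over the testnet's nodes and register one subtest per node (with `t.Parallel()` now removed, so subtests run sequentially). The skeleton of that per-node subtest pattern, with hypothetical names:

```go
package e2e_sketch

import "testing"

type node struct{ Name string }

// runPerNode registers one subtest per node, mirroring the runner's
// testNode helper. Names here are illustrative, not the actual harness.
func runPerNode(t *testing.T, nodes []node, testFunc func(*testing.T, node)) {
	t.Helper()
	for _, n := range nodes {
		n := n // copy the loop variable for the closure (pre-Go 1.22 semantics)
		t.Run(n.Name, func(t *testing.T) {
			testFunc(t, n)
		})
	}
}
```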
func TestGRPC_Version(t *testing.T) { + t.Helper() testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { + t.Helper() ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) defer ctxCancel() client, err := node.GRPCClient(ctx) @@ -24,243 +30,221 @@ func TestGRPC_Version(t *testing.T) { res, err := client.GetVersion(ctx) require.NoError(t, err) - require.Equal(t, version.TMCoreSemVer, res.Node) + require.Equal(t, version.CMTSemVer, res.Node) require.Equal(t, version.ABCIVersion, res.ABCI) require.Equal(t, version.P2PProtocol, res.P2P) require.Equal(t, version.BlockProtocol, res.Block) }) } +// Test the GRPC Block Service. Invoke the GetBlockByHeight method to return a block +// at the latest height returned by the Block Service's GetLatestHeight method. func TestGRPC_Block_GetByHeight(t *testing.T) { testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { + t.Helper() - client, err := node.Client() - require.NoError(t, err) - status, err := client.Status(ctx) + // Get the latest height + latestHeight, err := getLatestHeight(node) require.NoError(t, err) - // We are not testing getting the first block in these - // tests to prevent race conditions with the pruning mechanism - // that might make the tests fail. Just testing the last block - // is enough to validate the fact that we can fetch a block using - // the gRPC endpoint - last := status.SyncInfo.LatestBlockHeight - ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) defer ctxCancel() + gRPCClient, err := node.GRPCClient(ctx) require.NoError(t, err) defer gRPCClient.Close() // Get last block and fetch it using the gRPC endpoint - lastBlock, err := gRPCClient.GetBlockByHeight(ctx, last) + lastBlock, err := gRPCClient.GetBlockByHeight(ctx, latestHeight) - // Last block tests + // Last block tests. Check if heights match, the latest height retrieved and the block height fetched require.NoError(t, err) require.NotNil(t, lastBlock.BlockID) - require.Equal(t, lastBlock.Block.Height, last) - - }) -} - -func TestGRPC_Block_GetLatest(t *testing.T) { - testFullNodesOrValidators(t, 1, func(t *testing.T, node e2e.Node) { - if node.Mode != e2e.ModeFull && node.Mode != e2e.ModeValidator { - return - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - gclient, err := node.GRPCClient(ctx) - require.NoError(t, err) - defer gclient.Close() - - resultCh, err := gclient.GetLatestHeight(ctx) - require.NoError(t, err) - - select { - case <-ctx.Done(): - require.Fail(t, "did not expect context to be canceled") - case result := <-resultCh: - require.NoError(t, result.Error) - block, err := gclient.GetLatestBlock(ctx) - require.NoError(t, err) - // We can be off by at most one block, depending on how quickly the - // latest block request was executed. 
- require.True(t, result.Height == block.Block.Height || result.Height == block.Block.Height+1) - } - }) -} - -func TestGRPC_Block_GetLatestHeight(t *testing.T) { - testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - client, err := node.Client() - require.NoError(t, err) - status, err := client.Status(ctx) - require.NoError(t, err) - - gclient, err := node.GRPCClient(ctx) - require.NoError(t, err) - defer gclient.Close() - - resultCh, err := gclient.GetLatestHeight(ctx) - require.NoError(t, err) - - select { - case <-ctx.Done(): - require.Fail(t, "did not expect context to be canceled") - case result := <-resultCh: - require.NoError(t, result.Error) - require.True(t, result.Height == status.SyncInfo.LatestBlockHeight || result.Height == status.SyncInfo.LatestBlockHeight+1) - } + require.Equal(t, lastBlock.Block.Height, latestHeight) + require.NotNil(t, lastBlock.Block.LastCommit) }) } +// Test the GRPC Block Results service. Invoke the GetBlockResults method to retrieve the block results +// at the latest height returned by the Block Service's GetLatestHeight method. func TestGRPC_GetBlockResults(t *testing.T) { + t.Helper() testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { - client, err := node.Client() - require.NoError(t, err) - status, err := client.Status(ctx) - require.NoError(t, err) + t.Helper() - first := status.SyncInfo.EarliestBlockHeight - last := status.SyncInfo.LatestBlockHeight - if node.RetainBlocks > 0 { - first++ - } + // Get the latest height + latestHeight, err := getLatestHeight(node) + require.NoError(t, err) ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) defer ctxCancel() + gRPCClient, err := node.GRPCClient(ctx) require.NoError(t, err) defer gRPCClient.Close() - // GetLatestBlockResults - latestBlockResults, err := gRPCClient.GetLatestBlockResults(ctx) - - require.GreaterOrEqual(t, last, latestBlockResults.Height) - require.NoError(t, err, "Unexpected error for GetLatestBlockResults") - require.NotNil(t, latestBlockResults) - - successCases := []struct { - expectedHeight int64 - }{ - {first}, - {latestBlockResults.Height}, - } - errorCases := []struct { - requestHeight int64 - }{ - {first - 2}, - {last + 100000}, - } - - for _, tc := range successCases { - res, err := gRPCClient.GetBlockResults(ctx, tc.expectedHeight) - - require.NoError(t, err, fmt.Sprintf("Unexpected error for GetBlockResults at expected height: %d", tc.expectedHeight)) - require.NotNil(t, res) - require.Equal(t, res.Height, tc.expectedHeight) - } - for _, tc := range errorCases { - _, err = gRPCClient.GetBlockResults(ctx, tc.requestHeight) - require.Error(t, err) - } + // Fetch the block results at the latest height retrieved + // Ensure the heights match, the latest height used to fetch the Block Results and + // the height returned in the block results. + // Also ensure the AppHash is not nil. + blockResults, err := gRPCClient.GetBlockResults(ctx, latestHeight) + require.NoError(t, err) + require.Equal(t, blockResults.Height, latestHeight) + require.NotNil(t, blockResults.AppHash) }) } +// Test the GRPC Privileged Pruning Service methods to set and get the block retain height. 
func TestGRPC_BlockRetainHeight(t *testing.T) { + t.Helper() testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { + t.Helper() if !node.EnableCompanionPruning { return } - grpcClient, status, cleanup := getGRPCPrivilegedClientForTesting(t, node) - defer cleanup() + // Get the latest height + latestHeight, err := getLatestHeight(node) + require.NoError(t, err) + + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) + defer ctxCancel() - err := grpcClient.SetBlockRetainHeight(ctx, uint64(status.SyncInfo.LatestBlockHeight-1)) + gRPCClient, err := node.GRPCPrivilegedClient(ctx) require.NoError(t, err) + defer gRPCClient.Close() - res, err := grpcClient.GetBlockRetainHeight(ctx) + // Test setting the block retain height via the GRPC Pruning service + // Ensure that the height set matches the retrieved retain height + err = gRPCClient.SetBlockRetainHeight(ctx, uint64(latestHeight-1)) + require.NoError(t, err) + res, err := gRPCClient.GetBlockRetainHeight(ctx) require.NoError(t, err) require.NotNil(t, res) - require.Equal(t, res.PruningService, uint64(status.SyncInfo.LatestBlockHeight-1)) + require.Equal(t, res.PruningService, uint64(latestHeight-1)) }) } +// Test the GRPC Privileged Pruning Service methods to set and get the block results retain height. func TestGRPC_BlockResultsRetainHeight(t *testing.T) { + t.Helper() testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { + t.Helper() if !node.EnableCompanionPruning { return } - grpcClient, status, cleanup := getGRPCPrivilegedClientForTesting(t, node) - defer cleanup() + // Get the latest height + latestHeight, err := getLatestHeight(node) + require.NoError(t, err) + + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) + defer ctxCancel() - err := grpcClient.SetBlockResultsRetainHeight(ctx, uint64(status.SyncInfo.LatestBlockHeight)-1) - require.NoError(t, err, "Unexpected error for SetBlockResultsRetainHeight") + gRPCClient, err := node.GRPCPrivilegedClient(ctx) + require.NoError(t, err) + defer gRPCClient.Close() - height, err := grpcClient.GetBlockResultsRetainHeight(ctx) - require.NoError(t, err, "Unexpected error for GetBlockRetainHeight") - require.Equal(t, height, uint64(status.SyncInfo.LatestBlockHeight)-1) + // Test setting the block results retain height via the GRPC Pruning service + // Ensure that the height set matches the retrieved retain height + err = gRPCClient.SetBlockResultsRetainHeight(ctx, uint64(latestHeight-1)) + require.NoError(t, err) + height, err := gRPCClient.GetBlockResultsRetainHeight(ctx) + require.NoError(t, err) + require.Equal(t, height, uint64(latestHeight-1)) }) } +// Test the GRPC Privileged Pruning Service methods to set and get the tx indexer retain height. 
func TestGRPC_TxIndexerRetainHeight(t *testing.T) { testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { + t.Helper() if !node.EnableCompanionPruning { return } - grpcClient, status, cleanup := getGRPCPrivilegedClientForTesting(t, node) - defer cleanup() + // Get the latest height + latestHeight, err := getLatestHeight(node) + require.NoError(t, err) - err := grpcClient.SetTxIndexerRetainHeight(ctx, uint64(status.SyncInfo.LatestBlockHeight)-1) - require.NoError(t, err, "Unexpected error for SetTxIndexerRetainHeight") + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) + defer ctxCancel() - height, err := grpcClient.GetTxIndexerRetainHeight(ctx) - require.NoError(t, err, "Unexpected error for GetTxIndexerRetainHeight") - require.Equal(t, height, uint64(status.SyncInfo.LatestBlockHeight)-1) + gRPCClient, err := node.GRPCPrivilegedClient(ctx) + require.NoError(t, err) + defer gRPCClient.Close() + + // Test setting the tx indexer retain height via the GRPC Pruning service + // Ensure that the height set matches the retrieved retain height + err = gRPCClient.SetTxIndexerRetainHeight(ctx, uint64(latestHeight-1)) + require.NoError(t, err) + height, err := gRPCClient.GetTxIndexerRetainHeight(ctx) + require.NoError(t, err) + require.Equal(t, height, uint64(latestHeight-1)) }) } +// Test the GRPC Privileged Pruning Service methods to set and get the block indexer retain height. func TestGRPC_BlockIndexerRetainHeight(t *testing.T) { + t.Helper() testFullNodesOrValidators(t, 0, func(t *testing.T, node e2e.Node) { + t.Helper() if !node.EnableCompanionPruning { return } - grpcClient, status, cleanup := getGRPCPrivilegedClientForTesting(t, node) - defer cleanup() + // Get the latest height + latestHeight, err := getLatestHeight(node) + require.NoError(t, err) - err := grpcClient.SetBlockIndexerRetainHeight(ctx, uint64(status.SyncInfo.LatestBlockHeight)-1) - require.NoError(t, err, "Unexpected error for SetTxIndexerRetainHeight") + ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) + defer ctxCancel() + + gRPCClient, err := node.GRPCPrivilegedClient(ctx) + require.NoError(t, err) + defer gRPCClient.Close() - height, err := grpcClient.GetBlockIndexerRetainHeight(ctx) - require.NoError(t, err, "Unexpected error for GetTxIndexerRetainHeight") - require.Equal(t, height, uint64(status.SyncInfo.LatestBlockHeight)-1) + // Test setting the block indexer retain height via the GRPC Pruning service + // Ensure that the height set matches the retrieved retain height + err = gRPCClient.SetBlockIndexerRetainHeight(ctx, uint64(latestHeight-1)) + require.NoError(t, err) + height, err := gRPCClient.GetBlockIndexerRetainHeight(ctx) + require.NoError(t, err) + require.Equal(t, height, uint64(latestHeight-1)) }) } -func getGRPCPrivilegedClientForTesting(t *testing.T, node e2e.Node) (privileged.Client, *coretypes.ResultStatus, func()) { - ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute) - - grpcClient, err := node.GRPCPrivilegedClient(ctx) - require.NoError(t, err) - - client, err := node.Client() - require.NoError(t, err) - - status, err := client.Status(ctx) - require.NoError(t, err) +// This method returns the latest height retrieved from the GRPC Block Service by invoking +// GetLatestHeight, which returns a channel that receives the latest height. Once a height is +// received on the channel, that height is returned. 
+func getLatestHeight(node e2e.Node) (int64, error) { + ctx, ctxCancel := context.WithTimeout(context.Background(), 3*time.Minute) // 3 minute timeout + defer ctxCancel() + gRPCClient, err := node.GRPCClient(ctx) + if err != nil { + return 0, err + } + defer gRPCClient.Close() + latestHeightCh, err := gRPCClient.GetLatestHeight(ctx) + if err != nil { + return 0, err + } - return grpcClient, status, func() { - ctxCancel() - err := grpcClient.Close() - require.NoError(t, err) + for { + select { + case <-ctx.Done(): + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + // Context has timed out + return 0, errors.New("context deadline exceeded while waiting for latest height") + } + // Context has been canceled + return 0, ctx.Err() + case latest, ok := <-latestHeightCh: + if ok { + return latest.Height, nil + } else { + return 0, errors.New("failed to receive latest height from channel") + } + } } } diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index ab1d6705c63..84fbe3290f2 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -14,6 +14,7 @@ func TestNet_Peers(t *testing.T) { t.SkipNow() testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() // Seed nodes shouldn't necessarily mesh with the entire network. if node.Mode == e2e.ModeSeed { return diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index ea92f697c75..11bcd6f66bc 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "bytes" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -13,7 +14,9 @@ import ( // Tests that validator sets are available and correct according to // scheduled validator updates. func TestValidator_Sets(t *testing.T) { + t.Helper() testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if node.Mode == e2e.ModeSeed || node.EnableCompanionPruning { return } @@ -28,11 +31,17 @@ func TestValidator_Sets(t *testing.T) { // skip first block if node is pruning blocks, to avoid race conditions if node.RetainBlocks > 0 { - first++ + // This was done in case pruning is activated. + // As it happens in the background this lowers the chances + // that the block at height=first will be pruned by the time we test + // this. If this test starts to fail often, it is worth revisiting this logic. + // To reproduce this failure locally, it is advised to set the storage.pruning.interval + // to 1s instead of 10s. + first += int64(node.RetainBlocks) } - valSchedule := newValidatorSchedule(*node.Testnet) - valSchedule.Increment(first - node.Testnet.InitialHeight) + valSchedule := newValidatorSchedule(t, node.Testnet) + valSchedule.IncreaseHeight(t, first-node.Testnet.InitialHeight) for h := first; h <= last; h++ { validators := []*types.Validator{} @@ -47,7 +56,7 @@ func TestValidator_Sets(t *testing.T) { } require.Equal(t, valSchedule.Set.Validators, validators, "incorrect validator set at height %v", h) - valSchedule.Increment(1) + valSchedule.IncreaseHeight(t, 1) } }) } @@ -55,13 +64,15 @@ func TestValidator_Sets(t *testing.T) { // Tests that a validator proposes blocks when it's supposed to. It tolerates some // missed blocks, e.g. due to testnet perturbations. 
func TestValidator_Propose(t *testing.T) { + t.Helper() blocks := fetchBlockChain(t) testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if node.Mode != e2e.ModeValidator { return } address := node.PrivvalKey.PubKey().Address() - valSchedule := newValidatorSchedule(*node.Testnet) + valSchedule := newValidatorSchedule(t, node.Testnet) expectCount := 0 proposeCount := 0 @@ -72,27 +83,36 @@ func TestValidator_Propose(t *testing.T) { proposeCount++ } } - valSchedule.Increment(1) + valSchedule.IncreaseHeight(t, 1) } - require.False(t, proposeCount == 0 && expectCount > 0, - "node did not propose any blocks (expected %v)", expectCount) - if expectCount > 5 { - require.GreaterOrEqual(t, proposeCount, 3, "validator didn't propose even 3 blocks") + if expectCount == 0 { + return + } + + if node.ClockSkew != 0 && node.Testnet.PbtsEnableHeight != 0 { + t.Logf("node with skewed clock (by %v), proposed %v, expected %v", + node.ClockSkew, proposeCount, expectCount) + return } + require.Greater(t, proposeCount, 0, + "node did not propose any blocks (expected %v)", expectCount) + require.False(t, expectCount > 5 && proposeCount < 3, "node only proposed %v blocks, expected %v", proposeCount, expectCount) }) } // Tests that a validator signs blocks when it's supposed to. It tolerates some // missed blocks, e.g. due to testnet perturbations. func TestValidator_Sign(t *testing.T) { + t.Helper() blocks := fetchBlockChain(t) testNode(t, func(t *testing.T, node e2e.Node) { + t.Helper() if node.Mode != e2e.ModeValidator { return } address := node.PrivvalKey.PubKey().Address() - valSchedule := newValidatorSchedule(*node.Testnet) + valSchedule := newValidatorSchedule(t, node.Testnet) expectCount := 0 signCount := 0 @@ -112,7 +132,7 @@ func TestValidator_Sign(t *testing.T) { } else { require.False(t, signed, "unexpected signature for block %v", block.LastCommit.Height) } - valSchedule.Increment(1) + valSchedule.IncreaseHeight(t, 1) } require.False(t, signCount == 0 && expectCount > 0, @@ -128,29 +148,35 @@ func TestValidator_Sign(t *testing.T) { type validatorSchedule struct { Set *types.ValidatorSet height int64 - updates map[int64]map[*e2e.Node]int64 + testnet *e2e.Testnet } -func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule { +func newValidatorSchedule(t *testing.T, testnet *e2e.Testnet) *validatorSchedule { + t.Helper() valMap := testnet.Validators // genesis validators if v, ok := testnet.ValidatorUpdates[0]; ok { // InitChain validators valMap = v } + vals, err := makeVals(testnet, valMap) + require.NoError(t, err) return &validatorSchedule{ height: testnet.InitialHeight, - Set: types.NewValidatorSet(makeVals(valMap)), - updates: testnet.ValidatorUpdates, + Set: types.NewValidatorSet(vals), + testnet: testnet, } } -func (s *validatorSchedule) Increment(heights int64) { +func (s *validatorSchedule) IncreaseHeight(t *testing.T, heights int64) { + t.Helper() for i := int64(0); i < heights; i++ { s.height++ if s.height > 2 { // validator set updates are offset by 2, since they only take effect // two blocks after they're returned. 
- if update, ok := s.updates[s.height-2]; ok { - if err := s.Set.UpdateWithChangeSet(makeVals(update)); err != nil { + if update, ok := s.testnet.ValidatorUpdates[s.height-2]; ok { + vals, err := makeVals(s.testnet, update) + require.NoError(t, err) + if err := s.Set.UpdateWithChangeSet(vals); err != nil { panic(err) } } @@ -159,10 +185,14 @@ func (s *validatorSchedule) Increment(heights int64) { } } -func makeVals(valMap map[*e2e.Node]int64) []*types.Validator { +func makeVals(testnet *e2e.Testnet, valMap map[string]int64) ([]*types.Validator, error) { vals := make([]*types.Validator, 0, len(valMap)) - for node, power := range valMap { - vals = append(vals, types.NewValidator(node.PrivvalKey.PubKey(), power)) + for valName, power := range valMap { + validator := testnet.LookupNode(valName) + if validator == nil { + return nil, fmt.Errorf("unknown validator %q for `validatorSchedule`", valName) + } + vals = append(vals, types.NewValidator(validator.PrivvalKey.PubKey(), power)) } - return vals + return vals, nil } diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index 50d634fedb3..707ce68b10d 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -1,6 +1,8 @@ package reactor import ( + "context" + "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/config" mempl "github.com/cometbft/cometbft/mempool" @@ -20,11 +22,20 @@ func init() { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mempool = mempl.NewCListMempool(cfg, appConnMem, 0) + + resp, err := app.Info(context.Background(), proxy.InfoRequest) + if err != nil { + panic(err) + } + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + if err != nil { + panic(err) + } + mempool = mempl.NewCListMempool(cfg, appConnMem, lanesInfo, 0) } func Fuzz(data []byte) int { - _, err := mempool.CheckTx(data) + _, err := mempool.CheckTx(data, "") if err != nil { return 0 } diff --git a/test/fuzz/tests/mempool_test.go b/test/fuzz/tests/mempool_test.go index 54b804d8961..613ed4c9037 100644 --- a/test/fuzz/tests/mempool_test.go +++ b/test/fuzz/tests/mempool_test.go @@ -3,13 +3,15 @@ package tests import ( + "context" "testing" abciclient "github.com/cometbft/cometbft/abci/client" "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/config" cmtsync "github.com/cometbft/cometbft/libs/sync" - mempool "github.com/cometbft/cometbft/mempool" + mempl "github.com/cometbft/cometbft/mempool" + "github.com/cometbft/cometbft/proxy" ) func FuzzMempool(f *testing.F) { @@ -24,9 +26,17 @@ func FuzzMempool(f *testing.F) { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mp := mempool.NewCListMempool(cfg, conn, 0) + resp, err := app.Info(context.Background(), proxy.InfoRequest) + if err != nil { + panic(err) + } + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + if err != nil { + panic(err) + } + mp := mempl.NewCListMempool(cfg, conn, lanesInfo, 0) - f.Fuzz(func(t *testing.T, data []byte) { - _, _ = mp.CheckTx(data) + f.Fuzz(func(_ *testing.T, data []byte) { + _, _ = mp.CheckTx(data, "") }) } diff --git a/test/fuzz/tests/p2p_secretconnection_test.go b/test/fuzz/tests/p2p_secretconnection_test.go index f61fa14d9c8..0393f0382c2 100644 --- a/test/fuzz/tests/p2p_secretconnection_test.go +++ b/test/fuzz/tests/p2p_secretconnection_test.go @@ -10,12 +10,12 @@ import ( "testing" "github.com/cometbft/cometbft/crypto/ed25519" - "github.com/cometbft/cometbft/libs/async" - sc 
"github.com/cometbft/cometbft/p2p/conn" + "github.com/cometbft/cometbft/internal/async" + sc "github.com/cometbft/cometbft/p2p/transport/tcp/conn" ) func FuzzP2PSecretConnection(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { fuzz(data) }) } @@ -69,12 +69,12 @@ func (drw kvstoreConn) Close() (err error) { err2 := drw.PipeWriter.CloseWithError(io.EOF) err1 := drw.PipeReader.Close() if err2 != nil { - return err + return err //nolint:nilerr // this is a false positive } return err1 } -// Each returned ReadWriteCloser is akin to a net.Connection +// Each returned ReadWriteCloser is akin to a net.Connection. func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { barReader, fooWriter := io.Pipe() fooReader, barWriter := io.Pipe() @@ -92,14 +92,14 @@ func makeSecretConnPair() (fooSecConn, barSecConn *sc.SecretConnection) { // Make connections from both sides in parallel. trs, ok := async.Parallel( - func(_ int) (val interface{}, abort bool, err error) { + func(_ int) (val any, abort bool, err error) { fooSecConn, err = sc.MakeSecretConnection(fooConn, fooPrvKey) if err != nil { log.Printf("failed to establish SecretConnection for foo: %v", err) return nil, true, err } remotePubBytes := fooSecConn.RemotePubKey() - if !remotePubBytes.Equals(barPubKey) { + if !bytes.Equal(remotePubBytes.Bytes(), barPubKey.Bytes()) { err = fmt.Errorf("unexpected fooSecConn.RemotePubKey. Expected %v, got %v", barPubKey, fooSecConn.RemotePubKey()) log.Print(err) @@ -107,14 +107,14 @@ func makeSecretConnPair() (fooSecConn, barSecConn *sc.SecretConnection) { } return nil, false, nil }, - func(_ int) (val interface{}, abort bool, err error) { + func(_ int) (val any, abort bool, err error) { barSecConn, err = sc.MakeSecretConnection(barConn, barPrvKey) if barSecConn == nil { log.Printf("failed to establish SecretConnection for bar: %v", err) return nil, true, err } remotePubBytes := barSecConn.RemotePubKey() - if !remotePubBytes.Equals(fooPubKey) { + if !bytes.Equal(remotePubBytes.Bytes(), fooPubKey.Bytes()) { err = fmt.Errorf("unexpected barSecConn.RemotePubKey. Expected %v, got %v", fooPubKey, barSecConn.RemotePubKey()) log.Print(err) diff --git a/test/fuzz/tests/rpc_jsonrpc_server_test.go b/test/fuzz/tests/rpc_jsonrpc_server_test.go index db6c0a2090e..de7ee7fd0f7 100644 --- a/test/fuzz/tests/rpc_jsonrpc_server_test.go +++ b/test/fuzz/tests/rpc_jsonrpc_server_test.go @@ -21,19 +21,19 @@ func FuzzRPCJSONRPCServer(f *testing.F) { I int `json:"i"` } rpcFuncMap := map[string]*rpcserver.RPCFunc{ - "c": rpcserver.NewRPCFunc(func(ctx *rpctypes.Context, args *args, options ...rpcserver.Option) (string, error) { + "c": rpcserver.NewRPCFunc(func(_ *rpctypes.Context, _ *args, _ ...rpcserver.Option) (string, error) { return "foo", nil }, "args"), } mux := http.NewServeMux() rpcserver.RegisterRPCFuncs(mux, rpcFuncMap, log.NewNopLogger()) - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { if len(data) == 0 { return } - req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(data)) + req, err := http.NewRequest(http.MethodPost, "http://localhost/", bytes.NewReader(data)) if err != nil { panic(err) } diff --git a/test/loadtime/README.md b/test/loadtime/README.md index 7f61ed42d02..6deddced691 100644 --- a/test/loadtime/README.md +++ b/test/loadtime/README.md @@ -22,18 +22,18 @@ make build The `load` binary is built when `make build` is invoked. The `load` tool generates transactions and broadcasts them to CometBFT. 
-`load` leverages the [tm-load-test](https://github.com/informalsystems/tm-load-test) -framework. As a result, all flags and options specified on the `tm-load-test` apply to +`load` leverages the [cometbft-load-test](https://github.com/cometbft/cometbft-load-test) +framework. As a result, all flags and options specified on the `cometbft-load-test` apply to `load`. -Below is a basic invocation for generating load against a CometBFT websocket running -on `localhost:25567` +Below is a basic invocation for generating load against a CometBFT websocket v1 running +on `localhost:26657` ```bash ./build/load \ -c 1 -T 10 -r 1000 -s 1024 \ --broadcast-tx-method sync \ - --endpoints ws://localhost:26657/websocket + --endpoints ws://localhost:26657/v1/websocket ``` ## `report` diff --git a/test/loadtime/basic.sh b/test/loadtime/basic.sh index a7eab7c275b..68d68092133 100755 --- a/test/loadtime/basic.sh +++ b/test/loadtime/basic.sh @@ -5,7 +5,7 @@ set -euo pipefail # A basic invocation of the loadtime tool. ./build/load \ - -c 1 -T 10 -r 1000 -s 1024 \ + -c 1 -T 100 -r 1000 -s 1024 \ --broadcast-tx-method sync \ --endpoints ws://localhost:26657/v1/websocket diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go index d6bb798bc1d..03746af3c63 100644 --- a/test/loadtime/cmd/load/main.go +++ b/test/loadtime/cmd/load/main.go @@ -1,11 +1,11 @@ package main import ( - "fmt" + "errors" "github.com/google/uuid" - "github.com/informalsystems/tm-load-test/pkg/loadtest" + "github.com/cometbft/cometbft-load-test/pkg/loadtest" "github.com/cometbft/cometbft/test/loadtime/payload" ) @@ -43,13 +43,13 @@ func main() { }) } -func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { +func (*ClientFactory) ValidateConfig(cfg loadtest.Config) error { psb, err := payload.MaxUnpaddedSize() if err != nil { return err } if psb > cfg.Size { - return fmt.Errorf("payload size exceeds configured size") + return errors.New("payload size exceeds configured size") } return nil } diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index 4b00f6adb72..a0ced07a18a 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -10,15 +10,15 @@ import ( "strings" dbm "github.com/cometbft/cometbft-db" - "github.com/cometbft/cometbft/store" "github.com/cometbft/cometbft/test/loadtime/report" ) var ( - db = flag.String("database-type", "goleveldb", "the type of database holding the blockstore") - dir = flag.String("data-dir", "", "path to the directory containing the CometBFT databases") - csvOut = flag.String("csv", "", "dump the extracted latencies as raw csv for use in additional tooling") + db = flag.String("database-type", "goleveldb", "the type of database holding the blockstore") + dir = flag.String("data-dir", "", "path to the directory containing the CometBFT databases") + csvOut = flag.String("csv", "", "dump the extracted latencies as raw csv for use in additional tooling") + oneline = flag.Bool("oneline", false, "display the results in one line of comma-separated values") ) func main() { @@ -65,19 +65,26 @@ func main() { return } for _, r := range rs.List() { - fmt.Printf(""+ - "Experiment ID: %s\n\n"+ - "\tConnections: %d\n"+ - "\tRate: %d\n"+ - "\tSize: %d\n\n"+ - "\tTotal Valid Tx: %d\n"+ - "\tTotal Negative Latencies: %d\n"+ - "\tMinimum Latency: %s\n"+ - "\tMaximum Latency: %s\n"+ - "\tAverage Latency: %s\n"+ - "\tStandard Deviation: %s\n\n", r.ID, r.Connections, r.Rate, r.Size, len(r.All), r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev) +
if *oneline { + fmt.Printf("%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", + r.ID, r.Connections, r.Rate, r.Size, len(r.All), r.NegativeCount, r.Min.Nanoseconds(), r.Max.Nanoseconds(), r.Avg.Nanoseconds(), r.StdDev.Nanoseconds(), rs.ErrorCount()) + } else { + fmt.Printf(""+ + "Experiment ID: %s\n\n"+ + "\tConnections: %d\n"+ + "\tRate: %d\n"+ + "\tSize: %d\n\n"+ + "\tTotal Valid Tx: %d\n"+ + "\tTotal Negative Latencies: %d\n"+ + "\tMinimum Latency: %s\n"+ + "\tMaximum Latency: %s\n"+ + "\tAverage Latency: %s\n"+ + "\tStandard Deviation: %s\n\n", r.ID, r.Connections, r.Rate, r.Size, len(r.All), r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev) + } + } + if !*oneline { + fmt.Printf("Total Invalid Tx: %d\n", rs.ErrorCount()) } - fmt.Printf("Total Invalid Tx: %d\n", rs.ErrorCount()) } func toCSVRecords(rs []report.Report) [][]string { @@ -87,7 +94,7 @@ func toCSVRecords(rs []report.Report) [][]string { } res := make([][]string, total+1) - res[0] = []string{"experiment_id", "block_time", "duration_ns", "tx_hash", "connections", "rate", "size"} + res[0] = []string{"experiment_id", "block_time", "duration_ns", "tx_hash", "lane", "connections", "rate", "size"} offset := 1 for _, r := range rs { idStr := r.ID.String() @@ -95,7 +102,16 @@ func toCSVRecords(rs []report.Report) [][]string { rateStr := strconv.FormatInt(int64(r.Rate), 10) sizeStr := strconv.FormatInt(int64(r.Size), 10) for i, v := range r.All { - res[offset+i] = []string{idStr, strconv.FormatInt(v.BlockTime.UnixNano(), 10), strconv.FormatInt(int64(v.Duration), 10), fmt.Sprintf("%X", v.Hash), connStr, rateStr, sizeStr} + res[offset+i] = []string{ + idStr, + strconv.FormatInt(v.BlockTime.UnixNano(), 10), + strconv.FormatInt(int64(v.Duration), 10), + fmt.Sprintf("%X", v.Hash), + v.Lane, + connStr, + rateStr, + sizeStr, + } } offset += len(r.All) } diff --git a/test/loadtime/payload/payload.go b/test/loadtime/payload/payload.go index 778729f8b9d..c845e2ccc22 100644 --- a/test/loadtime/payload/payload.go +++ b/test/loadtime/payload/payload.go @@ -8,35 +8,37 @@ import ( "math" "google.golang.org/protobuf/proto" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/timestamppb" ) -const keyPrefix = "a=" -const maxPayloadSize = 4 * 1024 * 1024 +const ( + keyPrefix = "a=" + maxPayloadSize = 4 * 1024 * 1024 +) // NewBytes generates a new payload and returns the encoded representation of // the payload as a slice of bytes. NewBytes uses the fields on the Options // to create the payload. 
func NewBytes(p *Payload) ([]byte, error) { p.Padding = make([]byte, 1) - if p.Time == nil { + if p.GetTime() == nil { p.Time = timestamppb.Now() } us, err := CalculateUnpaddedSize(p) if err != nil { return nil, err } - if p.Size > maxPayloadSize { - return nil, fmt.Errorf("configured size %d is too large (>%d)", p.Size, maxPayloadSize) + if p.GetSize() > maxPayloadSize { + return nil, fmt.Errorf("configured size %d is too large (>%d)", p.GetSize(), maxPayloadSize) } - pSize := int(p.Size) // #nosec -- The "if" above makes this cast safe + pSize := int(p.GetSize()) // #nosec -- The "if" above makes this cast safe if pSize < us { return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", pSize, us) } // We halve the padding size because we transform the TX to hex p.Padding = make([]byte, (pSize-us)/2) - _, err = rand.Read(p.Padding) + _, err = rand.Read(p.GetPadding()) if err != nil { return nil, err } @@ -89,8 +91,8 @@ func MaxUnpaddedSize() (int, error) { // purpose of determining how much padding to add to reach the target size. // CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1. func CalculateUnpaddedSize(p *Payload) (int, error) { - if len(p.Padding) != 1 { - return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding)) + if len(p.GetPadding()) != 1 { - return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.GetPadding())) + return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.GetPadding())) } b, err := proto.Marshal(p) if err != nil { diff --git a/test/loadtime/payload/payload.pb.go b/test/loadtime/payload/payload.pb.go index 765c81d3da2..5293a534e7d 100644 --- a/test/loadtime/payload/payload.pb.go +++ b/test/loadtime/payload/payload.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.20.1 +// protoc-gen-go v1.33.0 +// protoc v4.25.3 // source: payload/payload.proto package payload @@ -35,6 +35,7 @@ type Payload struct { Time *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time,proto3" json:"time,omitempty"` Id []byte `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` Padding []byte `protobuf:"bytes,6,opt,name=padding,proto3" json:"padding,omitempty"` + Lane string `protobuf:"bytes,7,opt,name=lane,proto3" json:"lane,omitempty"` } func (x *Payload) Reset() { @@ -111,6 +112,13 @@ func (x *Payload) GetPadding() []byte { return nil } +func (x *Payload) GetLane() string { + if x != nil { + return x.Lane + } + return "" +} + var File_payload_payload_proto protoreflect.FileDescriptor var file_payload_payload_proto_rawDesc = []byte{ @@ -118,7 +126,7 @@ var file_payload_payload_proto_rawDesc = []byte{ 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, 0x0a, 0x07, 0x50, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65, @@ -129,9 +137,10 @@ var file_payload_payload_proto_rawDesc = []byte{ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, - 0x69, 0x6e, 0x74, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2f, 0x74, + 0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x61, + 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x34, + 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6d, + 0x65, 0x74, 0x62, 0x66, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x65, 0x74, 0x62, 0x66, 0x74, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } diff --git a/test/loadtime/payload/payload.proto b/test/loadtime/payload/payload.proto index 59438a058c4..00143c8ac40 100644 --- a/test/loadtime/payload/payload.proto +++ b/test/loadtime/payload/payload.proto @@ -15,4 +15,5 @@ message Payload { google.protobuf.Timestamp time = 4; bytes id = 5; bytes padding = 6; + string lane = 7; } diff --git a/test/loadtime/payload/payload_test.go b/test/loadtime/payload/payload_test.go index 62ea3919f79..b095a6781af 100644 --- a/test/loadtime/payload/payload_test.go +++ b/test/loadtime/payload/payload_test.go @@ -43,16 +43,16 @@ func TestRoundTrip(t *testing.T) { if err != nil { t.Fatalf("reading payload %s", err) } - if p.Size != 
payloadSizeTarget { - t.Fatalf("payload size value %d does not match expected %d", p.Size, payloadSizeTarget) + if p.GetSize() != payloadSizeTarget { + t.Fatalf("payload size value %d does not match expected %d", p.GetSize(), payloadSizeTarget) } - if p.Connections != testConns { - t.Fatalf("payload connections value %d does not match expected %d", p.Connections, testConns) + if p.GetConnections() != testConns { + t.Fatalf("payload connections value %d does not match expected %d", p.GetConnections(), testConns) } - if p.Rate != testRate { - t.Fatalf("payload rate value %d does not match expected %d", p.Rate, testRate) + if p.GetRate() != testRate { + t.Fatalf("payload rate value %d does not match expected %d", p.GetRate(), testRate) } - if !bytes.Equal(p.Id, testID[:]) { - t.Fatalf("payload ID value %d does not match expected %d", p.Id, testID) + if !bytes.Equal(p.GetId(), testID[:]) { + t.Fatalf("payload ID value %d does not match expected %d", p.GetId(), testID) } } diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index 32ee960f915..5c86b3601ba 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/google/uuid" "gonum.org/v1/gonum/stat" "github.com/cometbft/cometbft/test/loadtime/payload" @@ -20,7 +20,7 @@ import ( type BlockStore interface { Height() int64 Base() int64 - LoadBlock(int64) *types.Block + LoadBlock(height int64) (*types.Block, *types.BlockMeta) } // DataPoint contains the set of data collected for each transaction. @@ -28,6 +28,7 @@ type DataPoint struct { Duration time.Duration BlockTime time.Time Hash []byte + Lane string } // Report contains the data calculated from reading the timestamped transactions @@ -71,7 +72,7 @@ func (rs *Reports) ErrorCount() int { return rs.errorCount } -func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, hash []byte, conns, rate, size uint64) { +func (rs *Reports) addDataPoint(id uuid.UUID, lane string, l time.Duration, bt time.Time, hash []byte, conns, rate, size uint64) { r, ok := rs.s[id] if !ok { r = Report{ @@ -84,7 +85,7 @@ func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, has } rs.s[id] = r } - r.All = append(r.All, DataPoint{Duration: l, BlockTime: bt, Hash: hash}) + r.All = append(r.All, DataPoint{Duration: l, BlockTime: bt, Hash: hash, Lane: lane}) if l > r.Max { r.Max = l } @@ -119,7 +120,6 @@ func (rs *Reports) calculateAll() { } return rs.l[i].Connections < rs.l[j].Connections }) - } func (rs *Reports) addError() { @@ -131,6 +131,7 @@ func (rs *Reports) addError() { func GenerateFromBlockStore(s BlockStore) (*Reports, error) { type payloadData struct { id uuid.UUID + lane string l time.Duration bt time.Time hash []byte @@ -167,16 +168,17 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { continue } - l := b.bt.Sub(p.Time.AsTime()) - idb := (*[16]byte)(p.Id) + l := b.bt.Sub(p.GetTime().AsTime()) + idb := (*[16]byte)(p.GetId()) pdc <- payloadData{ l: l, bt: b.bt, hash: b.tx.Hash(), id: uuid.UUID(*idb), - connections: p.Connections, - rate: p.Rate, - size: p.Size, + lane: p.GetLane(), + connections: p.GetConnections(), + rate: p.GetRate(), + size: p.GetSize(), } } }() @@ -188,7 +190,7 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { go func() { base, height := s.Base(), s.Height() - prev := s.LoadBlock(base) + prev, _ := s.LoadBlock(base) for i := base + 1; i < height; i++ { // Data from two adjacent block are used here 
simultaneously, // blocks of height H and H+1. The transactions of the block of @@ -201,7 +203,7 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { // chain contains payload transactions, those transactions will not // be used in the latency calculations because the last block whose // transactions are used is the block one before the last. - cur := s.LoadBlock(i) + cur, _ := s.LoadBlock(i) for _, tx := range prev.Data.Txs { txc <- txData{tx: tx, bt: cur.Time} } @@ -214,7 +216,7 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { reports.addError() continue } - reports.addDataPoint(pd.id, pd.l, pd.bt, pd.hash, pd.connections, pd.rate, pd.size) + reports.addDataPoint(pd.id, pd.lane, pd.l, pd.bt, pd.hash, pd.connections, pd.rate, pd.size) } reports.calculateAll() return reports, nil diff --git a/test/loadtime/report/report_test.go b/test/loadtime/report/report_test.go index a8525b42723..1d9cdecba8b 100644 --- a/test/loadtime/report/report_test.go +++ b/test/loadtime/report/report_test.go @@ -12,6 +12,10 @@ import ( "github.com/cometbft/cometbft/types" ) +const ( + testPartSize = 65536 +) + type mockBlockStore struct { base int64 blocks []*types.Block @@ -25,8 +29,14 @@ func (m *mockBlockStore) Base() int64 { return m.base } -func (m *mockBlockStore) LoadBlock(i int64) *types.Block { - return m.blocks[i-m.base] +func (m *mockBlockStore) LoadBlock(i int64) (*types.Block, *types.BlockMeta) { + block := m.blocks[i-m.base] + partSet, err := block.MakePartSet(testPartSize) + if err != nil { + panic("could not create a part set") + } + blockMeta := types.NewBlockMeta(block, partSet) + return block, blockMeta } func TestGenerateReport(t *testing.T) { diff --git a/test/test_cover.sh b/test/test_cover.sh deleted file mode 100644 index 5e13cbba24a..00000000000 --- a/test/test_cover.sh +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash - -PKGS=$(go list github.com/cometbft/cometbft/...) - -set -e - -echo "mode: atomic" > coverage.txt -for pkg in ${PKGS[@]}; do - go test -timeout 5m -race -coverprofile=profile.out -covermode=atomic "$pkg" - if [ -f profile.out ]; then - tail -n +2 profile.out >> coverage.txt; - rm profile.out - fi -done diff --git a/tests.mk b/tests.mk index ec461eb69de..b5b7ca4751f 100644 --- a/tests.mk +++ b/tests.mk @@ -5,71 +5,64 @@ BINDIR ?= $(GOPATH)/bin -## required to be run first by most tests -build_docker_test_image: - docker build -t tester -f ./test/docker/Dockerfile . 
-.PHONY: build_docker_test_image - -### coverage, app, persistence, and libs tests -test_cover: - # run the go unit tests with coverage - bash test/test_cover.sh -.PHONY: test_cover - -test_apps: - # run the app tests using bash - # requires `abci-cli` and `cometbft` binaries installed - bash test/app/test.sh +#?test_apps: Run the app tests +test_apps: install install_abci + @bash test/app/test.sh .PHONY: test_apps -test_abci_apps: - bash abci/tests/test_app/test.sh -.PHONY: test_abci_apps - -test_abci_cli: - # test the cli against the examples in the tutorial at: - # ./docs/abci-cli.md - # if test fails, update the docs ^ - @ bash abci/tests/test_cli/test.sh +#?test_abci_cli: Test the cli against the examples in the tutorial at: ./docs/abci-cli.md +# if test fails, update the docs ^ +test_abci_cli: install_abci + @bash abci/tests/test_cli/test.sh .PHONY: test_abci_cli -test_integrations: - make build_docker_test_image - make tools - make install - make install_abci - make test_cover - make test_apps - make test_abci_apps - make test_abci_cli - make test_libs +#?test_integrations: Runs all integration tests +test_integrations: test_apps test_abci_cli test_integrations_cleanup .PHONY: test_integrations +#?test_integrations_cleanup: Cleans up the test data created by test_integrations +test_integrations_cleanup: + @bash test/app/clean.sh +.PHONY: test_integrations_cleanup + test_release: @go test -tags release $(PACKAGES) .PHONY: test_release -test100: - @for i in {1..100}; do make test; done -.PHONY: test100 - -vagrant_test: - vagrant up - vagrant ssh -c 'make test_integrations' -.PHONY: vagrant_test - ### go tests test: @echo "--> Running go test" - @go test -p 1 $(PACKAGES) -tags deadlock + @go test -p 1 $(PACKAGES) -tags bls12381 .PHONY: test test_race: @echo "--> Running go test --race" - @go test -p 1 -v -race $(PACKAGES) + @go test -p 1 -race $(PACKAGES) -tags bls12381 .PHONY: test_race test_deadlock: - @echo "--> Running go test --deadlock" - @go test -p 1 -v $(PACKAGES) -tags deadlock -.PHONY: test_race + @echo "--> Running go test with deadlock support" + @go test -p 1 $(PACKAGES) -tags deadlock,bls12381 +.PHONY: test_deadlock + +# Implements test splitting and running. This is pulled directly from +# the GitHub Actions workflows for better local reproducibility. + +GO_TEST_FILES := $(shell find $(CURDIR) -name "*_test.go") + +# default to four splits +NUM_SPLIT ?= 4 + +# The format statement filters out all packages that don't have tests. +# Note we need to check for both in-package tests (.TestGoFiles) and +# out-of-package tests (.XTestGoFiles). +$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR) + go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@ + +split-test-packages:$(BUILDDIR)/packages.txt + split -d -n l/$(NUM_SPLIT) $< $<. + +# Used by the GitHub CI to run tests in parallel +test-group-%:split-test-packages + cat $(BUILDDIR)/packages.txt.$* + cat $(BUILDDIR)/packages.txt.$* | xargs go test -tags bls12381 -mod=readonly -timeout=400s -race -coverprofile=$(BUILDDIR)/$*.profile.out diff --git a/third_party/proto/gogoproto/README.md b/third_party/proto/gogoproto/README.md deleted file mode 100644 index 3852d578d3e..00000000000 --- a/third_party/proto/gogoproto/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Buf - -This file is now stored in the buf registry at [buf.build/cosmos/gogo-proto](https://buf.build/cosmos/gogo-proto).
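The `go list` template in the new tests.mk checks two distinct fields: `.TestGoFiles` (tests compiled into the package itself) and `.XTestGoFiles` (black-box tests in a separate `_test` package); checking only one would silently drop packages from the split. A small illustration with a hypothetical package `foo` — all names below are made up for the example:

```go
// --- foo/foo.go ---
package foo

func Answer() int { return answer() }

func answer() int { return 42 }

// --- foo/internal_test.go --- counted in .TestGoFiles (same package,
// may exercise unexported identifiers such as answer).
package foo

import "testing"

func TestAnswerInternal(t *testing.T) {
	if answer() != 42 {
		t.Fatal("wrong answer")
	}
}

// --- foo/external_test.go --- counted in .XTestGoFiles (package foo_test,
// sees only the exported API).
package foo_test

import (
	"testing"

	"example.com/foo" // hypothetical module path
)

func TestAnswerExternal(t *testing.T) {
	if foo.Answer() != 42 {
		t.Fatal("wrong answer")
	}
}
```

A package whose only tests live in the external `foo_test` package has an empty `.TestGoFiles`, which is exactly why the template's `or` checks both fields before emitting the import path.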
diff --git a/tools/README.md b/tools/README.md deleted file mode 100644 index 98e36c86887..00000000000 --- a/tools/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# tools - -Tools for working with CometBFT and associated technologies. -Documentation for these tools can be found online in the -[CometBFT tools documentation](https://docs.cometbft.com/main/tools/). diff --git a/tools/proto/Dockerfile b/tools/proto/Dockerfile deleted file mode 100644 index bd2d486a33f..00000000000 --- a/tools/proto/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM bufbuild/buf:latest as buf - -FROM golang:1.14-alpine3.11 as builder - -RUN apk add --update --no-cache build-base curl git upx && \ - rm -rf /var/cache/apk/* - -ENV GOLANG_PROTOBUF_VERSION=1.3.1 \ - GOGOPROTO_VERSION=1.4.1 - -RUN GO111MODULE=on go get \ - github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} \ - github.com/cosmos/gogoproto/protoc-gen-gogo@v${GOGOPROTO_VERSION} \ - github.com/cosmos/gogoproto/protoc-gen-gogofaster@v${GOGOPROTO_VERSION} && \ - mv /go/bin/protoc-gen-go* /usr/local/bin/ - - -FROM alpine:edge - -WORKDIR /work - -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories && \ - apk add --update --no-cache clang && \ - rm -rf /var/cache/apk/* - -COPY --from=builder /usr/local/bin /usr/local/bin -COPY --from=buf /usr/local/bin /usr/local/bin diff --git a/tools/tools.go b/tools/tools.go deleted file mode 100644 index 23d2366bb99..00000000000 --- a/tools/tools.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build tools - -// This file uses the recommended method for tracking developer tools in a go module. -// -// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module - -package tools - -import ( - _ "github.com/bufbuild/buf/cmd/buf" - _ "github.com/golangci/golangci-lint/cmd/golangci-lint" - _ "github.com/pointlander/peg" - _ "github.com/vektra/mockery/v2" -) diff --git a/types/block.go b/types/block.go index 51955521b98..a866a15a270 100644 --- a/types/block.go +++ b/types/block.go @@ -10,22 +10,23 @@ import ( "github.com/cosmos/gogoproto/proto" gogotypes "github.com/cosmos/gogoproto/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/crypto/tmhash" - "github.com/cometbft/cometbft/libs/bits" + "github.com/cometbft/cometbft/internal/bits" cmtbytes "github.com/cometbft/cometbft/libs/bytes" cmtmath "github.com/cometbft/cometbft/libs/math" cmtsync "github.com/cometbft/cometbft/libs/sync" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) const ( // MaxHeaderBytes is a maximum header size. // NOTE: Because app hash can be of arbitrary size, the header is therefore not - // capped in size and thus this number should be seen as a soft max + // capped in size and thus this number should be seen as a soft max. MaxHeaderBytes int64 = 626 // MaxOverheadForBlock - maximum overhead to encode a block (up to @@ -35,7 +36,7 @@ const ( // Uvarint length of MaxBlockSizeBytes: 4 bytes // 2 fields (2 embedded): 2 bytes // Uvarint length of Data.Txs: 4 bytes - // Data.Txs field: 1 byte + // Data.Txs field: 1 byte. 
MaxOverheadForBlock int64 = 11 ) @@ -43,10 +44,11 @@ const ( type Block struct { mtx cmtsync.Mutex - Header `json:"header"` - Data `json:"data"` - Evidence EvidenceData `json:"evidence"` - LastCommit *Commit `json:"last_commit"` + verifiedHash cmtbytes.HexBytes // Verified block hash (not included in the struct hash) + Header `json:"header"` + Data `json:"data"` + Evidence EvidenceData `json:"evidence"` + LastCommit *Commit `json:"last_commit"` } // ValidateBasic performs basic validation that doesn't involve state data. @@ -69,7 +71,7 @@ func (b *Block) ValidateBasic() error { return errors.New("nil LastCommit") } if err := b.LastCommit.ValidateBasic(); err != nil { - return fmt.Errorf("wrong LastCommit: %v", err) + return fmt.Errorf("wrong LastCommit: %w", err) } if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { @@ -91,7 +93,7 @@ func (b *Block) ValidateBasic() error { // NOTE: b.Evidence.Evidence may be nil, but we're just looping. for i, ev := range b.Evidence.Evidence { if err := ev.ValidateBasic(); err != nil { - return fmt.Errorf("invalid evidence (#%d): %v", i, err) + return fmt.Errorf("invalid evidence (#%d): %w", i, err) } } @@ -105,7 +107,7 @@ func (b *Block) ValidateBasic() error { return nil } -// fillHeader fills in any remaining header fields that are a function of the block data +// fillHeader fills in any remaining header fields that are a function of the block data. func (b *Block) fillHeader() { if b.LastCommitHash == nil { b.LastCommitHash = b.LastCommit.Hash() @@ -130,8 +132,13 @@ func (b *Block) Hash() cmtbytes.HexBytes { if b.LastCommit == nil { return nil } + if b.verifiedHash != nil { + return b.verifiedHash + } b.fillHeader() - return b.Header.Hash() + hash := b.Header.Hash() + b.verifiedHash = hash + return hash } // MakePartSet returns a PartSet containing parts of a serialized block. @@ -190,7 +197,7 @@ func (b *Block) String() string { // Data // Evidence // LastCommit -// Hash +// Hash. func (b *Block) StringIndented(indent string) string { if b == nil { return "nil-Block" @@ -216,7 +223,7 @@ func (b *Block) StringShort() string { return fmt.Sprintf("Block#%X", b.Hash()) } -// ToProto converts Block to protobuf +// ToProto converts Block to protobuf. func (b *Block) ToProto() (*cmtproto.Block, error) { if b == nil { return nil, errors.New("nil Block") @@ -237,7 +244,7 @@ func (b *Block) ToProto() (*cmtproto.Block, error) { return pb, nil } -// FromProto sets a protobuf Block to the given pointer. +// BlockFromProto sets a protobuf Block to the given pointer. // It returns an error if the block is invalid. func BlockFromProto(bp *cmtproto.Block) (*Block, error) { if bp == nil { @@ -270,7 +277,7 @@ func BlockFromProto(bp *cmtproto.Block) (*Block, error) { return b, b.ValidateBasic() } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // MaxDataBytes returns the maximum size of block's data. // @@ -314,7 +321,7 @@ func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { return maxDataBytes } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Header defines the structure of a CometBFT block header. 
// NOTE: changes to the Header should be duplicated in: @@ -393,15 +400,15 @@ func (h Header) ValidateBasic() error { } if err := ValidateHash(h.LastCommitHash); err != nil { - return fmt.Errorf("wrong LastCommitHash: %v", err) + return fmt.Errorf("wrong LastCommitHash: %w", err) } if err := ValidateHash(h.DataHash); err != nil { - return fmt.Errorf("wrong DataHash: %v", err) + return fmt.Errorf("wrong DataHash: %w", err) } if err := ValidateHash(h.EvidenceHash); err != nil { - return fmt.Errorf("wrong EvidenceHash: %v", err) + return fmt.Errorf("wrong EvidenceHash: %w", err) } if len(h.ProposerAddress) != crypto.AddressSize { @@ -414,17 +421,17 @@ func (h Header) ValidateBasic() error { // Basic validation of hashes related to application data. // Will validate fully against state in state#ValidateBlock. if err := ValidateHash(h.ValidatorsHash); err != nil { - return fmt.Errorf("wrong ValidatorsHash: %v", err) + return fmt.Errorf("wrong ValidatorsHash: %w", err) } if err := ValidateHash(h.NextValidatorsHash); err != nil { - return fmt.Errorf("wrong NextValidatorsHash: %v", err) + return fmt.Errorf("wrong NextValidatorsHash: %w", err) } if err := ValidateHash(h.ConsensusHash); err != nil { - return fmt.Errorf("wrong ConsensusHash: %v", err) + return fmt.Errorf("wrong ConsensusHash: %w", err) } // NOTE: AppHash is arbitrary length if err := ValidateHash(h.LastResultsHash); err != nil { - return fmt.Errorf("wrong LastResultsHash: %v", err) + return fmt.Errorf("wrong LastResultsHash: %w", err) } return nil @@ -512,7 +519,7 @@ func (h *Header) StringIndented(indent string) string { ) } -// ToProto converts Header to protobuf +// ToProto converts Header to protobuf. func (h *Header) ToProto() *cmtproto.Header { if h == nil { return nil @@ -536,7 +543,7 @@ func (h *Header) ToProto() *cmtproto.Header { } } -// FromProto sets a protobuf Header to the given pointer. +// HeaderFromProto sets a protobuf Header to the given pointer. // It returns an error if the header is invalid. func HeaderFromProto(ph *cmtproto.Header) (Header, error) { if ph == nil { @@ -554,7 +561,6 @@ func HeaderFromProto(ph *cmtproto.Header) (Header, error) { h.ChainID = ph.ChainID h.Height = ph.Height h.Time = ph.Time - h.Height = ph.Height h.LastBlockID = *bi h.ValidatorsHash = ph.ValidatorsHash h.NextValidatorsHash = ph.NextValidatorsHash @@ -569,7 +575,7 @@ func HeaderFromProto(ph *cmtproto.Header) (Header, error) { return *h, h.ValidateBasic() } -//------------------------------------- +// ------------------------------------- // BlockIDFlag indicates which BlockID the signature is for. type BlockIDFlag byte @@ -586,9 +592,14 @@ const ( const ( // Max size of commit without any commitSigs -> 82 for BlockID, 8 for Height, 4 for Round. MaxCommitOverheadBytes int64 = 94 - // Commit sig size is made up of 64 bytes for the signature, 20 bytes for the address, - // 1 byte for the flag and 14 bytes for the timestamp - MaxCommitSigBytes int64 = 109 + + // 4 bytes for field tags + 1 byte for signature LEN + 1 byte for + // validator address LEN + 1 byte for timestamp LEN. + maxCommitSigProtoEncOverhead = 4 + 1 + 1 + 1 + 3 // 3 ??? + // Commit sig size is made up of MaxSignatureSize (96) bytes for the + // signature, 20 bytes for the address, 1 byte for the flag and 14 bytes for + // the timestamp. + MaxCommitSigBytes = 131 + maxCommitSigProtoEncOverhead ) // CommitSig is a part of the Vote included in a Commit. 
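The arithmetic behind the commit-size constants introduced just above is easy to sanity-check. Under the assumptions stated in the diff's own comments (a 96-byte maximum signature, up from ed25519's 64, plus the listed per-field protobuf overheads, including the extra 3 bytes the comment itself marks as uncertain with "???"), the numbers line up as follows. This is a worked example, not code from the repository:

```go
package main

import "fmt"

func main() {
	// Raw CommitSig payload, per the comment in the diff:
	// MaxSignatureSize (96) + 20-byte address + 1-byte flag + 14-byte timestamp.
	const sigPayload = 96 + 20 + 1 + 14 // = 131, the diff's base value

	// Proto encoding overhead from the diff: 4 bytes of field tags,
	// three 1-byte LEN prefixes, and the extra 3 bytes flagged "???".
	const protoEncOverhead = 4 + 1 + 1 + 1 + 3 // = 10

	maxCommitSigBytes := sigPayload + protoEncOverhead // = 141

	// MaxCommitBytes(valCount): 94 bytes of commit overhead plus one
	// repeated-field entry (3 bytes of framing) per validator.
	valCount := 100
	maxCommitBytes := 94 + (maxCommitSigBytes+3)*valCount

	fmt.Println(sigPayload, maxCommitSigBytes, maxCommitBytes) // 131 141 14494
}
```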
@@ -600,9 +611,10 @@ type CommitSig struct { } func MaxCommitBytes(valCount int) int64 { + // 1 byte field tag + 1 byte LEN + 1 byte ??? + const protoRepeatedFieldLenOverhead int64 = 3 // From the repeated commit sig field - var protoEncodingOverhead int64 = 2 - return MaxCommitOverheadBytes + ((MaxCommitSigBytes + protoEncodingOverhead) * int64(valCount)) + return MaxCommitOverheadBytes + ((MaxCommitSigBytes + protoRepeatedFieldLenOverhead) * int64(valCount)) } // NewCommitSigAbsent returns new CommitSig with BlockIDFlagAbsent. Other @@ -613,12 +625,12 @@ func NewCommitSigAbsent() CommitSig { } } -// CommitSig returns a string representation of CommitSig. +// String returns a string representation of CommitSig. // // 1. first 6 bytes of signature // 2. first 6 bytes of validator address // 3. block ID flag -// 4. timestamp +// 4. timestamp. func (cs CommitSig) String() string { return fmt.Sprintf("CommitSig{%X by %X on %v @ %s}", cmtbytes.Fingerprint(cs.Signature), @@ -684,7 +696,7 @@ func (cs CommitSig) ValidateBasic() error { return nil } -// ToProto converts CommitSig to protobuf +// ToProto converts CommitSig to protobuf. func (cs *CommitSig) ToProto() *cmtproto.CommitSig { if cs == nil { return nil @@ -709,7 +721,7 @@ func (cs *CommitSig) FromProto(csp cmtproto.CommitSig) error { return cs.ValidateBasic() } -//------------------------------------- +// ------------------------------------- // ExtendedCommitSig contains a commit signature along with its corresponding // vote extension and vote extension signature. @@ -729,7 +741,7 @@ func NewExtendedCommitSigAbsent() ExtendedCommitSig { // // 1. commit sig // 2. first 6 bytes of vote extension -// 3. first 6 bytes of vote extension signature +// 3. first 6 bytes of vote extension signature. func (ecs ExtendedCommitSig) String() string { return fmt.Sprintf("ExtendedCommitSig{%s with %X %X}", ecs.CommitSig, @@ -760,7 +772,7 @@ func (ecs ExtendedCommitSig) ValidateBasic() error { return nil } -// EnsureExtensions validates that a vote extensions signature is present for +// EnsureExtension validates that a vote extensions signature is present for // this ExtendedCommitSig. func (ecs ExtendedCommitSig) EnsureExtension(extEnabled bool) error { if extEnabled { @@ -829,7 +841,7 @@ func (ecs *ExtendedCommitSig) FromProto(ecsp cmtproto.ExtendedCommitSig) error { return ecs.ValidateBasic() } -//------------------------------------- +// ------------------------------------- // Commit contains the evidence that a block was committed by a set of validators. // NOTE: Commit is empty for height 1, but never nil. @@ -849,6 +861,15 @@ type Commit struct { hash cmtbytes.HexBytes } +// Clone creates a deep copy of this commit. +func (commit *Commit) Clone() *Commit { + sigs := make([]CommitSig, len(commit.Signatures)) + copy(sigs, commit.Signatures) + commCopy := *commit + commCopy.Signatures = sigs + return &commCopy +} + // GetVote converts the CommitSig for the given valIdx to a Vote. Commits do // not contain vote extensions, so the vote extension and vote extension // signature will not be present in the returned vote. @@ -857,7 +878,7 @@ type Commit struct { func (commit *Commit) GetVote(valIdx int32) *Vote { commitSig := commit.Signatures[valIdx] return &Vote{ - Type: cmtproto.PrecommitType, + Type: PrecommitType, Height: commit.Height, Round: commit.Round, BlockID: commitSig.BlockID(commit.BlockID), @@ -876,7 +897,7 @@ func (commit *Commit) GetVote(valIdx int32) *Vote { // // Panics if valIdx >= commit.Size(). 
// -// See VoteSignBytes +// See VoteSignBytes. func (commit *Commit) VoteSignBytes(chainID string, valIdx int32) []byte { v := commit.GetVote(valIdx).ToProto() return VoteSignBytes(chainID, v) @@ -910,14 +931,40 @@ func (commit *Commit) ValidateBasic() error { } for i, commitSig := range commit.Signatures { if err := commitSig.ValidateBasic(); err != nil { - return fmt.Errorf("wrong CommitSig #%d: %v", i, err) + return fmt.Errorf("wrong CommitSig #%d: %w", i, err) } } } return nil } -// Hash returns the hash of the commit +// MedianTime computes the median time for a Commit based on the associated validator set. +// The median time is the weighted median of the Timestamp fields of the commit votes, +// with weights defined by the validators' voting powers. +// The BFT Time algorithm ensures that the computed median time is always picked among +// the timestamps produced by honest processes, i.e., faulty processes cannot arbitrarily +// increase or decrease the median time. +// See: https://github.com/cometbft/cometbft/blob/main/spec/consensus/bft-time.md +func (commit *Commit) MedianTime(validators *ValidatorSet) time.Time { + weightedTimes := make([]*cmttime.WeightedTime, len(commit.Signatures)) + totalVotingPower := int64(0) + + for i, commitSig := range commit.Signatures { + if commitSig.BlockIDFlag == BlockIDFlagAbsent { + continue + } + _, validator := validators.GetByAddressMut(commitSig.ValidatorAddress) + // If there's no condition, TestValidateBlockCommit panics; not needed normally. + if validator != nil { + totalVotingPower += validator.VotingPower + weightedTimes[i] = cmttime.NewWeightedTime(commitSig.Timestamp, validator.VotingPower) + } + } + + return cmttime.WeightedMedian(weightedTimes, totalVotingPower) +} + +// Hash returns the hash of the commit. func (commit *Commit) Hash() cmtbytes.HexBytes { if commit == nil { return nil @@ -982,7 +1029,7 @@ func (commit *Commit) StringIndented(indent string) string { indent, commit.hash) } -// ToProto converts Commit to protobuf +// ToProto converts Commit to protobuf. func (commit *Commit) ToProto() *cmtproto.Commit { if commit == nil { return nil @@ -1002,16 +1049,14 @@ func (commit *Commit) ToProto() *cmtproto.Commit { return c } -// FromProto sets a protobuf Commit to the given pointer. +// CommitFromProto sets a protobuf Commit to the given pointer. // It returns an error if the commit is invalid. func CommitFromProto(cp *cmtproto.Commit) (*Commit, error) { if cp == nil { return nil, errors.New("nil Commit") } - var ( - commit = new(Commit) - ) + commit := new(Commit) bi, err := BlockIDFromProto(&cp.BlockID) if err != nil { @@ -1033,7 +1078,7 @@ func CommitFromProto(cp *cmtproto.Commit) (*Commit, error) { return commit, commit.ValidateBasic() } -//------------------------------------- +// ------------------------------------- // ExtendedCommit is similar to Commit, except that its signatures also retain // their corresponding vote extensions and vote extension signatures. @@ -1060,7 +1105,7 @@ func (ec *ExtendedCommit) Clone() *ExtendedCommit { // Panics if any of the votes have invalid or absent vote extension data. // Inverse of VoteSet.MakeExtendedCommit().
func (ec *ExtendedCommit) ToExtendedVoteSet(chainID string, vals *ValidatorSet) *VoteSet { - voteSet := NewExtendedVoteSet(chainID, ec.Height, ec.Round, cmtproto.PrecommitType, vals) + voteSet := NewExtendedVoteSet(chainID, ec.Height, ec.Round, PrecommitType, vals) ec.addSigsToVoteSet(voteSet) return voteSet } @@ -1086,7 +1131,7 @@ func (ec *ExtendedCommit) addSigsToVoteSet(voteSet *VoteSet) { // Panics if signatures from the commit can't be added to the voteset. // Inverse of VoteSet.MakeCommit(). func (commit *Commit) ToVoteSet(chainID string, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, commit.Height, commit.Round, cmtproto.PrecommitType, vals) + voteSet := NewVoteSet(chainID, commit.Height, commit.Round, PrecommitType, vals) for idx, cs := range commit.Signatures { if cs.BlockIDFlag == BlockIDFlagAbsent { continue // OK, some precommits can be missing. @@ -1135,7 +1180,7 @@ func (ec *ExtendedCommit) ToCommit() *Commit { func (ec *ExtendedCommit) GetExtendedVote(valIndex int32) *Vote { ecs := ec.ExtendedSignatures[valIndex] return &Vote{ - Type: cmtproto.PrecommitType, + Type: PrecommitType, Height: ec.Height, Round: ec.Round, BlockID: ecs.BlockID(ec.BlockID), @@ -1151,7 +1196,7 @@ func (ec *ExtendedCommit) GetExtendedVote(valIndex int32) *Vote { // Type returns the vote type of the extended commit, which is always // VoteTypePrecommit // Implements VoteSetReader. -func (ec *ExtendedCommit) Type() byte { return byte(cmtproto.PrecommitType) } +func (*ExtendedCommit) Type() byte { return byte(PrecommitType) } // GetHeight returns height of the extended commit. // Implements VoteSetReader. @@ -1175,12 +1220,12 @@ func (ec *ExtendedCommit) Size() int { // Implements VoteSetReader. func (ec *ExtendedCommit) BitArray() *bits.BitArray { if ec.bitArray == nil { - ec.bitArray = bits.NewBitArray(len(ec.ExtendedSignatures)) - for i, extCommitSig := range ec.ExtendedSignatures { + initialBitFn := func(i int) bool { // TODO: need to check the BlockID otherwise we could be counting conflicts, // not just the one with +2/3 ! - ec.bitArray.SetIndex(i, extCommitSig.BlockIDFlag != BlockIDFlagAbsent) + return ec.ExtendedSignatures[i].BlockIDFlag != BlockIDFlagAbsent } + ec.bitArray = bits.NewBitArrayFromFn(len(ec.ExtendedSignatures), initialBitFn) } return ec.bitArray } @@ -1218,14 +1263,14 @@ func (ec *ExtendedCommit) ValidateBasic() error { } for i, extCommitSig := range ec.ExtendedSignatures { if err := extCommitSig.ValidateBasic(); err != nil { - return fmt.Errorf("wrong ExtendedCommitSig #%d: %v", i, err) + return fmt.Errorf("wrong ExtendedCommitSig #%d: %w", i, err) } } } return nil } -// ToProto converts ExtendedCommit to protobuf +// ToProto converts ExtendedCommit to protobuf. func (ec *ExtendedCommit) ToProto() *cmtproto.ExtendedCommit { if ec == nil { return nil @@ -1273,11 +1318,10 @@ func ExtendedCommitFromProto(ecp *cmtproto.ExtendedCommit) (*ExtendedCommit, err return extCommit, extCommit.ValidateBasic() } -//------------------------------------- +// ------------------------------------- -// Data contains the set of transactions included in the block +// Data contains the set of transactions included in the block. type Data struct { - // Txs that will be applied by state @ block.Height+1. // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. @@ -1287,7 +1331,7 @@ type Data struct { hash cmtbytes.HexBytes } -// Hash returns the hash of the data +// Hash returns the hash of the data. 
func (data *Data) Hash() cmtbytes.HexBytes { if data == nil { return (Txs{}).Hash() @@ -1318,7 +1362,7 @@ func (data *Data) StringIndented(indent string) string { indent, data.hash) } -// ToProto converts Data to protobuf +// ToProto converts Data to protobuf. func (data *Data) ToProto() cmtproto.Data { tp := new(cmtproto.Data) @@ -1354,9 +1398,9 @@ func DataFromProto(dp *cmtproto.Data) (Data, error) { return *data, nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- -// EvidenceData contains any evidence of malicious wrong-doing by validators +// EvidenceData contains a list of evidence committed by a validator. type EvidenceData struct { Evidence EvidenceList `json:"evidence"` @@ -1373,7 +1417,7 @@ func (data *EvidenceData) Hash() cmtbytes.HexBytes { return data.hash } -// ByteSize returns the total byte size of all the evidence +// ByteSize returns the total byte size of all the evidence. func (data *EvidenceData) ByteSize() int64 { if data.byteSize == 0 && len(data.Evidence) != 0 { pb, err := data.ToProto() @@ -1396,7 +1440,7 @@ func (data *EvidenceData) StringIndented(indent string) string { evStrings[i] = fmt.Sprintf("... (%v total)", len(data.Evidence)) break } - evStrings[i] = fmt.Sprintf("Evidence:%v", ev) + evStrings[i] = "Evidence:" + ev.String() } return fmt.Sprintf(`EvidenceData{ %s %v @@ -1405,7 +1449,7 @@ func (data *EvidenceData) StringIndented(indent string) string { indent, data.hash) } -// ToProto converts EvidenceData to protobuf +// ToProto converts EvidenceData to protobuf. func (data *EvidenceData) ToProto() (*cmtproto.EvidenceList, error) { if data == nil { return nil, errors.New("nil evidence data") @@ -1445,21 +1489,21 @@ func (data *EvidenceData) FromProto(eviData *cmtproto.EvidenceList) error { return nil } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- -// BlockID +// BlockID. type BlockID struct { Hash cmtbytes.HexBytes `json:"hash"` PartSetHeader PartSetHeader `json:"parts"` } -// Equals returns true if the BlockID matches the given BlockID +// Equals returns true if the BlockID matches the given BlockID. func (blockID BlockID) Equals(other BlockID) bool { return bytes.Equal(blockID.Hash, other.Hash) && blockID.PartSetHeader.Equals(other.PartSetHeader) } -// Key returns a machine-readable string representation of the BlockID +// Key returns a machine-readable string representation of the BlockID. func (blockID BlockID) Key() string { pbph := blockID.PartSetHeader.ToProto() bz, err := pbph.Marshal() @@ -1474,10 +1518,10 @@ func (blockID BlockID) Key() string { func (blockID BlockID) ValidateBasic() error { // Hash can be empty in case of POLBlockID in Proposal. if err := ValidateHash(blockID.Hash); err != nil { - return fmt.Errorf("wrong Hash") + return errors.New("wrong Hash") } if err := blockID.PartSetHeader.ValidateBasic(); err != nil { - return fmt.Errorf("wrong PartSetHeader: %v", err) + return fmt.Errorf("wrong PartSetHeader: %w", err) } return nil } @@ -1500,12 +1544,12 @@ func (blockID BlockID) IsComplete() bool { // 1. hash // 2. part set header // -// See PartSetHeader#String +// See PartSetHeader#String. func (blockID BlockID) String() string { return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartSetHeader) } -// ToProto converts BlockID to protobuf +// ToProto converts BlockID to protobuf. 
func (blockID *BlockID) ToProto() cmtproto.BlockID { if blockID == nil { return cmtproto.BlockID{} @@ -1517,7 +1561,7 @@ func (blockID *BlockID) ToProto() cmtproto.BlockID { } } -// FromProto sets a protobuf BlockID to the given pointer. +// BlockIDFromProto sets a protobuf BlockID to the given pointer. // It returns an error if the block id is invalid. func BlockIDFromProto(bID *cmtproto.BlockID) (*BlockID, error) { if bID == nil { diff --git a/types/block_meta.go b/types/block_meta.go index d66cc8f36cd..0d0e5f7d1ef 100644 --- a/types/block_meta.go +++ b/types/block_meta.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" ) // BlockMeta contains meta information. @@ -40,7 +40,7 @@ func (bm *BlockMeta) ToProto() *cmtproto.BlockMeta { return pb } -func BlockMetaFromProto(pb *cmtproto.BlockMeta) (*BlockMeta, error) { +func BlockMetaFromTrustedProto(pb *cmtproto.BlockMeta) (*BlockMeta, error) { if pb == nil { return nil, errors.New("blockmeta is empty") } @@ -62,7 +62,7 @@ func BlockMetaFromProto(pb *cmtproto.BlockMeta) (*BlockMeta, error) { bm.Header = h bm.NumTxs = int(pb.NumTxs) - return bm, bm.ValidateBasic() + return bm, nil } // ValidateBasic performs basic validation. diff --git a/types/block_meta_test.go b/types/block_meta_test.go index b557f284a50..6f907f2cd68 100644 --- a/types/block_meta_test.go +++ b/types/block_meta_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/crypto/tmhash" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func TestBlockMeta_ToProto(t *testing.T) { @@ -30,11 +30,10 @@ func TestBlockMeta_ToProto(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.testName, func(t *testing.T) { pb := tt.bm.ToProto() - bm, err := BlockMetaFromProto(pb) + bm, err := BlockMetaFromTrustedProto(pb) if !tt.expErr { require.NoError(t, err, tt.testName) @@ -49,10 +48,14 @@ func TestBlockMeta_ToProto(t *testing.T) { func TestBlockMeta_ValidateBasic(t *testing.T) { h := makeRandHeader() bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: cmtrand.Bytes(tmhash.Size)}} - bi2 := BlockID{Hash: cmtrand.Bytes(tmhash.Size), - PartSetHeader: PartSetHeader{Total: 123, Hash: cmtrand.Bytes(tmhash.Size)}} - bi3 := BlockID{Hash: []byte("incorrect hash"), - PartSetHeader: PartSetHeader{Total: 123, Hash: []byte("incorrect hash")}} + bi2 := BlockID{ + Hash: cmtrand.Bytes(tmhash.Size), + PartSetHeader: PartSetHeader{Total: 123, Hash: cmtrand.Bytes(tmhash.Size)}, + } + bi3 := BlockID{ + Hash: []byte("incorrect hash"), + PartSetHeader: PartSetHeader{Total: 123, Hash: []byte("incorrect hash")}, + } bm := &BlockMeta{ BlockID: bi, @@ -85,7 +88,6 @@ func TestBlockMeta_ValidateBasic(t *testing.T) { {"failure wrong length blockID hash", bm3, true}, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if err := tt.bm.ValidateBasic(); (err != nil) != tt.wantErr { t.Errorf("BlockMeta.ValidateBasic() error = %v, wantErr %v", err, tt.wantErr) diff --git a/types/block_test.go b/types/block_test.go index f9c97a7e840..30286ce6162 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -1,9 +1,6 @@ package types import ( - // it is ok to use math/rand here: we do not need a cryptographically secure random - // number generator here and we can run the tests a bit faster - "crypto/rand" "encoding/hex" "math" @@ -16,14 +13,13 @@ 
import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/crypto/tmhash" - "github.com/cometbft/cometbft/libs/bits" + "github.com/cometbft/cometbft/internal/bits" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/bytes" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) @@ -38,17 +34,17 @@ func TestBlockAddEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, false) - extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, time.Now(), false) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, false) + extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, cmttime.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} block := MakeBlock(h, txs, extCommit.ToCommit(), evList) require.NotNil(t, block) - require.Equal(t, 1, len(block.Evidence.Evidence)) + require.Len(t, block.Evidence.Evidence, 1) require.NotNil(t, block.EvidenceHash) } @@ -59,12 +55,12 @@ func TestBlockValidateBasic(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, false) - extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, time.Now(), false) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, false) + extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) commit := extCommit.ToCommit() - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, cmttime.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} @@ -73,17 +69,17 @@ func TestBlockValidateBasic(t *testing.T) { malleateBlock func(*Block) expErr bool }{ - {"Make Block", func(blk *Block) {}, false}, + {"Make Block", func(_ *Block) {}, false}, {"Make Block w/ proposer Addr", func(blk *Block) { blk.ProposerAddress = valSet.GetProposer().Address }, false}, {"Negative Height", func(blk *Block) { blk.Height = -1 }, true}, {"Remove 1/2 the commits", func(blk *Block) { blk.LastCommit.Signatures = commit.Signatures[:commit.Size()/2] - blk.LastCommit.hash = nil // clear hash or change wont be noticed + blk.LastCommit.hash = nil // clear hash or change won't be noticed }, true}, {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, {"Tampered Data", func(blk *Block) { blk.Data.Txs[0] = Tx("something else") - blk.Data.hash = nil // clear hash or change wont be noticed + blk.Data.hash = nil // clear hash or change won't be noticed }, true}, {"Tampered DataHash", func(blk *Block) { blk.DataHash = cmtrand.Bytes(len(blk.DataHash)) @@ -96,8 +92,6 @@ func TestBlockValidateBasic(t *testing.T) { }, true}, } for i, tc := range testCases { - tc := 
tc - i := i t.Run(tc.testName, func(t *testing.T) { block := MakeBlock(h, txs, commit, evList) block.ProposerAddress = valSet.GetProposer().Address @@ -115,7 +109,7 @@ func TestBlockHash(t *testing.T) { func TestBlockMakePartSet(t *testing.T) { bps, err := (*Block)(nil).MakePartSet(2) - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, bps) partSet, err := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).MakePartSet(1024) @@ -126,17 +120,17 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { bps, err := (*Block)(nil).MakePartSet(2) - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, bps) lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, false) - extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, time.Now(), false) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, false) + extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, cmttime.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} @@ -152,11 +146,11 @@ func TestBlockHashesTo(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, false) - extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, time.Now(), false) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, false) + extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, cmttime.Now(), vals[0], "block-test-chain") require.NoError(t, err) evList := []Evidence{ev} @@ -213,10 +207,12 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var nilBytes []byte -// This follows RFC-6962, i.e. `echo -n '' | sha256sum` -var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, +// This follows RFC-6962, i.e. `echo -n '' | sha256sum`.
+var emptyBytes = []byte{ + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, - 0x78, 0x52, 0xb8, 0x55} + 0x78, 0x52, 0xb8, 0x55, +} func TestNilHeaderHashDoesntCrash(t *testing.T) { assert.Equal(t, nilBytes, []byte((*Header)(nil).Hash())) @@ -231,13 +227,13 @@ func TestNilDataHashDoesntCrash(t *testing.T) { func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, true) - extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, time.Now(), true) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, true) + extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, cmttime.Now(), true) require.NoError(t, err) assert.Equal(t, h-1, extCommit.Height) assert.EqualValues(t, 1, extCommit.Round) - assert.Equal(t, cmtproto.PrecommitType, cmtproto.SignedMsgType(extCommit.Type())) + assert.Equal(t, PrecommitType, SignedMsgType(extCommit.Type())) if extCommit.Size() <= 0 { t.Fatalf("commit %v has a zero or negative size: %d", extCommit, extCommit.Size()) } @@ -255,15 +251,14 @@ func TestCommitValidateBasic(t *testing.T) { malleateCommit func(*Commit) expectErr bool }{ - {"Random Commit", func(com *Commit) {}, false}, + {"Random Commit", func(_ *Commit) {}, false}, {"Incorrect signature", func(com *Commit) { com.Signatures[0].Signature = []byte{0} }, false}, {"Incorrect height", func(com *Commit) { com.Height = int64(-100) }, true}, {"Incorrect round", func(com *Commit) { com.Round = -100 }, true}, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { - com := randCommit(time.Now()) + com := randCommit(cmttime.Now()) tc.malleateCommit(com) assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) @@ -312,7 +307,6 @@ func TestMaxCommitBytes(t *testing.T) { pb = commit.ToProto() assert.EqualValues(t, MaxCommitBytes(MaxVotesCount), int64(pb.Size())) - } func TestHeaderHash(t *testing.T) { @@ -356,7 +350,6 @@ func TestHeaderHash(t *testing.T) { }, nil}, } for _, tc := range testCases { - tc := tc t.Run(tc.desc, func(t *testing.T) { assert.Equal(t, tc.expectHash, tc.header.Hash()) @@ -439,7 +432,7 @@ func TestMaxHeaderBytes(t *testing.T) { func randCommit(now time.Time) *Commit { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, false) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, false) extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, now, false) if err != nil { panic(err) @@ -466,14 +459,12 @@ func TestBlockMaxDataBytes(t *testing.T) { 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, 2: {841, 1, 0, true, 0}, - 3: {842, 1, 0, false, 0}, - 4: {843, 1, 0, false, 1}, - 5: {954, 2, 0, false, 1}, - 6: {1053, 2, 100, false, 0}, + 3: {875, 1, 0, false, 0}, + 4: {876, 1, 0, false, 1}, + 5: {1019, 2, 0, false, 0}, } for i, tc := range testCases { - tc := tc if tc.panics { assert.Panics(t, func() { MaxDataBytes(tc.maxBytes, tc.evidenceBytes, tc.valsCount) @@ -497,12 +488,11 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) { 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, 2: {841, 1, true, 0}, - 3: {842, 1, false, 0}, - 4: {843, 1, false, 1}, + 3: {875, 1, false, 0}, + 4: {876, 1, false, 1}, } for i, tc := range testCases { - tc := tc if tc.panics { assert.Panics(t, func() { MaxDataBytesNoEvidence(tc.maxBytes, 
tc.valsCount) @@ -534,16 +524,15 @@ func TestVoteSetToExtendedCommit(t *testing.T) { includeExtension: true, }, } { - t.Run(testCase.name, func(t *testing.T) { blockID := makeBlockIDRandom() valSet, vals := RandValidatorSet(10, 1) var voteSet *VoteSet if testCase.includeExtension { - voteSet = NewExtendedVoteSet("test_chain_id", 3, 1, cmtproto.PrecommitType, valSet) + voteSet = NewExtendedVoteSet("test_chain_id", 3, 1, PrecommitType, valSet) } else { - voteSet = NewVoteSet("test_chain_id", 3, 1, cmtproto.PrecommitType, valSet) + voteSet = NewVoteSet("test_chain_id", 3, 1, PrecommitType, valSet) } for i := 0; i < len(vals); i++ { pubKey, err := vals[i].GetPubKey() @@ -553,12 +542,12 @@ func TestVoteSetToExtendedCommit(t *testing.T) { ValidatorIndex: int32(i), Height: 3, Round: 1, - Type: cmtproto.PrecommitType, + Type: PrecommitType, BlockID: blockID, - Timestamp: time.Now(), + Timestamp: cmttime.Now(), } v := vote.ToProto() - err = vals[i].SignVote(voteSet.ChainID(), v) + err = vals[i].SignVote(voteSet.ChainID(), v, true) require.NoError(t, err) vote.Signature = v.Signature if testCase.includeExtension { @@ -568,11 +557,11 @@ func TestVoteSetToExtendedCommit(t *testing.T) { require.NoError(t, err) require.True(t, added) } - var veHeight int64 + p := DefaultFeatureParams() if testCase.includeExtension { - veHeight = 1 + p.VoteExtensionsEnableHeight = 1 } - ec := voteSet.MakeExtendedCommit(ABCIParams{VoteExtensionsEnableHeight: veHeight}) + ec := voteSet.MakeExtendedCommit(p) for i := int32(0); int(i) < len(vals); i++ { vote1 := voteSet.GetByIndex(i) @@ -592,7 +581,7 @@ func TestVoteSetToExtendedCommit(t *testing.T) { // Panics if signatures from the ExtendedCommit can't be added to the voteset. // Inverse of VoteSet.MakeExtendedCommit(). func toVoteSet(ec *ExtendedCommit, chainID string, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, ec.Height, ec.Round, cmtproto.PrecommitType, vals) + voteSet := NewVoteSet(chainID, ec.Height, ec.Round, PrecommitType, vals) ec.addSigsToVoteSet(voteSet) return voteSet } @@ -619,9 +608,9 @@ func TestExtendedCommitToVoteSet(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, cmtproto.PrecommitType, 10, 1, true) - extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, time.Now(), true) - assert.NoError(t, err) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1, true) + extCommit, err := MakeExtCommit(lastID, h-1, 1, voteSet, vals, cmttime.Now(), true) + require.NoError(t, err) if !testCase.includeExtension { for i := 0; i < len(vals); i++ { @@ -679,7 +668,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } for _, tc := range testCases { - voteSet, valSet, vals := randVoteSet(height-1, round, cmtproto.PrecommitType, tc.numValidators, 1, false) + voteSet, valSet, vals := randVoteSet(height-1, round, PrecommitType, tc.numValidators, 1, false) vi := int32(0) for n := range tc.blockIDs { @@ -691,25 +680,26 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { ValidatorIndex: vi, Height: height - 1, Round: round, - Type: cmtproto.PrecommitType, + Type: PrecommitType, BlockID: tc.blockIDs[n], Timestamp: cmttime.Now(), } added, err := signAddVote(vals[vi], vote, voteSet) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, added) vi++ } } - veHeightParam := ABCIParams{VoteExtensionsEnableHeight: 0} + veHeightParam := DefaultFeatureParams() + veHeightParam.VoteExtensionsEnableHeight = 0 if tc.valid { extCommit := 
voteSet.MakeExtendedCommit(veHeightParam) // panics without > 2/3 valid votes assert.NotNil(t, extCommit) err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, extCommit.ToCommit()) - assert.Nil(t, err) + require.NoError(t, err) } else { assert.Panics(t, func() { voteSet.MakeExtendedCommit(veHeightParam) }) } @@ -745,7 +735,6 @@ func TestBlockIDValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { blockID := BlockID{ Hash: tc.blockIDHash, @@ -758,7 +747,7 @@ func TestBlockIDValidateBasic(t *testing.T) { func TestBlockProtoBuf(t *testing.T) { h := cmtrand.Int63() - c1 := randCommit(time.Now()) + c1 := randCommit(cmttime.Now()) b1 := MakeBlock(h, []Tx{Tx([]byte{1})}, &Commit{Signatures: []CommitSig{}}, []Evidence{}) b1.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) @@ -830,7 +819,7 @@ func TestDataProtoBuf(t *testing.T) { // TestEvidenceDataProtoBuf ensures parity in converting to and from proto. func TestEvidenceDataProtoBuf(t *testing.T) { const chainID = "mychain" - ev, err := NewMockDuplicateVoteEvidence(math.MaxInt64, time.Now(), chainID) + ev, err := NewMockDuplicateVoteEvidence(math.MaxInt64, cmttime.Now(), chainID) require.NoError(t, err) data := &EvidenceData{Evidence: EvidenceList{ev}} _ = data.ByteSize() @@ -866,7 +855,7 @@ func TestEvidenceDataProtoBuf(t *testing.T) { func makeRandHeader() Header { chainID := "test" - t := time.Now() + t := cmttime.Now() height := cmtrand.Int63() randBytes := cmtrand.Bytes(tmhash.Size) randAddress := cmtrand.Bytes(crypto.AddressSize) @@ -904,7 +893,6 @@ func TestHeaderProto(t *testing.T) { } for _, tt := range tc { - tt := tt t.Run(tt.msg, func(t *testing.T) { pb := tt.h1.ToProto() h, err := HeaderFromProto(pb) @@ -914,7 +902,6 @@ func TestHeaderProto(t *testing.T) { } else { require.Error(t, err, tt.msg) } - }) } } @@ -944,7 +931,7 @@ func TestBlockIDProtoBuf(t *testing.T) { } func TestSignedHeaderProtoBuf(t *testing.T) { - commit := randCommit(time.Now()) + commit := randCommit(cmttime.Now()) h := makeRandHeader() sh := SignedHeader{Header: &h, Commit: commit} diff --git a/types/canonical.go b/types/canonical.go index 9ba64ab0a60..d98608cfa17 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -3,16 +3,16 @@ package types import ( "time" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cmttime "github.com/cometbft/cometbft/types/time" ) // Canonical* wraps the structs in types for amino encoding them for use in SignBytes / the Signable interface. -// TimeFormat is used for generating the sigs +// TimeFormat is used for generating the sigs. const TimeFormat = time.RFC3339Nano -//----------------------------------- +// ----------------------------------- // Canonicalize the structs func CanonicalizeBlockID(bid cmtproto.BlockID) *cmtproto.CanonicalBlockID { @@ -33,18 +33,18 @@ func CanonicalizeBlockID(bid cmtproto.BlockID) *cmtproto.CanonicalBlockID { return cbid } -// CanonicalizeVote transforms the given PartSetHeader to a CanonicalPartSetHeader. +// CanonicalizePartSetHeader transforms the given PartSetHeader to a CanonicalPartSetHeader. func CanonicalizePartSetHeader(psh cmtproto.PartSetHeader) cmtproto.CanonicalPartSetHeader { return cmtproto.CanonicalPartSetHeader(psh) } -// CanonicalizeVote transforms the given Proposal to a CanonicalProposal. +// CanonicalizeProposal transforms the given Proposal to a CanonicalProposal. 
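+// The canonical form is what actually gets signed; see the SignBytes note
+// at the top of this file.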
func CanonicalizeProposal(chainID string, proposal *cmtproto.Proposal) cmtproto.CanonicalProposal { return cmtproto.CanonicalProposal{ - Type: cmtproto.ProposalType, - Height: proposal.Height, // encoded as sfixed64 - Round: int64(proposal.Round), // encoded as sfixed64 - POLRound: int64(proposal.PolRound), + Type: ProposalType, + Height: proposal.Height, // encoded as sfixed64 + Round: int64(proposal.Round), // encoded as sfixed64 + POLRound: int64(proposal.PolRound), // FIXME: not matching BlockID: CanonicalizeBlockID(proposal.BlockID), Timestamp: proposal.Timestamp, ChainID: chainID, diff --git a/types/canonical_test.go b/types/canonical_test.go index 1dc439f5856..fa8c97b6223 100644 --- a/types/canonical_test.go +++ b/types/canonical_test.go @@ -4,21 +4,29 @@ import ( "reflect" "testing" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/tmhash" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) func TestCanonicalizeBlockID(t *testing.T) { randhash := cmtrand.Bytes(tmhash.Size) - block1 := cmtproto.BlockID{Hash: randhash, - PartSetHeader: cmtproto.PartSetHeader{Total: 5, Hash: randhash}} - block2 := cmtproto.BlockID{Hash: randhash, - PartSetHeader: cmtproto.PartSetHeader{Total: 10, Hash: randhash}} - cblock1 := cmtproto.CanonicalBlockID{Hash: randhash, - PartSetHeader: cmtproto.CanonicalPartSetHeader{Total: 5, Hash: randhash}} - cblock2 := cmtproto.CanonicalBlockID{Hash: randhash, - PartSetHeader: cmtproto.CanonicalPartSetHeader{Total: 10, Hash: randhash}} + block1 := cmtproto.BlockID{ + Hash: randhash, + PartSetHeader: cmtproto.PartSetHeader{Total: 5, Hash: randhash}, + } + block2 := cmtproto.BlockID{ + Hash: randhash, + PartSetHeader: cmtproto.PartSetHeader{Total: 10, Hash: randhash}, + } + cblock1 := cmtproto.CanonicalBlockID{ + Hash: randhash, + PartSetHeader: cmtproto.CanonicalPartSetHeader{Total: 5, Hash: randhash}, + } + cblock2 := cmtproto.CanonicalBlockID{ + Hash: randhash, + PartSetHeader: cmtproto.CanonicalPartSetHeader{Total: 10, Hash: randhash}, + } tests := []struct { name string @@ -29,7 +37,6 @@ func TestCanonicalizeBlockID(t *testing.T) { {"second", block2, &cblock2}, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if got := CanonicalizeBlockID(tt.args); !reflect.DeepEqual(got, tt.want) { t.Errorf("CanonicalizeBlockID() = %v, want %v", got, tt.want) diff --git a/types/consensus_breakage_test.go b/types/consensus_breakage_test.go new file mode 100644 index 00000000000..5b62310efc5 --- /dev/null +++ b/types/consensus_breakage_test.go @@ -0,0 +1,167 @@ +package types + +import ( + "encoding/hex" + "testing" + "time" + + "github.com/stretchr/testify/require" + + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" + "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/crypto/tmhash" +) + +// Ensure validators_hash and next_validators_hash are deterministic. +func TestValidatorsHash(t *testing.T) { + vset := deterministicValidatorSet(t) + require.Equal(t, []byte{0x3a, 0x37, 0x2b, 0xdc, 0xb3, 0xb9, 0x41, 0x8f, 0x55, 0xe1, 0x32, 0x37, 0xc6, 0xf2, 0x80, 0x1a, 0x20, 0xf7, 0x9f, 0xbe, 0x5f, 0x46, 0xc7, 0xf3, 0xdb, 0x77, 0x80, 0x13, 0xd9, 0x3a, 0xe9, 0xd4}, vset.Hash()) +} + +// Ensure last_commit_hash is deterministic. 
+func TestLastCommitHash(t *testing.T) { + lastCommit := deterministicLastCommit() + require.Equal(t, []byte{0x8, 0xba, 0xdc, 0xd5, 0x36, 0x3f, 0x2e, 0xb5, 0x47, 0x91, 0x0, 0xc0, 0xa, 0xea, 0x5c, 0x20, 0xb, 0x5b, 0x81, 0x2, 0x6, 0x27, 0xe9, 0x22, 0x77, 0xff, 0x82, 0xc3, 0x1, 0x1e, 0xba, 0xb5}, lastCommit.Hash().Bytes()) +} + +// Ensure consensus_hash is deterministic. +func TestConsensusHash(t *testing.T) { + params := DefaultConsensusParams() + require.Equal(t, []byte{0x68, 0xec, 0xd6, 0xf3, 0x33, 0x11, 0x9c, 0xe4, 0x37, 0x51, 0xec, 0xe5, 0x83, 0xb9, 0x81, 0xf2, 0x35, 0x8, 0xae, 0xaf, 0x42, 0x21, 0xff, 0x58, 0x2b, 0x1b, 0xb3, 0x3b, 0xe4, 0x2b, 0xce, 0xfa}, params.Hash()) +} + +// Ensure data_hash is deterministic. +func TestDataHash(t *testing.T) { + // hash from byte slices + data := Data{ + Txs: Txs{ + []byte{0x01, 0x02, 0x03}, + }, + } + require.Equal(t, []byte{0x17, 0xfd, 0x4, 0x25, 0xd0, 0x2b, 0xac, 0x41, 0x1c, 0x75, 0x83, 0xd6, 0xa9, 0xfa, 0x75, 0x80, 0x37, 0x9a, 0x26, 0x91, 0x62, 0x9e, 0x9c, 0x1c, 0xe6, 0xc6, 0x7f, 0x89, 0x53, 0x19, 0xb, 0x99}, data.Hash().Bytes()) +} + +// Ensure evidence_hash is deterministic. +func TestEvidenceHash(t *testing.T) { + valSet := deterministicValidatorSet(t) + + // DuplicateVoteEvidence + valAddress := valSet.Validators[0].Address + dp, err := NewDuplicateVoteEvidence( + deterministicVote(1, valAddress), + deterministicVote(2, valAddress), + time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), + valSet, + ) + require.NoError(t, err) + + require.Equal(t, []byte{0x92, 0xa7, 0x6b, 0x39, 0x43, 0x37, 0xf0, 0xc0, 0x4c, 0x95, 0x15, 0x46, 0xad, 0xc7, 0x5a, 0x59, 0xcb, 0x7c, 0xae, 0x7b, 0xca, 0x7, 0xe, 0x49, 0xfc, 0x93, 0xc1, 0x11, 0x14, 0x9, 0xb5, 0xe2}, dp.Hash()) + + // LightClientAttackEvidence + lcE := LightClientAttackEvidence{ + ConflictingBlock: &LightBlock{ + SignedHeader: &SignedHeader{}, + ValidatorSet: valSet, + }, + CommonHeight: 1, + + ByzantineValidators: valSet.Validators, + TotalVotingPower: 100, + Timestamp: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), + } + require.Equal(t, []byte{0x58, 0xcc, 0x2f, 0x44, 0xd3, 0xa2, 0x78, 0x66, 0x87, 0x47, 0x1, 0xfb, 0xad, 0x57, 0x3d, 0xa9, 0xad, 0x1c, 0xfd, 0x88, 0xfa, 0x31, 0x45, 0x53, 0x1c, 0x82, 0x2f, 0x20, 0xa5, 0x8b, 0xee, 0xa1}, lcE.Hash()) + + // EvidenceList + evList := EvidenceList{dp, &lcE} + require.Equal(t, []byte{0x1, 0xe9, 0x26, 0x6a, 0xe5, 0x16, 0x4c, 0xba, 0xfe, 0x4a, 0x54, 0xdd, 0x55, 0x56, 0xee, 0xc, 0xa7, 0xb4, 0x3d, 0xa0, 0xec, 0xab, 0xb5, 0xc9, 0x35, 0x71, 0x3, 0xc8, 0x1f, 0xae, 0x77, 0xae}, evList.Hash()) +} + +// Ensure last_block_id is deterministic. +func TestLastBlockID(t *testing.T) { + lbi := BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + } + lbip := lbi.ToProto() + v, err := lbip.Marshal() + require.NoError(t, err) + require.Equal(t, []byte{0xa, 0x20, 0x8b, 0x1, 0x2, 0x33, 0x86, 0xc3, 0x71, 0x77, 0x8e, 0xcb, 0x63, 0x68, 0x57, 0x3e, 0x53, 0x9a, 0xfc, 0x3c, 0xc8, 0x60, 0xec, 0x3a, 0x2f, 0x61, 0x4e, 0x54, 0xfe, 0x56, 0x52, 0xf4, 0xfc, 0x80, 0x12, 0x26, 0x8, 0xc0, 0x84, 0x3d, 0x12, 0x20, 0x72, 0xdb, 0x3d, 0x95, 0x96, 0x35, 0xdf, 0xf1, 0xbb, 0x56, 0x7b, 0xed, 0xaa, 0x70, 0x57, 0x33, 0x92, 0xc5, 0x15, 0x96, 0x66, 0xa3, 0xf8, 0xca, 0xf1, 0x1e, 0x41, 0x3a, 0xac, 0x52, 0x20, 0x7a}, v) +} + +// Ensure version is deterministic. 
+func TestVersion(t *testing.T) { + cV := cmtversion.Consensus{ + Block: 1, + App: 2, + } + v, err := cV.Marshal() + require.NoError(t, err) + require.Equal(t, []byte{0x8, 0x1, 0x10, 0x2}, v) +} + +// It's the responsibility of the ABCI developers to ensure that app_hash +// and last_results_hash are deterministic. + +// ============================================================================= + +func deterministicValidatorSet(t *testing.T) *ValidatorSet { + t.Helper() + + pkBytes, err := hex.DecodeString("D9838D11F68AE4679BD91BC2693CDF62FAABAEA7B4290A70ED5F200B4B67881C") + require.NoError(t, err) + pk := ed25519.PubKey(pkBytes) + val := NewValidator(pk, 1) + return NewValidatorSet([]*Validator{val}) +} + +func deterministicLastCommit() *Commit { + return &Commit{ + Height: 1, + Round: 0, + BlockID: BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + Signatures: []CommitSig{ + { + BlockIDFlag: BlockIDFlagAbsent, + }, + { + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + Timestamp: time.Unix(1515151515, 0), + Signature: make([]byte, ed25519.SignatureSize), + }, + }, + } +} + +func deterministicVote(t byte, valAddress crypto.Address) *Vote { + stamp, err := time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z") + if err != nil { + panic(err) + } + + return &Vote{ + Type: SignedMsgType(t), + Height: 3, + Round: 2, + Timestamp: stamp, + BlockID: BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + ValidatorAddress: valAddress, + ValidatorIndex: 56789, + } +} diff --git a/types/encoding_helper.go b/types/encoding_helper.go index 3590ac9a252..85366d952b4 100644 --- a/types/encoding_helper.go +++ b/types/encoding_helper.go @@ -7,8 +7,8 @@ import ( ) // cdcEncode returns nil if the input is nil, otherwise returns -// proto.Marshal(Value{Value: item}) -func cdcEncode(item interface{}) []byte { +// proto.Marshal(Value{Value: item}). +func cdcEncode(item any) []byte { if item != nil && !isTypedNil(item) && !isEmpty(item) { switch item := item.(type) { case string: diff --git a/types/errors/sanity.go b/types/errors/sanity.go index e8a5cff5a87..67ee5f131bb 100644 --- a/types/errors/sanity.go +++ b/types/errors/sanity.go @@ -3,35 +3,40 @@ package errors import "fmt" type ( - // ErrNegativeField is returned every time some field which should be non-negative turns out negative + // ErrNegativeField is returned every time some field which should be non-negative turns out negative. ErrNegativeField struct { Field string } - // ErrRequiredField is returned every time a required field is not provided + // ErrRequiredField is returned every time a required field is not provided. ErrRequiredField struct { Field string } - // ErrInvalidField is returned every time a value does not pass a validity check + // ErrInvalidField is returned every time a value does not pass a validity check. ErrInvalidField struct { Field string Reason string } - // ErrWrongField is returned every time a value does not pass a validaty check, accompanied with error + // ErrWrongField is returned every time a value does not pass a validity check, accompanied by an error. ErrWrongField struct { Field string Err error } + + // ErrNegativeOrZeroField is returned every time some field which should be positive turns out negative or zero. 
+ ErrNegativeOrZeroField struct { + Field string + } ) func (e ErrNegativeField) Error() string { - return fmt.Sprintf("%s can't be negative", e.Field) + return e.Field + " can't be negative" } func (e ErrRequiredField) Error() string { - return fmt.Sprintf("%s is required", e.Field) + return e.Field + " is required" } func (e ErrInvalidField) Error() string { @@ -41,3 +46,7 @@ func (e ErrInvalidField) Error() string { func (e ErrWrongField) Error() string { return fmt.Sprintf("wrong %s: %v", e.Field, e.Err) } + +func (e ErrNegativeOrZeroField) Error() string { + return e.Field + " must be positive" +} diff --git a/types/event_bus.go b/types/event_bus.go index 4e1bf406739..bfce928d200 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -3,6 +3,7 @@ package types import ( "context" "fmt" + "strconv" "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/log" @@ -81,8 +82,8 @@ func (b *EventBus) Subscribe( return b.pubsub.Subscribe(ctx, subscriber, query, outCapacity...) } -// This method can be used for a local consensus explorer and synchronous -// testing. Do not use for for public facing / untrusted subscriptions! +// SubscribeUnbuffered can be used for a local consensus explorer and synchronous +// testing. Do not use for public facing / untrusted subscriptions! func (b *EventBus) SubscribeUnbuffered( ctx context.Context, subscriber string, @@ -109,21 +110,19 @@ func (b *EventBus) Publish(eventType string, eventData TMEventData) error { // map of stringified events where each key is composed of the event // type and each of the event's attributes keys in the form of // "{event.Type}.{attribute.Key}" and the value is each attribute's value. -func (b *EventBus) validateAndStringifyEvents(events []types.Event, logger log.Logger) map[string][]string { +func (*EventBus) validateAndStringifyEvents(events []types.Event) map[string][]string { result := make(map[string][]string) for _, event := range events { if len(event.Type) == 0 { - logger.Debug("Got an event with an empty type (skipping)", "event", event) continue } - + prefix := event.Type + "." 
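+		// Attributes are flattened into composite keys of the form
+		// "{event.Type}.{attribute.Key}", per the doc comment above.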
for _, attr := range event.Attributes { if len(attr.Key) == 0 { - logger.Debug("Got an event attribute with an empty key(skipping)", "event", event) continue } - compositeTag := fmt.Sprintf("%s.%s", event.Type, attr.Key) + compositeTag := prefix + attr.Key result[compositeTag] = append(result[compositeTag], attr.Value) } } @@ -134,8 +133,7 @@ func (b *EventBus) validateAndStringifyEvents(events []types.Event, logger log.L func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { // no explicit deadline for publishing events ctx := context.Background() - - events := b.validateAndStringifyEvents(data.ResultFinalizeBlock.Events, b.Logger.With("height", data.Block.Height)) + events := b.validateAndStringifyEvents(data.ResultFinalizeBlock.Events) // add predefined new block event events[EventTypeKey] = append(events[EventTypeKey], EventNewBlock) @@ -147,7 +145,7 @@ func (b *EventBus) PublishEventNewBlockEvents(data EventDataNewBlockEvents) erro // no explicit deadline for publishing events ctx := context.Background() - events := b.validateAndStringifyEvents(data.Events, b.Logger.With("height", data.Height)) + events := b.validateAndStringifyEvents(data.Events) // add predefined new block event events[EventTypeKey] = append(events[EventTypeKey], EventNewBlockEvents) @@ -171,6 +169,15 @@ func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { return b.Publish(EventValidBlock, data) } +func (b *EventBus) PublishEventPendingTx(data EventDataPendingTx) error { + // no explicit deadline for publishing events + ctx := context.Background() + return b.pubsub.PublishWithEvents(ctx, data, map[string][]string{ + EventTypeKey: {EventPendingTx}, + TxHashKey: {fmt.Sprintf("%X", Tx(data.Tx).Hash())}, + }) +} + // PublishEventTx publishes tx event with events from Result. Note it will add // predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys // will be overwritten. @@ -178,12 +185,12 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { // no explicit deadline for publishing events ctx := context.Background() - events := b.validateAndStringifyEvents(data.Result.Events, b.Logger.With("tx", data.Tx)) + events := b.validateAndStringifyEvents(data.Result.Events) // add predefined compositeKeys events[EventTypeKey] = append(events[EventTypeKey], EventTx) events[TxHashKey] = append(events[TxHashKey], fmt.Sprintf("%X", Tx(data.Tx).Hash())) - events[TxHeightKey] = append(events[TxHeightKey], fmt.Sprintf("%d", data.Height)) + events[TxHeightKey] = append(events[TxHeightKey], strconv.FormatInt(data.Height, 10)) return b.pubsub.PublishWithEvents(ctx, data, events) } @@ -224,14 +231,14 @@ func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd return b.Publish(EventValidatorSetUpdates, data) } -// ----------------------------------------------------------------------------- +// -----------------------------------------------------------------------------. 
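Because `PublishEventTx` above indexes every transaction under the reserved composite keys (`tm.event`, `tx.hash`, `tx.height`), subscribers can filter on them directly with the pubsub query language. A minimal sketch, assuming an already-started `EventBus`; the function name and the `"demo-subscriber"` ID are illustrative only:

```go
import (
	"context"

	cmtquery "github.com/cometbft/cometbft/libs/pubsub/query"
)

// Sketch only: subscribe to Tx events at a fixed height via the reserved
// composite keys. eb is assumed to be a started *EventBus.
func watchTxsAtHeight(eb *EventBus) error {
	query := cmtquery.MustCompile("tm.event='Tx' AND tx.height=5")
	sub, err := eb.Subscribe(context.Background(), "demo-subscriber", query)
	if err != nil {
		return err
	}
	go func() {
		for msg := range sub.Out() {
			edt := msg.Data().(EventDataTx) // one message per matching tx
			_ = edt
		}
	}()
	return nil
}
```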
type NopEventBus struct{} func (NopEventBus) Subscribe( context.Context, string, cmtpubsub.Query, - chan<- interface{}, + chan<- any, ) error { return nil } @@ -264,6 +271,10 @@ func (NopEventBus) PublishEventVote(EventDataVote) error { return nil } +func (NopEventBus) PublishEventPendingTx(EventDataPendingTx) error { + return nil +} + func (NopEventBus) PublishEventTx(EventDataTx) error { return nil } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 8b04f9fd24a..467d0fe54be 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -13,8 +13,45 @@ import ( abci "github.com/cometbft/cometbft/abci/types" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" cmtquery "github.com/cometbft/cometbft/libs/pubsub/query" + cmttime "github.com/cometbft/cometbft/types/time" ) +func TestEventBusPublishEventPendingTx(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + + tx := Tx("foo") + // PublishEventPendingTx adds 1 composite key, so the query below should work + query := fmt.Sprintf("tm.event='PendingTx' AND tx.hash='%X'", tx.Hash()) + txsSub, err := eventBus.Subscribe(context.Background(), "test", cmtquery.MustCompile(query)) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + msg := <-txsSub.Out() + edt := msg.Data().(EventDataPendingTx) + assert.EqualValues(t, tx, edt.Tx) + close(done) + }() + + err = eventBus.PublishEventPendingTx(EventDataPendingTx{ + Tx: tx, + }) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a pending transaction after 1 sec.") + } +} + func TestEventBusPublishEventTx(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() @@ -55,7 +92,7 @@ func TestEventBusPublishEventTx(t *testing.T) { Tx: tx, Result: result, }}) - assert.NoError(t, err) + require.NoError(t, err) select { case <-done: @@ -75,7 +112,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { }) block := MakeBlock(0, []Tx{}, nil, []Evidence{}) - resultFinalizeBlock := abci.ResponseFinalizeBlock{ + resultFinalizeBlock := abci.FinalizeBlockResponse{ Events: []abci.Event{ {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, }, @@ -96,7 +133,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { }() var ps *PartSet - ps, err = block.MakePartSet(MaxBlockSizeBytes) + ps, err = block.MakePartSet(BlockPartSizeBytes) require.NoError(t, err) err = eventBus.PublishEventNewBlock(EventDataNewBlock{ @@ -107,7 +144,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { }, ResultFinalizeBlock: resultFinalizeBlock, }) - assert.NoError(t, err) + require.NoError(t, err) select { case <-done: @@ -209,7 +246,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { Tx: tx, Result: result, }}) - assert.NoError(t, err) + require.NoError(t, err) select { case <-done: @@ -251,7 +288,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{ Header: block.Header, }) - assert.NoError(t, err) + require.NoError(t, err) select { case <-done: @@ -293,7 +330,7 @@ func TestEventBusPublishEventNewBlockEvents(t *testing.T) { }}, }}, }) - assert.NoError(t, err) + require.NoError(t, err) select { case <-done: @@ -312,7 +349,7 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) { } }) - ev, err := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") + ev, 
err := NewMockDuplicateVoteEvidence(1, cmttime.Now(), "test-chain-id") require.NoError(t, err) query := "tm.event='NewEvidence'" @@ -332,7 +369,7 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) { Evidence: ev, Height: 4, }) - assert.NoError(t, err) + require.NoError(t, err) select { case <-done: @@ -429,16 +466,16 @@ func BenchmarkEventBus(b *testing.B) { } for _, bm := range benchmarks { - bm := bm b.Run(bm.name, func(b *testing.B) { - benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) + benchmarkEventBus(b, bm.numClients, bm.randQueries, bm.randEvents) }) } } -func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { +func benchmarkEventBus(b *testing.B, numClients int, randQueries bool, randEvents bool) { + b.Helper() // for random* functions - rnd := rand.New(rand.NewSource(time.Now().Unix())) + rnd := rand.New(rand.NewSource(cmttime.Now().Unix())) eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache err := eventBus.Start() diff --git a/types/events.go b/types/events.go index 23afad06e9d..edcfc837465 100644 --- a/types/events.go +++ b/types/events.go @@ -20,6 +20,7 @@ const ( EventNewBlockHeader = "NewBlockHeader" EventNewBlockEvents = "NewBlockEvents" EventNewEvidence = "NewEvidence" + EventPendingTx = "PendingTx" EventTx = "Tx" EventValidatorSetUpdates = "ValidatorSetUpdates" @@ -42,7 +43,7 @@ const ( // ENCODING / DECODING // TMEventData implements events.EventData. -type TMEventData interface { +type TMEventData interface { //nolint:revive // this empty interface angers the linter // empty interface } @@ -66,7 +67,7 @@ func init() { type EventDataNewBlock struct { Block *Block `json:"block"` BlockID BlockID `json:"block_id"` - ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"` + ResultFinalizeBlock abci.FinalizeBlockResponse `json:"result_finalize_block"` } type EventDataNewBlockHeader struct { @@ -84,12 +85,17 @@ type EventDataNewEvidence struct { Evidence Evidence `json:"evidence"` } -// All txs fire EventDataTx +// All txs fire EventDataPendingTx. +type EventDataPendingTx struct { + Tx []byte `json:"tx"` +} + +// All txs fire EventDataTx. type EventDataTx struct { abci.TxResult } -// NOTE: This goes into the replay WAL +// NOTE: This goes into the replay WAL. type EventDataRoundState struct { Height int64 `json:"height"` Round int32 `json:"round"` @@ -134,11 +140,11 @@ const ( EventTypeKey = "tm.event" // TxHashKey is a reserved key, used to specify transaction's hash. - // see EventBus#PublishEventTx + // see EventBus#PublishEventTx. TxHashKey = "tx.hash" // TxHeightKey is a reserved key, used to specify transaction block's height. - // see EventBus#PublishEventTx + // see EventBus#PublishEventTx. TxHeightKey = "tx.height" // BlockHeightKey is a reserved key used for indexing FinalizeBlock events. @@ -172,16 +178,17 @@ func QueryForEvent(eventType string) cmtpubsub.Query { return cmtquery.MustCompile(fmt.Sprintf("%s='%s'", EventTypeKey, eventType)) } -// BlockEventPublisher publishes all block related events +// BlockEventPublisher publishes all block related events. 
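+// EventBus in event_bus.go satisfies this interface; NopEventBus provides a
+// no-op stand-in.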
type BlockEventPublisher interface { PublishEventNewBlock(block EventDataNewBlock) error PublishEventNewBlockHeader(header EventDataNewBlockHeader) error PublishEventNewBlockEvents(events EventDataNewBlockEvents) error PublishEventNewEvidence(evidence EventDataNewEvidence) error - PublishEventTx(EventDataTx) error - PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error + PublishEventPendingTx(tx EventDataPendingTx) error + PublishEventTx(tx EventDataTx) error + PublishEventValidatorSetUpdates(updates EventDataValidatorSetUpdates) error } type TxEventPublisher interface { - PublishEventTx(EventDataTx) error + PublishEventTx(tx EventDataTx) error } diff --git a/types/evidence.go b/types/evidence.go index 7e700ce568d..676f94bb6f5 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -10,15 +10,15 @@ import ( "time" abci "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/crypto/tmhash" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtjson "github.com/cometbft/cometbft/libs/json" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" cmterrors "github.com/cometbft/cometbft/types/errors" ) -// Evidence represents any provable malicious activity by a validator. +// Evidence represents any provable misbehavior committed by a validator. // Verification logic for each evidence is part of the evidence module. type Evidence interface { ABCI() []abci.Misbehavior // forms individual evidence to be sent to the application @@ -30,7 +30,7 @@ type Evidence interface { ValidateBasic() error // basic consistency check } -//-------------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------------- // DuplicateVoteEvidence contains evidence of a single validator signing two conflicting votes. type DuplicateVoteEvidence struct { @@ -38,16 +38,16 @@ type DuplicateVoteEvidence struct { VoteB *Vote `json:"vote_b"` // abci specific information - TotalVotingPower int64 - ValidatorPower int64 - Timestamp time.Time + TotalVotingPower int64 `json:"total_voting_power"` + ValidatorPower int64 `json:"validator_power"` + Timestamp time.Time `json:"timestamp"` } var _ Evidence = &DuplicateVoteEvidence{} // NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given // two conflicting votes. If either of the votes is nil, the val set is nil or the voter is -// not in the val set, an error is returned +// not in the val set, an error is returned. func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *ValidatorSet, ) (*DuplicateVoteEvidence, error) { var voteA, voteB *Vote @@ -78,10 +78,10 @@ func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *V }, nil } -// ABCI returns the application relevant representation of the evidence +// ABCI returns the application relevant representation of the evidence. 
func (dve *DuplicateVoteEvidence) ABCI() []abci.Misbehavior { return []abci.Misbehavior{{ - Type: abci.MisbehaviorType_DUPLICATE_VOTE, + Type: abci.MISBEHAVIOR_TYPE_DUPLICATE_VOTE, Validator: abci.Validator{ Address: dve.VoteA.ValidatorAddress, Power: dve.ValidatorPower, @@ -108,7 +108,7 @@ func (dve *DuplicateVoteEvidence) Hash() []byte { return tmhash.Sum(dve.Bytes()) } -// Height returns the height of the infraction +// Height returns the height of the infraction. func (dve *DuplicateVoteEvidence) Height() int64 { return dve.VoteA.Height } @@ -118,7 +118,7 @@ func (dve *DuplicateVoteEvidence) String() string { return fmt.Sprintf("DuplicateVoteEvidence{VoteA: %v, VoteB: %v}", dve.VoteA, dve.VoteB) } -// Time returns the time of the infraction +// Time returns the time of the infraction. func (dve *DuplicateVoteEvidence) Time() time.Time { return dve.Timestamp } @@ -145,7 +145,7 @@ func (dve *DuplicateVoteEvidence) ValidateBasic() error { return nil } -// ToProto encodes DuplicateVoteEvidence to protobuf +// ToProto encodes DuplicateVoteEvidence to protobuf. func (dve *DuplicateVoteEvidence) ToProto() *cmtproto.DuplicateVoteEvidence { voteB := dve.VoteB.ToProto() voteA := dve.VoteA.ToProto() @@ -159,7 +159,7 @@ func (dve *DuplicateVoteEvidence) ToProto() *cmtproto.DuplicateVoteEvidence { return &tp } -// DuplicateVoteEvidenceFromProto decodes protobuf into DuplicateVoteEvidence +// DuplicateVoteEvidenceFromProto decodes protobuf into DuplicateVoteEvidence. func DuplicateVoteEvidenceFromProto(pb *cmtproto.DuplicateVoteEvidence) (*DuplicateVoteEvidence, error) { if pb == nil { return nil, errors.New("nil duplicate vote evidence") @@ -200,31 +200,38 @@ func DuplicateVoteEvidenceFromProto(pb *cmtproto.DuplicateVoteEvidence) (*Duplic return dve, dve.ValidateBasic() } -//------------------------------------ LIGHT EVIDENCE -------------------------------------- +// ------------------------------------ LIGHT EVIDENCE -------------------------------------- // LightClientAttackEvidence is a generalized evidence that captures all forms of known attacks on // a light client such that a full node can verify, propose and commit the evidence on-chain for // punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation // and Amnesia. These attacks are exhaustive. You can find a more detailed overview of this at -// cometbft/docs/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md +// cometbft/docs/architecture/tendermint-core/adr-047-handling-evidence-from-light-client.md. 
type LightClientAttackEvidence struct { - ConflictingBlock *LightBlock - CommonHeight int64 + ConflictingBlock *LightBlock `json:"conflicting_block"` + CommonHeight int64 `json:"common_height"` - // abci specific information - ByzantineValidators []*Validator // validators in the validator set that misbehaved in creating the conflicting block - TotalVotingPower int64 // total voting power of the validator set at the common height - Timestamp time.Time // timestamp of the block at the common height + // ABCI specific information + + // validators in the validator set that misbehaved in creating the conflicting + // block + ByzantineValidators []*Validator `json:"byzantine_validators"` + + // total voting power of the validator set at the common height + TotalVotingPower int64 `json:"total_voting_power"` + + // timestamp of the block at the common height + Timestamp time.Time `json:"timestamp"` } var _ Evidence = &LightClientAttackEvidence{} -// ABCI forms an array of abci.Misbehavior for each byzantine validator +// ABCI forms an array of abci.Misbehavior for each byzantine validator. func (l *LightClientAttackEvidence) ABCI() []abci.Misbehavior { abciEv := make([]abci.Misbehavior, len(l.ByzantineValidators)) for idx, val := range l.ByzantineValidators { abciEv[idx] = abci.Misbehavior{ - Type: abci.MisbehaviorType_LIGHT_CLIENT_ATTACK, + Type: abci.MISBEHAVIOR_TYPE_LIGHT_CLIENT_ATTACK, Validator: TM2PB.Validator(val), Height: l.Height(), Time: l.Timestamp, @@ -234,7 +241,7 @@ func (l *LightClientAttackEvidence) ABCI() []abci.Misbehavior { return abciEv } -// Bytes returns the proto-encoded evidence as a byte array +// Bytes returns the proto-encoded evidence as a byte array. func (l *LightClientAttackEvidence) Bytes() []byte { pbe, err := l.ToProto() if err != nil { @@ -249,9 +256,10 @@ func (l *LightClientAttackEvidence) Bytes() []byte { // GetByzantineValidators finds out what style of attack LightClientAttackEvidence was and then works out who // the malicious validators were and returns them. This is used both for forming the ByzantineValidators -// field and for validating that it is correct. Validators are ordered based on validator power +// field and for validating that it is correct. Validators are ordered based on validator power. func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *ValidatorSet, - trusted *SignedHeader) []*Validator { + trusted *SignedHeader, +) []*Validator { var validators []*Validator // First check if the header is invalid. This means that it is a lunatic attack and therefore we take the // validators who are in the commonVals and voted for the lunatic header @@ -298,7 +306,7 @@ func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *Validator return validators } -// ConflictingHeaderIsInvalid takes a trusted header and matches it againt a conflicting header +// ConflictingHeaderIsInvalid takes a trusted header and matches it against a conflicting header // to determine whether the conflicting header was the product of a valid state transition // or not. If it is then all the deterministic fields of the header should be the same. // If not, it is an invalid header and constitutes a lunatic attack. 
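Restating the check described in the comment above as a condensed sketch: a conflicting header produced by a valid state transition must agree with the trusted header on every deterministically derived field, so any mismatch marks a lunatic attack. Field names follow `types.Header`; this simply mirrors the comparison `ConflictingHeaderIsInvalid` performs and is not part of the change itself:

```go
import "bytes"

// Sketch: true if the conflicting header cannot be the product of a valid
// state transition from the trusted chain (i.e. a lunatic attack).
func conflictingHeaderLooksInvalid(trusted, conflicting *Header) bool {
	return !bytes.Equal(trusted.ValidatorsHash, conflicting.ValidatorsHash) ||
		!bytes.Equal(trusted.NextValidatorsHash, conflicting.NextValidatorsHash) ||
		!bytes.Equal(trusted.ConsensusHash, conflicting.ConsensusHash) ||
		!bytes.Equal(trusted.AppHash, conflicting.AppHash) ||
		!bytes.Equal(trusted.LastResultsHash, conflicting.LastResultsHash)
}
```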
@@ -308,7 +316,6 @@ func (l *LightClientAttackEvidence) ConflictingHeaderIsInvalid(trustedHeader *He !bytes.Equal(trustedHeader.ConsensusHash, l.ConflictingBlock.ConsensusHash) || !bytes.Equal(trustedHeader.AppHash, l.ConflictingBlock.AppHash) || !bytes.Equal(trustedHeader.LastResultsHash, l.ConflictingBlock.LastResultsHash) - } // Hash returns the hash of the header and the commonHeight. This is designed to cause hash collisions @@ -318,7 +325,7 @@ func (l *LightClientAttackEvidence) ConflictingHeaderIsInvalid(trustedHeader *He // most commit signatures (captures the most byzantine validators) but anything greater than 1/3 is // sufficient. // TODO: We should change the hash to include the commit, header, total voting power, byzantine -// validators and timestamp +// validators and timestamp. func (l *LightClientAttackEvidence) Hash() []byte { buf := make([]byte, binary.MaxVarintLen64) n := binary.PutVarint(buf, l.CommonHeight) @@ -330,18 +337,18 @@ func (l *LightClientAttackEvidence) Hash() []byte { // Height returns the last height at which the primary provider and witness provider had the same header. // We use this as the height of the infraction rather than the actual conflicting header because we know -// that the malicious validators were bonded at this height which is important for evidence expiry +// that the malicious validators were bonded at this height which is important for evidence expiry. func (l *LightClientAttackEvidence) Height() int64 { return l.CommonHeight } -// String returns a string representation of LightClientAttackEvidence +// String returns a string representation of LightClientAttackEvidence. func (l *LightClientAttackEvidence) String() string { return fmt.Sprintf(`LightClientAttackEvidence{ - ConflictingBlock: %v, - CommonHeight: %d, - ByzatineValidators: %v, - TotalVotingPower: %d, + ConflictingBlock: %v, + CommonHeight: %d, + ByzantineValidators: %v, + TotalVotingPower: %d, Timestamp: %v}#%X`, l.ConflictingBlock.String(), l.CommonHeight, l.ByzantineValidators, l.TotalVotingPower, l.Timestamp, l.Hash()) @@ -386,7 +393,7 @@ func (l *LightClientAttackEvidence) ValidateBasic() error { return nil } -// ToProto encodes LightClientAttackEvidence to protobuf +// ToProto encodes LightClientAttackEvidence to protobuf. func (l *LightClientAttackEvidence) ToProto() (*cmtproto.LightClientAttackEvidence, error) { conflictingBlock, err := l.ConflictingBlock.ToProto() if err != nil { @@ -411,7 +418,7 @@ func (l *LightClientAttackEvidence) ToProto() (*cmtproto.LightClientAttackEviden }, nil } -// LightClientAttackEvidenceFromProto decodes protobuf +// LightClientAttackEvidenceFromProto decodes protobuf. func LightClientAttackEvidenceFromProto(lpb *cmtproto.LightClientAttackEvidence) (*LightClientAttackEvidence, error) { if lpb == nil { return nil, cmterrors.ErrRequiredField{Field: "light_client_attack_evidence"} @@ -442,7 +449,7 @@ func LightClientAttackEvidenceFromProto(lpb *cmtproto.LightClientAttackEvidence) return l, l.ValidateBasic() } -//------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------ // EvidenceList is a list of Evidence. Evidences is not a word. 
type EvidenceList []Evidence @@ -489,10 +496,10 @@ func (evl EvidenceList) ToABCI() []abci.Misbehavior { return el } -//------------------------------------------ PROTO -------------------------------------- +// ------------------------------------------ PROTO -------------------------------------- // EvidenceToProto is a generalized function for encoding evidence that conforms to the -// evidence interface to protobuf +// evidence interface to protobuf. func EvidenceToProto(evidence Evidence) (*cmtproto.Evidence, error) { if evidence == nil { return nil, errors.New("nil evidence") @@ -524,7 +531,7 @@ func EvidenceToProto(evidence Evidence) (*cmtproto.Evidence, error) { } // EvidenceFromProto is a generalized function for decoding protobuf into the -// evidence interface +// evidence interface. func EvidenceFromProto(evidence *cmtproto.Evidence) (Evidence, error) { if evidence == nil { return nil, errors.New("nil evidence") @@ -545,7 +552,7 @@ func init() { cmtjson.RegisterType(&LightClientAttackEvidence{}, "tendermint/LightClientAttackEvidence") } -//-------------------------------------------- ERRORS -------------------------------------- +// -------------------------------------------- ERRORS -------------------------------------- // ErrInvalidEvidence wraps a piece of evidence and the error denoting how or why it is invalid. type ErrInvalidEvidence struct { @@ -579,19 +586,22 @@ func (err *ErrEvidenceOverflow) Error() string { return fmt.Sprintf("Too much evidence: Max %d, got %d", err.Max, err.Got) } -//-------------------------------------------- MOCKING -------------------------------------- +// -------------------------------------------- MOCKING -------------------------------------- // unstable - use only for testing -// assumes the round to be 0 and the validator index to be 0 +// NewMockDuplicateVoteEvidence assumes the round to be 0 and the validator +// index to be 0. func NewMockDuplicateVoteEvidence(height int64, time time.Time, chainID string) (*DuplicateVoteEvidence, error) { val := NewMockPV() return NewMockDuplicateVoteEvidenceWithValidator(height, time, val, chainID) } -// assumes voting power to be 10 and validator to be the only one in the set +// NewMockDuplicateVoteEvidenceWithValidator assumes voting power to be 10 +// and validator to be the only one in the set. 
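+// The two mock votes share height, round, and validator but get independent
+// random BlockIDs, which is what makes them conflicting.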
func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, - pv PrivValidator, chainID string) (*DuplicateVoteEvidence, error) { + pv PrivValidator, chainID string, +) (*DuplicateVoteEvidence, error) { pubKey, err := pv.GetPubKey() if err != nil { return nil, err @@ -599,14 +609,14 @@ func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, val := NewValidator(pubKey, 10) voteA := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) vA := voteA.ToProto() - err = pv.SignVote(chainID, vA) + err = pv.SignVote(chainID, vA, false) if err != nil { return nil, err } voteA.Signature = vA.Signature voteB := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) vB := voteB.ToProto() - err = pv.SignVote(chainID, vB) + err = pv.SignVote(chainID, vB, false) if err != nil { return nil, err } @@ -615,9 +625,10 @@ func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, } func makeMockVote(height int64, round, index int32, addr Address, - blockID BlockID, time time.Time) *Vote { + blockID BlockID, time time.Time, +) *Vote { return &Vote{ - Type: cmtproto.SignedMsgType(2), + Type: SignedMsgType(2), Height: height, Round: round, BlockID: blockID, diff --git a/types/evidence_test.go b/types/evidence_test.go index 89968c248eb..551a56036d6 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -8,11 +8,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/tmhash" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmtrand "github.com/cometbft/cometbft/internal/rand" + cmtjson "github.com/cometbft/cometbft/libs/json" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) @@ -28,6 +29,7 @@ func TestEvidenceList(t *testing.T) { } func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { + t.Helper() val := NewMockPV() blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) @@ -43,11 +45,11 @@ func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { func TestDuplicateVoteEvidence(t *testing.T) { const height = int64(13) - ev, err := NewMockDuplicateVoteEvidence(height, time.Now(), "mock-chain-id") + ev, err := NewMockDuplicateVoteEvidence(height, cmttime.Now(), "mock-chain-id") require.NoError(t, err) assert.Equal(t, ev.Hash(), tmhash.Sum(ev.Bytes())) assert.NotNil(t, ev.String()) - assert.Equal(t, ev.Height(), height) + assert.Equal(t, height, ev.Height()) } func TestDuplicateVoteEvidenceValidation(t *testing.T) { @@ -61,7 +63,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { malleateEvidence func(*DuplicateVoteEvidence) expectErr bool }{ - {"Good DuplicateVoteEvidence", func(ev *DuplicateVoteEvidence) {}, false}, + {"Good DuplicateVoteEvidence", func(_ *DuplicateVoteEvidence) {}, false}, {"Nil vote A", func(ev *DuplicateVoteEvidence) { ev.VoteA = nil }, true}, {"Nil vote B", func(ev *DuplicateVoteEvidence) { ev.VoteB = nil }, true}, {"Nil votes", func(ev *DuplicateVoteEvidence) { @@ -78,7 +80,6 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { }, true}, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { 
vote1 := MakeVoteNoError(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID, defaultVoteTime) vote2 := MakeVoteNoError(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID2, defaultVoteTime) @@ -95,7 +96,7 @@ func TestLightClientAttackEvidenceBasic(t *testing.T) { height := int64(5) commonHeight := height - 1 nValidators := 10 - voteSet, valSet, privVals := randVoteSet(height, 1, cmtproto.PrecommitType, nValidators, 1, false) + voteSet, valSet, privVals := randVoteSet(height, 1, PrecommitType, nValidators, 1, false) header := makeHeaderRandom() header.Height = height blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) @@ -156,12 +157,12 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { height := int64(5) commonHeight := height - 1 nValidators := 10 - voteSet, valSet, privVals := randVoteSet(height, 1, cmtproto.PrecommitType, nValidators, 1, false) + voteSet, valSet, privVals := randVoteSet(height, 1, PrecommitType, nValidators, 1, false) header := makeHeaderRandom() header.Height = height header.ValidatorsHash = valSet.Hash() blockID := makeBlockID(header.Hash(), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - extCommit, err := MakeExtCommit(blockID, height, 1, voteSet, privVals, time.Now(), false) + extCommit, err := MakeExtCommit(blockID, height, 1, voteSet, privVals, cmttime.Now(), false) require.NoError(t, err) commit := extCommit.ToCommit() @@ -178,14 +179,14 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { Timestamp: header.Time, ByzantineValidators: valSet.Validators[:nValidators/2], } - assert.NoError(t, lcae.ValidateBasic()) + require.NoError(t, lcae.ValidateBasic()) testCases := []struct { testName string malleateEvidence func(*LightClientAttackEvidence) expectErr bool }{ - {"Good LightClientAttackEvidence", func(ev *LightClientAttackEvidence) {}, false}, + {"Good LightClientAttackEvidence", func(_ *LightClientAttackEvidence) {}, false}, {"Negative height", func(ev *LightClientAttackEvidence) { ev.CommonHeight = -10 }, true}, {"Height is greater than divergent block", func(ev *LightClientAttackEvidence) { ev.CommonHeight = height + 1 @@ -203,7 +204,6 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { }, true}, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ @@ -220,19 +220,18 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { } tc.malleateEvidence(lcae) if tc.expectErr { - assert.Error(t, lcae.ValidateBasic(), tc.testName) + require.Error(t, lcae.ValidateBasic(), tc.testName) } else { - assert.NoError(t, lcae.ValidateBasic(), tc.testName) + require.NoError(t, lcae.ValidateBasic(), tc.testName) } }) } - } func TestMockEvidenceValidateBasic(t *testing.T) { - goodEvidence, err := NewMockDuplicateVoteEvidence(int64(1), time.Now(), "mock-chain-id") + goodEvidence, err := NewMockDuplicateVoteEvidence(int64(1), cmttime.Now(), "mock-chain-id") require.NoError(t, err) - assert.Nil(t, goodEvidence.ValidateBasic()) + require.NoError(t, goodEvidence.ValidateBasic()) } func makeHeaderRandom() *Header { @@ -240,7 +239,7 @@ func makeHeaderRandom() *Header { Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 1}, ChainID: cmtrand.Str(12), Height: int64(cmtrand.Uint16()) + 1, - Time: time.Now(), + Time: cmttime.Now(), LastBlockID: makeBlockIDRandom(), LastCommitHash: crypto.CRandBytes(tmhash.Size), DataHash: 
crypto.CRandBytes(tmhash.Size), @@ -292,21 +291,40 @@ func TestEvidenceProto(t *testing.T) { {"DuplicateVoteEvidence success", &DuplicateVoteEvidence{VoteA: v2, VoteB: v}, false, false}, } for _, tt := range tests { - tt := tt t.Run(tt.testName, func(t *testing.T) { pb, err := EvidenceToProto(tt.evidence) if tt.toProtoErr { - assert.Error(t, err, tt.testName) + require.Error(t, err, tt.testName) return } - assert.NoError(t, err, tt.testName) + require.NoError(t, err, tt.testName) evi, err := EvidenceFromProto(pb) if tt.fromProtoErr { - assert.Error(t, err, tt.testName) + require.Error(t, err, tt.testName) return } require.Equal(t, tt.evidence, evi, tt.testName) }) } } + +// Test that the new JSON tags are picked up correctly, see issue #3528. +func TestDuplicateVoteEvidenceJSON(t *testing.T) { + var evidence DuplicateVoteEvidence + js, err := cmtjson.Marshal(evidence) + require.NoError(t, err) + + wantJSON := `{"type":"tendermint/DuplicateVoteEvidence","value":{"vote_a":null,"vote_b":null,"total_voting_power":"0","validator_power":"0","timestamp":"0001-01-01T00:00:00Z"}}` + assert.Equal(t, wantJSON, string(js)) +} + +// Test that the new JSON tags are picked up correctly, see issue #3528. +func TestLightClientAttackEvidenceJSON(t *testing.T) { + var evidence LightClientAttackEvidence + js, err := cmtjson.Marshal(evidence) + require.NoError(t, err) + + wantJSON := `{"type":"tendermint/LightClientAttackEvidence","value":{"conflicting_block":null,"common_height":"0","byzantine_validators":null,"total_voting_power":"0","timestamp":"0001-01-01T00:00:00Z"}}` + assert.Equal(t, wantJSON, string(js)) +} diff --git a/types/genesis.go b/types/genesis.go index d6d0b2afaa4..36bc138903f 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -6,12 +6,13 @@ import ( "errors" "fmt" "os" + "slices" "time" "github.com/cometbft/cometbft/crypto" + cmtos "github.com/cometbft/cometbft/internal/os" cmtbytes "github.com/cometbft/cometbft/libs/bytes" cmtjson "github.com/cometbft/cometbft/libs/json" - cmtos "github.com/cometbft/cometbft/libs/os" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -20,7 +21,7 @@ const ( MaxChainIDLen = 50 ) -//------------------------------------------------------------ +// ------------------------------------------------------------ // core types for a genesis definition // NOTE: any changes to the genesis definition should // be reflected in the documentation: @@ -51,10 +52,10 @@ func (genDoc *GenesisDoc) SaveAs(file string) error { if err != nil { return err } - return cmtos.WriteFile(file, genDocBytes, 0644) + return cmtos.WriteFile(file, genDocBytes, 0o644) } -// ValidatorHash returns the hash of the validator set contained in the GenesisDoc +// ValidatorHash returns the hash of the validator set contained in the GenesisDoc. func (genDoc *GenesisDoc) ValidatorHash() []byte { vals := make([]*Validator, len(genDoc.Validators)) for i, v := range genDoc.Validators { @@ -65,7 +66,7 @@ func (genDoc *GenesisDoc) ValidatorHash() []byte { } // ValidateAndComplete checks that all necessary fields are present -// and fills in defaults for optional fields left empty +// and fills in defaults for optional fields left empty. 
func (genDoc *GenesisDoc) ValidateAndComplete() error { if genDoc.ChainID == "" { return errors.New("genesis doc must include non-empty chain_id") @@ -86,6 +87,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { return err } + acceptedPubKeyTypes := genDoc.ConsensusParams.Validator.PubKeyTypes for i, v := range genDoc.Validators { if v.Power == 0 { return fmt.Errorf("the genesis file cannot contain validators with no voting power: %v", v) @@ -96,6 +98,11 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { if len(v.Address) == 0 { genDoc.Validators[i].Address = v.PubKey.Address() } + + if !slices.Contains(acceptedPubKeyTypes, v.PubKey.Type()) { + formatStr := "validator %v uses an unsupported pubkey type: %q" + return fmt.Errorf(formatStr, v, v.PubKey.Type()) + } } if genDoc.GenesisTime.IsZero() { @@ -105,7 +112,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { return nil } -//------------------------------------------------------------ +// ------------------------------------------------------------ // Make genesis state from file // GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc. @@ -113,7 +120,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { genDoc := GenesisDoc{} err := cmtjson.Unmarshal(jsonBlob, &genDoc) if err != nil { - return nil, err + return nil, fmt.Errorf("invalid json for GenesisDoc: %s", err) } if err := genDoc.ValidateAndComplete(); err != nil { diff --git a/types/genesis_test.go b/types/genesis_test.go index 65dcf0bc44e..47a8260ed41 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -48,33 +48,57 @@ func TestGenesisBad(t *testing.T) { `},"power":"10","name":""}` + `]}`, ), + // unsupported validator pubkey type + []byte( + `{ + "chain_id": "test-chain-QDKdJr", + "validators": [{ + "pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}, + "power":"10", + "name":"" + }], + "consensus_params": { + "validator": {"pub_key_types":["secp256k1"]}, + "block": {"max_bytes": "100"}, + "evidence": {"max_age_num_blocks": "100", "max_age_duration": "10"} + } + }`, + ), } - for _, testCase := range testCases { + for i, testCase := range testCases { _, err := GenesisDocFromJSON(testCase) - assert.Error(t, err, "expected error for empty genDoc json") + formatStr := "test case %d: expected error for invalid genesis doc" + require.Error(t, err, formatStr, i) } } -func TestGenesisGood(t *testing.T) { +func TestBasicGenesisDoc(t *testing.T) { // test a good one by raw json genDocBytes := []byte( `{ "genesis_time": "0001-01-01T00:00:00Z", "chain_id": "test-chain-QDKdJr", "initial_height": "1000", - "consensus_params": null, "validators": [{ "pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}, "power":"10", "name":"" }], "app_hash":"", - "app_state":{"account_owner": "Bob"} + "app_state":{"account_owner": "Bob"}, + "consensus_params": { + "synchrony": {"precision": "1", "message_delay": "10"}, + "validator": {"pub_key_types":["ed25519"]}, + "block": {"max_bytes": "100"}, + "evidence": {"max_age_num_blocks": "100", "max_age_duration": "10"}, + "feature": {"vote_extension_enable_height": "0", "pbts_enable_height": "0"} + } }`, ) + _, err := GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for good genDoc json") + require.NoError(t, err, "expected no error for good genDoc json") pubkey := ed25519.GenPrivKey().PubKey() // create a base gendoc from struct @@ -83,11 +107,11 @@ func TestGenesisGood(t
*testing.T) { Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, } genDocBytes, err = cmtjson.Marshal(baseGenDoc) - assert.NoError(t, err, "error marshaling genDoc") + require.NoError(t, err, "error marshaling genDoc") // test base gendoc and check consensus params were filled genDoc, err := GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for valid genDoc json") + require.NoError(t, err, "expected no error for valid genDoc json") assert.NotNil(t, genDoc.ConsensusParams, "expected consensus params to be filled in") // check validator's address is filled @@ -95,16 +119,16 @@ func TestGenesisGood(t *testing.T) { // create json with consensus params filled genDocBytes, err = cmtjson.Marshal(genDoc) - assert.NoError(t, err, "error marshaling genDoc") + require.NoError(t, err, "error marshaling genDoc") genDoc, err = GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for valid genDoc json") + require.NoError(t, err, "expected no error for valid genDoc json") // test with invalid consensus params genDoc.ConsensusParams.Block.MaxBytes = 0 genDocBytes, err = cmtjson.Marshal(genDoc) - assert.NoError(t, err, "error marshaling genDoc") + require.NoError(t, err, "error marshaling genDoc") _, err = GenesisDocFromJSON(genDocBytes) - assert.Error(t, err, "expected error for genDoc json with block size of 0") + require.Error(t, err, "expected error for genDoc json with block size of 0") // Genesis doc from raw json missingValidatorsTestCases := [][]byte{ @@ -116,7 +140,7 @@ func TestGenesisGood(t *testing.T) { for _, tc := range missingValidatorsTestCases { _, err := GenesisDocFromJSON(tc) - assert.NoError(t, err) + require.NoError(t, err) } } diff --git a/types/keys.go b/types/keys.go index 941e82b65b0..cb1161bf579 100644 --- a/types/keys.go +++ b/types/keys.go @@ -1,6 +1,6 @@ package types -// UNSTABLE +// UNSTABLE. var ( PeerStateKey = "ConsensusReactor.peerState" ) diff --git a/types/light.go b/types/light.go index e3ef1f63db5..946ca9d8e26 100644 --- a/types/light.go +++ b/types/light.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" ) // LightBlock is a SignedHeader and a ValidatorSet. -// It is the basis of the light client +// It is the basis of the light client. type LightBlock struct { *SignedHeader `json:"signed_header"` ValidatorSet *ValidatorSet `json:"validator_set"` @@ -17,7 +17,7 @@ type LightBlock struct { // ValidateBasic checks that the data is correct and consistent // -// This does no verification of the signatures +// This does no verification of the signatures. func (lb LightBlock) ValidateBasic(chainID string) error { if lb.SignedHeader == nil { return errors.New("missing signed header") @@ -43,7 +43,7 @@ func (lb LightBlock) ValidateBasic(chainID string) error { return nil } -// String returns a string representation of the LightBlock +// String returns a string representation of the LightBlock. func (lb LightBlock) String() string { return lb.StringIndented("") } @@ -51,7 +51,7 @@ func (lb LightBlock) String() string { // StringIndented returns an indented string representation of the LightBlock // // SignedHeader -// ValidatorSet +// ValidatorSet. 
func (lb LightBlock) StringIndented(indent string) string { return fmt.Sprintf(`LightBlock{ %s %v @@ -62,7 +62,7 @@ func (lb LightBlock) StringIndented(indent string) string { indent) } -// ToProto converts the LightBlock to protobuf +// ToProto converts the LightBlock to protobuf. func (lb *LightBlock) ToProto() (*cmtproto.LightBlock, error) { if lb == nil { return nil, nil @@ -84,7 +84,7 @@ func (lb *LightBlock) ToProto() (*cmtproto.LightBlock, error) { } // LightBlockFromProto converts from protobuf back into the Lightblock. -// An error is returned if either the validator set or signed header are invalid +// An error is returned if either the validator set or signed header are invalid. func LightBlockFromProto(pb *cmtproto.LightBlock) (*LightBlock, error) { if pb == nil { return nil, errors.New("nil light block") @@ -111,7 +111,7 @@ func LightBlockFromProto(pb *cmtproto.LightBlock) (*LightBlock, error) { return lb, nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // SignedHeader is a header along with the commits that prove it. type SignedHeader struct { @@ -169,7 +169,7 @@ func (sh SignedHeader) String() string { // StringIndented returns an indented string representation of SignedHeader. // // Header -// Commit +// Commit. func (sh SignedHeader) StringIndented(indent string) string { return fmt.Sprintf(`SignedHeader{ %s %v @@ -180,7 +180,7 @@ func (sh SignedHeader) StringIndented(indent string) string { indent) } -// ToProto converts SignedHeader to protobuf +// ToProto converts SignedHeader to protobuf. func (sh *SignedHeader) ToProto() *cmtproto.SignedHeader { if sh == nil { return nil @@ -197,7 +197,7 @@ func (sh *SignedHeader) ToProto() *cmtproto.SignedHeader { return psh } -// FromProto sets a protobuf SignedHeader to the given pointer. +// SignedHeaderFromProto sets a protobuf SignedHeader to the given pointer. // It returns an error if the header or the commit is invalid. 
func SignedHeaderFromProto(shp *cmtproto.SignedHeader) (*SignedHeader, error) { if shp == nil { diff --git a/types/light_test.go b/types/light_test.go index 82af25277f1..7ce575badc1 100644 --- a/types/light_test.go +++ b/types/light_test.go @@ -6,15 +6,17 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/crypto" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmttime "github.com/cometbft/cometbft/types/time" "github.com/cometbft/cometbft/version" ) func TestLightBlockValidateBasic(t *testing.T) { header := makeRandHeader() - commit := randCommit(time.Now()) + commit := randCommit(cmttime.Now()) vals, _ := RandValidatorSet(5, 1) header.Height = commit.Height header.LastBlockID = commit.BlockID @@ -39,7 +41,7 @@ func TestLightBlockValidateBasic(t *testing.T) { {"valid light block", sh, vals, false}, {"hashes don't match", sh, vals2, true}, {"invalid validator set", sh, vals3, true}, - {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(time.Now())}, vals, true}, + {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(cmttime.Now())}, vals, true}, } for _, tc := range testCases { @@ -49,17 +51,16 @@ func TestLightBlockValidateBasic(t *testing.T) { } err := lightBlock.ValidateBasic(header.ChainID) if tc.expectErr { - assert.Error(t, err, tc.name) + require.Error(t, err, tc.name) } else { - assert.NoError(t, err, tc.name) + require.NoError(t, err, tc.name) } } - } func TestLightBlockProtobuf(t *testing.T) { header := makeRandHeader() - commit := randCommit(time.Now()) + commit := randCommit(cmttime.Now()) vals, _ := RandValidatorSet(5, 1) header.Height = commit.Height header.LastBlockID = commit.BlockID @@ -94,24 +95,23 @@ func TestLightBlockProtobuf(t *testing.T) { } lbp, err := lightBlock.ToProto() if tc.toProtoErr { - assert.Error(t, err, tc.name) + require.Error(t, err, tc.name) } else { - assert.NoError(t, err, tc.name) + require.NoError(t, err, tc.name) } lb, err := LightBlockFromProto(lbp) if tc.toBlockErr { - assert.Error(t, err, tc.name) + require.Error(t, err, tc.name) } else { - assert.NoError(t, err, tc.name) + require.NoError(t, err, tc.name) assert.Equal(t, lightBlock, lb) } } - } func TestSignedHeaderValidateBasic(t *testing.T) { - commit := randCommit(time.Now()) + commit := randCommit(cmttime.Now()) chainID := "𠜎" timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ @@ -147,7 +147,6 @@ func TestSignedHeaderValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { sh := SignedHeader{ Header: tc.shHeader, diff --git a/types/params.go b/types/params.go index 749f07bfb6d..cfa06687525 100644 --- a/types/params.go +++ b/types/params.go @@ -3,17 +3,21 @@ package types import ( "errors" "fmt" + "math" "time" + gogo "github.com/cosmos/gogoproto/types" + + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + "github.com/cometbft/cometbft/crypto/bls12381" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/secp256k1" "github.com/cometbft/cometbft/crypto/tmhash" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) const ( // MaxBlockSizeBytes is the maximum permitted size of the blocks. - MaxBlockSizeBytes = 104857600 // 100MB + MaxBlockSizeBytes = 100 * 1024 * 1024 // BlockPartSizeBytes is the size of one block part. 
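A small aside on the params.go hunk above: MaxBlockSizeBytes is rewritten from the literal 104857600 to the expression 100 * 1024 * 1024. The two are value-identical, which a throwaway check confirms (standalone sketch, nothing assumed beyond Go constant arithmetic):

package main

import "fmt"

func main() {
	// 100 * 1024 * 1024 bytes == 104857600 bytes, i.e. 100 MiB.
	fmt.Println(100*1024*1024 == 104857600) // true
}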
BlockPartSizeBytes uint32 = 65536 // 64kB @@ -23,6 +27,7 @@ const ( ABCIPubKeyTypeEd25519 = ed25519.KeyType ABCIPubKeyTypeSecp256k1 = secp256k1.KeyType + ABCIPubKeyTypeBls12381 = bls12381.KeyType ) var ABCIPubKeyTypesToNames = map[string]string{ @@ -30,6 +35,12 @@ var ABCIPubKeyTypesToNames = map[string]string{ ABCIPubKeyTypeSecp256k1: secp256k1.PubKeyName, } +func init() { + if bls12381.Enabled { + ABCIPubKeyTypesToNames[ABCIPubKeyTypeBls12381] = bls12381.PubKeyName + } +} + // ConsensusParams contains consensus critical parameters that determine the // validity of blocks. type ConsensusParams struct { @@ -37,17 +48,17 @@ type ConsensusParams struct { Evidence EvidenceParams `json:"evidence"` Validator ValidatorParams `json:"validator"` Version VersionParams `json:"version"` - ABCI ABCIParams `json:"abci"` + Synchrony SynchronyParams `json:"synchrony"` + Feature FeatureParams `json:"feature"` } -// BlockParams define limits on the block size and gas plus minimum time -// between blocks. +// BlockParams define limits on the block size and gas. type BlockParams struct { MaxBytes int64 `json:"max_bytes"` MaxGas int64 `json:"max_gas"` } -// EvidenceParams determine how we handle evidence of malfeasance. +// EvidenceParams determine the validity of evidence of Byzantine behavior. type EvidenceParams struct { MaxAgeNumBlocks int64 `json:"max_age_num_blocks"` // only accept new evidence more recent than this MaxAgeDuration time.Duration `json:"max_age_duration"` @@ -60,26 +71,74 @@ type ValidatorParams struct { PubKeyTypes []string `json:"pub_key_types"` } +// VersionParams contain the version of specific components of CometBFT. type VersionParams struct { App uint64 `json:"app"` } -// ABCIParams configure ABCI functionality specific to the Application Blockchain -// Interface. -type ABCIParams struct { +// FeatureParams configure the height from which features of CometBFT are enabled. +// A value of 0 means the feature is disabled. A value > 0 denotes +// the height at which the feature will be (or has been) enabled. +type FeatureParams struct { VoteExtensionsEnableHeight int64 `json:"vote_extensions_enable_height"` + PbtsEnableHeight int64 `json:"pbts_enable_height"` } // VoteExtensionsEnabled returns true if vote extensions are enabled at height h // and false otherwise. -func (a ABCIParams) VoteExtensionsEnabled(h int64) bool { - if h < 1 { - panic(fmt.Errorf("cannot check if vote extensions enabled for height %d (< 1)", h)) +func (p FeatureParams) VoteExtensionsEnabled(h int64) bool { + enabledHeight := p.VoteExtensionsEnableHeight + + return featureEnabled(enabledHeight, h, "Vote Extensions") +} + +// PbtsEnabled returns true if PBTS is enabled at height h and false otherwise. +func (p FeatureParams) PbtsEnabled(h int64) bool { + enabledHeight := p.PbtsEnableHeight + + return featureEnabled(enabledHeight, h, "PBTS") +} + +// featureEnabled returns true if `enableHeight` is positive and lower than or equal to `currentHeight`. +func featureEnabled(enableHeight int64, currentHeight int64, f string) bool { + if currentHeight < 1 { + panic(fmt.Errorf("cannot check if %s is enabled for height %d (< 1)", f, currentHeight)) } - if a.VoteExtensionsEnableHeight == 0 { + + if enableHeight <= 0 { return false } - return a.VoteExtensionsEnableHeight <= h + + return enableHeight <= currentHeight +} + +// SynchronyParams determine the validity of block timestamps. +// +// These parameters are part of the Proposer-Based Timestamps (PBTS) algorithm.
+// For more information on the relationship of the synchrony parameters to +// block timestamp validity, refer to the PBTS specification: +// https://github.com/cometbft/cometbft/tree/main/spec/consensus/proposer-based-timestamp +type SynchronyParams struct { + Precision time.Duration `json:"precision,string"` + MessageDelay time.Duration `json:"message_delay,string"` +} + +// InRound ensures an exponential back-off of SynchronyParams.MessageDelay for +// block timestamp validation, as the associated proposal rounds increase. +// +// The adaptation is achieved by increasing MessageDelay by 10% for each +// subsequent round in which a proposal's timeliness is evaluated, namely: +// +// MessageDelay(round) == MessageDelay * (1.1)^round +// +// The goal is to facilitate the progression of consensus when improper synchrony +// parameters are set or become insufficient to preserve liveness. Refer to +// https://github.com/cometbft/cometbft/issues/2184 for more details. +func (sp SynchronyParams) InRound(round int32) SynchronyParams { + return SynchronyParams{ + Precision: sp.Precision, + MessageDelay: time.Duration(math.Pow(1.1, float64(round)) * float64(sp.MessageDelay)), + } } // DefaultConsensusParams returns a default ConsensusParams. @@ -89,7 +148,8 @@ func DefaultConsensusParams() *ConsensusParams { Evidence: DefaultEvidenceParams(), Validator: DefaultValidatorParams(), Version: DefaultVersionParams(), - ABCI: DefaultABCIParams(), + Feature: DefaultFeatureParams(), + Synchrony: DefaultSynchronyParams(), } } @@ -124,10 +184,20 @@ func DefaultVersionParams() VersionParams { } } -func DefaultABCIParams() ABCIParams { - return ABCIParams{ - // When set to 0, vote extensions are not required. +// DefaultFeatureParams returns a FeatureParams with all features disabled by default. +func DefaultFeatureParams() FeatureParams { + return FeatureParams{ VoteExtensionsEnableHeight: 0, + PbtsEnableHeight: 0, + } +} + +func DefaultSynchronyParams() SynchronyParams { + // Default values determined based on experimental results and on + // https://github.com/cometbft/cometbft/issues/4246 + return SynchronyParams{ + Precision: 505 * time.Millisecond, + MessageDelay: 15 * time.Second, + } } @@ -140,11 +210,11 @@ func IsValidPubkeyType(params ValidatorParams, pubkeyType string) bool { return false } -// Validate validates the ConsensusParams to ensure all values are within their +// ValidateBasic validates the ConsensusParams to ensure **all** values are within their // allowed limits, and returns an error if they are not. func (params ConsensusParams) ValidateBasic() error { if params.Block.MaxBytes == 0 { - return fmt.Errorf("block.MaxBytes cannot be 0") + return errors.New("block.MaxBytes cannot be 0") } if params.Block.MaxBytes < -1 { return fmt.Errorf("block.MaxBytes must be -1 or greater than 0. Got %d", @@ -167,7 +237,7 @@ func (params ConsensusParams) ValidateBasic() error { } if params.Evidence.MaxAgeDuration <= 0 { - return fmt.Errorf("evidence.MaxAgeDuration must be grater than 0 if provided, Got %v", + return fmt.Errorf("evidence.MaxAgeDuration must be greater than 0 if provided, Got %v", params.Evidence.MaxAgeDuration) } @@ -184,9 +254,24 @@ func (params ConsensusParams) ValidateBasic() error { return fmt.Errorf("evidence.MaxBytes must be non negative. Got: %d", params.Evidence.MaxBytes) } + if params.Feature.VoteExtensionsEnableHeight < 0 { + return fmt.Errorf("Feature.VoteExtensionsEnableHeight cannot be negative.
Got: %d", params.Feature.VoteExtensionsEnableHeight) + } - if params.ABCI.VoteExtensionsEnableHeight < 0 { - return fmt.Errorf("ABCI.VoteExtensionsEnableHeight cannot be negative. Got: %d", params.ABCI.VoteExtensionsEnableHeight) + if params.Feature.PbtsEnableHeight < 0 { + return fmt.Errorf("Feature.PbtsEnableHeight cannot be negative. Got: %d", params.Feature.PbtsEnableHeight) + } + + // Synchrony params are only relevant when PBTS is enabled + if params.Feature.PbtsEnableHeight > 0 { + if params.Synchrony.MessageDelay <= 0 { + return fmt.Errorf("synchrony.MessageDelay must be greater than 0. Got: %d", + params.Synchrony.MessageDelay) + } + if params.Synchrony.Precision <= 0 { + return fmt.Errorf("synchrony.Precision must be greater than 0. Got: %d", + params.Synchrony.Precision) + } } if len(params.Validator.PubKeyTypes) == 0 { @@ -205,27 +290,91 @@ func (params ConsensusParams) ValidateBasic() error { return nil } +// ValidateUpdate validates the updated Consensus Params +// if updated == nil, then pass. func (params ConsensusParams) ValidateUpdate(updated *cmtproto.ConsensusParams, h int64) error { - if updated.Abci == nil { + if updated == nil { + return nil + } + + var err error + // Validate feature update parameters. + if updated.Feature != nil { + err = validateUpdateFeatures(params.Feature, *updated.Feature, h) + } + return err +} + +// validateUpdateFeatures validates the updated PBTSEnableHeight. +// | r | params...EnableHeight | updated...EnableHeight | result (nil == pass) +// | 2 | * | < 0 | EnableHeight must be positive +// | 3 | <=0 | 0 | nil +// | 4 | X | X (>=0) | nil +// | 5 | > 0; <=height | 0 | Feature cannot be disabled once enabled +// | 6 | > 0; > height | 0 | nil (disable a previous proposal) +// | 7 | * | <=height | Feature cannot be updated to a past height +// | 8 | <=0 | > height (*) | nil +// | 9 | (> 0) <=height | > height (*) | Feature cannot be modified once enabled +// | 10 | (> 0) > height | > height (*) | nil +// The table above reflects all cases covered. 
+func validateUpdateFeatures(params FeatureParams, updated cmtproto.FeatureParams, h int64) error { + if updated.VoteExtensionsEnableHeight != nil { + err := validateUpdateFeatureEnableHeight(params.VoteExtensionsEnableHeight, updated.VoteExtensionsEnableHeight.Value, h, "Vote Extensions") + if err != nil { + return err + } + } + + if updated.PbtsEnableHeight != nil { + err := validateUpdateFeatureEnableHeight(params.PbtsEnableHeight, updated.PbtsEnableHeight.Value, h, "PBTS") + if err != nil { + return err + } + } + return nil +} + +func validateUpdateFeatureEnableHeight(param int64, updated int64, h int64, featureName string) error { + // 2 + if updated < 0 { + return fmt.Errorf("%s EnableHeight cannot be negative", featureName) + } + // 3 + if param <= 0 && updated == 0 { + return nil + } + // 4 (implicit: updated >= 0) + if param == updated { return nil } - if params.ABCI.VoteExtensionsEnableHeight == updated.Abci.VoteExtensionsEnableHeight { + // 5 & 6 + if param > 0 && updated == 0 { + // 5 + if param <= h { + return fmt.Errorf("%s cannot be disabled once enabled, "+ + "enabled height: %d, current height: %d", + featureName, param, h) + } + // 6 return nil } - if params.ABCI.VoteExtensionsEnableHeight != 0 && updated.Abci.VoteExtensionsEnableHeight == 0 { - return errors.New("vote extensions cannot be disabled once enabled") + // 7 (implicit: updated > 0) + if updated <= h { + return fmt.Errorf("%s cannot be updated to a past or current height, "+ + "enabled height: %d, enable height: %d, current height %d", + featureName, param, updated, h) } - if updated.Abci.VoteExtensionsEnableHeight <= h { - return fmt.Errorf("VoteExtensionsEnableHeight cannot be updated to a past height, "+ - "initial height: %d, current height %d", - params.ABCI.VoteExtensionsEnableHeight, h) + // 8 (implicit: updated > h) + if param <= 0 { + return nil } - if params.ABCI.VoteExtensionsEnableHeight <= h { - return fmt.Errorf("VoteExtensionsEnableHeight cannot be modified once"+ - "the initial height has occurred, "+ - "initial height: %d, current height %d", - params.ABCI.VoteExtensionsEnableHeight, h) + // 9 (implicit: param > 0 && updated > h) + if param <= h { + return fmt.Errorf("%s cannot be modified once enabled, "+ + "enabled height: %d, current height: %d", + featureName, param, h) } + // 10 (implicit: param > h && updated > h) return nil } @@ -254,7 +403,7 @@ func (params ConsensusParams) Hash() []byte { } // Update returns a copy of the params with updates from the non-zero fields of p2. -// NOTE: note: must not modify the original +// NOTE: must not modify the original.
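Worked numbers for the SynchronyParams.InRound back-off introduced earlier in this hunk, using the default MessageDelay of 15s that this diff adds (standalone sketch; it only reproduces the MessageDelay * 1.1^round formula):

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	base := 15 * time.Second
	for _, round := range []int32{0, 1, 5, 10} {
		d := time.Duration(math.Pow(1.1, float64(round)) * float64(base))
		fmt.Printf("round %2d: message delay = %v\n", round, d.Round(time.Millisecond))
	}
	// round  0: 15s, round  1: 16.5s, round  5: ~24.158s, round 10: ~38.906s,
	// i.e. the delay roughly follows +10% per round and about x2.6 after 10 rounds.
}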
func (params ConsensusParams) Update(params2 *cmtproto.ConsensusParams) ConsensusParams { res := params // explicit copy @@ -280,9 +429,24 @@ func (params ConsensusParams) Update(params2 *cmtproto.ConsensusParams) ConsensusParams { if params2.Version != nil { res.Version.App = params2.Version.App } - if params2.Abci != nil { - res.ABCI.VoteExtensionsEnableHeight = params2.Abci.GetVoteExtensionsEnableHeight() + if params2.Feature != nil { + if params2.Feature.VoteExtensionsEnableHeight != nil { + res.Feature.VoteExtensionsEnableHeight = params2.Feature.GetVoteExtensionsEnableHeight().Value + } + + if params2.Feature.PbtsEnableHeight != nil { + res.Feature.PbtsEnableHeight = params2.Feature.GetPbtsEnableHeight().Value + } + } + if params2.Synchrony != nil { + if params2.Synchrony.MessageDelay != nil { + res.Synchrony.MessageDelay = *params2.Synchrony.GetMessageDelay() + } + if params2.Synchrony.Precision != nil { + res.Synchrony.Precision = *params2.Synchrony.GetPrecision() + } } + return res } @@ -303,8 +467,13 @@ func (params *ConsensusParams) ToProto() cmtproto.ConsensusParams { Version: &cmtproto.VersionParams{ App: params.Version.App, }, - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: params.ABCI.VoteExtensionsEnableHeight, + Feature: &cmtproto.FeatureParams{ + PbtsEnableHeight: &gogo.Int64Value{Value: params.Feature.PbtsEnableHeight}, + VoteExtensionsEnableHeight: &gogo.Int64Value{Value: params.Feature.VoteExtensionsEnableHeight}, + }, + Synchrony: &cmtproto.SynchronyParams{ + MessageDelay: &params.Synchrony.MessageDelay, + Precision: &params.Synchrony.Precision, }, } } @@ -326,9 +495,24 @@ func ConsensusParamsFromProto(pbParams cmtproto.ConsensusParams) ConsensusParams Version: VersionParams{ App: pbParams.Version.App, }, + Feature: FeatureParams{ + VoteExtensionsEnableHeight: pbParams.GetFeature().GetVoteExtensionsEnableHeight().GetValue(), + PbtsEnableHeight: pbParams.GetFeature().GetPbtsEnableHeight().GetValue(), + }, } - if pbParams.Abci != nil { - c.ABCI.VoteExtensionsEnableHeight = pbParams.Abci.GetVoteExtensionsEnableHeight() + if pbParams.GetSynchrony().GetMessageDelay() != nil { + c.Synchrony.MessageDelay = *pbParams.GetSynchrony().GetMessageDelay() + } + if pbParams.GetSynchrony().GetPrecision() != nil { + c.Synchrony.Precision = *pbParams.GetSynchrony().GetPrecision() + } + if pbParams.GetAbci().GetVoteExtensionsEnableHeight() > 0 { //nolint: staticcheck + // Value set before the upgrade to V1. We can safely overwrite here because + // ABCIParams and FeatureParams being set is mutually exclusive (=V1).
+ if pbParams.GetFeature().GetVoteExtensionsEnableHeight().GetValue() > 0 { + panic("vote_extension_enable_height is set in two different places") + } + c.Feature.VoteExtensionsEnableHeight = pbParams.Abci.VoteExtensionsEnableHeight } return c } diff --git a/types/params_test.go b/types/params_test.go index ac4305f4835..43867b24ebb 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -6,87 +6,352 @@ import ( "testing" "time" + "github.com/cosmos/gogoproto/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" ) var ( - valEd25519 = []string{ABCIPubKeyTypeEd25519} - valSecp256k1 = []string{ABCIPubKeyTypeSecp256k1} + valEd25519 = []string{ABCIPubKeyTypeEd25519} + valSecp256k1 = []string{ABCIPubKeyTypeSecp256k1} + valEd25519AndSecp256k1 = []string{ABCIPubKeyTypeEd25519, ABCIPubKeyTypeSecp256k1} ) func TestConsensusParamsValidation(t *testing.T) { testCases := []struct { + name string params ConsensusParams valid bool }{ - // test block params - 0: {makeParams(1, 0, 2, 0, valEd25519, 0), true}, - 1: {makeParams(0, 0, 2, 0, valEd25519, 0), false}, - 2: {makeParams(47*1024*1024, 0, 2, 0, valEd25519, 0), true}, - 3: {makeParams(10, 0, 2, 0, valEd25519, 0), true}, - 4: {makeParams(100*1024*1024, 0, 2, 0, valEd25519, 0), true}, - 5: {makeParams(101*1024*1024, 0, 2, 0, valEd25519, 0), false}, - 6: {makeParams(1024*1024*1024, 0, 2, 0, valEd25519, 0), false}, - // test evidence params - 7: {makeParams(1, 0, 0, 0, valEd25519, 0), false}, - 8: {makeParams(1, 0, 2, 2, valEd25519, 0), false}, - 9: {makeParams(1000, 0, 2, 1, valEd25519, 0), true}, - 10: {makeParams(1, 0, -1, 0, valEd25519, 0), false}, - // test no pubkey type provided - 11: {makeParams(1, 0, 2, 0, []string{}, 0), false}, - // test invalid pubkey type provided - 12: {makeParams(1, 0, 2, 0, []string{"potatoes make good pubkeys"}, 0), false}, - 13: {makeParams(-1, 0, 2, 0, valEd25519, 0), true}, - 14: {makeParams(-2, 0, 2, 0, valEd25519, 0), false}, - } - for i, tc := range testCases { + // valid params + { + name: "minimal setup", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + }), + valid: true, + }, + { + name: "minimal setup, pbts enabled", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: time.Nanosecond, + pbtsHeight: 1, + }), + valid: true, + }, + { + name: "minimal setup, pbts disabled", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + // Invalid Synchrony params, but this is ok + // since PBTS is disabled. 
+ precision: 0, + messageDelay: 0, + }), + valid: true, + }, + // block params + { + name: "blockBytes set to 0", + params: makeParams(makeParamsArgs{ + blockBytes: 0, + evidenceAge: 2, + }), + valid: false, + }, + { + name: "blockBytes set to a big valid value", + params: makeParams(makeParamsArgs{ + blockBytes: 47 * 1024 * 1024, + evidenceAge: 2, + }), + valid: true, + }, + { + name: "blockBytes set to a small valid value", + params: makeParams(makeParamsArgs{ + blockBytes: 10, + evidenceAge: 2, + }), + valid: true, + }, + { + name: "blockBytes set to the biggest valid value", + params: makeParams(makeParamsArgs{ + blockBytes: 100 * 1024 * 1024, + evidenceAge: 2, + }), + valid: true, + }, + { + name: "blockBytes, biggest valid value, off-by-1", + params: makeParams(makeParamsArgs{ + blockBytes: 100*1024*1024 + 1, + evidenceAge: 2, + }), + valid: false, + }, + { + name: "blockBytes, biggest valid value, off-by-1MB", + params: makeParams(makeParamsArgs{ + blockBytes: 101 * 1024 * 1024, + evidenceAge: 2, + }), + valid: false, + }, + { + name: "blockBytes, value set to 1GB (too big)", + params: makeParams(makeParamsArgs{ + blockBytes: 1024 * 1024 * 1024, + evidenceAge: 2, + }), + valid: false, + }, + // blockBytes can be -1 + { + name: "blockBytes -1", + params: makeParams(makeParamsArgs{ + blockBytes: -1, + evidenceAge: 2, + }), + valid: true, + }, + { + name: "blockBytes -2", + params: makeParams(makeParamsArgs{ + blockBytes: -2, + evidenceAge: 2, + }), + valid: false, + }, + // evidence params + { + name: "evidenceAge 0", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 0, + maxEvidenceBytes: 0, + }), + valid: false, + }, + { + name: "evidenceAge negative", + params: makeParams(makeParamsArgs{ + blockBytes: 1 * 1024 * 1024, + evidenceAge: -1, + }), + valid: false, + }, + { + name: "maxEvidenceBytes not less than blockBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + maxEvidenceBytes: 2, + }), + valid: false, + }, + { + name: "maxEvidenceBytes less than blockBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 1000, + evidenceAge: 2, + maxEvidenceBytes: 1, + }), + valid: true, + }, + { + name: "maxEvidenceBytes 0", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 1, + maxEvidenceBytes: 0, + }), + valid: true, + }, + // pubkey params + { + name: "empty pubkeyTypes", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + pubkeyTypes: []string{}, + }), + valid: false, + }, + { + name: "bad pubkeyTypes", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + pubkeyTypes: []string{"potatoes make good pubkeys"}, + }), + valid: false, + }, + // pbts enabled, invalid synchrony params + { + name: "messageDelay 0", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: 0, + pbtsHeight: 1, + }), + valid: false, + }, + { + name: "messageDelay negative", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: -1, + pbtsHeight: 1, + }), + valid: false, + }, + { + name: "precision 0", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: 0, + messageDelay: time.Nanosecond, + pbtsHeight: 1, + }), + valid: false, + }, + { + name: "precision negative", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: -1, + messageDelay: time.Nanosecond, + pbtsHeight: 1, + }), + valid: false, + }, + // pbts enable height +
{ + name: "pbts height -1", + params: makeParams( + makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: time.Nanosecond, + pbtsHeight: -1, + }), + valid: false, + }, + { + name: "pbts disabled", + params: makeParams( + makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: time.Nanosecond, + pbtsHeight: 0, + }), + valid: true, + }, + { + name: "pbts enabled", + params: makeParams( + makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: time.Nanosecond, + pbtsHeight: 1, + }), + valid: true, + }, + { + name: "pbts from height 100", + params: makeParams( + makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: time.Nanosecond, + messageDelay: time.Nanosecond, + pbtsHeight: 100, + }), + valid: true, + }, + } + for _, tc := range testCases { if tc.valid { - assert.NoErrorf(t, tc.params.ValidateBasic(), "expected no error for valid params (#%d)", i) + require.NoErrorf(t, tc.params.ValidateBasic(), + "expected no error for valid params, test: '%s'", tc.name) } else { - assert.Errorf(t, tc.params.ValidateBasic(), "expected error for non valid params (#%d)", i) + require.Errorf(t, tc.params.ValidateBasic(), + "expected error for non valid params, test: '%s'", tc.name) } } } -func makeParams( - blockBytes, blockGas int64, - evidenceAge int64, - maxEvidenceBytes int64, - pubkeyTypes []string, - abciExtensionHeight int64, -) ConsensusParams { +type makeParamsArgs struct { + blockBytes int64 + blockGas int64 + evidenceAge int64 + maxEvidenceBytes int64 + pubkeyTypes []string + voteExtensionHeight int64 + pbtsHeight int64 + precision time.Duration + messageDelay time.Duration +} + +func makeParams(args makeParamsArgs) ConsensusParams { + if args.pubkeyTypes == nil { + args.pubkeyTypes = valEd25519 + } + return ConsensusParams{ Block: BlockParams{ - MaxBytes: blockBytes, - MaxGas: blockGas, + MaxBytes: args.blockBytes, + MaxGas: args.blockGas, }, Evidence: EvidenceParams{ - MaxAgeNumBlocks: evidenceAge, - MaxAgeDuration: time.Duration(evidenceAge), - MaxBytes: maxEvidenceBytes, + MaxAgeNumBlocks: args.evidenceAge, + MaxAgeDuration: time.Duration(args.evidenceAge), + MaxBytes: args.maxEvidenceBytes, }, Validator: ValidatorParams{ - PubKeyTypes: pubkeyTypes, + PubKeyTypes: args.pubkeyTypes, }, - ABCI: ABCIParams{ - VoteExtensionsEnableHeight: abciExtensionHeight, + Synchrony: SynchronyParams{ + Precision: args.precision, + MessageDelay: args.messageDelay, + }, + Feature: FeatureParams{ + VoteExtensionsEnableHeight: args.voteExtensionHeight, + PbtsEnableHeight: args.pbtsHeight, }, } } func TestConsensusParamsHash(t *testing.T) { params := []ConsensusParams{ - makeParams(4, 2, 3, 1, valEd25519, 0), - makeParams(1, 4, 3, 1, valEd25519, 0), - makeParams(1, 2, 4, 1, valEd25519, 0), - makeParams(2, 5, 7, 1, valEd25519, 0), - makeParams(1, 7, 6, 1, valEd25519, 0), - makeParams(9, 5, 4, 1, valEd25519, 0), - makeParams(7, 8, 9, 1, valEd25519, 0), - makeParams(4, 6, 5, 1, valEd25519, 0), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}), + 
makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), } hashes := make([][]byte, len(params)) @@ -106,20 +371,91 @@ func TestConsensusParamsHash(t *testing.T) { func TestConsensusParamsUpdate(t *testing.T) { testCases := []struct { - params ConsensusParams + initialParams ConsensusParams updates *cmtproto.ConsensusParams updatedParams ConsensusParams }{ // empty updates { - makeParams(1, 2, 3, 0, valEd25519, 0), - &cmtproto.ConsensusParams{}, - makeParams(1, 2, 3, 0, valEd25519, 0), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &cmtproto.ConsensusParams{}, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + }, + { + // update synchrony params + initialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), + updates: &cmtproto.ConsensusParams{ + Synchrony: &cmtproto.SynchronyParams{ + Precision: durationPtr(time.Second * 2), + MessageDelay: durationPtr(time.Second * 4), + }, + }, + updatedParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}), + }, + // update enable vote extensions only + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &cmtproto.ConsensusParams{ + Feature: &cmtproto.FeatureParams{ + VoteExtensionsEnableHeight: &types.Int64Value{Value: 1}, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 1}), }, + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 1, pbtsHeight: 4}), + updates: &cmtproto.ConsensusParams{ + Feature: &cmtproto.FeatureParams{ + VoteExtensionsEnableHeight: &types.Int64Value{Value: 10}, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 10, pbtsHeight: 4}), + }, + // update enabled pbts only + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &cmtproto.ConsensusParams{ + Feature: &cmtproto.FeatureParams{ + PbtsEnableHeight: &types.Int64Value{Value: 1}, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, pbtsHeight: 1}), + }, + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 4, pbtsHeight: 1}), + updates: &cmtproto.ConsensusParams{ + Feature: &cmtproto.FeatureParams{ + PbtsEnableHeight: &types.Int64Value{Value: 100}, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, pbtsHeight: 100, voteExtensionHeight: 4}), + }, + // update both pbts and vote extensions enable heights + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &cmtproto.ConsensusParams{ + Feature: &cmtproto.FeatureParams{ + VoteExtensionsEnableHeight: &types.Int64Value{Value: 1}, + PbtsEnableHeight: &types.Int64Value{Value: 1}, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 1, pbtsHeight: 1}), + }, + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 1, pbtsHeight:
1}), + updates: &cmtproto.ConsensusParams{ + Feature: &cmtproto.FeatureParams{ + VoteExtensionsEnableHeight: &types.Int64Value{Value: 10}, + PbtsEnableHeight: &types.Int64Value{Value: 100}, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, voteExtensionHeight: 10, pbtsHeight: 100}), + }, + + // fine updates { - makeParams(1, 2, 3, 0, valEd25519, 0), - &cmtproto.ConsensusParams{ + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &cmtproto.ConsensusParams{ Block: &cmtproto.BlockParams{ MaxBytes: 100, MaxGas: 200, @@ -133,17 +469,33 @@ func TestConsensusParamsUpdate(t *testing.T) { PubKeyTypes: valSecp256k1, }, }, - makeParams(100, 200, 300, 50, valSecp256k1, 0), + updatedParams: makeParams(makeParamsArgs{ + blockBytes: 100, blockGas: 200, + evidenceAge: 300, + maxEvidenceBytes: 50, + pubkeyTypes: valSecp256k1, + }), + }, + + // multiple pubkey types + { + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &cmtproto.ConsensusParams{ + Validator: &cmtproto.ValidatorParams{ + PubKeyTypes: valEd25519AndSecp256k1, + }, + }, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3, pubkeyTypes: valEd25519AndSecp256k1}), }, } for _, tc := range testCases { - assert.Equal(t, tc.updatedParams, tc.params.Update(tc.updates)) + assert.Equal(t, tc.updatedParams, tc.initialParams.Update(tc.updates)) } } func TestConsensusParamsUpdate_AppVersion(t *testing.T) { - params := makeParams(1, 2, 3, 0, valEd25519, 0) + params := makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}) assert.EqualValues(t, 0, params.Version.App) @@ -153,84 +505,207 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) { assert.EqualValues(t, 1, updated.Version.App) } -func TestConsensusParamsUpdate_VoteExtensionsEnableHeight(t *testing.T) { - t.Run("set to height but initial height already run", func(*testing.T) { - initialParams := makeParams(1, 0, 2, 0, valEd25519, 1) - update := &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 10, - }, - } - require.Error(t, initialParams.ValidateUpdate(update, 1)) - require.Error(t, initialParams.ValidateUpdate(update, 5)) - }) - t.Run("reset to 0", func(t *testing.T) { - initialParams := makeParams(1, 0, 2, 0, valEd25519, 1) - update := &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 0, - }, - } - require.Error(t, initialParams.ValidateUpdate(update, 1)) - }) - t.Run("set to height before current height run", func(*testing.T) { - initialParams := makeParams(1, 0, 2, 0, valEd25519, 100) - update := &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 10, - }, - } - require.Error(t, initialParams.ValidateUpdate(update, 11)) - require.Error(t, initialParams.ValidateUpdate(update, 99)) - }) - t.Run("set to height after current height run", func(*testing.T) { - initialParams := makeParams(1, 0, 2, 0, valEd25519, 300) - update := &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 99, - }, - } - require.NoError(t, initialParams.ValidateUpdate(update, 11)) - require.NoError(t, initialParams.ValidateUpdate(update, 98)) - }) - t.Run("no error when unchanged", func(*testing.T) { - initialParams := makeParams(1, 0, 2, 0, valEd25519, 100) - update := &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 100, - }, - } - require.NoError(t,
initialParams.ValidateUpdate(update, 500)) - }) - t.Run("updated from 0 to 0", func(t *testing.T) { - initialParams := makeParams(1, 0, 2, 0, valEd25519, 0) - update := &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 0, - }, - } - require.NoError(t, initialParams.ValidateUpdate(update, 100)) - }) +func TestConsensusParamsUpdate_EnableHeight(t *testing.T) { + const nilTest = -10000000 + testCases := []struct { + name string + current int64 + from int64 + to int64 + expectedErr bool + }{ + {"no change: 3, 0 -> 0", 3, 0, 0, false}, + {"no change: 3, 100 -> 100", 3, 100, 100, false}, + {"no change: 100, 100 -> 100", 100, 100, 100, false}, + {"no change: 300, 100 -> 100", 300, 100, 100, false}, + {"first time: 4, 0 -> 5", 4, 0, 5, false}, + {"first time: 3, 0 -> 5", 3, 0, 5, false}, + {"first time: 5, 0 -> 5", 5, 0, 5, true}, + {"first time: 6, 0 -> 5", 6, 0, 5, true}, + {"first time: 50, 0 -> 5", 50, 0, 5, true}, + {"reset to 0: 4, 5 -> 0", 4, 5, 0, false}, + {"reset to 0: 5, 5 -> 0", 5, 5, 0, true}, + {"reset to 0: 6, 5 -> 0", 6, 5, 0, true}, + {"reset to 0: 10, 5 -> 0", 10, 5, 0, true}, + {"modify backwards: 1, 10 -> 5", 1, 10, 5, false}, + {"modify backwards: 4, 10 -> 5", 4, 10, 5, false}, + {"modify backwards: 5, 10 -> 5", 5, 10, 5, true}, + {"modify backwards: 6, 10 -> 5", 6, 10, 5, true}, + {"modify backwards: 9, 10 -> 5", 9, 10, 5, true}, + {"modify backwards: 10, 10 -> 5", 10, 10, 5, true}, + {"modify backwards: 11, 10 -> 5", 11, 10, 5, true}, + {"modify backwards: 100, 10 -> 5", 100, 10, 5, true}, + {"modify forward: 3, 10 -> 15", 3, 10, 15, false}, + {"modify forward: 9, 10 -> 15", 9, 10, 15, false}, + {"modify forward: 10, 10 -> 15", 10, 10, 15, true}, + {"modify forward: 11, 10 -> 15", 11, 10, 15, true}, + {"modify forward: 14, 10 -> 15", 14, 10, 15, true}, + {"modify forward: 15, 10 -> 15", 15, 10, 15, true}, + {"modify forward: 16, 10 -> 15", 16, 10, 15, true}, + {"modify forward: 100, 10 -> 15", 100, 10, 15, true}, + {"set to negative value: 3, 0 -> -5", 3, 0, -5, true}, + {"set to negative value: 3, -5 -> 100", 3, -5, 100, false}, + {"set to negative value: 3, -10 -> 3", 3, -10, 3, true}, + {"set to negative value: 3, -3 -> -3", 3, -3, -3, true}, + {"set to negative value: 100, -8 -> -9", 100, -8, -9, true}, + {"set to negative value: 300, -10 -> -8", 300, -10, -8, true}, + {"nil: 300, 400 -> nil", 300, 400, nilTest, false}, + {"nil: 300, 200 -> nil", 300, 200, nilTest, false}, + } + + // Test VoteExtensions enabling + for _, tc := range testCases { + t.Run(tc.name+" VE", func(*testing.T) { + initialParams := makeParams(makeParamsArgs{ + voteExtensionHeight: tc.from, + }) + update := &cmtproto.ConsensusParams{Feature: &cmtproto.FeatureParams{}} + if tc.to == nilTest { + update.Feature.VoteExtensionsEnableHeight = nil + } else { + update.Feature = &cmtproto.FeatureParams{ + VoteExtensionsEnableHeight: &types.Int64Value{Value: tc.to}, + } + } + if tc.expectedErr { + require.Error(t, initialParams.ValidateUpdate(update, tc.current)) + } else { + require.NoError(t, initialParams.ValidateUpdate(update, tc.current)) + } + }) + } + + // Test PBTS enabling + for _, tc := range testCases { + t.Run(tc.name+" PBTS", func(*testing.T) { + initialParams := makeParams(makeParamsArgs{ + pbtsHeight: tc.from, + }) + update := &cmtproto.ConsensusParams{Feature: &cmtproto.FeatureParams{}} + if tc.to == nilTest { + update.Feature.PbtsEnableHeight = nil + } else { + update.Feature =
&cmtproto.FeatureParams{ + PbtsEnableHeight: &types.Int64Value{Value: tc.to}, + } + } + if tc.expectedErr { + require.Error(t, initialParams.ValidateUpdate(update, tc.current)) + } else { + require.NoError(t, initialParams.ValidateUpdate(update, tc.current)) + } + }) + } + + // Test PBTS and VE enabling + for _, tc := range testCases { + t.Run(tc.name+"VE PBTS", func(*testing.T) { + initialParams := makeParams(makeParamsArgs{ + voteExtensionHeight: tc.from, + pbtsHeight: tc.from, + }) + update := &cmtproto.ConsensusParams{Feature: &cmtproto.FeatureParams{}} + if tc.to == nilTest { + update.Feature.VoteExtensionsEnableHeight = nil + update.Feature.PbtsEnableHeight = nil + } else { + update.Feature = &cmtproto.FeatureParams{ + VoteExtensionsEnableHeight: &types.Int64Value{Value: tc.to}, + PbtsEnableHeight: &types.Int64Value{Value: tc.to}, + } + } + if tc.expectedErr { + require.Error(t, initialParams.ValidateUpdate(update, tc.current)) + } else { + require.NoError(t, initialParams.ValidateUpdate(update, tc.current)) + } + }) + } +} + +func consensusParamsForTestProto() []ConsensusParams { + return []ConsensusParams{ + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1, pbtsHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1, voteExtensionHeight: 1, pbtsHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1, pbtsHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1, voteExtensionHeight: 1, pbtsHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1, pbtsHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1, voteExtensionHeight: 1, pbtsHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1, voteExtensionHeight: 1}), + makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), + makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), + makeParams(makeParamsArgs{voteExtensionHeight: 100}), + makeParams(makeParamsArgs{pbtsHeight: 100}), + makeParams(makeParamsArgs{voteExtensionHeight: 100, pbtsHeight: 42}), + makeParams(makeParamsArgs{pbtsHeight: 100}), + } } func TestProto(t *testing.T) { - params := []ConsensusParams{ - makeParams(4, 2, 3, 1, valEd25519, 1), - makeParams(1, 4, 3, 1, valEd25519, 1), - makeParams(1, 2, 4, 1, valEd25519, 1), - makeParams(2, 5, 7, 1, valEd25519, 1), - makeParams(1, 7, 6, 1, valEd25519, 1), - makeParams(9, 5, 4, 1, valEd25519, 1), - makeParams(7, 8, 9, 1, 
valEd25519, 1), - makeParams(4, 6, 5, 1, valEd25519, 1), + params := consensusParamsForTestProto() + + for i := range params { + pbParams := params[i].ToProto() + + oriParams := ConsensusParamsFromProto(pbParams) + + assert.Equal(t, params[i], oriParams) } +} + +func TestProtoUpgrade(t *testing.T) { + params := consensusParamsForTestProto() for i := range params { pbParams := params[i].ToProto() + // Downgrade + if pbParams.GetFeature().GetVoteExtensionsEnableHeight().GetValue() > 0 { + pbParams.Abci = &cmtproto.ABCIParams{VoteExtensionsEnableHeight: pbParams.GetFeature().GetVoteExtensionsEnableHeight().GetValue()} //nolint: staticcheck + pbParams.Feature.VoteExtensionsEnableHeight = nil + } + oriParams := ConsensusParamsFromProto(pbParams) assert.Equal(t, params[i], oriParams) + } +} + +func durationPtr(t time.Duration) *time.Duration { + return &t +} +func TestParamsAdaptiveSynchronyParams(t *testing.T) { + originalSP := DefaultSynchronyParams() + assert.Equal(t, originalSP, originalSP.InRound(0), + "SynchronyParams(0) must be equal to SynchronyParams") + + lastSP := originalSP + for round := int32(1); round <= 10; round++ { + adaptedSP := originalSP.InRound(round) + assert.NotEqual(t, adaptedSP, lastSP) + assert.Equal(t, adaptedSP.Precision, lastSP.Precision, + "Precision must not change over rounds") + assert.Greater(t, adaptedSP.MessageDelay, lastSP.MessageDelay, + "MessageDelay must increase over rounds") + + // It should not increase a lot per round, say more than 25% + maxMessageDelay := lastSP.MessageDelay + lastSP.MessageDelay*25/100 + assert.LessOrEqual(t, adaptedSP.MessageDelay, maxMessageDelay, + "MessageDelay should not increase by more than 25% per round") + + lastSP = adaptedSP } + + assert.GreaterOrEqual(t, lastSP.MessageDelay, originalSP.MessageDelay*2, + "MessageDelay must at least double after 10 rounds") + assert.LessOrEqual(t, lastSP.MessageDelay, originalSP.MessageDelay*10, + "MessageDelay must not increase by more than 10 times after 10 rounds") } diff --git a/types/part_set.go b/types/part_set.go index 48476a9e354..ae2c1f4023e 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -6,18 +6,20 @@ import ( "fmt" "io" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/merkle" - "github.com/cometbft/cometbft/libs/bits" + "github.com/cometbft/cometbft/internal/bits" cmtbytes "github.com/cometbft/cometbft/libs/bytes" cmtjson "github.com/cometbft/cometbft/libs/json" cmtmath "github.com/cometbft/cometbft/libs/math" cmtsync "github.com/cometbft/cometbft/libs/sync" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) var ( ErrPartSetUnexpectedIndex = errors.New("error part set unexpected index") ErrPartSetInvalidProof = errors.New("error part set invalid proof") + ErrPartTooBig = errors.New("error part size too big") + ErrPartInvalidSize = errors.New("error inner part with invalid size") ) type Part struct { @@ -29,7 +31,11 @@ type Part struct { // ValidateBasic performs basic validation. func (part *Part) ValidateBasic() error { if len(part.Bytes) > int(BlockPartSizeBytes) { - return fmt.Errorf("too big: %d bytes, max: %d", len(part.Bytes), BlockPartSizeBytes) + return ErrPartTooBig + } + // All parts except the last one should have the same constant size. 
+ if int64(part.Index) < part.Proof.Total-1 && len(part.Bytes) != int(BlockPartSizeBytes) { + return ErrPartInvalidSize } if err := part.Proof.ValidateBasic(); err != nil { return fmt.Errorf("wrong Proof: %w", err) @@ -46,7 +52,7 @@ func (part *Part) String() string { // StringIndented returns an indented Part. // -// See merkle.Proof#StringIndented +// See merkle.Proof#StringIndented. func (part *Part) StringIndented(indent string) string { return fmt.Sprintf(`Part{#%v %s Bytes: %X... @@ -89,7 +95,7 @@ func PartFromProto(pb *cmtproto.Part) (*Part, error) { return part, part.ValidateBasic() } -//------------------------------------- +// ------------------------------------- type PartSetHeader struct { Total uint32 `json:"total"` @@ -99,7 +105,7 @@ type PartSetHeader struct { // String returns a string representation of PartSetHeader. // // 1. total number of parts -// 2. first 6 bytes of the hash +// 2. first 6 bytes of the hash. func (psh PartSetHeader) String() string { return fmt.Sprintf("%v:%X", psh.Total, cmtbytes.Fingerprint(psh.Hash)) } @@ -121,7 +127,7 @@ func (psh PartSetHeader) ValidateBasic() error { return nil } -// ToProto converts PartSetHeader to protobuf +// ToProto converts PartSetHeader to protobuf. func (psh *PartSetHeader) ToProto() cmtproto.PartSetHeader { if psh == nil { return cmtproto.PartSetHeader{} @@ -133,7 +139,7 @@ func (psh *PartSetHeader) ToProto() cmtproto.PartSetHeader { } } -// FromProto sets a protobuf PartSetHeader to the given pointer +// PartSetHeaderFromProto sets a protobuf PartSetHeader to the given pointer. func PartSetHeaderFromProto(ppsh *cmtproto.PartSetHeader) (*PartSetHeader, error) { if ppsh == nil { return nil, errors.New("nil PartSetHeader") @@ -151,7 +157,7 @@ func ProtoPartSetHeaderIsZero(ppsh *cmtproto.PartSetHeader) bool { return ppsh.Total == 0 && len(ppsh.Hash) == 0 } -//------------------------------------- +// ------------------------------------- type PartSet struct { total uint32 @@ -164,9 +170,13 @@ type PartSet struct { // a count of the total size (in bytes). Used to ensure that the // part set doesn't exceed the maximum block bytes byteSize int64 + + // Workaround to prevent the consensus Reactor from reading from an + // incomplete part set when the node is the round's proposer. + locked bool } -// Returns an immutable, full PartSet from the data bytes. +// NewPartSetFromData returns an immutable, full PartSet from the data bytes. // The data bytes are split into "partSize" chunks, and merkle tree computed. // CONTRACT: partSize is greater than zero. func NewPartSetFromData(data []byte, partSize uint32) *PartSet { @@ -174,7 +184,6 @@ func NewPartSetFromData(data []byte, partSize uint32) *PartSet { total := (uint32(len(data)) + partSize - 1) / partSize parts := make([]*Part, total) partsBytes := make([][]byte, total) - partsBitArray := bits.NewBitArray(int(total)) for i := uint32(0); i < total; i++ { part := &Part{ Index: i, @@ -182,13 +191,13 @@ func NewPartSetFromData(data []byte, partSize uint32) *PartSet { } parts[i] = part partsBytes[i] = part.Bytes - partsBitArray.SetIndex(int(i), true) } // Compute merkle proofs root, proofs := merkle.ProofsFromByteSlices(partsBytes) for i := uint32(0); i < total; i++ { parts[i].Proof = *proofs[i] } + partsBitArray := bits.NewBitArrayFromFn(int(total), func(int) bool { return true }) return &PartSet{ total: total, hash: root, @@ -199,7 +208,7 @@ func NewPartSetFromData(data []byte, partSize uint32) *PartSet { } } -// Returns an empty PartSet ready to be populated. 
+// NewPartSetFromHeader returns an empty PartSet ready to be populated. func NewPartSetFromHeader(header PartSetHeader) *PartSet { return &PartSet{ total: header.Total, @@ -289,6 +298,11 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { return false, nil } + // The proof should be compatible with the number of parts. + if part.Proof.Total != int64(ps.total) { + return false, ErrPartSetInvalidProof + } + // Check hash proof if part.Proof.Verify(ps.Hash(), part.Bytes) != nil { return false, ErrPartSetInvalidProof @@ -319,6 +333,24 @@ func (ps *PartSet) GetReader() io.Reader { return NewPartSetReader(ps.parts) } +func (ps *PartSet) IsLocked() bool { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return ps.locked +} + +func (ps *PartSet) Lock() { + ps.mtx.Lock() + defer ps.mtx.Unlock() + ps.locked = true +} + +func (ps *PartSet) Unlock() { + ps.mtx.Lock() + defer ps.mtx.Unlock() + ps.locked = false +} + type PartSetReader struct { i int parts []*Part @@ -356,7 +388,7 @@ func (psr *PartSetReader) Read(p []byte) (n int, err error) { // StringShort returns a short version of String. // -// (Count of Total) +// (Count of Total). func (ps *PartSet) StringShort() string { if ps == nil { return "nil-PartSet" diff --git a/types/part_set_test.go b/types/part_set_test.go index c1f885260cd..26f21f79278 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "io" "testing" @@ -8,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/crypto/merkle" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) const ( @@ -28,6 +29,7 @@ func TestBasicPartSet(t *testing.T) { assert.True(t, partSet.IsComplete()) assert.EqualValues(t, nParts, partSet.Count()) assert.EqualValues(t, testPartSize*nParts, partSet.ByteSize()) + assert.False(t, partSet.IsLocked()) // Test adding parts to a new partSet. partSet2 := NewPartSetFromHeader(partSet.Header()) @@ -44,16 +46,17 @@ func TestBasicPartSet(t *testing.T) { // adding part with invalid index added, err := partSet2.AddPart(&Part{Index: 10000}) assert.False(t, added) - assert.Error(t, err) + require.Error(t, err) // adding existing part added, err = partSet2.AddPart(partSet2.GetPart(0)) assert.False(t, added) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, partSet.Hash(), partSet2.Hash()) assert.EqualValues(t, nParts, partSet2.Total()) assert.EqualValues(t, nParts*testPartSize, partSet.ByteSize()) assert.True(t, partSet2.IsComplete()) + assert.False(t, partSet2.IsLocked()) // Reconstruct data, assert that they are equal. data2Reader := partSet2.GetReader() @@ -61,6 +64,16 @@ func TestBasicPartSet(t *testing.T) { require.NoError(t, err) assert.Equal(t, data, data2) + + // Test locking + partSet2.Lock() + assert.True(t, partSet2.IsLocked()) + partSet2.Lock() + assert.True(t, partSet2.IsLocked()) + partSet2.Unlock() + assert.False(t, partSet2.IsLocked()) + partSet2.Unlock() + assert.False(t, partSet2.IsLocked()) } func TestWrongProof(t *testing.T) { @@ -86,6 +99,22 @@ func TestWrongProof(t *testing.T) { if added || err == nil { t.Errorf("expected to fail adding a part with bad bytes.") } + + // Test adding a part with wrong proof index. + part = partSet.GetPart(2) + part.Proof.Index = 1 + added, err = partSet2.AddPart(part) + if added || err == nil { + t.Errorf("expected to fail adding a part with bad proof index.") + } + + // Test adding a part with wrong proof total. 
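// The locked flag introduced above is, per its comment, a workaround: it
// keeps the consensus reactor from reading a part set that the round's
// proposer is still filling in. A hedged sketch of the intended call
// pattern; the reader-side guard is an assumption, not code from this diff.
// The locking test above shows Lock/Unlock are idempotent:
//
//	// proposer side: hold the lock while the set is incomplete
//	ps.Lock()
//	for _, part := range parts {
//		if _, err := ps.AddPart(part); err != nil {
//			return err
//		}
//	}
//	ps.Unlock()
//
//	// reader side: skip sets that are still being assembled
//	if ps.IsLocked() {
//		return
//	}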
+ part = partSet.GetPart(3) + part.Proof.Total = int64(partSet.Total() - 1) + added, err = partSet2.AddPart(part) + if added || err == nil { + t.Errorf("expected to fail adding a part with bad proof total.") + } } func TestPartSetHeaderValidateBasic(t *testing.T) { @@ -94,11 +123,10 @@ func TestPartSetHeaderValidateBasic(t *testing.T) { malleatePartSetHeader func(*PartSetHeader) expectErr bool }{ - {"Good PartSet", func(psHeader *PartSetHeader) {}, false}, + {"Good PartSet", func(_ *PartSetHeader) {}, false}, {"Invalid Hash", func(psHeader *PartSetHeader) { psHeader.Hash = make([]byte, 1) }, true}, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { data := cmtrand.Bytes(testPartSize * 100) ps := NewPartSetFromData(data, testPartSize) @@ -115,11 +143,21 @@ func TestPartValidateBasic(t *testing.T) { malleatePart func(*Part) expectErr bool }{ - {"Good Part", func(pt *Part) {}, false}, + {"Good Part", func(_ *Part) {}, false}, {"Too big part", func(pt *Part) { pt.Bytes = make([]byte, BlockPartSizeBytes+1) }, true}, + {"Good small last part", func(pt *Part) { + pt.Index = 1 + pt.Bytes = make([]byte, BlockPartSizeBytes-1) + pt.Proof.Total = 2 + }, false}, + {"Too small inner part", func(pt *Part) { + pt.Index = 0 + pt.Bytes = make([]byte, BlockPartSizeBytes-1) + pt.Proof.Total = 2 + }, true}, {"Too big proof", func(pt *Part) { pt.Proof = merkle.Proof{ - Total: 1, + Total: 2, Index: 1, LeafHash: make([]byte, 1024*1024), } @@ -127,7 +165,6 @@ func TestPartValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { data := cmtrand.Bytes(testPartSize * 100) ps := NewPartSetFromData(data, testPartSize) @@ -145,8 +182,10 @@ func TestParSetHeaderProtoBuf(t *testing.T) { expPass bool }{ {"success empty", &PartSetHeader{}, true}, - {"success", - &PartSetHeader{Total: 1, Hash: []byte("hash")}, true}, + { + "success", + &PartSetHeader{Total: 1, Hash: []byte("hash")}, true, + }, } for _, tc := range testCases { @@ -162,7 +201,6 @@ func TestParSetHeaderProtoBuf(t *testing.T) { } func TestPartProtoBuf(t *testing.T) { - proof := merkle.Proof{ Total: 1, Index: 1, @@ -175,8 +213,10 @@ func TestPartProtoBuf(t *testing.T) { }{ {"failure empty", &Part{}, false}, {"failure nil", nil, false}, - {"success", - &Part{Index: 1, Bytes: cmtrand.Bytes(32), Proof: proof}, true}, + { + "success", + &Part{Index: 1, Bytes: cmtrand.Bytes(32), Proof: proof}, true, + }, } for _, tc := range testCases { @@ -192,3 +232,15 @@ func TestPartProtoBuf(t *testing.T) { } } } + +func BenchmarkMakePartSet(b *testing.B) { + for nParts := 1; nParts <= 5; nParts++ { + b.Run(fmt.Sprintf("nParts=%d", nParts), func(b *testing.B) { + data := cmtrand.Bytes(testPartSize * nParts) + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewPartSetFromData(data, testPartSize) + } + }) + } +} diff --git a/types/priv_validator.go b/types/priv_validator.go index 340794d00c5..91f2bff25b7 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -5,18 +5,28 @@ import ( "errors" "fmt" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) // PrivValidator defines the functionality of a local CometBFT validator // that signs votes and proposals, and never double signs. type PrivValidator interface { + // GetPubKey returns the public key of the validator. 
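// With the new ErrPartInvalidSize rule, every part's size is fully
// determined by its index: for data of length L split into chunks of size
// P, parts 0..total-2 are exactly P bytes and only the last part may be
// shorter. A small sketch of the arithmetic ValidateBasic now enforces:
//
//	func expectedPartSize(index uint32, dataLen, partSize int) int {
//		total := (dataLen + partSize - 1) / partSize // ceil(L/P), as in NewPartSetFromData
//		if int(index) < total-1 {
//			return partSize // inner part: exactly BlockPartSizeBytes
//		}
//		return dataLen - (total-1)*partSize // last part: the remainder
//	}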
GetPubKey() (crypto.PubKey, error) - SignVote(chainID string, vote *cmtproto.Vote) error + // FIXME: should use the domain types defined in this package, not the proto types + + // SignVote signs a canonical representation of the vote. If signExtension is + // true, it also signs the vote extension. + SignVote(chainID string, vote *cmtproto.Vote, signExtension bool) error + + // SignProposal signs a canonical representation of the proposal. SignProposal(chainID string, proposal *cmtproto.Proposal) error + + // SignBytes signs an arbitrary array of bytes. + SignBytes(bytes []byte) ([]byte, error) } type PrivValidatorsByAddress []PrivValidator @@ -42,7 +52,7 @@ func (pvs PrivValidatorsByAddress) Swap(i, j int) { pvs[i], pvs[j] = pvs[j], pvs[i] } -//---------------------------------------- +// ---------------------------------------- // MockPV // MockPV implements PrivValidator without any safety or persistence. @@ -64,13 +74,13 @@ func NewMockPVWithParams(privKey crypto.PrivKey, breakProposalSigning, breakVote return MockPV{privKey, breakProposalSigning, breakVoteSigning} } -// Implements PrivValidator. +// GetPubKey implements PrivValidator. func (pv MockPV) GetPubKey() (crypto.PubKey, error) { return pv.PrivKey.PubKey(), nil } -// Implements PrivValidator. -func (pv MockPV) SignVote(chainID string, vote *cmtproto.Vote) error { +// SignVote implements PrivValidator. +func (pv MockPV) SignVote(chainID string, vote *cmtproto.Vote, signExtension bool) error { useChainID := chainID if pv.breakVoteSigning { useChainID = "incorrect-chain-id" @@ -83,22 +93,24 @@ func (pv MockPV) SignVote(chainID string, vote *cmtproto.Vote) error { } vote.Signature = sig - var extSig []byte - // We only sign vote extensions for non-nil precommits - if vote.Type == cmtproto.PrecommitType && !ProtoBlockIDIsNil(&vote.BlockID) { - extSignBytes := VoteExtensionSignBytes(useChainID, vote) - extSig, err = pv.PrivKey.Sign(extSignBytes) - if err != nil { - return err + if signExtension { + var extSig []byte + // We only sign vote extensions for non-nil precommits + if vote.Type == PrecommitType && !ProtoBlockIDIsNil(&vote.BlockID) { + extSignBytes := VoteExtensionSignBytes(useChainID, vote) + extSig, err = pv.PrivKey.Sign(extSignBytes) + if err != nil { + return err + } + } else if len(vote.Extension) > 0 { + return errors.New("unexpected vote extension - vote extensions are only allowed in non-nil precommits") } - } else if len(vote.Extension) > 0 { - return errors.New("unexpected vote extension - vote extensions are only allowed in non-nil precommits") + vote.ExtensionSignature = extSig } - vote.ExtensionSignature = extSig return nil } -// Implements PrivValidator. +// SignProposal implements PrivValidator. func (pv MockPV) SignProposal(chainID string, proposal *cmtproto.Proposal) error { useChainID := chainID if pv.breakProposalSigning { @@ -114,6 +126,11 @@ func (pv MockPV) SignProposal(chainID string, proposal *cmtproto.Proposal) error return nil } +// SignBytes implements PrivValidator. +func (pv MockPV) SignBytes(bytes []byte) ([]byte, error) { + return pv.PrivKey.Sign(bytes) +} + func (pv MockPV) ExtractIntoValidator(votingPower int64) *Validator { pubKey, _ := pv.GetPubKey() return &Validator{ @@ -130,7 +147,7 @@ func (pv MockPV) String() string { } // XXX: Implement. -func (pv MockPV) DisableChecks() { +func (MockPV) DisableChecks() { // Currently this does nothing, // as MockPV has no safety checks at all. 
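// A usage note on the interface changes above: SignVote now takes an
// explicit signExtension flag. With true it also fills in
// vote.ExtensionSignature for non-nil precommits; with false it leaves
// extensions to the caller (e.g. when they are disabled at this height).
// SignBytes generalizes signing to arbitrary payloads. A sketch (the chain
// ID and vote are illustrative):
//
//	pv := NewMockPV()
//	if err := pv.SignVote("test-chain", protoVote, true); err != nil { // vote + extension
//		return err
//	}
//	sig, err := pv.SignBytes([]byte("arbitrary payload"))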
} @@ -141,13 +158,13 @@ type ErroringMockPV struct { var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") -// Implements PrivValidator. -func (pv *ErroringMockPV) SignVote(string, *cmtproto.Vote) error { +// SignVote implements PrivValidator. +func (*ErroringMockPV) SignVote(string, *cmtproto.Vote, bool) error { return ErroringMockPVErr } -// Implements PrivValidator. -func (pv *ErroringMockPV) SignProposal(string, *cmtproto.Proposal) error { +// SignProposal implements PrivValidator. +func (*ErroringMockPV) SignProposal(string, *cmtproto.Proposal) error { return ErroringMockPVErr } diff --git a/types/proposal.go b/types/proposal.go index 9bae5e4efa4..bce67565dfa 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -5,9 +5,9 @@ import ( "fmt" "time" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cmtbytes "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/protoio" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" cmttime "github.com/cometbft/cometbft/types/time" ) @@ -23,7 +23,7 @@ var ( // a so-called Proof-of-Lock (POL) round, as noted in the POLRound. // If POLRound >= 0, then BlockID corresponds to the block that is locked in POLRound. type Proposal struct { - Type cmtproto.SignedMsgType + Type SignedMsgType Height int64 `json:"height"` Round int32 `json:"round"` // there can not be greater than 2_147_483_647 rounds POLRound int32 `json:"pol_round"` // -1 if null. @@ -34,24 +34,24 @@ type Proposal struct { // NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1. -func NewProposal(height int64, round int32, polRound int32, blockID BlockID) *Proposal { +func NewProposal(height int64, round int32, polRound int32, blockID BlockID, ts time.Time) *Proposal { return &Proposal{ - Type: cmtproto.ProposalType, + Type: ProposalType, Height: height, Round: round, BlockID: blockID, POLRound: polRound, - Timestamp: cmttime.Now(), + Timestamp: cmttime.Canonical(ts), } } // ValidateBasic performs basic validation. func (p *Proposal) ValidateBasic() error { - if p.Type != cmtproto.ProposalType { + if p.Type != ProposalType { return errors.New("invalid Type") } - if p.Height < 0 { - return errors.New("negative Height") + if p.Height <= 0 { + return errors.New("non positive Height") } if p.Round < 0 { return errors.New("negative Round") @@ -59,26 +59,53 @@ func (p *Proposal) ValidateBasic() error { if p.POLRound < -1 { return errors.New("negative POLRound (exception: -1)") } + if p.POLRound >= p.Round { + return errors.New("POLRound >= Round") + } if err := p.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } // ValidateBasic above would pass even if the BlockID was empty: if !p.BlockID.IsComplete() { return fmt.Errorf("expected a complete, non-empty BlockID, got: %v", p.BlockID) } - - // NOTE: Timestamp validation is subtle and handled elsewhere. - + // Times must be canonical + if cmttime.Canonical(p.Timestamp) != p.Timestamp { + return fmt.Errorf("expected a canonical timestamp, got: %v", p.Timestamp) + } if len(p.Signature) == 0 { return errors.New("signature is missing") } - if len(p.Signature) > MaxSignatureSize { return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize) } return nil } +// IsTimely validates that the proposal timestamp is 'timely' according to the +// proposer-based timestamp algorithm. 
To evaluate if a proposal is timely, its +// timestamp is compared to the local time of the validator when it receives +// the proposal along with the configured Precision and MessageDelay +// parameters. Specifically, a proposal's timestamp is considered timely +// if it satisfies the following inequalities: +// +// proposalReceiveTime >= proposalTimestamp - Precision +// proposalReceiveTime <= proposalTimestamp + MessageDelay + Precision +// +// For more information on the meaning of 'timely', refer to the specification: +// https://github.com/cometbft/cometbft/tree/main/spec/consensus/proposer-based-timestamp +func (p *Proposal) IsTimely(recvTime time.Time, sp SynchronyParams) bool { + // lhs is `proposalTimestamp - Precision` in the first inequality + lhs := p.Timestamp.Add(-sp.Precision) + // rhs is `proposalTimestamp + MessageDelay + Precision` in the second inequality + rhs := p.Timestamp.Add(sp.MessageDelay).Add(sp.Precision) + + if recvTime.Before(lhs) || recvTime.After(rhs) { + return false + } + return true +} + // String returns a string representation of the Proposal. // // 1. height @@ -106,7 +133,7 @@ func (p *Proposal) String() string { // for backwards-compatibility with the Amino encoding, due to e.g. hardware // devices that rely on this encoding. // -// See CanonicalizeProposal +// See CanonicalizeProposal. func ProposalSignBytes(chainID string, p *cmtproto.Proposal) []byte { pb := CanonicalizeProposal(chainID, p) bz, err := protoio.MarshalDelimited(&pb) @@ -117,7 +144,7 @@ func ProposalSignBytes(chainID string, p *cmtproto.Proposal) []byte { return bz } -// ToProto converts Proposal to protobuf +// ToProto converts Proposal to protobuf. func (p *Proposal) ToProto() *cmtproto.Proposal { if p == nil { return &cmtproto.Proposal{} } @@ -128,14 +155,14 @@ func (p *Proposal) ToProto() *cmtproto.Proposal { pb.Type = p.Type pb.Height = p.Height pb.Round = p.Round - pb.PolRound = p.POLRound + pb.PolRound = p.POLRound // FIXME: names do not match pb.Timestamp = p.Timestamp pb.Signature = p.Signature return pb } -// FromProto sets a protobuf Proposal to the given pointer. +// ProposalFromProto sets a protobuf Proposal to the given pointer. // It returns an error if the proposal is invalid.
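// A worked example of the two IsTimely inequalities above, with
// illustrative (not default) synchrony values Precision = 10ms and
// MessageDelay = 500ms: a proposal stamped T is timely iff it is received
// within [T-10ms, T+510ms]. Arriving slightly "before" its own timestamp is
// tolerated up to the clock imprecision; arriving late is tolerated up to
// one message delay plus the imprecision:
//
//	sp := SynchronyParams{Precision: 10 * time.Millisecond, MessageDelay: 500 * time.Millisecond}
//	t0 := p.Timestamp
//	p.IsTimely(t0.Add(-5*time.Millisecond), sp)  // true:  >= T-10ms
//	p.IsTimely(t0.Add(-20*time.Millisecond), sp) // false: before T-10ms
//	p.IsTimely(t0.Add(600*time.Millisecond), sp) // false: after T+510ms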
func ProposalFromProto(pp *cmtproto.Proposal) (*Proposal, error) { if pp == nil { @@ -153,7 +180,7 @@ func ProposalFromProto(pp *cmtproto.Proposal) (*Proposal, error) { p.Type = pp.Type p.Height = pp.Height p.Round = pp.Round - p.POLRound = pp.PolRound + p.POLRound = pp.PolRound // FIXME: names do not match p.Timestamp = pp.Timestamp p.Signature = pp.Signature diff --git a/types/proposal_test.go b/types/proposal_test.go index 0e7f574c4ac..dc1fdadcec3 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "math" "testing" "time" @@ -9,27 +10,34 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/tmhash" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/protoio" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttime "github.com/cometbft/cometbft/types/time" ) var ( testProposal *Proposal + testBlockID BlockID pbp *cmtproto.Proposal ) func init() { - var stamp, err = time.Parse(TimeFormat, "2018-02-11T07:09:22.765Z") + stamp, err := time.Parse(TimeFormat, "2018-02-11T07:09:22.765Z") if err != nil { panic(err) } + + testBlockID = BlockID{ + Hash: []byte("--June_15_2020_amino_was_removed"), + PartSetHeader: PartSetHeader{Total: 111, Hash: []byte("--June_15_2020_amino_was_removed")}, + } testProposal = &Proposal{ - Height: 12345, - Round: 23456, - BlockID: BlockID{Hash: []byte("--June_15_2020_amino_was_removed"), - PartSetHeader: PartSetHeader{Total: 111, Hash: []byte("--June_15_2020_amino_was_removed")}}, + Type: ProposalType, + Height: 12345, + Round: 23456, + BlockID: testBlockID, POLRound: -1, Timestamp: stamp, } @@ -60,8 +68,8 @@ func TestProposalVerifySignature(t *testing.T) { require.NoError(t, err) prop := NewProposal( - 4, 2, 2, - BlockID{cmtrand.Bytes(tmhash.Size), PartSetHeader{777, cmtrand.Bytes(tmhash.Size)}}) + 4, 2, 1, + BlockID{cmtrand.Bytes(tmhash.Size), PartSetHeader{777, cmtrand.Bytes(tmhash.Size)}}, cmttime.Now()) p := prop.ToProto() signBytes := ProposalSignBytes("test_chain_id", p) @@ -123,50 +131,72 @@ func BenchmarkProposalVerifySignature(b *testing.B) { } func TestProposalValidateBasic(t *testing.T) { - privVal := NewMockPV() + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + location, err := time.LoadLocation("CET") + require.NoError(t, err) + testCases := []struct { testName string malleateProposal func(*Proposal) expectErr bool }{ - {"Good Proposal", func(p *Proposal) {}, false}, - {"Invalid Type", func(p *Proposal) { p.Type = cmtproto.PrecommitType }, true}, + {"Good Proposal", func(*Proposal) {}, false}, + {"Test Proposal", func(p *Proposal) { + p.Type = testProposal.Type + p.Height = testProposal.Height + p.Round = testProposal.Round + p.BlockID = testProposal.BlockID + p.POLRound = testProposal.POLRound + p.Timestamp = testProposal.Timestamp + }, false}, + {"Invalid Type", func(p *Proposal) { p.Type = PrecommitType }, true}, {"Invalid Height", func(p *Proposal) { p.Height = -1 }, true}, + {"Zero Height", func(p *Proposal) { p.Height = 0 }, true}, {"Invalid Round", func(p *Proposal) { p.Round = -1 }, true}, {"Invalid POLRound", func(p *Proposal) { p.POLRound = -2 }, true}, + {"POLRound == Round", func(p *Proposal) { p.POLRound = p.Round }, true}, {"Invalid BlockId", func(p *Proposal) { p.BlockID = 
BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}} }, true}, {"Invalid Signature", func(p *Proposal) { p.Signature = make([]byte, 0) }, true}, + {"Small Signature", func(p *Proposal) { + p.Signature = make([]byte, MaxSignatureSize-1) + }, false}, {"Too big Signature", func(p *Proposal) { p.Signature = make([]byte, MaxSignatureSize+1) }, true}, + {"Non canonical time", func(p *Proposal) { + p.Timestamp = time.Now().In(location) + }, true}, + {"Not rounded time", func(p *Proposal) { + p.Timestamp = time.Now() + }, true}, } - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { prop := NewProposal( - 4, 2, 2, - blockID) + 4, 2, 1, + blockID, cmttime.Now()) p := prop.ToProto() err := privVal.SignProposal("test_chain_id", p) prop.Signature = p.Signature require.NoError(t, err) + tc.malleateProposal(prop) - assert.Equal(t, tc.expectErr, prop.ValidateBasic() != nil, "Validate Basic had an unexpected result") + err = prop.ValidateBasic() + errMessage := fmt.Sprintf("Validate Basic had an unexpected error: %v", err) + assert.Equal(t, tc.expectErr, prop.ValidateBasic() != nil, errMessage) }) } } func TestProposalProtoBuf(t *testing.T) { - proposal := NewProposal(1, 2, 3, makeBlockID([]byte("hash"), 2, []byte("part_set_hash"))) + proposal := NewProposal(1, 2, 1, makeBlockID([]byte("hash"), 2, []byte("part_set_hash")), cmttime.Now()) proposal.Signature = []byte("sig") - proposal2 := NewProposal(1, 2, 3, BlockID{}) + proposal2 := NewProposal(1, 2, 1, BlockID{}, cmttime.Now()) testCases := []struct { msg string @@ -190,3 +220,69 @@ func TestProposalProtoBuf(t *testing.T) { } } } + +func TestProposalIsTimely(t *testing.T) { + timestamp, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z") + sp := SynchronyParams{ + Precision: time.Nanosecond, + MessageDelay: 2 * time.Nanosecond, + } + require.NoError(t, err) + testCases := []struct { + name string + proposalHeight int64 + proposalTimestamp time.Time + proposalReceiveTime time.Time + expectTimely bool + }{ + // Timely requirements: + // proposalReceiveTime >= proposalTimestamp - PRECISION + // proposalReceiveTime <= proposalTimestamp + MSGDELAY + PRECISION + { + name: "timestamp in the past", + proposalHeight: 2, + proposalTimestamp: timestamp, + proposalReceiveTime: timestamp.Add(sp.Precision + sp.MessageDelay), + expectTimely: true, + }, + { + name: "timestamp far in the past", + proposalHeight: 2, + proposalTimestamp: timestamp, + proposalReceiveTime: timestamp.Add(sp.Precision + sp.MessageDelay + 1), + expectTimely: false, + }, + { + name: "timestamp in the future", + proposalHeight: 2, + proposalTimestamp: timestamp.Add(sp.Precision), + proposalReceiveTime: timestamp, + expectTimely: true, + }, + { + name: "timestamp far in the future", + proposalHeight: 2, + proposalTimestamp: timestamp.Add(sp.Precision + 1), + proposalReceiveTime: timestamp, + expectTimely: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + p := Proposal{ + Type: ProposalType, + Height: testCase.proposalHeight, + Timestamp: testCase.proposalTimestamp, + Round: 0, + POLRound: -1, + BlockID: testBlockID, + Signature: []byte{1}, + } + require.NoError(t, p.ValidateBasic()) + + ti := p.IsTimely(testCase.proposalReceiveTime, sp) + assert.Equal(t, testCase.expectTimely, ti) + }) + } +} diff --git a/types/protobuf.go b/types/protobuf.go index 4393f571c6e..527add7e1b1 100644 --- 
a/types/protobuf.go +++ b/types/protobuf.go @@ -2,15 +2,14 @@ package types import ( abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/crypto" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" cryptoenc "github.com/cometbft/cometbft/crypto/encoding" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) -//------------------------------------------------------- +// ------------------------------------------------------- // TM2PB is used for converting CometBFT ABCI to protobuf ABCI. -// UNSTABLE +// UNSTABLE. var TM2PB = tm2pb{} type tm2pb struct{} @@ -59,43 +58,18 @@ func (tm2pb) PartSetHeader(header PartSetHeader) cmtproto.PartSetHeader { } } -// XXX: panics on unknown pubkey type -func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { - pk, err := cryptoenc.PubKeyToProto(val.PubKey) - if err != nil { - panic(err) - } - return abci.ValidatorUpdate{ - PubKey: pk, - Power: val.VotingPower, - } -} - -// XXX: panics on nil or unknown pubkey type func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { validators := make([]abci.ValidatorUpdate, vals.Size()) for i, val := range vals.Validators { - validators[i] = TM2PB.ValidatorUpdate(val) + validators[i] = abci.NewValidatorUpdate(val.PubKey, val.VotingPower) } return validators } -// XXX: panics on nil or unknown pubkey type -func (tm2pb) NewValidatorUpdate(pubkey crypto.PubKey, power int64) abci.ValidatorUpdate { - pubkeyABCI, err := cryptoenc.PubKeyToProto(pubkey) - if err != nil { - panic(err) - } - return abci.ValidatorUpdate{ - PubKey: pubkeyABCI, - Power: power, - } -} - -//---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- // PB2TM is used for converting protobuf ABCI to CometBFT ABCI. -// UNSTABLE +// UNSTABLE. 
var PB2TM = pb2tm{} type pb2tm struct{} @@ -103,11 +77,11 @@ type pb2tm struct{} func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { cmtVals := make([]*Validator, len(vals)) for i, v := range vals { - pub, err := cryptoenc.PubKeyFromProto(v.PubKey) + pubKey, err := cryptoenc.PubKeyFromTypeAndBytes(v.PubKeyType, v.PubKeyBytes) if err != nil { return nil, err } - cmtVals[i] = NewValidator(pub, v.Power) + cmtVals[i] = NewValidator(pubKey, v.Power) } return cmtVals, nil } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 73a2c02a16b..a61afc5094e 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -12,44 +12,18 @@ import ( cryptoenc "github.com/cometbft/cometbft/crypto/encoding" ) -func TestABCIPubKey(t *testing.T) { - pkEd := ed25519.GenPrivKey().PubKey() - err := testABCIPubKey(t, pkEd) - assert.NoError(t, err) -} +func TestPubKey(t *testing.T) { + pk := ed25519.GenPrivKey().PubKey() -func testABCIPubKey(t *testing.T, pk crypto.PubKey) error { + // to proto abciPubKey, err := cryptoenc.PubKeyToProto(pk) require.NoError(t, err) + + // from proto pk2, err := cryptoenc.PubKeyFromProto(abciPubKey) require.NoError(t, err) - require.Equal(t, pk, pk2) - return nil -} - -func TestABCIValidators(t *testing.T) { - pkEd := ed25519.GenPrivKey().PubKey() - // correct validator - cmtValExpected := NewValidator(pkEd, 10) - - cmtVal := NewValidator(pkEd, 10) - - abciVal := TM2PB.ValidatorUpdate(cmtVal) - cmtVals, err := PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) - assert.Nil(t, err) - assert.Equal(t, cmtValExpected, cmtVals[0]) - - abciVals := TM2PB.ValidatorUpdates(NewValidatorSet(cmtVals)) - assert.Equal(t, []abci.ValidatorUpdate{abciVal}, abciVals) - - // val with address - cmtVal.Address = pkEd.Address() - - abciVal = TM2PB.ValidatorUpdate(cmtVal) - cmtVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) - assert.Nil(t, err) - assert.Equal(t, cmtValExpected, cmtVals[0]) + require.Equal(t, pk, pk2) } type pubKeyEddie struct{} @@ -61,17 +35,30 @@ func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } func (pubKeyEddie) String() string { return "" } func (pubKeyEddie) Type() string { return "pubKeyEddie" } -func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { - pubkey := ed25519.GenPrivKey().PubKey() +func TestPubKey_UnknownType(t *testing.T) { + pk := pubKeyEddie{} + + // to proto + _, err := cryptoenc.PubKeyToProto(pk) + require.Error(t, err) +} + +func TestValidatorUpdates(t *testing.T) { + pkEd := ed25519.GenPrivKey().PubKey() + cmtValExpected := NewValidator(pkEd, 10) + abciVal := abci.NewValidatorUpdate(pkEd, 10) - abciVal := TM2PB.NewValidatorUpdate(pubkey, 10) - assert.Equal(t, int64(10), abciVal.Power) + // from proto + cmtVals, err := PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) + require.NoError(t, err) + assert.Equal(t, cmtValExpected, cmtVals[0]) - assert.Panics(t, func() { TM2PB.NewValidatorUpdate(nil, 10) }) - assert.Panics(t, func() { TM2PB.NewValidatorUpdate(pubKeyEddie{}, 10) }) + // to proto + abciVals := TM2PB.ValidatorUpdates(NewValidatorSet(cmtVals)) + assert.Equal(t, []abci.ValidatorUpdate{abciVal}, abciVals) } -func TestABCIValidatorWithoutPubKey(t *testing.T) { +func TestValidator_WithoutPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() abciVal := TM2PB.Validator(NewValidator(pkEd, 10)) diff --git a/types/results.go b/types/results.go index 214fca90f98..abf42f6f760 100644 --- a/types/results.go +++ b/types/results.go @@ -23,7 +23,7 @@ func (a ABCIResults) 
Hash() []byte { return merkle.HashFromByteSlices(a.toByteSlices()) } -// ProveResult returns a merkle proof of one result from the set +// ProveResult returns a merkle proof of one result from the set. func (a ABCIResults) ProveResult(i int) merkle.Proof { _, proofs := merkle.ProofsFromByteSlices(a.toByteSlices()) return *proofs[i] diff --git a/types/results_test.go b/types/results_test.go index bf3db01ecaa..e08c67185d3 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -49,6 +49,6 @@ func TestABCIResults(t *testing.T) { proof := results.ProveResult(i) valid := proof.Verify(root, bz) - assert.NoError(t, valid, "%d", i) + require.NoError(t, valid, "%d", i) } } diff --git a/types/signable.go b/types/signable.go index 8ba2e6599a1..03ebd95daf5 100644 --- a/types/signable.go +++ b/types/signable.go @@ -1,16 +1,17 @@ package types import ( + "github.com/cometbft/cometbft/crypto/bls12381" "github.com/cometbft/cometbft/crypto/ed25519" cmtmath "github.com/cometbft/cometbft/libs/math" ) -var ( - // MaxSignatureSize is a maximum allowed signature size for the Proposal - // and Vote. - // XXX: secp256k1 does not have Size nor MaxSize defined. - MaxSignatureSize = cmtmath.MaxInt(ed25519.SignatureSize, 64) -) +// MaxSignatureSize is a maximum allowed signature size for the Proposal +// and Vote. +// XXX: secp256k1 does not have max signature size defined. +var MaxSignatureSize = cmtmath.MaxInt( + ed25519.SignatureSize, + bls12381.SignatureLength) // Signable is an interface for all signable things. // It typically removes signatures before serializing. diff --git a/types/signature_cache.go b/types/signature_cache.go new file mode 100644 index 00000000000..bde53e4c37d --- /dev/null +++ b/types/signature_cache.go @@ -0,0 +1,36 @@ +package types + +// The value type for the verified signature cache. +type SignatureCacheValue struct { + ValidatorAddress []byte + VoteSignBytes []byte +} + +type SignatureCache interface { + Add(key string, value SignatureCacheValue) + Get(key string) (SignatureCacheValue, bool) + Len() int +} + +type signatureCache struct { + cache map[string]SignatureCacheValue +} + +func NewSignatureCache() SignatureCache { + return &signatureCache{ + cache: make(map[string]SignatureCacheValue), + } +} + +func (sc *signatureCache) Add(key string, value SignatureCacheValue) { + sc.cache[key] = value +} + +func (sc *signatureCache) Get(key string) (SignatureCacheValue, bool) { + value, ok := sc.cache[key] + return value, ok +} + +func (sc *signatureCache) Len() int { + return len(sc.cache) +} diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go index e8daccbb1b3..75f8f3a7f86 100644 --- a/types/signed_msg_type.go +++ b/types/signed_msg_type.go @@ -1,13 +1,37 @@ package types -import cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" +import cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + +type SignedMsgType = cmtproto.SignedMsgType + +const ( + UnknownType SignedMsgType = cmtproto.UnknownType + PrevoteType SignedMsgType = cmtproto.PrevoteType + PrecommitType SignedMsgType = cmtproto.PrecommitType + ProposalType SignedMsgType = cmtproto.ProposalType +) // IsVoteTypeValid returns true if t is a valid vote type. 
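// A hedged sketch of how the new SignatureCache is consulted (mirroring the
// checks added to types/validation.go below): the key is the raw signature,
// and a hit only counts when both the validator address and the sign bytes
// match, so an entry cached for one (validator, message) pair can never
// satisfy verification for another. The map-backed implementation is not
// safe for concurrent use; callers are assumed to serialize access.
//
//	if v, ok := cache.Get(string(sig)); ok &&
//		bytes.Equal(v.ValidatorAddress, val.PubKey.Address()) &&
//		bytes.Equal(v.VoteSignBytes, signBytes) {
//		return true // verified earlier; skip the expensive check
//	}
//	if !val.PubKey.VerifySignature(signBytes, sig) {
//		return false
//	}
//	cache.Add(string(sig), SignatureCacheValue{
//		ValidatorAddress: val.PubKey.Address(),
//		VoteSignBytes:    signBytes,
//	})
//	return true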
-func IsVoteTypeValid(t cmtproto.SignedMsgType) bool { +func IsVoteTypeValid(t SignedMsgType) bool { switch t { - case cmtproto.PrevoteType, cmtproto.PrecommitType: + case PrevoteType, PrecommitType: return true default: return false } } + +var signedMsgTypeToShortName = map[SignedMsgType]string{ + UnknownType: "unknown", + PrevoteType: "prevote", + PrecommitType: "precommit", + ProposalType: "proposal", +} + +// Returns a short lowercase descriptor for a signed message type. +func SignedMsgTypeToShortString(t SignedMsgType) string { + if shortName, ok := signedMsgTypeToShortName[t]; ok { + return shortName + } + return "unknown" +} diff --git a/types/test_util.go b/types/test_util.go index 5cf0272bd6d..2a959ef3af9 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -7,14 +7,13 @@ import ( "github.com/stretchr/testify/require" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmtversion "github.com/cometbft/cometbft/api/cometbft/version/v1" "github.com/cometbft/cometbft/version" ) func MakeExtCommit(blockID BlockID, height int64, round int32, - voteSet *VoteSet, validators []PrivValidator, now time.Time, extEnabled bool) (*ExtendedCommit, error) { - + voteSet *VoteSet, validators []PrivValidator, now time.Time, extEnabled bool, +) (*ExtendedCommit, error) { // all sign for i := 0; i < len(validators); i++ { pubKey, err := validators[i].GetPubKey() @@ -26,7 +25,7 @@ func MakeExtCommit(blockID BlockID, height int64, round int32, ValidatorIndex: int32(i), Height: height, Round: round, - Type: cmtproto.PrecommitType, + Type: PrecommitType, BlockID: blockID, Timestamp: now, } @@ -37,12 +36,12 @@ func MakeExtCommit(blockID BlockID, height int64, round int32, } } - var enableHeight int64 + p := DefaultFeatureParams() if extEnabled { - enableHeight = height + p.VoteExtensionsEnableHeight = height } - return voteSet.MakeExtendedCommit(ABCIParams{VoteExtensionsEnableHeight: enableHeight}), nil + return voteSet.MakeExtendedCommit(p), nil } func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (bool, error) { @@ -61,9 +60,9 @@ func MakeVote( valIndex int32, height int64, round int32, - step cmtproto.SignedMsgType, + step SignedMsgType, blockID BlockID, - time time.Time, + votetime time.Time, ) (*Vote, error) { pubKey, err := val.GetPubKey() if err != nil { @@ -77,10 +76,10 @@ func MakeVote( Round: round, Type: step, BlockID: blockID, - Timestamp: time, + Timestamp: votetime, } - extensionsEnabled := step == cmtproto.PrecommitType + extensionsEnabled := step == PrecommitType if _, err := SignAndCheckVote(vote, val, chainID, extensionsEnabled); err != nil { return nil, err } @@ -95,10 +94,12 @@ func MakeVoteNoError( valIndex int32, height int64, round int32, - step cmtproto.SignedMsgType, + step SignedMsgType, blockID BlockID, time time.Time, ) *Vote { + t.Helper() + vote, err := MakeVote(val, chainID, valIndex, height, round, step, blockID, time) require.NoError(t, err) return vote diff --git a/types/time/mocks/source.go b/types/time/mocks/source.go new file mode 100644 index 00000000000..85a0e0af2af --- /dev/null +++ b/types/time/mocks/source.go @@ -0,0 +1,46 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// Source is an autogenerated mock type for the Source type +type Source struct { + mock.Mock +} + +// Now provides a mock function with given fields: +func (_m *Source) Now() time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Now") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// NewSource creates a new instance of Source. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSource(t interface { + mock.TestingT + Cleanup(func()) +}) *Source { + mock := &Source{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/time/now.go b/types/time/now.go new file mode 100644 index 00000000000..abea9a18d7b --- /dev/null +++ b/types/time/now.go @@ -0,0 +1,13 @@ +//go:build !clock_skew +// +build !clock_skew + +package time + +import ( + "time" +) + +// Now returns the current time in UTC with no monotonic component. +func Now() time.Time { + return Canonical(time.Now()) +} diff --git a/types/time/now_skew.go b/types/time/now_skew.go new file mode 100644 index 00000000000..8a975f8c649 --- /dev/null +++ b/types/time/now_skew.go @@ -0,0 +1,29 @@ +//go:build clock_skew +// +build clock_skew + +package time + +import ( + "fmt" + "os" + "time" +) + +var clockSkew time.Duration + +// Now returns the current time in UTC with no monotonic component. +func Now() time.Time { + return Canonical(time.Now().Add(clockSkew)) +} + +func init() { + skewStr := os.Getenv("COMETBFT_CLOCK_SKEW") + if len(skewStr) == 0 { + return + } + skew, err := time.ParseDuration(skewStr) + if err != nil { + panic(fmt.Sprintf("contents of env variable COMETBFT_CLOCK_SKEW (%q) must be empty or a duration expression", skewStr)) + } + clockSkew = skew +} diff --git a/types/time/time.go b/types/time/time.go index 022bdf574f8..1077aa1ebe7 100644 --- a/types/time/time.go +++ b/types/time/time.go @@ -5,11 +5,6 @@ import ( "time" ) -// Now returns the current time in UTC with no monotonic component. -func Now() time.Time { - return Canonical(time.Now()) -} - // Canonical returns UTC time with no monotonic component. // Stripping the monotonic component is for time equality. // See https://github.com/tendermint/tendermint/pull/2203#discussion_r215064334 @@ -17,6 +12,33 @@ func Canonical(t time.Time) time.Time { return t.Round(0).UTC() } +//go:generate ../../scripts/mockery_generate.sh Source + +// Source is an interface that defines a way to fetch the current time. +type Source interface { + Now() time.Time +} + +// Until returns the duration until t. +// It is shorthand for t.Sub(time.Now()). +func Until(t time.Time) time.Duration { + return t.Sub(Now()) +} + +// Since returns the time elapsed since t. +// It is shorthand for time.Now().Sub(t). +func Since(t time.Time) time.Duration { + return Now().Sub(t) +} + +// DefaultSource implements the Source interface using the system clock provided by the standard library. +type DefaultSource struct{} + +func (DefaultSource) Now() time.Time { + return Now() +} + +// TODO: find which commit removed this and make sure it's in our list // WeightedTime for computing a median. 
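// The Source interface above makes "now" an injectable dependency; besides
// the generated mock, a trivial fixed-time implementation serves tests:
//
//	type fixedSource struct{ t time.Time }
//
//	func (s fixedSource) Now() time.Time { return s.t }
//
// The clock_skew build tag pairs with this for end-to-end testing: build
// with `-tags clock_skew` and set COMETBFT_CLOCK_SKEW to any
// time.ParseDuration expression (e.g. "500ms" or "-2s") to shift every
// cmttime.Now() by that amount; an unset or empty variable leaves the clock
// untouched.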
type WeightedTime struct { Time time.Time @@ -54,5 +76,5 @@ func WeightedMedian(weightedTimes []*WeightedTime, totalVotingPower int64) (res median -= weightedTime.Weight } } - return + return res } diff --git a/types/time/time_test.go b/types/time/time_test.go index 1b1a30e5058..93982c5301e 100644 --- a/types/time/time_test.go +++ b/types/time/time_test.go @@ -22,7 +22,7 @@ func TestWeightedMedian(t *testing.T) { median := WeightedMedian(m, totalVotingPower) assert.Equal(t, t2, median) // median always returns value between values of correct processes - assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + assert.True(t, (median.After(t1) || median.Equal(t1)) && (median.Before(t3) || median.Equal(t3))) m[1] = NewWeightedTime(t1, 40) // correct processes @@ -33,7 +33,7 @@ func TestWeightedMedian(t *testing.T) { median = WeightedMedian(m, totalVotingPower) assert.Equal(t, t2, median) // median always returns value between values of correct processes - assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + assert.True(t, (median.After(t1) || median.Equal(t1)) && (median.Before(t2) || median.Equal(t2))) m = make([]*WeightedTime, 8) @@ -51,6 +51,6 @@ func TestWeightedMedian(t *testing.T) { median = WeightedMedian(m, totalVotingPower) assert.Equal(t, t3, median) // median always returns value between values of correct processes - assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + assert.True(t, (median.After(t1) || median.Equal(t1)) && (median.Before(t4) || median.Equal(t4))) } diff --git a/types/tx.go b/types/tx.go index 0ba14dbadd0..1be0031cfe7 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,13 +6,13 @@ import ( "errors" "fmt" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/crypto/tmhash" cmtbytes "github.com/cometbft/cometbft/libs/bytes" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) -// TxKeySize is the size of the transaction key index +// TxKeySize is the size of the transaction key index. const TxKeySize = sha256.Size type ( @@ -26,7 +26,7 @@ type ( ) // Hash computes the TMHASH hash of the wire encoded transaction. -func (tx Tx) Hash() []byte { +func (tx Tx) Hash() cmtbytes.HexBytes { return tmhash.Sum(tx) } @@ -49,7 +49,7 @@ func (txs Txs) Hash() []byte { return merkle.HashFromByteSlices(hl) } -// Index returns the index of this transaction in the list, or -1 if not found +// Index returns the index of this transaction in the list, or -1 if not found. func (txs Txs) Index(tx Tx) int { for i := range txs { if bytes.Equal(txs[i], tx) { @@ -59,7 +59,7 @@ func (txs Txs) Index(tx Tx) int { return -1 } -// IndexByHash returns the index of this transaction hash in the list, or -1 if not found +// IndexByHash returns the index of this transaction hash in the list, or -1 if not found. func (txs Txs) IndexByHash(hash []byte) int { for i := range txs { if bytes.Equal(txs[i].Hash(), hash) { @@ -90,8 +90,14 @@ func (txs Txs) hashList() [][]byte { // Txs is a slice of transactions. Sorting a Txs value orders the transactions // lexicographically. -func (txs Txs) Len() int { return len(txs) } + +// Deprecated: Do not use. +func (txs Txs) Len() int { return len(txs) } + +// Deprecated: Do not use. func (txs Txs) Swap(i, j int) { txs[i], txs[j] = txs[j], txs[i] } + +// Deprecated: Do not use.
func (txs Txs) Less(i, j int) bool { return bytes.Compare(txs[i], txs[j]) == -1 } @@ -107,7 +113,7 @@ func ToTxs(txl [][]byte) Txs { func (txs Txs) Validate(maxSizeBytes int64) error { var size int64 for _, tx := range txs { - size += int64(len(tx)) + size += ComputeProtoSizeForTxs([]Tx{tx}) if size > maxSizeBytes { return fmt.Errorf("transaction data size exceeds maximum %d", maxSizeBytes) } @@ -156,7 +162,6 @@ func (tp TxProof) Validate(dataHash []byte) error { } func (tp TxProof) ToProto() cmtproto.TxProof { - pbProof := tp.Proof.ToProto() pbtp := cmtproto.TxProof{ @@ -167,8 +172,8 @@ func (tp TxProof) ToProto() cmtproto.TxProof { return pbtp } -func TxProofFromProto(pb cmtproto.TxProof) (TxProof, error) { +func TxProofFromProto(pb cmtproto.TxProof) (TxProof, error) { pbProof, err := merkle.ProofFromProto(pb.Proof) if err != nil { return TxProof{}, err diff --git a/types/tx_test.go b/types/tx_test.go index f5de93ae276..9bd8c3c89af 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -8,9 +8,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cmtrand "github.com/cometbft/cometbft/libs/rand" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" + cmtrand "github.com/cometbft/cometbft/internal/rand" ctest "github.com/cometbft/cometbft/libs/test" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) func makeTxs(cnt, size int) Txs { @@ -71,8 +71,8 @@ func TestValidTxProof(t *testing.T) { assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) assert.EqualValues(t, tx, proof.Data, "%d: %d", h, i) assert.EqualValues(t, txs[i].Hash(), proof.Leaf(), "%d: %d", h, i) - assert.Nil(t, proof.Validate(root), "%d: %d", h, i) - assert.NotNil(t, proof.Validate([]byte("foobar")), "%d: %d", h, i) + require.NoError(t, proof.Validate(root), "%d: %d", h, i) + require.Error(t, proof.Validate([]byte("foobar")), "%d: %d", h, i) // read-write must also work var ( @@ -87,8 +87,8 @@ func TestValidTxProof(t *testing.T) { require.NoError(t, err) p2, err = TxProofFromProto(pb2) - if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { - assert.Nil(t, p2.Validate(root), "%d: %d", h, i) + if assert.NoError(t, err, "%d: %d: %+v", h, i, err) { //nolint:testifylint // require.Error doesn't work with the conditional here + require.NoError(t, p2.Validate(root), "%d: %d", h, i) } } } @@ -102,6 +102,7 @@ func TestTxProofUnchangable(t *testing.T) { } func testTxProofUnchangable(t *testing.T) { + t.Helper() // make some proof txs := makeTxs(randInt(2, 100), randInt(16, 128)) root := txs.Hash() @@ -109,7 +110,7 @@ func testTxProofUnchangable(t *testing.T) { proof := txs.Proof(i) // make sure it is valid to start with - assert.Nil(t, proof.Validate(root)) + require.NoError(t, proof.Validate(root)) pbProof := proof.ToProto() bin, err := pbProof.Marshal() require.NoError(t, err) @@ -123,9 +124,9 @@ func testTxProofUnchangable(t *testing.T) { } } -// This makes sure that the proof doesn't deserialize into something valid. +// assertBadProof makes sure that the proof doesn't deserialize into something valid. 
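// Txs.Validate above now charges each transaction its proto-encoded size
// rather than its raw length, so the limit reflects what actually lands in
// a block. For a repeated bytes field that is roughly len(tx) plus a field
// tag and a varint length prefix; a sketch for intuition only (the
// authoritative accounting is ComputeProtoSizeForTxs):
//
//	func approxProtoSize(tx []byte) int64 {
//		prefix := 1 // varint length prefix: one byte per 7 bits
//		for v := len(tx); v >= 0x80; v >>= 7 {
//			prefix++
//		}
//		return int64(1 /* field tag */ + prefix + len(tx))
//	}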
func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { - + t.Helper() var ( proof TxProof pbProof cmtproto.TxProof diff --git a/types/utils.go b/types/utils.go index 60e82fe3fd7..94942fea695 100644 --- a/types/utils.go +++ b/types/utils.go @@ -7,7 +7,7 @@ import "reflect" // - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 // - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion // - https://github.com/golang/go/issues/21538 -func isTypedNil(o interface{}) bool { +func isTypedNil(o any) bool { rv := reflect.ValueOf(o) switch rv.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice: @@ -17,8 +17,8 @@ func isTypedNil(o interface{}) bool { } } -// Returns true if it has zero length. -func isEmpty(o interface{}) bool { +// isEmpty returns true if it has zero length. +func isEmpty(o any) bool { rv := reflect.ValueOf(o) switch rv.Kind() { case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: diff --git a/types/validation.go b/types/validation.go index 22ac0793481..eae67802381 100644 --- a/types/validation.go +++ b/types/validation.go @@ -1,9 +1,11 @@ package types import ( + "bytes" "errors" "fmt" + "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/batch" "github.com/cometbft/cometbft/crypto/tmhash" cmtmath "github.com/cometbft/cometbft/libs/math" @@ -13,7 +15,9 @@ import ( const batchVerifyThreshold = 2 func shouldBatchVerify(vals *ValidatorSet, commit *Commit) bool { - return len(commit.Signatures) >= batchVerifyThreshold && batch.SupportsBatchVerifier(vals.GetProposer().PubKey) + return len(commit.Signatures) >= batchVerifyThreshold && + batch.SupportsBatchVerifier(vals.GetProposer().PubKey) && + vals.AllKeysHaveSameType() } // VerifyCommit verifies +2/3 of the set had signed the given commit. @@ -24,7 +28,8 @@ func shouldBatchVerify(vals *ValidatorSet, commit *Commit) bool { // includes which validators signed. For instance, Gaia incentivizes proposers // with a bonus for including more than +2/3 of the signatures. func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID, - height int64, commit *Commit) error { + height int64, commit *Commit, +) error { // run a basic validation of the arguments if err := verifyBasicValsAndCommit(vals, commit, height, blockID); err != nil { return err @@ -43,22 +48,70 @@ func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID, // attempt to batch verify if shouldBatchVerify(vals, commit) { return verifyCommitBatch(chainID, vals, commit, - votingPowerNeeded, ignore, count, true, true) + votingPowerNeeded, ignore, count, true, true, nil, nil) } // if verification failed or is not supported then fallback to single verification return verifyCommitSingle(chainID, vals, commit, votingPowerNeeded, - ignore, count, true, true) + ignore, count, true, true, nil) } // LIGHT CLIENT VERIFICATION METHODS // VerifyCommitLight verifies +2/3 of the set had signed the given commit. // -// This method is primarily used by the light client and does not check all the +// This method is primarily used by the light client and does NOT check all the // signatures. 
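// Two themes in the verification changes below: batch verification now also
// requires vals.AllKeysHaveSameType(), since a single batch verifier is
// created from the proposer's key type and cannot mix, say, Ed25519 and BLS
// keys; and each light-client check gains WithCache and AllSignatures
// variants. A hedged usage sketch of the cache-aware entry point; a shared
// cache only pays off when the same commit may be verified more than once
// (e.g. a header fetched from several providers):
//
//	cache := NewSignatureCache()
//	// first call verifies the signatures and populates the cache
//	if err := VerifyCommitLightWithCache(chainID, vals, blockID, height, commit, cache); err != nil {
//		return err
//	}
//	// second call skips every signature whose (sig, address, sign bytes) entry matches
//	return VerifyCommitLightWithCache(chainID, vals, blockID, height, commit, cache)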
-func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, - height int64, commit *Commit) error { +func VerifyCommitLight( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, +) error { + return verifyCommitLightInternal(chainID, vals, blockID, height, commit, false, nil) +} + +// VerifyCommitLightWithCache verifies +2/3 of the set had signed the given commit. +// +// This method is primarily used by the light client and does NOT check all the +// signatures. +// The cache provided will be used to skip signature verification for entries where the +// key (signature), validator pubkey, and vote sign bytes all match. +// Additionally, any verified signatures will be added to the cache. +func VerifyCommitLightWithCache( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, + verifiedSignatureCache SignatureCache, +) error { + return verifyCommitLightInternal(chainID, vals, blockID, height, commit, false, verifiedSignatureCache) +} + +// VerifyCommitLightAllSignatures verifies +2/3 of the set had signed the given commit. +// +// This method DOES check all the signatures. +func VerifyCommitLightAllSignatures( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, +) error { + return verifyCommitLightInternal(chainID, vals, blockID, height, commit, true, nil) +} + +func verifyCommitLightInternal( + chainID string, + vals *ValidatorSet, + blockID BlockID, + height int64, + commit *Commit, + countAllSignatures bool, + verifiedSignatureCache SignatureCache, +) error { // run a basic validation of the arguments if err := verifyBasicValsAndCommit(vals, commit, height, blockID); err != nil { return err @@ -71,28 +124,87 @@ func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit } // count all the remaining signatures - count := func(c CommitSig) bool { return true } + count := func(_ CommitSig) bool { return true } // attempt to batch verify if shouldBatchVerify(vals, commit) { return verifyCommitBatch(chainID, vals, commit, - votingPowerNeeded, ignore, count, false, true) + votingPowerNeeded, ignore, count, countAllSignatures, true, nil, verifiedSignatureCache) } // if verification failed or is not supported then fallback to single verification return verifyCommitSingle(chainID, vals, commit, votingPowerNeeded, - ignore, count, false, true) + ignore, count, countAllSignatures, true, verifiedSignatureCache) } // VerifyCommitLightTrusting verifies that trustLevel of the validator set signed -// this commit. +// this commit. "Trusting" means that we trust the validator set to be correct. +// +// NOTE the given validators do not necessarily correspond to the validator set +// for this commit, but there may be some intersection. +// +// This method is primarily used by the light client and does NOT check all the +// signatures. +// +// CONTRACT: must run ValidateBasic() on commit before verifying. +func VerifyCommitLightTrusting( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, +) error { + return verifyCommitLightTrustingInternal(chainID, vals, commit, trustLevel, false, nil) +} + +// VerifyCommitLightTrustingWithCache verifies that trustLevel of the validator set signed +// this commit. "Trusting" means that we trust the validator set to be correct. 
// // NOTE the given validators do not necessarily correspond to the validator set // for this commit, but there may be some intersection. // -// This method is primarily used by the light client and does not check all the +// This method is primarily used by the light client and does NOT check all the // signatures. -func VerifyCommitLightTrusting(chainID string, vals *ValidatorSet, commit *Commit, trustLevel cmtmath.Fraction) error { +// +// CONTRACT: must run ValidateBasic() on commit before verifying. +// The cache provided will be used to skip signature verification for entries where the +// key (signature), validator pubkey, and vote sign bytes all match. +// Additionally, any verified signatures will be added to the cache. +func VerifyCommitLightTrustingWithCache( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, + verifiedSignatureCache SignatureCache, +) error { + return verifyCommitLightTrustingInternal(chainID, vals, commit, trustLevel, false, verifiedSignatureCache) +} + +// VerifyCommitLightTrustingAllSignatures verifies that trustLevel of the validator +// set signed this commit. "Trusting" means that we trust the validator set to be correct. +// +// NOTE the given validators do not necessarily correspond to the validator set +// for this commit, but there may be some intersection. +// +// This method DOES check all the signatures. +// +// CONTRACT: must run ValidateBasic() on commit before verifying. +func VerifyCommitLightTrustingAllSignatures( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, +) error { + return verifyCommitLightTrustingInternal(chainID, vals, commit, trustLevel, true, nil) +} + +func verifyCommitLightTrustingInternal( + chainID string, + vals *ValidatorSet, + commit *Commit, + trustLevel cmtmath.Fraction, + countAllSignatures bool, + verifiedSignatureCache SignatureCache, +) error { // sanity checks if vals == nil { return errors.New("nil validator set") @@ -115,19 +227,19 @@ func VerifyCommitLightTrusting(chainID string, vals *ValidatorSet, commit *Commi ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit } // count all the remaining signatures - count := func(c CommitSig) bool { return true } + count := func(_ CommitSig) bool { return true } // attempt to batch verify commit. As the validator set doesn't necessarily // correspond with the validator set that signed the block we need to look // up by address rather than index. 
if shouldBatchVerify(vals, commit) { return verifyCommitBatch(chainID, vals, commit, - votingPowerNeeded, ignore, count, false, false) + votingPowerNeeded, ignore, count, countAllSignatures, false, nil, verifiedSignatureCache) } // attempt with single verification return verifyCommitSingle(chainID, vals, commit, votingPowerNeeded, - ignore, count, false, false) + ignore, count, countAllSignatures, false, verifiedSignatureCache) } // ValidateHash returns an error if the hash is not empty, but its @@ -159,6 +271,8 @@ func verifyCommitBatch( countSig func(CommitSig) bool, countAllSignatures bool, lookUpByIndex bool, + batchVerifier crypto.BatchVerifier, + verifiedSignatureCache SignatureCache, ) error { var ( val *Validator @@ -168,11 +282,14 @@ func verifyCommitBatch( talliedVotingPower int64 ) // attempt to create a batch verifier - bv, ok := batch.CreateBatchVerifier(vals.GetProposer().PubKey) + bv, ok := batchVerifier, true + if batchVerifier == nil { + bv, ok = batch.CreateBatchVerifier(vals.GetProposer().PubKey) + } // re-check if batch verification is supported if !ok || len(commit.Signatures) < batchVerifyThreshold { // This should *NEVER* happen. - return fmt.Errorf("unsupported signature algorithm or insufficient signatures for batch verification") + return errors.New("unsupported signature algorithm or insufficient signatures for batch verification") } for idx, commitSig := range commit.Signatures { @@ -181,12 +298,12 @@ func verifyCommitBatch( continue } - // If the vals and commit have a 1-to-1 correspondance we can retrieve + // If the vals and commit have a 1-to-1 correspondence we can retrieve // them by index else we need to retrieve them by address if lookUpByIndex { val = vals.Validators[idx] } else { - valIdx, val = vals.GetByAddress(commitSig.ValidatorAddress) + valIdx, val = vals.GetByAddressMut(commitSig.ValidatorAddress) // if the signature doesn't belong to anyone in the validator set // then we just skip over it @@ -206,11 +323,19 @@ func verifyCommitBatch( // Validate signature. voteSignBytes := commit.VoteSignBytes(chainID, int32(idx)) - // add the key, sig and message to the verifier - if err := bv.Add(val.PubKey, voteSignBytes, commitSig.Signature); err != nil { - return err + cacheHit := false + if verifiedSignatureCache != nil { + cacheVal, sigIsInCache := verifiedSignatureCache.Get(string(commitSig.Signature)) + cacheHit = sigIsInCache && bytes.Equal(cacheVal.ValidatorAddress, val.PubKey.Address()) && bytes.Equal(cacheVal.VoteSignBytes, voteSignBytes) + } + + if !cacheHit { + // add the key, sig and message to the verifier + if err := bv.Add(val.PubKey, voteSignBytes, commitSig.Signature); err != nil { + return err + } + batchSigIdxs = append(batchSigIdxs, idx) } - batchSigIdxs = append(batchSigIdxs, idx) // If this signature counts then add the voting power of the validator // to the tally @@ -231,29 +356,51 @@ func verifyCommitBatch( return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} } + // if every signature was in the cache, the batch verifier is empty and we shouldn't call verify + if len(batchSigIdxs) == 0 { + return nil + } + // attempt to verify the batch. 
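// Cache hits are never added to the batch verifier, so batchSigIdxs maps
// batch positions back to commit.Signatures indices, e.g.:
//
//	commit.Signatures: [s0 s1 s2 s3]   (s1 was a cache hit)
//	batch entries:     [s0 s2 s3]
//	batchSigIdxs:      [0, 2, 3]       // batch position i -> commit index
//
// so validSigs[i] below reports on commit.Signatures[batchSigIdxs[i]]. And
// when every signature was a cache hit, the batch is empty and Verify() is
// skipped entirely via the early return above, which is a success, since
// the voting-power threshold was already checked at that point.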
ok, validSigs := bv.Verify() if ok { // success + if verifiedSignatureCache != nil { + for i := range validSigs { + idx := batchSigIdxs[i] + sig := commit.Signatures[idx] + verifiedSignatureCache.Add(string(sig.Signature), SignatureCacheValue{ + ValidatorAddress: sig.ValidatorAddress, + VoteSignBytes: commit.VoteSignBytes(chainID, int32(idx)), + }) + } + } + return nil } // one or more of the signatures is invalid, find and return the first // invalid signature. for i, ok := range validSigs { + // go back from the batch index to the commit.Signatures index + idx := batchSigIdxs[i] + sig := commit.Signatures[idx] if !ok { - // go back from the batch index to the commit.Signatures index - idx := batchSigIdxs[i] - sig := commit.Signatures[idx] return fmt.Errorf("wrong signature (#%d): %X", idx, sig) } + if verifiedSignatureCache != nil { + verifiedSignatureCache.Add(string(sig.Signature), SignatureCacheValue{ + ValidatorAddress: sig.ValidatorAddress, + VoteSignBytes: commit.VoteSignBytes(chainID, int32(idx)), + }) + } } // execution reaching here is a bug, and one of the following has // happened: // * non-zero tallied voting power, empty batch (impossible?) // * bv.Verify() returned `false, []bool{true, ..., true}` (BUG) - return fmt.Errorf("BUG: batch verification failed with no invalid signatures") + return errors.New("BUG: batch verification failed with no invalid signatures") } // Single Verification @@ -262,7 +409,7 @@ func verifyCommitBatch( // If a key does not support batch verification, or batch verification fails this will be used // This method is used to check all the signatures included in a commit. // It is used in consensus for validating a block LastCommit. -// CONTRACT: both commit and validator set should have passed validate basic +// CONTRACT: both commit and validator set should have passed validate basic. 
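Editor's note: the concrete `SignatureCache` implementation is not visible in these hunks; the tests further down rely only on `NewSignatureCache()`, `Get`, `Add`, and `Len`. Below is a standalone, map-backed sketch consistent with that usage. The names and the RWMutex strategy are assumptions, and `SignatureCacheValue` is redeclared here only to keep the sketch self-contained.

```go
package sigcache

import "sync"

// SignatureCacheValue pairs the two fields compared on a cache hit.
type SignatureCacheValue struct {
	ValidatorAddress []byte
	VoteSignBytes    []byte
}

// Cache is a map-backed cache keyed by the raw signature bytes, stored as a
// string so they can be used as a map key.
type Cache struct {
	mtx sync.RWMutex
	m   map[string]SignatureCacheValue
}

func New() *Cache {
	return &Cache{m: make(map[string]SignatureCacheValue)}
}

func (c *Cache) Get(key string) (SignatureCacheValue, bool) {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	v, ok := c.m[key]
	return v, ok
}

func (c *Cache) Add(key string, val SignatureCacheValue) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.m[key] = val
}

func (c *Cache) Len() int {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	return len(c.m)
}
```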
func verifyCommitSingle( chainID string, vals *ValidatorSet, @@ -272,6 +419,7 @@ func verifyCommitSingle( countSig func(CommitSig) bool, countAllSignatures bool, lookUpByIndex bool, + verifiedSignatureCache SignatureCache, ) error { var ( val *Validator @@ -285,7 +433,11 @@ func verifyCommitSingle( continue } - // If the vals and commit have a 1-to-1 correspondance we can retrieve + if commitSig.ValidateBasic() != nil { + return fmt.Errorf("invalid signatures from %v at index %d", val, idx) + } + + // If the vals and commit have a 1-to-1 correspondence we can retrieve // them by index else we need to retrieve them by address if lookUpByIndex { val = vals.Validators[idx] @@ -307,10 +459,29 @@ func verifyCommitSingle( seenVals[valIdx] = idx } + if val.PubKey == nil { + return fmt.Errorf("validator %v has a nil PubKey at index %d", val, idx) + } + voteSignBytes = commit.VoteSignBytes(chainID, int32(idx)) - if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { - return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + cacheKey, cacheHit := "", false + if verifiedSignatureCache != nil { + cacheKey = string(commitSig.Signature) + cacheVal, sigIsInCache := verifiedSignatureCache.Get(cacheKey) + cacheHit = sigIsInCache && bytes.Equal(cacheVal.ValidatorAddress, val.PubKey.Address()) && bytes.Equal(cacheVal.VoteSignBytes, voteSignBytes) + } + + if !cacheHit { + if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + if verifiedSignatureCache != nil { + verifiedSignatureCache.Add(cacheKey, SignatureCacheValue{ + ValidatorAddress: val.PubKey.Address(), + VoteSignBytes: voteSignBytes, + }) + } } // If this signature counts then add the voting power of the validator diff --git a/types/validation_test.go b/types/validation_test.go index a6cdf818c3e..c5dd2df4bad 100644 --- a/types/validation_test.go +++ b/types/validation_test.go @@ -1,14 +1,15 @@ package types import ( + "strconv" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cryptomocks "github.com/cometbft/cometbft/crypto/mocks" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttime "github.com/cometbft/cometbft/types/time" ) // Check VerifyCommit, VerifyCommitLight and VerifyCommitLightTrusting basic @@ -24,7 +25,7 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { ) testCases := []struct { - description string + description, description2 string // description2, if not empty, is checked against VerifyCommitLightTrusting // vote chainID chainID string // vote blockID @@ -41,25 +42,27 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { expErr bool }{ - {"good (batch verification)", chainID, blockID, 3, height, 3, 0, 0, false}, - {"good (single verification)", chainID, blockID, 1, height, 1, 0, 0, false}, + {"good (batch verification)", "", chainID, blockID, 3, height, 3, 0, 0, false}, + {"good (single verification)", "", chainID, blockID, 1, height, 1, 0, 0, false}, - {"wrong signature (#0)", "EpsilonEridani", blockID, 2, height, 2, 0, 0, true}, - {"wrong block ID", chainID, makeBlockIDRandom(), 2, height, 2, 0, 0, true}, - {"wrong height", chainID, blockID, 1, height - 1, 1, 0, 0, true}, + {"wrong signature (#0)", "", "EpsilonEridani", blockID, 2, height, 2, 0, 0, true}, + {"wrong block ID", "", chainID, makeBlockIDRandom(), 2, height, 2, 0, 0, true}, + {"wrong height", "", chainID, blockID, 1, 
height - 1, 1, 0, 0, true}, - {"wrong set size: 4 vs 3", chainID, blockID, 4, height, 3, 0, 0, true}, - {"wrong set size: 1 vs 2", chainID, blockID, 1, height, 2, 0, 0, true}, + {"wrong set size: 4 vs 3", "", chainID, blockID, 4, height, 3, 0, 0, true}, + {"wrong set size: 1 vs 2", "double vote from Validator", chainID, blockID, 1, height, 2, 0, 0, true}, - {"insufficient voting power: got 30, needed more than 66", chainID, blockID, 10, height, 3, 2, 5, true}, - {"insufficient voting power: got 0, needed more than 6", chainID, blockID, 1, height, 0, 0, 1, true}, - {"insufficient voting power: got 60, needed more than 60", chainID, blockID, 9, height, 6, 3, 0, true}, + {"insufficient voting power: got 30, needed more than 66", "", chainID, blockID, 10, height, 3, 2, 5, true}, + {"insufficient voting power: got 0, needed more than 6", "", chainID, blockID, 1, height, 0, 0, 1, true}, // absent + {"insufficient voting power: got 0, needed more than 6", "", chainID, blockID, 1, height, 0, 1, 0, true}, // nil + {"insufficient voting power: got 60, needed more than 60", "", chainID, blockID, 9, height, 6, 3, 0, true}, } for _, tc := range testCases { - tc := tc - t.Run(tc.description, func(t *testing.T) { - _, valSet, vals := randVoteSet(tc.height, round, cmtproto.PrecommitType, tc.valSize, 10, false) + countAllSignatures := false + f := func(t *testing.T) { + t.Helper() + _, valSet, vals := randVoteSet(tc.height, round, PrecommitType, tc.valSize, 10, false) totalVotes := tc.blockVotes + tc.absentVotes + tc.nilVotes sigs := make([]CommitSig, totalVotes) vi := 0 @@ -69,7 +72,6 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { vi++ } for i := 0; i < tc.blockVotes+tc.nilVotes; i++ { - pubKey, err := vals[vi%len(vals)].GetPubKey() require.NoError(t, err) vote := &Vote{ @@ -77,9 +79,9 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { ValidatorIndex: int32(vi), Height: tc.height, Round: round, - Type: cmtproto.PrecommitType, + Type: PrecommitType, BlockID: tc.blockID, - Timestamp: time.Now(), + Timestamp: cmttime.Now(), } if i >= tc.blockVotes { vote.BlockID = BlockID{} @@ -87,7 +89,7 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { v := vote.ToProto() - require.NoError(t, vals[vi%len(vals)].SignVote(tc.chainID, v)) + require.NoError(t, vals[vi%len(vals)].SignVote(tc.chainID, v, false)) vote.Signature = v.Signature sigs[vi] = vote.CommitSig() @@ -103,35 +105,51 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { err := valSet.VerifyCommit(chainID, blockID, height, commit) if tc.expErr { - if assert.Error(t, err, "VerifyCommit") { + if assert.Error(t, err, "VerifyCommit") { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), tc.description, "VerifyCommit") } } else { - assert.NoError(t, err, "VerifyCommit") + require.NoError(t, err, "VerifyCommit") } - err = valSet.VerifyCommitLight(chainID, blockID, height, commit) + if countAllSignatures { + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, height, commit) + } else { + err = valSet.VerifyCommitLight(chainID, blockID, height, commit) + } if tc.expErr { - if assert.Error(t, err, "VerifyCommitLight") { + if assert.Error(t, err, "VerifyCommitLight") { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight") } } else { - assert.NoError(t, err, "VerifyCommitLight") + require.NoError(t, err, "VerifyCommitLight") } // only a subsection of the tests apply to 
VerifyCommitLightTrusting - if totalVotes != tc.valSize || !tc.blockID.Equals(blockID) || tc.height != height { - tc.expErr = false + expErr := tc.expErr + if (!countAllSignatures && totalVotes != tc.valSize) || totalVotes < tc.valSize || !tc.blockID.Equals(blockID) || tc.height != height { + expErr = false } - err = valSet.VerifyCommitLightTrusting(chainID, commit, trustLevel) - if tc.expErr { - if assert.Error(t, err, "VerifyCommitLightTrusting") { - assert.Contains(t, err.Error(), tc.description, "VerifyCommitLightTrusting") + if countAllSignatures { + err = valSet.VerifyCommitLightTrustingAllSignatures(chainID, commit, trustLevel) + } else { + err = valSet.VerifyCommitLightTrusting(chainID, commit, trustLevel) + } + if expErr { + if assert.Error(t, err, "VerifyCommitLightTrusting") { //nolint:testifylint // require.Error doesn't work with the conditional here + errStr := tc.description2 + if len(errStr) == 0 { + errStr = tc.description + } + assert.Contains(t, err.Error(), errStr, "VerifyCommitLightTrusting") } } else { - assert.NoError(t, err, "VerifyCommitLightTrusting") + require.NoError(t, err, "VerifyCommitLightTrusting") } - }) + } + t.Run(tc.description+"/"+strconv.FormatBool(countAllSignatures), f) + countAllSignatures = true + t.Run(tc.description+"/"+strconv.FormatBool(countAllSignatures), f) } } @@ -142,8 +160,8 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { blockID = makeBlockIDRandom() ) - voteSet, valSet, vals := randVoteSet(h, 0, cmtproto.PrecommitType, 4, 10, false) - extCommit, err := MakeExtCommit(blockID, h, 0, voteSet, vals, time.Now(), false) + voteSet, valSet, vals := randVoteSet(h, 0, PrecommitType, 4, 10, false) + extCommit, err := MakeExtCommit(blockID, h, 0, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) @@ -151,75 +169,93 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { // malleate 4th signature vote := voteSet.GetByIndex(3) v := vote.ToProto() - err = vals[3].SignVote("CentaurusA", v) + err = vals[3].SignVote("CentaurusA", v, true) require.NoError(t, err) vote.Signature = v.Signature vote.ExtensionSignature = v.ExtensionSignature commit.Signatures[3] = vote.CommitSig() err = valSet.VerifyCommit(chainID, blockID, h, commit) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), "wrong signature (#3)") } } -func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) { +func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajOfVotingPowerSignedIffNotAllSigs(t *testing.T) { var ( chainID = "test_chain_id" h = int64(3) blockID = makeBlockIDRandom() ) - voteSet, valSet, vals := randVoteSet(h, 0, cmtproto.PrecommitType, 4, 10, false) - extCommit, err := MakeExtCommit(blockID, h, 0, voteSet, vals, time.Now(), false) + voteSet, valSet, vals := randVoteSet(h, 0, PrecommitType, 4, 10, false) + extCommit, err := MakeExtCommit(blockID, h, 0, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, h, commit) + require.NoError(t, err) + // malleate 4th signature (3 signatures are enough for 2/3+) vote := voteSet.GetByIndex(3) v := vote.ToProto() - err = vals[3].SignVote("CentaurusA", v) + err = 
vals[3].SignVote("CentaurusA", v, true) require.NoError(t, err) vote.Signature = v.Signature vote.ExtensionSignature = v.ExtensionSignature commit.Signatures[3] = vote.CommitSig() err = valSet.VerifyCommitLight(chainID, blockID, h, commit) - assert.NoError(t, err) + require.NoError(t, err) + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, h, commit) + require.Error(t, err) // counting all signatures detects the malleated signature } -func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) { +func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelSignedIffNotAllSigs(t *testing.T) { var ( chainID = "test_chain_id" h = int64(3) blockID = makeBlockIDRandom() ) - voteSet, valSet, vals := randVoteSet(h, 0, cmtproto.PrecommitType, 4, 10, false) - extCommit, err := MakeExtCommit(blockID, h, 0, voteSet, vals, time.Now(), false) + voteSet, valSet, vals := randVoteSet(h, 0, PrecommitType, 4, 10, false) + extCommit, err := MakeExtCommit(blockID, h, 0, voteSet, vals, cmttime.Now(), false) require.NoError(t, err) commit := extCommit.ToCommit() require.NoError(t, valSet.VerifyCommit(chainID, blockID, h, commit)) + err = valSet.VerifyCommitLightTrustingAllSignatures( + chainID, + commit, + cmtmath.Fraction{Numerator: 1, Denominator: 3}, + ) + require.NoError(t, err) + // malleate 3rd signature (2 signatures are enough for 1/3+ trust level) vote := voteSet.GetByIndex(2) v := vote.ToProto() - err = vals[2].SignVote("CentaurusA", v) + err = vals[2].SignVote("CentaurusA", v, true) require.NoError(t, err) vote.Signature = v.Signature vote.ExtensionSignature = v.ExtensionSignature commit.Signatures[2] = vote.CommitSig() err = valSet.VerifyCommitLightTrusting(chainID, commit, cmtmath.Fraction{Numerator: 1, Denominator: 3}) - assert.NoError(t, err) + require.NoError(t, err) + err = valSet.VerifyCommitLightTrustingAllSignatures( + chainID, + commit, + cmtmath.Fraction{Numerator: 1, Denominator: 3}, + ) + require.Error(t, err) // counting all signatures detects the malleated signature } func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) { var ( blockID = makeBlockIDRandom() - voteSet, originalValset, vals = randVoteSet(1, 1, cmtproto.PrecommitType, 6, 1, false) - extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, time.Now(), false) + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) newValSet, _ = RandValidatorSet(2, 1) ) require.NoError(t, err) @@ -250,24 +286,282 @@ func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) { err = tc.valSet.VerifyCommitLightTrusting("test_chain_id", commit, cmtmath.Fraction{Numerator: 1, Denominator: 3}) if tc.err { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } } } +func TestValidatorSet_VerifyCommitLightTrustingWithCache_UpdatesCache(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) + newValSet, _ = RandValidatorSet(2, 1) + ) + require.NoError(t, err) + commit := extCommit.ToCommit() + + valSet := NewValidatorSet(append(originalValset.Validators, newValSet.Validators...)) + cache := NewSignatureCache() + err = valSet.VerifyCommitLightTrustingWithCache("test_chain_id", commit, cmtmath.Fraction{Numerator: 1, Denominator: 3}, cache) + 
require.NoError(t, err) + require.Equal(t, 3, cache.Len()) // 8 validators, getting to 1/3 takes 3 signatures + + cacheVal, ok := cache.Get(string(commit.Signatures[0].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[0].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 0), cacheVal.VoteSignBytes) + + cacheVal, ok = cache.Get(string(commit.Signatures[1].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[1].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 1), cacheVal.VoteSignBytes) + + cacheVal, ok = cache.Get(string(commit.Signatures[2].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[2].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 2), cacheVal.VoteSignBytes) +} + +func TestValidatorSet_VerifyCommitLightTrustingWithCache_UsesCache(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) + newValSet, _ = RandValidatorSet(2, 1) + ) + require.NoError(t, err) + commit := extCommit.ToCommit() + + valSet := NewValidatorSet(append(newValSet.Validators, originalValset.Validators...)) + + cache := NewSignatureCache() + cache.Add(string(commit.Signatures[0].Signature), SignatureCacheValue{ + ValidatorAddress: valSet.Validators[0].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 0), + }) + cache.Add(string(commit.Signatures[1].Signature), SignatureCacheValue{ + ValidatorAddress: valSet.Validators[1].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 1), + }) + cache.Add(string(commit.Signatures[2].Signature), SignatureCacheValue{ + ValidatorAddress: valSet.Validators[2].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 2), + }) + + err = valSet.VerifyCommitLightTrustingWithCache("test_chain_id", commit, cmtmath.Fraction{Numerator: 1, Denominator: 3}, cache) + require.NoError(t, err) + require.Equal(t, 3, cache.Len()) // no new signature checks, so no new cache entries +} + +func TestValidatorSet_VerifyCommitLightWithCache_UpdatesCache(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) + ) + require.NoError(t, err) + commit := extCommit.ToCommit() + + cache := NewSignatureCache() + err = originalValset.VerifyCommitLightWithCache("test_chain_id", blockID, 1, commit, cache) + require.NoError(t, err) + + require.Equal(t, 5, cache.Len()) // 6 validators, getting to 2/3 takes 5 signatures + + cacheVal, ok := cache.Get(string(commit.Signatures[0].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[0].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 0), cacheVal.VoteSignBytes) + + cacheVal, ok = cache.Get(string(commit.Signatures[1].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[1].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 1), cacheVal.VoteSignBytes) + + cacheVal, ok = cache.Get(string(commit.Signatures[2].Signature)) + require.True(t, ok) + require.Equal(t, 
originalValset.Validators[2].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 2), cacheVal.VoteSignBytes) + + cacheVal, ok = cache.Get(string(commit.Signatures[3].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[3].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 3), cacheVal.VoteSignBytes) + + cacheVal, ok = cache.Get(string(commit.Signatures[4].Signature)) + require.True(t, ok) + require.Equal(t, originalValset.Validators[4].PubKey.Address().Bytes(), cacheVal.ValidatorAddress) + require.Equal(t, commit.VoteSignBytes("test_chain_id", 4), cacheVal.VoteSignBytes) +} + +func TestValidatorSet_VerifyCommitLightWithCache_UsesCache(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) + ) + require.NoError(t, err) + commit := extCommit.ToCommit() + + cache := NewSignatureCache() + cache.Add(string(commit.Signatures[0].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[0].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 0), + }) + cache.Add(string(commit.Signatures[1].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[1].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 1), + }) + cache.Add(string(commit.Signatures[2].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[2].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 2), + }) + cache.Add(string(commit.Signatures[3].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[3].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 3), + }) + cache.Add(string(commit.Signatures[4].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[4].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 4), + }) + + err = originalValset.VerifyCommitLightWithCache("test_chain_id", blockID, 1, commit, cache) + require.NoError(t, err) + require.Equal(t, 5, cache.Len()) // no new signature checks, so no new cache entries +} + func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) { var ( blockID = makeBlockIDRandom() - voteSet, valSet, vals = randVoteSet(1, 1, cmtproto.PrecommitType, 1, MaxTotalVotingPower, false) - extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, time.Now(), false) + voteSet, valSet, vals = randVoteSet(1, 1, PrecommitType, 1, MaxTotalVotingPower, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) ) require.NoError(t, err) err = valSet.VerifyCommitLightTrusting("test_chain_id", extCommit.ToCommit(), cmtmath.Fraction{Numerator: 25, Denominator: 55}) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Contains(t, err.Error(), "int64 overflow") } } + +func TestValidation_verifyCommitBatch_UsesCache(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) + ) + require.NoError(t, err) + commit := extCommit.ToCommit() + + cache := NewSignatureCache() + 
cache.Add(string(commit.Signatures[0].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[0].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 0), + }) + cache.Add(string(commit.Signatures[1].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[1].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 1), + }) + cache.Add(string(commit.Signatures[2].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[2].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 2), + }) + cache.Add(string(commit.Signatures[3].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[3].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 3), + }) + cache.Add(string(commit.Signatures[4].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[4].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 4), + }) + + // ignore all commit signatures that are not for the block + ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit } + + // count all the remaining signatures + count := func(_ CommitSig) bool { return true } + + bv := cryptomocks.NewBatchVerifier(t) + + err = verifyCommitBatch("test_chain_id", originalValset, commit, 4, ignore, count, false, true, bv, cache) + require.NoError(t, err) + bv.AssertNotCalled(t, "Add") + bv.AssertNotCalled(t, "Verify") +} + +func TestValidation_verifyCommitSingle_UsesCache(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1, false) + extCommit, err = MakeExtCommit(blockID, 1, 1, voteSet, vals, cmttime.Now(), false) + ) + require.NoError(t, err) + commit := extCommit.ToCommit() + + cache := NewSignatureCache() + cache.Add(string(commit.Signatures[0].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[0].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 0), + }) + cache.Add(string(commit.Signatures[1].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[1].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 1), + }) + cache.Add(string(commit.Signatures[2].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[2].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 2), + }) + cache.Add(string(commit.Signatures[3].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[3].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 3), + }) + cache.Add(string(commit.Signatures[4].Signature), SignatureCacheValue{ + ValidatorAddress: originalValset.Validators[4].PubKey.Address(), + VoteSignBytes: commit.VoteSignBytes("test_chain_id", 4), + }) + + // ignore all commit signatures that are not for the block + ignore := func(c CommitSig) bool { return c.BlockIDFlag != BlockIDFlagCommit } + + // count all the remaining signatures + count := func(_ CommitSig) bool { return true } + + mockValPubkeys := []*cryptomocks.PubKey{ + cryptomocks.NewPubKey(t), + cryptomocks.NewPubKey(t), + cryptomocks.NewPubKey(t), + cryptomocks.NewPubKey(t), + cryptomocks.NewPubKey(t), + } + + mockValPubkeys[0].On("Address").Return(originalValset.Validators[0].PubKey.Address()) + mockValPubkeys[1].On("Address").Return(originalValset.Validators[1].PubKey.Address()) + 
mockValPubkeys[2].On("Address").Return(originalValset.Validators[2].PubKey.Address()) + mockValPubkeys[3].On("Address").Return(originalValset.Validators[3].PubKey.Address()) + mockValPubkeys[4].On("Address").Return(originalValset.Validators[4].PubKey.Address()) + + originalValset.Validators[0].PubKey = mockValPubkeys[0] + originalValset.Validators[1].PubKey = mockValPubkeys[1] + originalValset.Validators[2].PubKey = mockValPubkeys[2] + originalValset.Validators[3].PubKey = mockValPubkeys[3] + originalValset.Validators[4].PubKey = mockValPubkeys[4] + + err = verifyCommitSingle("test_chain_id", originalValset, commit, 4, ignore, count, false, true, cache) + require.NoError(t, err) + + mockValPubkeys[0].AssertCalled(t, "Address") + mockValPubkeys[1].AssertCalled(t, "Address") + mockValPubkeys[2].AssertCalled(t, "Address") + mockValPubkeys[3].AssertCalled(t, "Address") + mockValPubkeys[4].AssertCalled(t, "Address") + + mockValPubkeys[0].AssertNotCalled(t, "VerifySignature") + mockValPubkeys[1].AssertNotCalled(t, "VerifySignature") + mockValPubkeys[2].AssertNotCalled(t, "VerifySignature") + mockValPubkeys[3].AssertNotCalled(t, "VerifySignature") + mockValPubkeys[4].AssertNotCalled(t, "VerifySignature") +} diff --git a/types/validator.go b/types/validator.go index 886b32756d8..0d4b042a1a5 100644 --- a/types/validator.go +++ b/types/validator.go @@ -4,17 +4,22 @@ import ( "bytes" "errors" "fmt" + "strconv" "strings" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" ce "github.com/cometbft/cometbft/crypto/encoding" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/internal/keytypes" + cmtrand "github.com/cometbft/cometbft/internal/rand" ) +// ErrUnsupportedPubKeyType is returned when a public key type is not supported. +var ErrUnsupportedPubKeyType = errors.New("unsupported pubkey type, must be one of: " + keytypes.SupportedKeyTypesStr()) + // Volatile state for each Validator // NOTE: The ProposerPriority is not included in Validator.Hash(); -// make sure to update that method if changes are made here +// make sure to update that method if changes are made here. type Validator struct { Address Address `json:"address"` PubKey crypto.PubKey `json:"pub_key"` @@ -46,21 +51,26 @@ func (v *Validator) ValidateBasic() error { return errors.New("validator has negative voting power") } - if len(v.Address) != crypto.AddressSize { - return fmt.Errorf("validator address is the wrong size: %v", v.Address) + addr := v.PubKey.Address() + if !bytes.Equal(v.Address, addr) { + return fmt.Errorf("validator address is incorrectly derived from pubkey. Exp: %v, got %v", addr, v.Address) + } + + if !keytypes.IsSupported(v.PubKey.Type()) { + return ErrUnsupportedPubKeyType } return nil } -// Creates a new copy of the validator so we can mutate ProposerPriority. +// Copy creates a new copy of the validator so we can mutate ProposerPriority. // Panics if the validator is nil. func (v *Validator) Copy() *Validator { vCopy := *v return &vCopy } -// Returns the one with higher ProposerPriority. +// CompareProposerPriority returns the one with higher ProposerPriority. func (v *Validator) CompareProposerPriority(other *Validator) *Validator { if v == nil { return other @@ -88,7 +98,7 @@ func (v *Validator) CompareProposerPriority(other *Validator) *Validator { // 1. address // 2. public key // 3. voting power -// 4. proposer priority +// 4. proposer priority. 
func (v *Validator) String() string { if v == nil { return "nil-Validator" @@ -102,12 +112,16 @@ func (v *Validator) String() string { // ValidatorListString returns a prettified validator list for logging purposes. func ValidatorListString(vals []*Validator) string { - chunks := make([]string, len(vals)) + var sb strings.Builder for i, val := range vals { - chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.VotingPower) + if i > 0 { + sb.WriteString(",") + } + sb.WriteString(val.Address.String()) + sb.WriteString(":") + sb.WriteString(strconv.FormatInt(val.VotingPower, 10)) } - - return strings.Join(chunks, ",") + return sb.String() } // Bytes computes the unique encoding of a validator with a given voting power. @@ -132,20 +146,20 @@ func (v *Validator) Bytes() []byte { return bz } -// ToProto converts Valiator to protobuf +// ToProto converts Validator to protobuf. func (v *Validator) ToProto() (*cmtproto.Validator, error) { if v == nil { return nil, errors.New("nil validator") } - pk, err := ce.PubKeyToProto(v.PubKey) - if err != nil { - return nil, err + if v.PubKey == nil { + return nil, errors.New("nil pubkey") } vp := cmtproto.Validator{ Address: v.Address, - PubKey: pk, + PubKeyType: v.PubKey.Type(), + PubKeyBytes: v.PubKey.Bytes(), VotingPower: v.VotingPower, ProposerPriority: v.ProposerPriority, } @@ -153,16 +167,19 @@ func (v *Validator) ToProto() (*cmtproto.Validator, error) { return &vp, nil } -// FromProto sets a protobuf Validator to the given pointer. +// ValidatorFromProto sets a protobuf Validator to the given pointer. // It returns an error if the public key is invalid. func ValidatorFromProto(vp *cmtproto.Validator) (*Validator, error) { if vp == nil { return nil, errors.New("nil validator") } - pk, err := ce.PubKeyFromProto(vp.PubKey) + pk, err := ce.PubKeyFromTypeAndBytes(vp.PubKeyType, vp.PubKeyBytes) if err != nil { - return nil, err + pk, err = ce.PubKeyFromProto(*vp.PubKey) + if err != nil { + return nil, err + } } v := new(Validator) v.Address = vp.GetAddress() @@ -173,11 +190,11 @@ func ValidatorFromProto(vp *cmtproto.Validator) (*Validator, error) { return v, nil } -//---------------------------------------- +// ---------------------------------------- // RandValidator // RandValidator returns a randomized validator, useful for testing. -// UNSTABLE +// UNSTABLE. func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) { privVal := NewMockPV() votePower := minPower diff --git a/types/validator_set.go b/types/validator_set.go index 330d540baf4..12403a650e2 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "encoding/binary" "errors" "fmt" "math" @@ -9,9 +10,10 @@ import ( "sort" "strings" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto/merkle" + "github.com/cometbft/cometbft/crypto/tmhash" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) const ( @@ -35,6 +37,9 @@ const ( var ErrTotalVotingPowerOverflow = fmt.Errorf("total voting power of resulting valset exceeds max %d", MaxTotalVotingPower) +// ErrProposerNotInVals is returned if the proposer is not in the validator set. +var ErrProposerNotInVals = errors.New("proposer not in validator set") + // ValidatorSet represent a set of *Validator at a given height. // // The validators can be fetched by address or index. 
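Editor's note: `ToProto` now writes the key as a `(PubKeyType, PubKeyBytes)` pair instead of a protobuf `PublicKey` sum type, and `ValidatorFromProto` keeps a legacy-decoding fallback. A round-trip sketch, using the test-only `RandValidator` helper visible above; error handling is abbreviated.

```go
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/types"
)

func main() {
	// RandValidator is the test-only helper visible in this diff.
	val, _ := types.RandValidator(false, 10)

	// New encoding: the proto message carries the key type string and the
	// raw key bytes instead of a oneof PublicKey.
	pb, err := val.ToProto()
	if err != nil {
		panic(err)
	}

	// Decoding prefers the new fields and falls back to the legacy one.
	back, err := types.ValidatorFromProto(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Address.String() == val.Address.String()) // true
}
```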
@@ -55,6 +60,8 @@ type ValidatorSet struct {
 
 	// cached (unexported)
 	totalVotingPower int64
+	// true if all validators have the same type of public key or if the set is empty.
+	allKeysHaveSameType bool
 }
 
 // NewValidatorSet initializes a ValidatorSet by copying over the values from
@@ -68,7 +75,9 @@ type ValidatorSet struct {
 // MaxVotesCount - commits by a validator set larger than this will fail
 // validation.
 func NewValidatorSet(valz []*Validator) *ValidatorSet {
-	vals := &ValidatorSet{}
+	vals := &ValidatorSet{
+		allKeysHaveSameType: true,
+	}
 	err := vals.updateWithChangeSet(valz, false)
 	if err != nil {
 		panic(fmt.Sprintf("Cannot create validator set: %v", err))
@@ -94,7 +103,13 @@ func (vals *ValidatorSet) ValidateBasic() error {
 		return fmt.Errorf("proposer failed validate basic, error: %w", err)
 	}
 
-	return nil
+	for _, val := range vals.Validators {
+		if bytes.Equal(val.Address, vals.Proposer.Address) {
+			return nil
+		}
+	}
+
+	return ErrProposerNotInVals
 }
 
 // IsNilOrEmpty returns true if validator set is nil or empty.
@@ -177,7 +192,7 @@ func (vals *ValidatorSet) incrementProposerPriority() *Validator {
 	return mostest
 }
 
-// Should not be called on an empty validator set.
+// computeAvgProposerPriority should not be called on an empty validator set.
 func (vals *ValidatorSet) computeAvgProposerPriority() int64 {
 	n := int64(len(vals.Validators))
 	sum := big.NewInt(0)
@@ -193,7 +208,8 @@ func (vals *ValidatorSet) computeAvgProposerPriority() int64 {
 	panic(fmt.Sprintf("Cannot represent avg ProposerPriority as an int64 %v", avg))
 }
 
-// Compute the difference between the max and min ProposerPriority of that set.
+// computeMaxMinPriorityDiff computes the difference between the max and min
+// ProposerPriority of that set.
 func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 {
 	if vals.IsNilOrEmpty() {
 		panic("empty validator set")
@@ -233,7 +249,7 @@ func (vals *ValidatorSet) shiftByAvgProposerPriority() {
 	}
 }
 
-// Makes a copy of the validator list.
+// validatorListCopy makes a copy of the validator list.
 func validatorListCopy(valsList []*Validator) []*Validator {
 	if valsList == nil {
 		return nil
@@ -248,9 +264,10 @@
 // Copy each validator into a new ValidatorSet.
 func (vals *ValidatorSet) Copy() *ValidatorSet {
 	return &ValidatorSet{
-		Validators:       validatorListCopy(vals.Validators),
-		Proposer:         vals.Proposer,
-		totalVotingPower: vals.totalVotingPower,
+		Validators:          validatorListCopy(vals.Validators),
+		Proposer:            vals.Proposer,
+		totalVotingPower:    vals.totalVotingPower,
+		allKeysHaveSameType: vals.allKeysHaveSameType,
 	}
 }
 
@@ -268,9 +285,21 @@ func (vals *ValidatorSet) HasAddress(address []byte) bool {
 // GetByAddress returns an index of the validator with address and validator
 // itself (copy) if found. Otherwise, -1 and nil are returned.
 func (vals *ValidatorSet) GetByAddress(address []byte) (index int32, val *Validator) {
+	i, val := vals.GetByAddressMut(address)
+	if i == -1 {
+		return -1, nil
+	}
+	return i, val.Copy()
+}
+
+// GetByAddressMut returns the index of the validator with the given address
+// and the validator itself (not a copy) if found; otherwise, -1 and nil are
+// returned. Mutations to the returned validator affect the validator set, so
+// this method should only be used by callers that will not mutate it.
+func (vals *ValidatorSet) GetByAddressMut(address []byte) (index int32, val *Validator) { for idx, val := range vals.Validators { if bytes.Equal(val.Address, address) { - return int32(idx), val.Copy() + return int32(idx), val } } return -1, nil @@ -293,7 +322,7 @@ func (vals *ValidatorSet) Size() int { return len(vals.Validators) } -// Forces recalculation of the set's total voting power. +// updateTotalVotingPower forces recalculation of the set's total voting power. // Panics if total voting power is bigger than MaxTotalVotingPower. func (vals *ValidatorSet) updateTotalVotingPower() { sum := int64(0) @@ -342,8 +371,18 @@ func (vals *ValidatorSet) findProposer() *Validator { return proposer } +func (vals *ValidatorSet) ValidatorBlocksTheChain(address []byte) bool { + _, val := vals.GetByAddress(address) + if val == nil { + return false + } + return val.VotingPower > (vals.TotalVotingPower()-1)/3 +} + // Hash returns the Merkle root hash build using validators (as leaves) in the // set. +// +// See merkle.HashFromByteSlices. func (vals *ValidatorSet) Hash() []byte { bzs := make([][]byte, len(vals.Validators)) for i, val := range vals.Validators { @@ -352,6 +391,23 @@ func (vals *ValidatorSet) Hash() []byte { return merkle.HashFromByteSlices(bzs) } +// ProposerPriorityHash returns the tmhash of the proposer priorities. +// Validator set must be sorted to get the same hash. +// If the validator set is empty, nil is returned. +func (vals *ValidatorSet) ProposerPriorityHash() []byte { + if len(vals.Validators) == 0 { + return nil + } + + buf := make([]byte, binary.MaxVarintLen64*len(vals.Validators)) + total := 0 + for _, val := range vals.Validators { + n := binary.PutVarint(buf, val.ProposerPriority) + total += n + } + return tmhash.Sum(buf[:total]) +} + // Iterate will run the given function over the set. func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { for i, val := range vals.Validators { @@ -362,7 +418,8 @@ func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } } -// Checks changes against duplicates, splits the changes in updates and +// processChanges checks changes against duplicates, +// splits the changes in updates and // removals, sorts them by address. // // Returns: @@ -423,14 +480,14 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e // Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and // validators are added/ updated with high power values. // -// err - non-nil if the maximum allowed total voting power would be exceeded +// err - non-nil if the maximum allowed total voting power would be exceeded. 
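Editor's note: `ValidatorBlocksTheChain` (added above) is an integer-arithmetic check: a validator blocks the chain when its power strictly exceeds `(total-1)/3`. A worked sketch with hypothetical numbers:

```go
package main

import "fmt"

func main() {
	// Hypothetical set with total voting power 10.
	total := int64(10)
	threshold := (total - 1) / 3 // integer division: 9/3 == 3

	// A validator blocks the chain iff VotingPower > threshold, i.e. at
	// least 4 of 10 here: without it at most 6 of 10 remain, which can
	// never exceed the required +2/3 (more than ~6.67).
	fmt.Println(int64(4) > threshold) // true  -- blocks the chain
	fmt.Println(int64(3) > threshold) // false -- cannot block on its own
}
```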
func verifyUpdates(
	updates []*Validator,
	vals *ValidatorSet,
	removedPower int64,
) (tvpAfterUpdatesBeforeRemovals int64, err error) {
	delta := func(update *Validator, vals *ValidatorSet) int64 {
-		_, val := vals.GetByAddress(update.Address)
+		_, val := vals.GetByAddressMut(update.Address)
		if val != nil {
			return update.VotingPower - val.VotingPower
		}
@@ -477,7 +534,7 @@ func numNewValidators(updates []*Validator, vals *ValidatorSet) int {
 func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) {
 	for _, valUpdate := range updates {
 		address := valUpdate.Address
-		_, val := vals.GetByAddress(address)
+		_, val := vals.GetByAddressMut(address)
 		if val == nil { // add val
 			// Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't
@@ -535,13 +592,14 @@ func (vals *ValidatorSet) applyUpdates(updates []*Validator) {
 	vals.Validators = merged[:i]
 }
 
-// Checks that the validators to be removed are part of the validator set.
+// verifyRemovals checks that the validators to be removed are part of the
+// validator set.
 // No changes are made to the validator set 'vals'.
 func verifyRemovals(deletes []*Validator, vals *ValidatorSet) (votingPower int64, err error) {
 	removedVotingPower := int64(0)
 	for _, valUpdate := range deletes {
 		address := valUpdate.Address
-		_, val := vals.GetByAddress(address)
+		_, val := vals.GetByAddressMut(address)
 		if val == nil {
 			return removedVotingPower, fmt.Errorf("failed to find validator %X to remove", address)
 		}
@@ -553,7 +611,8 @@ func verifyRemovals(deletes []*Validator, vals *ValidatorSet) (votingPower int64
 	return removedVotingPower, nil
 }
 
-// Removes the validators specified in 'deletes' from validator set 'vals'.
+// applyRemovals removes the validators specified in 'deletes' from validator
+// set 'vals'.
 // Should not fail as verification has been done before.
 // Expects vals to be sorted by address (done by applyUpdates).
 func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
@@ -582,7 +641,8 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
 	vals.Validators = merged[:i]
 }
 
-// Main function used by UpdateWithChangeSet() and NewValidatorSet().
+// updateWithChangeSet is the main function used by UpdateWithChangeSet()
+// and NewValidatorSet().
 // If 'allowDeletes' is false then delete operations (identified by validators with voting power 0)
 // are not allowed and will trigger an error if present in 'changes'.
 // The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet().
@@ -627,6 +687,9 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes
 	vals.applyUpdates(updates)
 	vals.applyRemovals(deletes)
 
+	// Should go after additions.
+	vals.checkAllKeysHaveSameType()
+
 	vals.updateTotalVotingPower() // will panic if total voting power > MaxTotalVotingPower
 
 	// Scale and center.
@@ -656,7 +719,7 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error {
 }
 
 // VerifyCommit verifies +2/3 of the set had signed the given commit and all
-// other signatures are valid
+// other signatures are valid.
 func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID,
	height int64, commit *Commit,
) error {
@@ -666,18 +729,75 @@
 // LIGHT CLIENT VERIFICATION METHODS
 
 // VerifyCommitLight verifies +2/3 of the set had signed the given commit.
+// It does NOT count all signatures.
func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID,
	height int64, commit *Commit,
) error {
	return VerifyCommitLight(chainID, vals, blockID, height, commit)
}

+// VerifyCommitLightWithCache verifies +2/3 of the set had signed the given commit.
+// It does NOT count all signatures.
+//
+// The cache provided will be used to skip signature verification for entries where the
+// key (signature), validator pubkey, and vote sign bytes all match.
+// Additionally, any verified signatures will be added to the cache.
+func (vals *ValidatorSet) VerifyCommitLightWithCache(chainID string, blockID BlockID,
+	height int64, commit *Commit,
+	verifiedSignatureCache SignatureCache,
+) error {
+	return VerifyCommitLightWithCache(chainID, vals, blockID, height, commit, verifiedSignatureCache)
+}
+
+// VerifyCommitLightAllSignatures verifies +2/3 of the set had signed the given commit.
+// It DOES count all signatures.
+func (vals *ValidatorSet) VerifyCommitLightAllSignatures(chainID string, blockID BlockID,
+	height int64, commit *Commit,
+) error {
+	return VerifyCommitLightAllSignatures(chainID, vals, blockID, height, commit)
+}
+
 // VerifyCommitLightTrusting verifies that trustLevel of the validator set signed
 // this commit.
-func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Commit, trustLevel cmtmath.Fraction) error {
+// It does NOT count all signatures.
+// CONTRACT: must run ValidateBasic() on commit before verifying.
+func (vals *ValidatorSet) VerifyCommitLightTrusting(
+	chainID string,
+	commit *Commit,
+	trustLevel cmtmath.Fraction,
+) error {
 	return VerifyCommitLightTrusting(chainID, vals, commit, trustLevel)
 }
 
+// VerifyCommitLightTrustingWithCache verifies that trustLevel of the validator
+// set signed this commit.
+// It does NOT count all signatures.
+// CONTRACT: must run ValidateBasic() on commit before verifying.
+//
+// The cache provided will be used to skip signature verification for entries where the
+// key (signature), validator pubkey, and vote sign bytes all match.
+// Additionally, any verified signatures will be added to the cache.
+func (vals *ValidatorSet) VerifyCommitLightTrustingWithCache(
+	chainID string,
+	commit *Commit,
+	trustLevel cmtmath.Fraction,
+	verifiedSignatureCache SignatureCache,
+) error {
+	return VerifyCommitLightTrustingWithCache(chainID, vals, commit, trustLevel, verifiedSignatureCache)
+}
+
+// VerifyCommitLightTrustingAllSignatures verifies that trustLevel of the
+// validator set signed this commit.
+// It DOES count all signatures.
+// CONTRACT: must run ValidateBasic() on commit before verifying.
+func (vals *ValidatorSet) VerifyCommitLightTrustingAllSignatures(
+	chainID string,
+	commit *Commit,
+	trustLevel cmtmath.Fraction,
+) error {
+	return VerifyCommitLightTrustingAllSignatures(chainID, vals, commit, trustLevel)
+}
+
 // findPreviousProposer reverses the compare proposer priority function to find the validator
 // with the lowest proposer priority which would have been the previous proposer.
 //
@@ -696,7 +816,37 @@ func (vals *ValidatorSet) findPreviousProposer() *Validator {
 	return previousProposer
 }
 
-//-----------------
+func (vals *ValidatorSet) checkAllKeysHaveSameType() {
+	if vals.Size() == 0 {
+		vals.allKeysHaveSameType = true
+		return
+	}
+
+	firstKeyType := ""
+	for _, val := range vals.Validators {
+		if firstKeyType == "" {
+			// XXX: Should only be the case in tests.
+ if val.PubKey == nil { + continue + } + firstKeyType = val.PubKey.Type() + } + if val.PubKey.Type() != firstKeyType { + vals.allKeysHaveSameType = false + return + } + } + + vals.allKeysHaveSameType = true +} + +// AllKeysHaveSameType returns true if all validators have the same type of +// public key or if the set is empty. +func (vals *ValidatorSet) AllKeysHaveSameType() bool { + return vals.allKeysHaveSameType +} + +// ----------------- // IsErrNotEnoughVotingPowerSigned returns true if err is // ErrNotEnoughVotingPowerSigned. @@ -715,7 +865,7 @@ func (e ErrNotEnoughVotingPowerSigned) Error() string { return fmt.Sprintf("invalid commit -- insufficient voting power: got %d, needed more than %d", e.Got, e.Needed) } -//---------------- +// ---------------- // String returns a string representation of ValidatorSet. // @@ -732,7 +882,7 @@ func (vals *ValidatorSet) StringIndented(indent string) string { return "nil-ValidatorSet" } var valStrings []string - vals.Iterate(func(index int, val *Validator) bool { + vals.Iterate(func(_ int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) @@ -747,7 +897,7 @@ func (vals *ValidatorSet) StringIndented(indent string) string { indent) } -//------------------------------------- +// ------------------------------------- // ValidatorsByVotingPower implements sort.Interface for []*Validator based on // the VotingPower and Address fields. @@ -780,7 +930,7 @@ func (valz ValidatorsByAddress) Swap(i, j int) { valz[i], valz[j] = valz[j], valz[i] } -// ToProto converts ValidatorSet to protobuf +// ToProto converts ValidatorSet to protobuf. func (vals *ValidatorSet) ToProto() (*cmtproto.ValidatorSet, error) { if vals.IsNilOrEmpty() { return &cmtproto.ValidatorSet{}, nil // validator set should never be nil @@ -812,7 +962,7 @@ func (vals *ValidatorSet) ToProto() (*cmtproto.ValidatorSet, error) { // ValidatorSetFromProto sets a protobuf ValidatorSet to the given pointer. // It returns an error if any of the validators from the set or the proposer -// is invalid +// is invalid. func ValidatorSetFromProto(vp *cmtproto.ValidatorSet) (*ValidatorSet, error) { if vp == nil { return nil, errors.New("nil validator set") // validator set should never be nil, bigger issues are at play if empty @@ -828,6 +978,7 @@ func ValidatorSetFromProto(vp *cmtproto.ValidatorSet) (*ValidatorSet, error) { valsProto[i] = v } vals.Validators = valsProto + vals.checkAllKeysHaveSameType() p, err := ValidatorFromProto(vp.GetProposer()) if err != nil { @@ -864,13 +1015,14 @@ func ValidatorSetFromExistingValidators(valz []*Validator) (*ValidatorSet, error vals := &ValidatorSet{ Validators: valz, } + vals.checkAllKeysHaveSameType() vals.Proposer = vals.findPreviousProposer() vals.updateTotalVotingPower() sort.Sort(ValidatorsByVotingPower(vals.Validators)) return vals, nil } -//---------------------------------------- +// ---------------------------------------- // RandValidatorSet returns a randomized validator set (size: +numValidators+), // where each validator has a voting power of +votingPower+. 
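Editor's note: `allKeysHaveSameType` is recomputed after every change set and when decoding from proto because batch signature verification is only possible when the whole set uses a single key algorithm. The gate below is purely illustrative; `couldBatchVerify` and its threshold parameter are hypothetical, and the real `shouldBatchVerify` helper is not shown in this diff.

```go
package gate

import "github.com/cometbft/cometbft/types"

// couldBatchVerify sketches a caller-side gate: batching is only sound when
// all validator keys share one algorithm, and only worthwhile above a
// minimum number of signatures.
func couldBatchVerify(vals *types.ValidatorSet, numSigs, batchVerifyThreshold int) bool {
	return vals.AllKeysHaveSameType() && numSigs >= batchVerifyThreshold
}
```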
diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 04cc26da669..c61251ed194 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -12,11 +12,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cometbft/cometbft/crypto/secp256k1" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmtmath "github.com/cometbft/cometbft/libs/math" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) func TestValidatorSetBasic(t *testing.T) { @@ -52,7 +53,7 @@ func TestValidatorSetBasic(t *testing.T) { }, vset.Hash()) // add val = randValidator(vset.TotalVotingPower()) - assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) + require.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) assert.True(t, vset.HasAddress(val.Address)) idx, _ = vset.GetByAddress(val.Address) @@ -67,20 +68,21 @@ func TestValidatorSetBasic(t *testing.T) { // update val = randValidator(vset.TotalVotingPower()) - assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) + require.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) val.VotingPower += 100 proposerPriority := val.ProposerPriority val.ProposerPriority = 0 - assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) + require.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) assert.Equal(t, proposerPriority, val.ProposerPriority) } -func TestValidatorSetValidateBasic(t *testing.T) { +func TestValidatorSet_ValidateBasic(t *testing.T) { val, _ := RandValidator(false, 1) badVal := &Validator{} + val2, _ := RandValidator(false, 1) testCases := []struct { vals ValidatorSet @@ -121,16 +123,24 @@ func TestValidatorSetValidateBasic(t *testing.T) { err: false, msg: "", }, + { + vals: ValidatorSet{ + Validators: []*Validator{val}, + Proposer: val2, + }, + err: true, + msg: ErrProposerNotInVals.Error(), + }, } for _, tc := range testCases { err := tc.vals.ValidateBasic() if tc.err { - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, tc.msg, err.Error()) } } else { - assert.NoError(t, err) + require.NoError(t, err) } } } @@ -150,6 +160,30 @@ func TestCopy(t *testing.T) { } } +func TestValidatorSet_ProposerPriorityHash(t *testing.T) { + vset := NewValidatorSet(nil) + assert.Equal(t, []byte(nil), vset.ProposerPriorityHash()) + + vset = randValidatorSet(3) + assert.NotNil(t, vset.ProposerPriorityHash()) + + // Marshaling and unmarshalling do not affect ProposerPriorityHash + bz, err := vset.ToProto() + assert.NoError(t, err) + vsetProto, err := ValidatorSetFromProto(bz) + assert.NoError(t, err) + assert.Equal(t, vset.ProposerPriorityHash(), vsetProto.ProposerPriorityHash()) + + // Copy does not affect ProposerPriorityHash + vsetCopy := vset.Copy() + assert.Equal(t, vset.ProposerPriorityHash(), vsetCopy.ProposerPriorityHash()) + + // Incrementing priorities changes ProposerPriorityHash() but not Hash() + vset.IncrementProposerPriority(1) + assert.Equal(t, vset.Hash(), vsetCopy.Hash()) + assert.NotEqual(t, vset.ProposerPriorityHash(), vsetCopy.ProposerPriorityHash()) +} + // Test that IncrementProposerPriority requires positive times. 
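Editor's note: the test above pins down the useful property of `ProposerPriorityHash`: it tracks proposer-priority rotation while `Hash()` stays stable, since `ProposerPriority` is excluded from `Validator.Bytes()`. A compressed sketch of the same observation, assuming the test-only `RandValidatorSet` helper:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/cometbft/cometbft/types"
)

func main() {
	// RandValidatorSet is the test-only helper used elsewhere in this diff.
	vset, _ := types.RandValidatorSet(3, 10)
	priorityHash := vset.ProposerPriorityHash()
	setHash := vset.Hash()

	// Rotating the proposer mutates only the priorities...
	vset.IncrementProposerPriority(1)

	// ...so ProposerPriorityHash changes while Hash() stays stable, because
	// ProposerPriority is excluded from Validator.Bytes().
	fmt.Println(bytes.Equal(priorityHash, vset.ProposerPriorityHash())) // false
	fmt.Println(bytes.Equal(setHash, vset.Hash()))                      // true
}
```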
func TestIncrementProposerPriorityPositiveTimes(t *testing.T) { vset := NewValidatorSet([]*Validator{ @@ -182,7 +216,7 @@ func BenchmarkValidatorSetCopy(b *testing.B) { } } -//------------------------------------------------------------------- +// ------------------------------------------------------------------- func TestProposerSelection1(t *testing.T) { vset := NewValidatorSet([]*Validator{ @@ -261,56 +295,60 @@ func TestProposerSelection2(t *testing.T) { valList = []*Validator{val0, val1, val2} propCount := make([]int, 3) vals = NewValidatorSet(valList) - N := 1 - for i := 0; i < 120*N; i++ { + n := 1 + for i := 0; i < 120*n; i++ { prop := vals.GetProposer() ii := prop.Address[19] propCount[ii]++ vals.IncrementProposerPriority(1) } - if propCount[0] != 40*N { + if propCount[0] != 40*n { t.Fatalf( "Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d", - 40*N, - 120*N, + 40*n, + 120*n, propCount[0], - 120*N, + 120*n, ) } - if propCount[1] != 50*N { + if propCount[1] != 50*n { t.Fatalf( "Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d", - 50*N, - 120*N, + 50*n, + 120*n, propCount[1], - 120*N, + 120*n, ) } - if propCount[2] != 30*N { + if propCount[2] != 30*n { t.Fatalf( "Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d", - 30*N, - 120*N, + 30*n, + 120*n, propCount[2], - 120*N, + 120*n, ) } } func TestProposerSelection3(t *testing.T) { - vset := NewValidatorSet([]*Validator{ + vals := []*Validator{ newValidator([]byte("avalidator_address12"), 1), newValidator([]byte("bvalidator_address12"), 1), newValidator([]byte("cvalidator_address12"), 1), newValidator([]byte("dvalidator_address12"), 1), - }) + } - proposerOrder := make([]*Validator, 4) for i := 0; i < 4; i++ { - // need to give all validators to have keys pk := ed25519.GenPrivKey().PubKey() - vset.Validators[i].PubKey = pk + vals[i].PubKey = pk + vals[i].Address = pk.Address() + } + sort.Sort(ValidatorsByAddress(vals)) + vset := NewValidatorSet(vals) + proposerOrder := make([]*Validator, 4) + for i := 0; i < 4; i++ { proposerOrder[i] = vset.GetProposer() vset.IncrementProposerPriority(1) } @@ -326,7 +364,7 @@ func TestProposerSelection3(t *testing.T) { got := vset.GetProposer().Address expected := proposerOrder[j%4].Address if !bytes.Equal(got, expected) { - t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) + t.Fatalf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j) } // serialize, deserialize, check proposer @@ -337,13 +375,11 @@ func TestProposerSelection3(t *testing.T) { if i != 0 { if !bytes.Equal(got, computed.Address) { t.Fatalf( - fmt.Sprintf( - "vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", - got, - computed.Address, - i, - j, - ), + "vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", + got, + computed.Address, + i, + j, ) } } @@ -403,7 +439,7 @@ func (vals *ValidatorSet) toBytes() []byte { return bz } -func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet { +func (*ValidatorSet) fromBytes(b []byte) *ValidatorSet { pbvs := new(cmtproto.ValidatorSet) err := pbvs.Unmarshal(b) if err != nil { @@ -419,7 +455,7 @@ func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet { return vs } -//------------------------------------------------------------------- +// ------------------------------------------------------------------- func 
TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower() @@ -695,7 +731,7 @@ func TestSafeSubClip(t *testing.T) { assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10)) } -//------------------------------------------------------------------- +// ------------------------------------------------------------------- func TestEmptySet(t *testing.T) { var valList []*Validator @@ -710,17 +746,17 @@ func TestEmptySet(t *testing.T) { v1 := newValidator([]byte("v1"), 100) v2 := newValidator([]byte("v2"), 100) valList = []*Validator{v1, v2} - assert.NoError(t, valSet.UpdateWithChangeSet(valList)) + require.NoError(t, valSet.UpdateWithChangeSet(valList)) verifyValidatorSet(t, valSet) // Delete all validators from set v1 = newValidator([]byte("v1"), 0) v2 = newValidator([]byte("v2"), 0) delList := []*Validator{v1, v2} - assert.Error(t, valSet.UpdateWithChangeSet(delList)) + require.Error(t, valSet.UpdateWithChangeSet(delList)) // Attempt delete from empty set - assert.Error(t, valSet.UpdateWithChangeSet(delList)) + require.Error(t, valSet.UpdateWithChangeSet(delList)) } func TestUpdatesForNewValidatorSet(t *testing.T) { @@ -791,8 +827,9 @@ func valSetTotalProposerPriority(valSet *ValidatorSet) int64 { } func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) { + t.Helper() // verify that the capacity and length of validators is the same - assert.Equal(t, len(valSet.Validators), cap(valSet.Validators)) + assert.Len(t, valSet.Validators, cap(valSet.Validators)) // verify that the set's total voting power has been updated tvp := valSet.totalVotingPower @@ -809,7 +846,7 @@ func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) { // verify that priorities are scaled dist := computeMaxMinPriorityDiff(valSet) - assert.True(t, dist <= PriorityWindowSizeFactor*tvp, + assert.LessOrEqual(t, dist, PriorityWindowSizeFactor*tvp, "expected priority distance < %d. 
Got %d", PriorityWindowSizeFactor*tvp, dist) } @@ -836,6 +873,7 @@ type valSetErrTestCase struct { } func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) { + t.Helper() // create a new set and apply updates, keeping copies for the checks valSet := createNewValidatorSet(tt.startVals) valSetCopy := valSet.Copy() @@ -844,7 +882,7 @@ func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) { err := valSet.UpdateWithChangeSet(valList) // for errors check the validator set has not been changed - assert.Error(t, err, "test %d", idx) + require.Error(t, err, "test %d", idx) assert.Equal(t, valSet, valSetCopy, "test %v", idx) // check the parameter list has not changed @@ -908,6 +946,49 @@ func TestValSetUpdatesDuplicateEntries(t *testing.T) { } } +func permuteValidatorSlice(vals []*Validator, start int, res [][]*Validator) [][]*Validator { + if start == len(vals)-1 { + // Make a copy of the current permutation and add it to the result + perm := make([]*Validator, len(vals)) + copy(perm, vals) + return append(res, perm) + } + + for i := start; i < len(vals); i++ { + // Swap the current element with the start + vals[start], vals[i] = vals[i], vals[start] + + // Recurse for the next element + res = permuteValidatorSlice(vals, start+1, res) + + // Backtrack (swap back) + vals[start], vals[i] = vals[i], vals[start] + } + return res +} + +func TestValSetTestOrderingPower(t *testing.T) { + sortedVals := []*Validator{ + {Address: []byte("validator10"), VotingPower: 20}, + {Address: []byte("validator12"), VotingPower: 20}, + {Address: []byte("validator13"), VotingPower: 15}, + {Address: []byte("validator44"), VotingPower: 12}, + {Address: []byte("validator32"), VotingPower: 10}, + {Address: []byte("validator16"), VotingPower: 5}, + {Address: []byte("validator17"), VotingPower: 5}, + } + allPerms := permuteValidatorSlice(sortedVals, 0, nil) + for _, vals := range allPerms { + t.Log("testing valset", "valset", vals) + valset := NewValidatorSet(vals) + for i, val := range valset.Validators { + sortedVal := sortedVals[i] + require.Equal(t, val.Address, sortedVal.Address) + require.Equal(t, val.VotingPower, sortedVal.VotingPower) + } + } +} + func TestValSetUpdatesOverflows(t *testing.T) { maxVP := MaxTotalVotingPower testCases := []valSetErrTestCase{ @@ -1010,7 +1091,7 @@ func TestValSetUpdatesBasicTestsExecute(t *testing.T) { valSet := createNewValidatorSet(tt.startVals) valList := createNewValidatorList(tt.updateVals) err := valSet.UpdateWithChangeSet(valList) - assert.NoError(t, err, "test %d", i) + require.NoError(t, err, "test %d", i) valListCopy := validatorListCopy(valSet.Validators) // check that the voting power in the set's validators is not changing if the voting power @@ -1019,7 +1100,6 @@ func TestValSetUpdatesBasicTestsExecute(t *testing.T) { if len(valList) > 0 { valList[0].VotingPower++ assert.Equal(t, toTestValList(valListCopy), toTestValList(valSet.Validators), "test %v", i) - } // check the final validator list is as expected and the set is properly scaled and centered. 
@@ -1063,7 +1143,7 @@ func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) { valSet := createNewValidatorSet(tt.startVals) valSetCopy := valSet.Copy() valList := createNewValidatorList(tt.updateVals) - assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList)) + require.NoError(t, valSetCopy.UpdateWithChangeSet(valList)) // save the result as expected for next updates valSetExp := valSetCopy.Copy() @@ -1077,19 +1157,19 @@ func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) { valList := createNewValidatorList(permutation(tt.updateVals)) // check there was no error and the set is properly scaled and centered. - assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList), + require.NoError(t, valSetCopy.UpdateWithChangeSet(valList), "test %v failed for permutation %v", i, valList) verifyValidatorSet(t, valSetCopy) // verify the resulting test is same as the expected - assert.Equal(t, valSetCopy, valSetExp, + assert.Equal(t, valSetExp, valSetCopy, "test %v failed for permutation %v", i, valList) } } } // This tests the private function validator_set.go:applyUpdates() function, used only for additions and changes. -// Should perform a proper merge of updatedVals and startVals +// Should perform a proper merge of updatedVals and startVals. func TestValSetApplyUpdatesTestsExecute(t *testing.T) { valSetUpdatesBasicTests := []struct { startVals []testVal @@ -1155,7 +1235,7 @@ func TestValSetApplyUpdatesTestsExecute(t *testing.T) { valSet.applyUpdates(valList) // check the new list of validators for proper merge - assert.Equal(t, toTestValList(valSet.Validators), tt.expectedVals, "test %v", i) + assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators), "test %v", i) } } @@ -1224,6 +1304,7 @@ func randTestVSetCfg(nBase, nAddMax int) testVSetCfg { } func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) { + t.Helper() changes := make([]testVal, 0) for _, valsList := range valsLists { changes = append(changes, valsList...) 
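The update semantics these tests rely on: a change-set entry with voting power 0 requests removal of that validator, a rejected update must leave the set untouched, and applying the same changes in any order yields the same resulting set. A minimal sketch of the first two rules, assuming only the exported types API (NewValidator, NewValidatorSet, UpdateWithChangeSet):

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/crypto/ed25519"
	"github.com/cometbft/cometbft/types"
)

func main() {
	pk1 := ed25519.GenPrivKey().PubKey()
	pk2 := ed25519.GenPrivKey().PubKey()
	vals := types.NewValidatorSet([]*types.Validator{
		types.NewValidator(pk1, 10),
		types.NewValidator(pk2, 20),
	})

	// Voting power 0 asks for removal of that validator.
	if err := vals.UpdateWithChangeSet([]*types.Validator{types.NewValidator(pk1, 0)}); err != nil {
		panic(err)
	}
	fmt.Println("validators left:", vals.Size()) // 1

	// Emptying the set is rejected; the set is left unchanged.
	err := vals.UpdateWithChangeSet([]*types.Validator{types.NewValidator(pk2, 0)})
	fmt.Println("empty-set update rejected:", err != nil) // true, as TestEmptySet asserts
}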
@@ -1233,7 +1314,7 @@ func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, vals if expErr != nil { assert.Equal(t, expErr, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } } @@ -1278,7 +1359,6 @@ func TestValSetUpdatePriorityOrderTests(t *testing.T) { } for _, cfg := range testCases { - // create a new validator set valSet := createNewValidatorSet(cfg.startVals) verifyValidatorSet(t, valSet) @@ -1289,6 +1369,7 @@ func TestValSetUpdatePriorityOrderTests(t *testing.T) { } func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int32) { + t.Helper() // Run election up to nMaxElections times, sort validators by priorities valSet.IncrementProposerPriority(cmtrand.Int31()%nMaxElections + 1) @@ -1332,7 +1413,7 @@ func TestNewValidatorSetFromExistingValidators(t *testing.T) { assert.NotEqual(t, valSet, newValSet) existingValSet, err := ValidatorSetFromExistingValidators(valSet.Validators) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, valSet, existingValSet) assert.Equal(t, valSet.CopyIncrementProposerPriority(3), existingValSet.CopyIncrementProposerPriority(3)) } @@ -1421,7 +1502,6 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { } for _, tt := range testCases { - tt := tt t.Run(tt.name, func(t *testing.T) { valSet := createNewValidatorSet(tt.startVals) verifyValidatorSet(t, valSet) @@ -1505,7 +1585,7 @@ func TestValidatorSetProtoBuf(t *testing.T) { } // --------------------- -// Sort validators by priority and address +// Sort validators by priority and address. type validatorsByPriority []*Validator func (valz validatorsByPriority) Len() int { @@ -1526,7 +1606,7 @@ func (valz validatorsByPriority) Swap(i, j int) { valz[i], valz[j] = valz[j], valz[i] } -//------------------------------------- +// ------------------------------------- type testValsByVotingPower []testVal @@ -1536,7 +1616,7 @@ func (tvals testValsByVotingPower) Len() int { func (tvals testValsByVotingPower) Less(i, j int) bool { if tvals[i].power == tvals[j].power { - return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1 + return strings.Compare(tvals[i].name, tvals[j].name) == -1 } return tvals[i].power > tvals[j].power } @@ -1546,7 +1626,7 @@ func (tvals testValsByVotingPower) Swap(i, j int) { } // ------------------------------------- -// Benchmark tests +// Benchmark tests. 
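The benchmark that follows uses a copy-then-mutate pattern: the base validator set is built once, and every iteration copies it so each UpdateWithChangeSet call starts from identical state. A stripped-down skeleton of that pattern (the set sizes and the added validator are illustrative; it assumes this package's RandValidatorSet helper and the ed25519 import):

func BenchmarkCopyThenUpdate(b *testing.B) {
	valSet, _ := RandValidatorSet(100, 100) // base set, built once
	newVal := NewValidator(ed25519.GenPrivKey().PubKey(), 50)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cp := valSet.Copy() // the copy is timed together with the update
		if err := cp.UpdateWithChangeSet([]*Validator{newVal}); err != nil {
			b.Fatal(err)
		}
	}
}

Benchmarks in this file run with the standard tooling, e.g. `go test -bench=. ./types`.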
func BenchmarkUpdates(b *testing.B) { const ( n = 100 @@ -1570,6 +1650,174 @@ func BenchmarkUpdates(b *testing.B) { for i := 0; i < b.N; i++ { // Add m validators to valSetCopy valSetCopy := valSet.Copy() - assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList)) + require.NoError(b, valSetCopy.UpdateWithChangeSet(newValList)) + } +} + +func TestVerifyCommitWithInvalidProposerKey(t *testing.T) { + vs := &ValidatorSet{ + Validators: []*Validator{{}, {}}, + } + commit := &Commit{ + Height: 100, + Signatures: []CommitSig{{}, {}}, + } + var bid BlockID + cid := "" + err := vs.VerifyCommit(cid, bid, 100, commit) + require.Error(t, err) +} + +func TestVerifyCommitSingleWithInvalidSignatures(t *testing.T) { + vs := &ValidatorSet{ + Validators: []*Validator{{}, {}}, + } + commit := &Commit{ + Height: 100, + Signatures: []CommitSig{{}, {}}, + } + cid := "" + votingPowerNeeded := vs.TotalVotingPower() * 2 / 3 + + // ignore all absent signatures + ignore := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagAbsent } + + // only count the signatures that are for the block + count := func(c CommitSig) bool { return c.BlockIDFlag == BlockIDFlagCommit } + + err := verifyCommitSingle(cid, vs, commit, votingPowerNeeded, ignore, count, true, true, nil) + require.Error(t, err) + + cache := NewSignatureCache() + err = verifyCommitSingle(cid, vs, commit, votingPowerNeeded, ignore, count, true, true, cache) + require.Error(t, err) + require.Equal(t, 0, cache.Len()) +} + +func TestValidatorSet_AllKeysHaveSameType(t *testing.T) { + testCases := []struct { + vals *ValidatorSet + sameType bool + }{ + { + vals: NewValidatorSet([]*Validator{}), + sameType: true, + }, + { + vals: randValidatorSet(1), + sameType: true, + }, + { + vals: randValidatorSet(2), + sameType: true, + }, + { + vals: NewValidatorSet([]*Validator{randValidator(100), NewValidator(secp256k1.GenPrivKey().PubKey(), 200)}), + sameType: false, + }, + } + + for i, tc := range testCases { + if tc.sameType { + assert.True(t, tc.vals.AllKeysHaveSameType(), "test %d", i) + } else { + assert.False(t, tc.vals.AllKeysHaveSameType(), "test %d", i) + } + } +} + +func TestValidatorSet_BlockingChain(t *testing.T) { + testCases := []struct { + tcName string + vals []testVal + blockingVals []string + }{ + { + "1 validator", + []testVal{{"v1", 1}}, + []string{"v1"}, + }, + { + "2 validators", + []testVal{{"v1", 1}, {"v2", 1}}, + []string{"v1", "v2"}, + }, + { + "3 validators", + []testVal{{"v1", 1}, {"v2", 1}, {"v3", 1}}, + []string{"v1", "v2", "v3"}, + }, + { + "4 validators", + []testVal{{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}}, + nil, + }, + { + "many validators", + []testVal{ + {"v01", 1}, + {"v02", 1}, + {"v03", 1}, + {"v04", 1}, + {"v05", 1}, + {"v06", 1}, + {"v07", 1}, + {"v08", 1}, + {"v09", 1}, + {"v10", 1}, + {"v11", 1}, + {"v12", 1}, + {"v13", 1}, + {"v14", 1}, + {"v15", 1}, + }, + nil, + }, + { + "2 validators, only 1 blocking", + []testVal{{"v1", 1}, {"v2", 3}}, + []string{"v2"}, + }, + { + "2 validators, only 1 blocking, high power, borderline 1 - v1 blocking", + []testVal{{"v1", 3333}, {"v2", 6666}}, + []string{"v1", "v2"}, + }, + { + "2 validators, only 1 blocking, high power, borderline 2 - v1 non-blocking", + []testVal{{"v1", 3332}, {"v2", 6666}}, + []string{"v2"}, + }, + { + "2 validators, only 1 blocking, high power, borderline 3 - v1 non-blocking", + []testVal{{"v1", 3332}, {"v2", 6665}}, + []string{"v2"}, + }, + { + "2 validators, only 1 blocking, high power, borderline 3 - v1 blocking", + []testVal{{"v1", 3332}, {"v2", 6664}}, 
+ []string{"v1", "v2"}, + }, + { + "2 validators, only 1 blocking, high power, borderline 4 - v1 blocking", + []testVal{{"v1", 3332}, {"v2", 6663}}, + []string{"v1", "v2"}, + }, + } + + for _, tt := range testCases { + t.Run(tt.tcName, func(t *testing.T) { + valSet := createNewValidatorSet(tt.vals) + valsMap := make(map[string]struct{}, len(tt.blockingVals)) + for _, addr := range tt.blockingVals { + _, ok := valsMap[addr] + require.False(t, ok) + valsMap[addr] = struct{}{} + } + for _, val := range tt.vals { + _, blocking := valsMap[val.name] + require.Equal(t, blocking, valSet.ValidatorBlocksTheChain([]byte(val.name)), "validator %s", val.name) + } + }) } } diff --git a/types/validator_test.go b/types/validator_test.go index 5eb2ed7bf1c..cb31d51037d 100644 --- a/types/validator_test.go +++ b/types/validator_test.go @@ -1,10 +1,13 @@ package types import ( + "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/crypto" ) func TestValidatorProtoBuf(t *testing.T) { @@ -38,6 +41,13 @@ func TestValidatorProtoBuf(t *testing.T) { } } +type unsupportedPubKey struct{} + +func (unsupportedPubKey) Address() crypto.Address { return nil } +func (unsupportedPubKey) Bytes() []byte { return nil } +func (unsupportedPubKey) VerifySignature([]byte, []byte) bool { return false } +func (unsupportedPubKey) Type() string { return "unsupportedPubKey" } + func TestValidatorValidateBasic(t *testing.T) { priv := NewMockPV() pubKey, _ := priv.GetPubKey() @@ -74,7 +84,7 @@ func TestValidatorValidateBasic(t *testing.T) { Address: nil, }, err: true, - msg: "validator address is the wrong size: ", + msg: fmt.Sprintf("validator address is incorrectly derived from pubkey. Exp: %v, got ", pubKey.Address()), }, { val: &Validator{ @@ -82,18 +92,44 @@ func TestValidatorValidateBasic(t *testing.T) { Address: []byte{'a'}, }, err: true, - msg: "validator address is the wrong size: 61", + msg: fmt.Sprintf("validator address is incorrectly derived from pubkey. Exp: %v, got 61", pubKey.Address()), + }, + { + val: &Validator{ + PubKey: unsupportedPubKey{}, + Address: unsupportedPubKey{}.Address(), + }, + err: true, + msg: ErrUnsupportedPubKeyType.Error(), }, } for _, tc := range testCases { err := tc.val.ValidateBasic() if tc.err { - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, tc.msg, err.Error()) } } else { - assert.NoError(t, err) + require.NoError(t, err) } } } + +// TestValidatorCopy tests if the Copy() method of a validator does +// a deep copy of all the fields. 
+func TestValidatorCopy(t *testing.T) { + priv := NewMockPV() + pubKey, _ := priv.GetPubKey() + val := &Validator{ + Address: pubKey.Address(), + PubKey: pubKey, + VotingPower: 10, + ProposerPriority: 1, + } + copyVal := val.Copy() + assert.Equal(t, val.Address.Bytes(), copyVal.Address.Bytes()) + assert.Equal(t, val.PubKey.Bytes(), copyVal.PubKey.Bytes()) + assert.Equal(t, val.VotingPower, copyVal.VotingPower) + assert.Equal(t, val.ProposerPriority, copyVal.ProposerPriority) +} diff --git a/types/vote.go b/types/vote.go index 0ec96eb2cfb..8ad9f7b5897 100644 --- a/types/vote.go +++ b/types/vote.go @@ -6,10 +6,11 @@ import ( "fmt" "time" + cmtcons "github.com/cometbft/cometbft/api/cometbft/consensus/v1" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" cmtbytes "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/libs/protoio" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) const ( @@ -24,6 +25,7 @@ var ( ErrVoteInvalidValidatorIndex = errors.New("invalid validator index") ErrVoteInvalidValidatorAddress = errors.New("invalid validator address") ErrVoteInvalidSignature = errors.New("invalid signature") + ErrVoteNoSignature = errors.New("no signature") ErrVoteInvalidBlockHash = errors.New("invalid block hash") ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature") ErrVoteNil = errors.New("nil vote") @@ -47,22 +49,32 @@ func NewConflictingVoteError(vote1, vote2 *Vote) *ErrVoteConflictingVotes { } } +// The vote extension is only valid for non-nil precommits. +type ErrVoteExtensionInvalid struct { + ExtSignature []byte + Reason string +} + +func (err *ErrVoteExtensionInvalid) Error() string { + return fmt.Sprintf("invalid vote extension: %s; extension signature: %X", err.Reason, err.ExtSignature) +} + // Address is hex bytes. type Address = crypto.Address // Vote represents a prevote, precommit, or commit vote from validators for // consensus. type Vote struct { - Type cmtproto.SignedMsgType `json:"type"` - Height int64 `json:"height"` - Round int32 `json:"round"` // assume there will not be greater than 2_147_483_647 rounds - BlockID BlockID `json:"block_id"` // zero if vote is nil. - Timestamp time.Time `json:"timestamp"` - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int32 `json:"validator_index"` - Signature []byte `json:"signature"` - Extension []byte `json:"extension"` - ExtensionSignature []byte `json:"extension_signature"` + Type SignedMsgType `json:"type"` + Height int64 `json:"height"` + Round int32 `json:"round"` // assume there will not be greater than 2_147_483_647 rounds + BlockID BlockID `json:"block_id"` // zero if vote is nil. + Timestamp time.Time `json:"timestamp"` + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int32 `json:"validator_index"` + Signature []byte `json:"signature"` + Extension []byte `json:"extension"` + ExtensionSignature []byte `json:"extension_signature"` } // VoteFromProto attempts to convert the given serialization (Protobuf) type to @@ -135,7 +147,7 @@ func (vote *Vote) ExtendedCommitSig() ExtendedCommitSig { // for backwards-compatibility with the Amino encoding, due to e.g. hardware // devices that rely on this encoding. // -// See CanonicalizeVote +// See CanonicalizeVote. func VoteSignBytes(chainID string, vote *cmtproto.Vote) []byte { pb := CanonicalizeVote(chainID, vote) bz, err := protoio.MarshalDelimited(&pb) @@ -177,7 +189,7 @@ func (vote *Vote) Copy() *Vote { // 7. 
first 6 bytes of block hash // 8. first 6 bytes of signature // 9. first 6 bytes of vote extension -// 10. timestamp +// 10. timestamp. func (vote *Vote) String() string { if vote == nil { return nilVoteStr @@ -185,9 +197,9 @@ func (vote *Vote) String() string { var typeString string switch vote.Type { - case cmtproto.PrevoteType: + case PrevoteType: typeString = "Prevote" - case cmtproto.PrecommitType: + case PrecommitType: typeString = "Precommit" default: panic("Unknown vote type") @@ -236,9 +248,9 @@ func (vote *Vote) VerifyVoteAndExtension(chainID string, pubKey crypto.PubKey) e return err } // We only verify vote extension signatures for non-nil precommits. - if vote.Type == cmtproto.PrecommitType && !ProtoBlockIDIsNil(&v.BlockID) { + if vote.Type == PrecommitType && !ProtoBlockIDIsNil(&v.BlockID) { if len(vote.ExtensionSignature) == 0 { - return errors.New("expected vote extension signature") + return ErrVoteNoSignature } extSignBytes := VoteExtensionSignBytes(chainID, v) @@ -252,11 +264,14 @@ func (vote *Vote) VerifyVoteAndExtension(chainID string, pubKey crypto.PubKey) e // VerifyExtension checks whether the vote extension signature corresponds to the // given chain ID and public key. func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey) error { - if vote.Type != cmtproto.PrecommitType || vote.BlockID.IsNil() { + if vote.Type != PrecommitType || vote.BlockID.IsNil() { return nil } v := vote.ToProto() extSignBytes := VoteExtensionSignBytes(chainID, v) + if len(vote.ExtensionSignature) == 0 { + return ErrVoteNoSignature + } if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { return ErrVoteInvalidSignature } @@ -282,7 +297,7 @@ func (vote *Vote) ValidateBasic() error { // NOTE: Timestamp validation is subtle and handled elsewhere. if err := vote.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } // BlockID.ValidateBasic would not err if we for instance have an empty hash but a @@ -311,7 +326,7 @@ func (vote *Vote) ValidateBasic() error { // We should only ever see vote extensions in non-nil precommits, otherwise // this is a violation of the specification. // https://github.com/tendermint/tendermint/issues/8487 - if vote.Type != cmtproto.PrecommitType || vote.BlockID.IsNil() { + if vote.Type != PrecommitType || vote.BlockID.IsNil() { if len(vote.Extension) > 0 { return fmt.Errorf( "unexpected vote extension; vote type %d, isNil %t", @@ -323,7 +338,7 @@ func (vote *Vote) ValidateBasic() error { } } - if vote.Type == cmtproto.PrecommitType && !vote.BlockID.IsNil() { + if vote.Type == PrecommitType && !vote.BlockID.IsNil() { // It's possible that this vote has vote extensions but // they could also be disabled and thus not present thus // we can't do all checks @@ -332,11 +347,11 @@ func (vote *Vote) ValidateBasic() error { } // NOTE: extended votes should have a signature regardless of - // of whether there is any data in the extension or not however + // whether there is any data in the extension or not however // we don't know if extensions are enabled so we can only // enforce the signature when extension size is not nil if len(vote.ExtensionSignature) == 0 && len(vote.Extension) != 0 { - return fmt.Errorf("vote extension signature absent on vote with extension") + return ErrVoteNoSignature } } @@ -347,7 +362,7 @@ func (vote *Vote) ValidateBasic() error { // on precommit vote types. 
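Taken together, ValidateBasic and the reworked SignAndCheckVote pin down when a vote extension signature may or must be present. A compact restatement as a sketch (the function name and shape are illustrative, not part of the API):

// validExtSig restates the extension-signature presence rules:
//   - a prevote or a nil-precommit must never carry an extension signature;
//   - a non-nil precommit must carry one when extensions are enabled.
// SignAndCheckVote additionally rejects extensionsEnabled combined with a
// non-precommit vote outright, as a non-recoverable caller error, before
// these checks apply.
func validExtSig(isPrecommit, isNilBlock, extensionsEnabled, hasExtSig bool) bool {
	if hasExtSig && (!isPrecommit || isNilBlock) {
		return false // signature where none is allowed
	}
	if extensionsEnabled && isPrecommit && !isNilBlock && !hasExtSig {
		return false // missing required signature
	}
	return true
}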
func (vote *Vote) EnsureExtension() error { // We should always see vote extension signatures in non-nil precommits - if vote.Type != cmtproto.PrecommitType { + if vote.Type != PrecommitType { return nil } if vote.BlockID.IsNil() { @@ -360,7 +375,7 @@ func (vote *Vote) EnsureExtension() error { } // ToProto converts the handwritten type to proto generated type -// return type, nil if everything converts safely, otherwise nil, error +// return type, nil if everything converts safely, otherwise nil, error. func (vote *Vote) ToProto() *cmtproto.Vote { if vote == nil { return nil @@ -396,6 +411,9 @@ func VotesToProto(votes []*Vote) []*cmtproto.Vote { return res } +// SignAndCheckVote signs the vote with the given privVal and checks the vote. +// It returns an error if the vote is invalid and a boolean indicating if the +// error is recoverable or not. func SignAndCheckVote( vote *Vote, privVal PrivValidator, @@ -403,35 +421,51 @@ func SignAndCheckVote( extensionsEnabled bool, ) (bool, error) { v := vote.ToProto() - if err := privVal.SignVote(chainID, v); err != nil { - // Failing to sign a vote has always been a recoverable error, this function keeps it that way - return true, err // true = recoverable + if err := privVal.SignVote(chainID, v, extensionsEnabled); err != nil { + // Failing to sign a vote has always been a recoverable error, this + // function keeps it that way. + return true, err } vote.Signature = v.Signature - isPrecommit := vote.Type == cmtproto.PrecommitType + isPrecommit := vote.Type == PrecommitType if !isPrecommit && extensionsEnabled { // Non-recoverable because the caller passed parameters that don't make sense - return false, fmt.Errorf("only Precommit votes may have extensions enabled; vote type: %d", vote.Type) + return false, &ErrVoteExtensionInvalid{ + Reason: "inconsistent values of `isPrecommit` and `extensionsEnabled`", + ExtSignature: v.ExtensionSignature, + } } isNil := vote.BlockID.IsNil() extSignature := (len(v.ExtensionSignature) > 0) - if extSignature == (!isPrecommit || isNil) { + + // Error if prevote contains an extension signature + if extSignature && (!isPrecommit || isNil) { // Non-recoverable because the vote is malformed - return false, fmt.Errorf( - "extensions must be present IFF vote is a non-nil Precommit; present %t, vote type %d, is nil %t", - extSignature, - vote.Type, - isNil, - ) + return false, &ErrVoteExtensionInvalid{ + Reason: "vote extension signature must not be present in prevotes or nil-precommits", + ExtSignature: v.ExtensionSignature, + } } vote.ExtensionSignature = nil if extensionsEnabled { + // Error if missing extension signature for non-nil Precommit + if !extSignature && isPrecommit && !isNil { + // Non-recoverable because the vote is malformed + return false, &ErrVoteExtensionInvalid{ + Reason: "vote extension signature must be present if extensions are enabled", + ExtSignature: v.ExtensionSignature, + } + } + vote.ExtensionSignature = v.ExtensionSignature } + vote.Timestamp = v.Timestamp return true, nil } + +var _ Wrapper = &cmtcons.Vote{} diff --git a/types/vote_set.go b/types/vote_set.go index 6b9c70efed5..efc7699a044 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -5,10 +5,9 @@ import ( "fmt" "strings" - "github.com/cometbft/cometbft/libs/bits" + "github.com/cometbft/cometbft/internal/bits" cmtjson "github.com/cometbft/cometbft/libs/json" cmtsync "github.com/cometbft/cometbft/libs/sync" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) const ( @@ -62,7 +61,7 @@ type VoteSet struct { chainID 
string height int64 round int32 - signedMsgType cmtproto.SignedMsgType + signedMsgType SignedMsgType valSet *ValidatorSet extensionsEnabled bool @@ -78,7 +77,8 @@ type VoteSet struct { // NewVoteSet instantiates all fields of a new vote set. This constructor requires // that no vote extension data be present on the votes that are added to the set. func NewVoteSet(chainID string, height int64, round int32, - signedMsgType cmtproto.SignedMsgType, valSet *ValidatorSet) *VoteSet { + signedMsgType SignedMsgType, valSet *ValidatorSet, +) *VoteSet { if height == 0 { panic("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -101,7 +101,8 @@ func NewVoteSet(chainID string, height int64, round int32, // The VoteSet constructed with NewExtendedVoteSet verifies the vote extension // data for every vote added to the set. func NewExtendedVoteSet(chainID string, height int64, round int32, - signedMsgType cmtproto.SignedMsgType, valSet *ValidatorSet) *VoteSet { + signedMsgType SignedMsgType, valSet *ValidatorSet, +) *VoteSet { vs := NewVoteSet(chainID, height, round, signedMsgType, valSet) vs.extensionsEnabled = true return vs @@ -111,7 +112,7 @@ func (voteSet *VoteSet) ChainID() string { return voteSet.chainID } -// Implements VoteSetReader. +// GetHeight implements VoteSetReader. func (voteSet *VoteSet) GetHeight() int64 { if voteSet == nil { return 0 @@ -119,7 +120,7 @@ func (voteSet *VoteSet) GetHeight() int64 { return voteSet.height } -// Implements VoteSetReader. +// GetRound implements VoteSetReader. func (voteSet *VoteSet) GetRound() int32 { if voteSet == nil { return -1 @@ -127,7 +128,7 @@ func (voteSet *VoteSet) GetRound() int32 { return voteSet.round } -// Implements VoteSetReader. +// Type implements VoteSetReader. func (voteSet *VoteSet) Type() byte { if voteSet == nil { return 0x00 @@ -135,7 +136,7 @@ func (voteSet *VoteSet) Type() byte { return byte(voteSet.signedMsgType) } -// Implements VoteSetReader. +// Size implements VoteSetReader. func (voteSet *VoteSet) Size() int { if voteSet == nil { return 0 @@ -143,7 +144,7 @@ func (voteSet *VoteSet) Size() int { return voteSet.valSet.Size() } -// Returns added=true if vote is valid and new. +// AddVote returns added=true if vote is valid and new. // Otherwise returns err=ErrVote[ // // UnexpectedStep | InvalidIndex | InvalidAddress | @@ -153,7 +154,7 @@ func (voteSet *VoteSet) Size() int { // Conflicting votes return added=*, err=ErrVoteConflictingVotes. // NOTE: vote should not be mutated after adding. // NOTE: VoteSet must not be nil -// NOTE: Vote must not be nil +// NOTE: Vote must not be nil. func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) { if voteSet == nil { panic("AddVote() on nil VoteSet") @@ -206,7 +207,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { } // If we already know of this vote, return false. - if existing, ok := voteSet.getVote(valIndex, blockKey); ok { + if existing, ok := voteSet.getVote(valIndex, blockKey, &vote.BlockID); ok { if bytes.Equal(existing.Signature, vote.Signature) { return false, nil // duplicate } @@ -241,9 +242,9 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return added, nil } -// Returns (vote, true) if vote exists for valIndex and blockKey. -func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok bool) { - if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { +// getVote returns (vote, true) if vote exists for valIndex and blockKey. 
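The two VoteSet constructors differ only in whether vote extension data is verified on every added vote. A short usage sketch, reusing this package's RandValidatorSet test helper (the chain ID and height/round values are illustrative):

func exampleVoteSets() (*VoteSet, *VoteSet) {
	valSet, _ := RandValidatorSet(4, 10)
	// Plain set: extension data on added votes is not verified.
	vs := NewVoteSet("test_chain_id", 1, 0, PrecommitType, valSet)
	// Extended set: extension signatures are verified for every added vote.
	evs := NewExtendedVoteSet("test_chain_id", 1, 0, PrecommitType, valSet)
	return vs, evs
}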
+func (voteSet *VoteSet) getVote(valIndex int32, blockKey string, blockID *BlockID) (vote *Vote, ok bool) { + if existing := voteSet.votes[valIndex]; existing != nil && blockID.Equals(existing.BlockID) { return existing, true } if existing := voteSet.votesByBlock[blockKey].getByIndex(valIndex); existing != nil { @@ -252,7 +253,7 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok return nil, false } -// Assumes signature is valid. +// addVerifiedVote assumes signature is valid. // If conflicting vote exists, returns it. func (voteSet *VoteSet) addVerifiedVote( vote *Vote, @@ -268,7 +269,7 @@ func (voteSet *VoteSet) addVerifiedVote( } conflicting = existing // Replace vote if blockKey matches voteSet.maj23. - if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey { + if voteSet.maj23 != nil && voteSet.maj23.Equals(vote.BlockID) { voteSet.votes[valIndex] = vote voteSet.votesBitArray.SetIndex(int(valIndex), true) } @@ -330,7 +331,7 @@ func (voteSet *VoteSet) addVerifiedVote( // NOTE: if there are too many peers, or too much peer churn, // this can cause memory issues. // TODO: implement ability to remove peers too -// NOTE: VoteSet must not be nil +// NOTE: VoteSet must not be nil. func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { if voteSet == nil { panic("SetPeerMaj23() on nil VoteSet") @@ -339,7 +340,6 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { defer voteSet.mtx.Unlock() blockKey := blockID.Key() - // Make sure peer hasn't already told us something. if existing, ok := voteSet.peerMaj23s[peerID]; ok { if existing.Equals(blockID) { @@ -366,7 +366,7 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { return nil } -// Implements VoteSetReader. +// BitArray implements VoteSetReader. func (voteSet *VoteSet) BitArray() *bits.BitArray { if voteSet == nil { return nil @@ -420,7 +420,7 @@ func (voteSet *VoteSet) GetByAddress(address []byte) *Vote { } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - valIndex, val := voteSet.valSet.GetByAddress(address) + valIndex, val := voteSet.valSet.GetByAddressMut(address) if val == nil { panic("GetByAddress(address) returned nil") } @@ -436,12 +436,12 @@ func (voteSet *VoteSet) HasTwoThirdsMajority() bool { return voteSet.maj23 != nil } -// Implements VoteSetReader. +// IsCommit implements VoteSetReader. func (voteSet *VoteSet) IsCommit() bool { if voteSet == nil { return false } - if voteSet.signedMsgType != cmtproto.PrecommitType { + if voteSet.signedMsgType != PrecommitType { return false } voteSet.mtx.Lock() @@ -481,7 +481,7 @@ func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) { return BlockID{}, false } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // Strings and JSON const nilVoteSetString = "nil-VoteSet" @@ -529,7 +529,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string { indent) } -// Marshal the VoteSet to JSON. Same as String(), just in JSON, +// MarshalJSON marshals the VoteSet to JSON. Same as String(), just in JSON, // and without the height/round/signedMsgType (since its already included in the votes). 
func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { voteSet.mtx.Lock() @@ -543,16 +543,16 @@ func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { // More human readable JSON of the vote set // NOTE: insufficient for unmarshalling from (compressed votes) -// TODO: make the peerMaj23s nicer to read (eg just the block hash) +// TODO: make the peerMaj23s nicer to read (eg just the block hash). type VoteSetJSON struct { Votes []string `json:"votes"` VotesBitArray string `json:"votes_bit_array"` PeerMaj23s map[P2PID]BlockID `json:"peer_maj_23s"` } -// Return the bit-array of votes including +// BitArrayString returns the bit-array of votes including // the fraction of power that has voted like: -// "BA{29:xx__x__x_x___x__x_______xxx__} 856/1304 = 0.66" +// "BA{29:xx__x__x_x___x__x_______xxx__} 856/1304 = 0.66". func (voteSet *VoteSet) BitArrayString() string { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() @@ -565,7 +565,7 @@ func (voteSet *VoteSet) bitArrayString() string { return fmt.Sprintf("%s %d/%d = %.2f", bAString, voted, total, fracVoted) } -// Returns a list of votes compressed to more readable strings. +// VoteStrings returns a list of votes compressed to more readable strings. func (voteSet *VoteSet) VoteStrings() []string { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() @@ -592,7 +592,7 @@ func (voteSet *VoteSet) voteStrings() []string { // 4. first 2/3+ majority // 5. fraction of voted power // 6. votes bit array -// 7. 2/3+ majority for each peer +// 7. 2/3+ majority for each peer. func (voteSet *VoteSet) StringShort() string { if voteSet == nil { return nilVoteSetString @@ -617,14 +617,14 @@ func (voteSet *VoteSet) LogString() string { return fmt.Sprintf("Votes:%d/%d(%.3f)", voted, total, frac) } -// return the power voted, the total, and the fraction -func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { - voted, total := voteSet.sum, voteSet.valSet.TotalVotingPower() - fracVoted := float64(voted) / float64(total) +// sumTotalFrac returns the power voted, the total, and the fraction. +func (voteSet *VoteSet) sumTotalFrac() (voted, total int64, fracVoted float64) { + voted, total = voteSet.sum, voteSet.valSet.TotalVotingPower() + fracVoted = float64(voted) / float64(total) return voted, total, fracVoted } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- // Commit // MakeExtendedCommit constructs a Commit from the VoteSet. It only includes @@ -632,11 +632,11 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { // // Panics if the vote type is not PrecommitType or if there's no +2/3 votes for // a single block. 
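With ABCIParams replaced by FeatureParams, callers of MakeExtendedCommit now build the parameter the way the updated tests do. A sketch under that assumption:

func exampleExtendedCommit(voteSet *VoteSet, height int64) *ExtendedCommit {
	fp := DefaultFeatureParams()
	fp.VoteExtensionsEnableHeight = height // extensions enforced from this height on
	// Panics unless the set holds +2/3 precommits for a single block.
	return voteSet.MakeExtendedCommit(fp)
}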
-func (voteSet *VoteSet) MakeExtendedCommit(ap ABCIParams) *ExtendedCommit { +func (voteSet *VoteSet) MakeExtendedCommit(fp FeatureParams) *ExtendedCommit { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - if voteSet.signedMsgType != cmtproto.PrecommitType { + if voteSet.signedMsgType != PrecommitType { panic("Cannot MakeExtendCommit() unless VoteSet.Type is PrecommitType") } @@ -663,20 +663,20 @@ func (voteSet *VoteSet) MakeExtendedCommit(ap ABCIParams) *ExtendedCommit { BlockID: *voteSet.maj23, ExtendedSignatures: sigs, } - if err := ec.EnsureExtensions(ap.VoteExtensionsEnabled(ec.Height)); err != nil { + if err := ec.EnsureExtensions(fp.VoteExtensionsEnabled(ec.Height)); err != nil { panic(fmt.Errorf("problem with vote extension data when making extended commit of height %d; %w", ec.Height, err)) } return ec } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- /* Votes for a particular block There are two ways a *blockVotes gets created for a blockKey. 1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) -2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) +2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true). */ type blockVotes struct { peerMaj23 bool // peer claims to have maj23 @@ -710,15 +710,15 @@ func (vs *blockVotes) getByIndex(index int32) *Vote { return vs.votes[index] } -//-------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------- -// Common interface between *consensus.VoteSet and types.Commit +// Common interface between *consensus.VoteSet and types.Commit. type VoteSetReader interface { GetHeight() int64 GetRound() int32 Type() byte Size() int BitArray() *bits.BitArray - GetByIndex(int32) *Vote + GetByIndex(idx int32) *Vote IsCommit() bool } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 1b1c1b5b57b..62925fb4959 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -8,14 +8,13 @@ import ( "github.com/stretchr/testify/require" "github.com/cometbft/cometbft/crypto" - cmtrand "github.com/cometbft/cometbft/libs/rand" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtrand "github.com/cometbft/cometbft/internal/rand" cmttime "github.com/cometbft/cometbft/types/time" ) func TestVoteSet_AddVote_Good(t *testing.T) { height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, cmtproto.PrevoteType, 10, 1, false) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1, false) val0 := privValidators[0] val0p, err := val0.GetPubKey() @@ -32,7 +31,7 @@ func TestVoteSet_AddVote_Good(t *testing.T) { ValidatorIndex: 0, // since privValidators are in order Height: height, Round: round, - Type: cmtproto.PrevoteType, + Type: PrevoteType, Timestamp: cmttime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } @@ -47,7 +46,7 @@ func TestVoteSet_AddVote_Good(t *testing.T) { func TestVoteSet_AddVote_Bad(t *testing.T) { height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, cmtproto.PrevoteType, 10, 1, false) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1, false) voteProto := &Vote{ ValidatorAddress: nil, @@ -55,7 +54,7 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { Height: height, Round: round, Timestamp: cmttime.Now(), - Type: 
cmtproto.PrevoteType, + Type: PrevoteType, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -113,24 +112,73 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 3) - added, err := signAddVote(privValidators[3], withType(vote, byte(cmtproto.PrecommitType)), voteSet) + added, err := signAddVote(privValidators[3], withType(vote, byte(PrecommitType)), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong type") } } +} + +func Benchmark_2_3_Maj(b *testing.B) { + height, round := int64(1), int32(0) + voteProto := &Vote{ + ValidatorAddress: nil, // NOTE: must fill in + ValidatorIndex: -1, // NOTE: must fill in + Height: height, + Round: round, + Type: PrevoteType, + Timestamp: cmttime.Now(), + BlockID: BlockID{nil, PartSetHeader{}}, + } + blockPartsTotal := uint32(123) + blockPartSetHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} + for i := 0; i < b.N; i++ { + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1, false) + for i := int32(0); i < int32(100); i += 4 { + pubKey, _ := privValidators[i].GetPubKey() + adrr := pubKey.Address() + vote := withValidator(voteProto, adrr, i) + _, err := signAddVote(privValidators[i], withBlockHash(vote, nil), voteSet) + require.NoError(b, err) + _, _ = voteSet.TwoThirdsMajority() + + pubKey, _ = privValidators[i+1].GetPubKey() + adrr = pubKey.Address() + vote = withValidator(voteProto, adrr, i+1) + _, err = signAddVote(privValidators[i+1], vote, voteSet) + require.NoError(b, err) + _, _ = voteSet.TwoThirdsMajority() + + pubKey, _ = privValidators[i+2].GetPubKey() + adrr = pubKey.Address() + vote = withValidator(voteProto, adrr, i+2) + blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} + _, err = signAddVote(privValidators[i+2], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + require.NoError(b, err) + _, _ = voteSet.TwoThirdsMajority() + + pubKey, _ = privValidators[i+3].GetPubKey() + adrr = pubKey.Address() + vote = withValidator(voteProto, adrr, i+3) + blockPartsHeader = PartSetHeader{blockPartsTotal + 1, blockPartSetHeader.Hash} + _, err = signAddVote(privValidators[i+3], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + require.NoError(b, err) + _, _ = voteSet.TwoThirdsMajority() + } + } } func TestVoteSet_2_3Majority(t *testing.T) { height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, cmtproto.PrevoteType, 10, 1, false) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1, false) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in ValidatorIndex: -1, // NOTE: must fill in Height: height, Round: round, - Type: cmtproto.PrevoteType, + Type: PrevoteType, Timestamp: cmttime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } @@ -173,7 +221,7 @@ func TestVoteSet_2_3Majority(t *testing.T) { func TestVoteSet_2_3MajorityRedux(t *testing.T) { height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, cmtproto.PrevoteType, 100, 1, false) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1, false) blockHash := crypto.CRandBytes(32) blockPartsTotal := uint32(123) @@ -185,7 +233,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { Height: height, Round: round, Timestamp: cmttime.Now(), - Type: cmtproto.PrevoteType, + Type: PrevoteType, BlockID: BlockID{blockHash, blockPartSetHeader}, } @@ -272,7 +320,7 @@ func TestVoteSet_2_3MajorityRedux(t 
*testing.T) { func TestVoteSet_Conflicts(t *testing.T) { height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, cmtproto.PrevoteType, 4, 1, false) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1, false) blockHash1 := cmtrand.Bytes(32) blockHash2 := cmtrand.Bytes(32) @@ -282,7 +330,7 @@ func TestVoteSet_Conflicts(t *testing.T) { Height: height, Round: round, Timestamp: cmttime.Now(), - Type: cmtproto.PrevoteType, + Type: PrevoteType, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -304,7 +352,7 @@ func TestVoteSet_Conflicts(t *testing.T) { vote := withValidator(voteProto, val0Addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) assert.False(t, added, "conflicting vote") - assert.Error(t, err, "conflicting vote") + require.Error(t, err, "conflicting vote") } // start tracking blockHash1 @@ -316,7 +364,7 @@ func TestVoteSet_Conflicts(t *testing.T) { vote := withValidator(voteProto, val0Addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) assert.True(t, added, "called SetPeerMaj23()") - assert.Error(t, err, "conflicting vote") + require.Error(t, err, "conflicting vote") } // attempt tracking blockHash2, should fail because already set for peerA. @@ -328,13 +376,13 @@ func TestVoteSet_Conflicts(t *testing.T) { vote := withValidator(voteProto, val0Addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet) assert.False(t, added, "duplicate SetPeerMaj23() from peerA") - assert.Error(t, err, "conflicting vote") + require.Error(t, err, "conflicting vote") } // val1 votes for blockHash1. { pv, err := privValidators[1].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 1) added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet) @@ -354,7 +402,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash2. { pv, err := privValidators[2].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet) @@ -378,12 +426,12 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash1. { pv, err := privValidators[2].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet) assert.True(t, added) - assert.Error(t, err, "conflicting vote") + require.Error(t, err, "conflicting vote") } // check @@ -397,7 +445,7 @@ func TestVoteSet_Conflicts(t *testing.T) { func TestVoteSet_MakeCommit(t *testing.T) { height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, cmtproto.PrecommitType, 10, 1, true) + voteSet, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1, true) blockHash, blockPartSetHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} voteProto := &Vote{ @@ -406,14 +454,14 @@ func TestVoteSet_MakeCommit(t *testing.T) { Height: height, Round: round, Timestamp: cmttime.Now(), - Type: cmtproto.PrecommitType, + Type: PrecommitType, BlockID: BlockID{blockHash, blockPartSetHeader}, } // 6 out of 10 voted for some block. 
for i := int32(0); i < 6; i++ { pv, err := privValidators[i].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, i) _, err = signAddVote(privValidators[i], vote, voteSet) @@ -423,13 +471,14 @@ func TestVoteSet_MakeCommit(t *testing.T) { } // MakeCommit should fail. - veHeightParam := ABCIParams{VoteExtensionsEnableHeight: height} + veHeightParam := DefaultFeatureParams() + veHeightParam.VoteExtensionsEnableHeight = height assert.Panics(t, func() { voteSet.MakeExtendedCommit(veHeightParam) }, "Doesn't have +2/3 majority") // 7th voted for some other block. { pv, err := privValidators[6].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 6) vote = withBlockHash(vote, cmtrand.Bytes(32)) @@ -442,7 +491,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // The 8th voted like everyone else. { pv, err := privValidators[7].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 7) _, err = signAddVote(privValidators[7], vote, voteSet) @@ -452,7 +501,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // The 9th voted for nil. { pv, err := privValidators[8].GetPubKey() - assert.NoError(t, err) + require.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 8) vote.BlockID = BlockID{} @@ -464,7 +513,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { extCommit := voteSet.MakeExtendedCommit(veHeightParam) // Commit should have 10 elements - assert.Equal(t, 10, len(extCommit.ExtendedSignatures)) + assert.Len(t, extCommit.ExtendedSignatures, 10) // Ensure that Commit is good. if err := extCommit.ValidateBasic(); err != nil { @@ -511,9 +560,9 @@ func TestVoteSet_VoteExtensionsEnabled(t *testing.T) { valSet, privValidators := RandValidatorSet(5, 10) var voteSet *VoteSet if tc.requireExtensions { - voteSet = NewExtendedVoteSet("test_chain_id", height, round, cmtproto.PrecommitType, valSet) + voteSet = NewExtendedVoteSet("test_chain_id", height, round, PrecommitType, valSet) } else { - voteSet = NewVoteSet("test_chain_id", height, round, cmtproto.PrecommitType, valSet) + voteSet = NewVoteSet("test_chain_id", height, round, PrecommitType, valSet) } val0 := privValidators[0] @@ -530,12 +579,12 @@ func TestVoteSet_VoteExtensionsEnabled(t *testing.T) { ValidatorIndex: 0, Height: height, Round: round, - Type: cmtproto.PrecommitType, + Type: PrecommitType, Timestamp: cmttime.Now(), BlockID: BlockID{blockHash, blockPartSetHeader}, } v := vote.ToProto() - err = val0.SignVote(voteSet.ChainID(), v) + err = val0.SignVote(voteSet.ChainID(), v, true) require.NoError(t, err) vote.Signature = v.Signature @@ -555,18 +604,18 @@ func TestVoteSet_VoteExtensionsEnabled(t *testing.T) { } } -// NOTE: privValidators are in order +// NOTE: privValidators are in order. 
func randVoteSet( height int64, round int32, - signedMsgType cmtproto.SignedMsgType, + signedMsgType SignedMsgType, numValidators int, votingPower int64, extEnabled bool, ) (*VoteSet, *ValidatorSet, []PrivValidator) { valSet, privValidators := RandValidatorSet(numValidators, votingPower) if extEnabled { - if signedMsgType != cmtproto.PrecommitType { + if signedMsgType != PrecommitType { return nil, nil, nil } return NewExtendedVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators @@ -574,7 +623,7 @@ func randVoteSet( return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators } -// Convenience: Return new vote with different validator address/index +// Convenience: Return new vote with different validator address/index. func withValidator(vote *Vote, addr []byte, idx int32) *Vote { vote = vote.Copy() vote.ValidatorAddress = addr @@ -582,35 +631,35 @@ func withValidator(vote *Vote, addr []byte, idx int32) *Vote { return vote } -// Convenience: Return new vote with different height +// Convenience: Return new vote with different height. func withHeight(vote *Vote, height int64) *Vote { vote = vote.Copy() vote.Height = height return vote } -// Convenience: Return new vote with different round +// Convenience: Return new vote with different round. func withRound(vote *Vote, round int32) *Vote { vote = vote.Copy() vote.Round = round return vote } -// Convenience: Return new vote with different type +// Convenience: Return new vote with different type. func withType(vote *Vote, signedMsgType byte) *Vote { vote = vote.Copy() - vote.Type = cmtproto.SignedMsgType(signedMsgType) + vote.Type = SignedMsgType(signedMsgType) return vote } -// Convenience: Return new vote with different blockHash +// Convenience: Return new vote with different blockHash. func withBlockHash(vote *Vote, blockHash []byte) *Vote { vote = vote.Copy() vote.BlockID.Hash = blockHash return vote } -// Convenience: Return new vote with different blockParts +// Convenience: Return new vote with different blockParts. 
func withBlockPartSetHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { vote = vote.Copy() vote.BlockID.PartSetHeader = blockPartsHeader diff --git a/types/vote_test.go b/types/vote_test.go index 9c7e8777f71..fa44e8bb437 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "testing" "time" @@ -8,32 +9,33 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/libs/protoio" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" cmttime "github.com/cometbft/cometbft/types/time" ) func examplePrevote() *Vote { - return exampleVote(byte(cmtproto.PrevoteType)) + return exampleVote(byte(PrevoteType)) } func examplePrecommit() *Vote { - vote := exampleVote(byte(cmtproto.PrecommitType)) + vote := exampleVote(byte(PrecommitType)) + vote.Extension = []byte("extension") vote.ExtensionSignature = []byte("signature") return vote } func exampleVote(t byte) *Vote { - var stamp, err = time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z") + stamp, err := time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z") if err != nil { panic(err) } return &Vote{ - Type: cmtproto.SignedMsgType(t), + Type: SignedMsgType(t), Height: 12345, Round: 2, Timestamp: stamp, @@ -61,7 +63,6 @@ func TestVoteSignable(t *testing.T) { } func TestVoteSignBytesTestVectors(t *testing.T) { - tests := []struct { chainID string vote *Vote @@ -74,7 +75,7 @@ func TestVoteSignBytesTestVectors(t *testing.T) { }, // with proper (fixed size) height and round (PreCommit): 1: { - "", &Vote{Height: 1, Round: 1, Type: cmtproto.PrecommitType}, + "", &Vote{Height: 1, Round: 1, Type: PrecommitType}, []byte{ 0x21, // length 0x8, // (field_number << 3) | wire_type @@ -85,11 +86,12 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round 0x2a, // (field_number << 3) | wire_type // remaining fields (timestamp): - 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, + }, }, // with proper (fixed size) height and round (PreVote): 2: { - "", &Vote{Height: 1, Round: 1, Type: cmtproto.PrevoteType}, + "", &Vote{Height: 1, Round: 1, Type: PrevoteType}, []byte{ 0x21, // length 0x8, // (field_number << 3) | wire_type @@ -100,7 +102,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round 0x2a, // (field_number << 3) | wire_type // remaining fields (timestamp): - 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, + }, }, 3: { "", &Vote{Height: 1, Round: 1}, @@ -112,7 +115,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round // remaining fields (timestamp): 0x2a, - 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1}, + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, + }, }, // containing non-empty chain_id: 4: { @@ -128,7 +132,8 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp // (field_number << 3) | wire_type 0x32, - 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID + 0xd, 
0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + }, // chainID }, // containing vote extension 5: { @@ -143,7 +148,7 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height 0x19, // (field_number << 3) | wire_type 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round - // remaning fields: + // remaining fields: 0x2a, // (field_number << 3) | wire_type 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp // (field_number << 3) | wire_type @@ -180,7 +185,7 @@ func TestVoteVerifySignature(t *testing.T) { signBytes := VoteSignBytes("test_chain_id", v) // sign it - err = privVal.SignVote("test_chain_id", v) + err = privVal.SignVote("test_chain_id", v, false) require.NoError(t, err) // verify the same vote @@ -246,12 +251,12 @@ func TestVoteExtension(t *testing.T) { Height: height, Round: round, Timestamp: cmttime.Now(), - Type: cmtproto.PrecommitType, + Type: PrecommitType, BlockID: makeBlockIDRandom(), } v := vote.ToProto() - err = privVal.SignVote("test_chain_id", v) + err = privVal.SignVote("test_chain_id", v, true) require.NoError(t, err) vote.Signature = v.Signature if tc.includeSignature { @@ -270,17 +275,16 @@ func TestVoteExtension(t *testing.T) { func TestIsVoteTypeValid(t *testing.T) { tc := []struct { name string - in cmtproto.SignedMsgType + in SignedMsgType out bool }{ - {"Prevote", cmtproto.PrevoteType, true}, - {"Precommit", cmtproto.PrecommitType, true}, - {"InvalidType", cmtproto.SignedMsgType(0x3), false}, + {"Prevote", PrevoteType, true}, + {"Precommit", PrecommitType, true}, + {"InvalidType", SignedMsgType(0x3), false}, } for _, tt := range tc { - tt := tt - t.Run(tt.name, func(st *testing.T) { + t.Run(tt.name, func(_ *testing.T) { if rs := IsVoteTypeValid(tt.in); rs != tt.out { t.Errorf("got unexpected Vote type. Expected:\n%v\nGot:\n%v", rs, tt.out) } @@ -297,19 +301,19 @@ func TestVoteVerify(t *testing.T) { vote.ValidatorAddress = pubkey.Address() err = vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey()) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, ErrVoteInvalidValidatorAddress, err) } err = vote.Verify("test_chain_id", pubkey) - if assert.Error(t, err) { + if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here assert.Equal(t, ErrVoteInvalidSignature, err) } } func TestVoteString(t *testing.T) { str := examplePrecommit().String() - expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests + expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 657874656E73 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests if str != expected { t.Errorf("got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str) } @@ -321,11 +325,12 @@ func TestVoteString(t *testing.T) { } } -func signVote(t *testing.T, pv PrivValidator, chainID string, vote *Vote) { +func signVote(t *testing.T, pv PrivValidator, vote *Vote) { t.Helper() + chainID := "test_chain_id" v := vote.ToProto() - require.NoError(t, pv.SignVote(chainID, v)) + require.NoError(t, pv.SignVote(chainID, v, true)) vote.Signature = v.Signature vote.ExtensionSignature = v.ExtensionSignature } @@ -338,12 +343,12 @@ func TestValidVotes(t *testing.T) { vote *Vote malleateVote func(*Vote) }{ - {"good prevote", examplePrevote(), func(v *Vote) {}}, + {"good prevote", examplePrevote(), func(_ *Vote) {}}, {"good precommit without vote extension", examplePrecommit(), func(v *Vote) { v.Extension = nil }}, {"good precommit with vote extension", examplePrecommit(), func(v *Vote) { v.Extension = []byte("extension") }}, } for _, tc := range testCases { - signVote(t, privVal, "test_chain_id", tc.vote) + signVote(t, privVal, tc.vote) tc.malleateVote(tc.vote) require.NoError(t, tc.vote.ValidateBasic(), "ValidateBasic for %s", tc.name) require.NoError(t, tc.vote.EnsureExtension(), "EnsureExtension for %s", tc.name) @@ -368,13 +373,13 @@ func TestInvalidVotes(t *testing.T) { } for _, tc := range testCases { prevote := examplePrevote() - signVote(t, privVal, "test_chain_id", prevote) + signVote(t, privVal, prevote) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s in invalid prevote", tc.name) require.NoError(t, prevote.EnsureExtension(), "EnsureExtension for %s in invalid prevote", tc.name) precommit := examplePrecommit() - signVote(t, privVal, "test_chain_id", precommit) + signVote(t, privVal, precommit) tc.malleateVote(precommit) require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s in invalid precommit", tc.name) require.NoError(t, precommit.EnsureExtension(), "EnsureExtension for %s in invalid precommit", tc.name) @@ -393,7 +398,7 @@ func TestInvalidPrevotes(t *testing.T) { } for _, tc := range testCases { prevote := examplePrevote() - signVote(t, privVal, "test_chain_id", prevote) + signVote(t, privVal, prevote) tc.malleateVote(prevote) require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s", tc.name) require.NoError(t, prevote.EnsureExtension(), "EnsureExtension for %s", tc.name) @@ -415,7 +420,7 @@ func TestInvalidPrecommitExtensions(t *testing.T) { } for _, tc := range testCases { precommit := examplePrecommit() - signVote(t, privVal, "test_chain_id", precommit) + signVote(t, privVal, precommit) tc.malleateVote(precommit) // ValidateBasic ensures that vote extensions, if present, are well formed require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) @@ -440,7 +445,7 @@ func TestEnsureVoteExtension(t *testing.T) { } for _, tc := range testCases { precommit := examplePrecommit() - signVote(t, privVal, "test_chain_id", precommit) + signVote(t, privVal, precommit) tc.malleateVote(precommit) if tc.expectError { require.Error(t, precommit.EnsureExtension(), "EnsureExtension for %s", tc.name) @@ -454,7 +459,7 @@ func TestVoteProtobuf(t *testing.T) { privVal := NewMockPV() vote := examplePrecommit() v := vote.ToProto() - err := privVal.SignVote("test_chain_id", v) + err := privVal.SignVote("test_chain_id", v, false) vote.Signature = v.Signature require.NoError(t, err) @@ -486,3 +491,110 @@ func TestVoteProtobuf(t *testing.T) { } } } + +func TestSignAndCheckVote(t *testing.T) { + privVal := NewMockPV() + + testCases := []struct { + name string + 
diff --git a/types/wrapper.go b/types/wrapper.go
new file mode 100644
index 00000000000..9d5b26a0411
--- /dev/null
+++ b/types/wrapper.go
@@ -0,0 +1,27 @@
+package types
+
+import (
+	"github.com/cosmos/gogoproto/proto"
+)
+
+// Unwrapper is a Protobuf message that can contain a variety of inner messages
+// (e.g. via oneof fields). If a Channel's message type implements Unwrapper, the
+// p2p layer will automatically unwrap inbound messages so that reactors do not have to do this themselves.
+type Unwrapper interface {
+	proto.Message
+
+	// Unwrap will unwrap the inner message contained in this message.
+	Unwrap() (proto.Message, error)
+}
+
+// Wrapper is a companion type to Unwrapper. It is a Protobuf message that can contain a variety of inner messages. The p2p layer will automatically wrap outbound messages so that the reactors do not have to do it themselves.
+type Wrapper interface {
+	proto.Message
+
+	// Wrap will take the underlying message and wrap it in its wrapper type.
+	//
+	// NOTE: The consumer should only use the result to marshal the message into
+	// the wire format. Dynamic casts to any of the declared wrapper types
+	// may not produce the expected result.
+	Wrap() proto.Message
+}
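To make the two interfaces concrete, here is a hedged sketch of how a oneof-style envelope message might satisfy them. Envelope, Envelope_PexRequest, PexRequest, and the package name are invented stand-ins for protoc-generated types, not names taken from this diff:

// Illustrative sketch only: a hypothetical generated envelope type with a
// oneof field named Sum, implementing both Unwrapper and Wrapper.
package p2pmessages

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
)

// Unwrap extracts the inner message from the envelope's oneof; with this in
// place the p2p layer can hand reactors the inner message directly.
func (m *Envelope) Unwrap() (proto.Message, error) {
	switch inner := m.Sum.(type) {
	case *Envelope_PexRequest:
		return inner.PexRequest, nil
	default:
		return nil, fmt.Errorf("unknown inner message: %T", inner)
	}
}

// Wrap places the inner message back into its envelope. Per the NOTE above,
// callers should only use the result to marshal to the wire format.
func (m *PexRequest) Wrap() proto.Message {
	return &Envelope{Sum: &Envelope_PexRequest{PexRequest: m}}
}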
diff --git a/version/version.go b/version/version.go
index 91b2d8bc693..cf20163cf2d 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,11 +1,11 @@
 package version
 
 const (
-	// TMVersionDefault is the used as the fallback version of CometBFT
-	// when not using git describe. It is formatted with semantic versioning.
-	TMCoreSemVer = "0.39.0-dev"
-	// ABCISemVer is the semantic version of the ABCI protocol
-	ABCISemVer = "2.0.0"
+	// CMTSemVer is used as the fallback version of CometBFT
+	// when not using git describe. It uses semantic versioning format.
+	CMTSemVer = "1.0.0-dev"
+	// ABCISemVer is the semantic version of the ABCI protocol.
+	ABCISemVer = "2.2.0"
 
 	ABCIVersion = ABCISemVer
 
 	// P2PProtocol versions all p2p behavior and msgs.
 	// This includes proposer selection.
@@ -16,6 +16,6 @@ const (
 	BlockProtocol uint64 = 11
 )
 
-// TMGitCommitHash uses git rev-parse HEAD to find commit hash which is helpful
-// for the engineering team when working with the cometbft binary. See Makefile
-var TMGitCommitHash = ""
+// CMTGitCommitHash uses git rev-parse HEAD to find commit hash which is helpful
+// for the engineering team when working with the cometbft binary. See Makefile.
+var CMTGitCommitHash = ""
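For orientation, a small sketch of how the renamed identifiers might be combined into a human-readable version string. The program and the joining format are illustrative assumptions, not taken from this diff; the ldflags path in the comment is likewise an assumption suggested by the "See Makefile" note:

// Illustrative only: compose a version string from the renamed identifiers.
package main

import (
	"fmt"

	"github.com/cometbft/cometbft/version"
)

func main() {
	v := version.CMTSemVer
	if version.CMTGitCommitHash != "" {
		// CMTGitCommitHash stays empty unless injected at build time,
		// e.g. via -ldflags "-X github.com/cometbft/cometbft/version.CMTGitCommitHash=$(git rev-parse HEAD)".
		v += "+" + version.CMTGitCommitHash
	}
	fmt.Println("CometBFT", v, "ABCI", version.ABCIVersion)
}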